content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
from re import T
# NOTE(review): `re.T` is the TEMPLATE regex flag, not a translator callable.
# In web2py/Sahana code, T() is the i18n translator supplied as a global;
# this import shadows it and would break the T("...") calls below — confirm.


def sms_outbound_gateway():
    """ SMS Outbound Gateway selection for the messaging framework """
    # CRUD Strings: UI labels/messages used by the generic REST controller
    # for the msg_sms_outbound_gateway table.
    s3.crud_strings["msg_sms_outbound_gateway"] = Storage(
        label_create = T("Create SMS Outbound Gateway"),
        title_display = T("SMS Outbound Gateway Details"),
        title_list = T("SMS Outbound Gateways"),
        title_update = T("Edit SMS Outbound Gateway"),
        label_list_button = T("List SMS Outbound Gateways"),
        label_delete_button = T("Delete SMS Outbound Gateway"),
        msg_record_created = T("SMS Outbound Gateway added"),
        msg_record_modified = T("SMS Outbound Gateway updated"),
        msg_record_deleted = T("SMS Outbound Gateway deleted"),
        msg_list_empty = T("No SMS Outbound Gateways currently registered"),
    )
    # Delegate request handling to the generic S3 REST controller.
    return s3_rest_controller()
bb0796dabbfe14b6a7e2a1d25960beae3d065717
3,644,600
def _insert_volume(_migration, volume_number, volume_obj): """Find or create the corresponding volume, and insert the attribute.""" volumes = _migration["volumes"] volume_obj = deepcopy(volume_obj) volume_obj["volume"] = volume_number volumes.append(volume_obj) return volume_obj
3a89024aa5b2bc9fc2bb16094a1a95ca6fd43f63
3,644,601
def create_vnet(credentials, subscription_id, **kwargs):
    """ Create (or reuse) a virtual network and subnet; return the subnet id.

    NOTE(review): the original summary said "Create a Batch account",
    which does not match what this function does.

    :param credentials: msrestazure.azure_active_directory.AdalAuthentication
    :param subscription_id: str
    :param **resource_group: str
    :param **virtual_network_name: str
    :param **subnet_name: str
    :param **region: str
    """
    network_client = NetworkManagementClient(credentials, subscription_id)
    resource_group_name = kwargs.get("resource_group", DefaultSettings.resource_group)
    virtual_network_name = kwargs.get("virtual_network_name", DefaultSettings.virtual_network_name)
    subnet_name = kwargs.get("subnet_name", DefaultSettings.subnet_name)
    # get vnet, and subnet if they exist
    virtual_network = subnet = None
    try:
        virtual_network = network_client.virtual_networks.get(
            resource_group_name=resource_group_name,
            virtual_network_name=virtual_network_name,
        )
    except CloudError as e:
        # Vnet not found (or other cloud error): fall through and create it.
        pass
    if virtual_network:
        # Ask the operator before reusing an existing vnet; the prompt helper
        # raises one of the two errors below on denial / bad input.
        confirmation_prompt = "A virtual network with the same name ({}) was found. \n"\
            "Please note that the existing address space and subnets may be changed or destroyed. \n"\
            "Do you want to use this virtual network? (y/n): ".format(virtual_network_name)
        deny_error = AccountSetupError("Virtual network already exists, not recreating.")
        unrecognized_input_error = AccountSetupError("Input not recognized.")
        prompt_for_confirmation(confirmation_prompt, deny_error, unrecognized_input_error)
    # create_or_update is idempotent: creates the vnet or updates it in place.
    virtual_network = network_client.virtual_networks.create_or_update(
        resource_group_name=resource_group_name,
        virtual_network_name=kwargs.get("virtual_network_name", DefaultSettings.virtual_network_name),
        parameters=VirtualNetwork(
            location=kwargs.get("region", DefaultSettings.region),
            address_space=AddressSpace(["10.0.0.0/24"])
        )
    )
    virtual_network = virtual_network.result()  # block until provisioning completes
    subnet = network_client.subnets.create_or_update(
        resource_group_name=resource_group_name,
        virtual_network_name=virtual_network_name,
        subnet_name=subnet_name,
        subnet_parameters=Subnet(
            address_prefix='10.0.0.0/24'
        )
    )
    return subnet.result().id
3a0a670c89f4a2c427a205039bf12b3af42e6b0a
3,644,602
def to_inorder_iterative(root: dict, allow_none_value: bool = False) -> list:
    """Flatten a dict-based binary tree to a depth-first in-order list,
    without recursion.

    Nodes are dicts with optional 'left', 'right' and 'value' keys.
    None values are skipped unless allow_none_value is True.
    """
    result = []
    pending = []
    current = root
    while current or pending:
        # Walk as far left as possible, remembering ancestors.
        while current:
            pending.append(current)
            current = current.get('left')
        current = pending.pop()
        value = current.get('value')
        if allow_none_value or value is not None:
            result.append(value)
        current = current.get('right')
    return result
90cf850f5e0fa91432bd2b9cde642e13fc0d8723
3,644,603
def symmetric_mean_absolute_percentage_error(a, b):
    """Calculate the symmetric Mean Absolute Percentage Error (sMAPE).

    Args:
        a (array-like): Actual values.
        b (array-like): Predicted values.

    Returns:
        float: sMAPE in percent.

    Note:
        If a pair of values is 0 in both `a` and `b`, the per-element
        denominator is 0 and the result is NaN (numpy emits a warning).
    """
    a = np.reshape(a, (-1,))
    b = np.reshape(b, (-1,))
    return 100.0 * np.mean(2.0 * np.abs(a - b) / (np.abs(a) + np.abs(b))).item()
90557e6f2a702aeea27b2a881b6ef3e35c0b7f46
3,644,605
def exec_quiet(handle, *args, **kwargs):
    """Call *handle* like exe.execute, but silently swallow any exception.

    Returns the call's result on success, otherwise None.
    """
    try:
        result = handle(*args, **kwargs)
    except Exception:
        # Deliberate best-effort semantics: no logging, no re-raise.
        return None
    return result
d0e922672c8a2d302bc2bfcb30bec91d32988945
3,644,606
def make_modified_function_def(original_type, name, original, target):
    """Make the modified function definition.

    :return: the definition for the modified function
    """
    # Textual signature for the generated function.
    arguments = format_method_arguments(name, original)
    argument_names = set(target.parameters)
    # Parameters the original accepts that the target does not support.
    unavailable_arguments = [p for p in original.parameters if p not in argument_names]
    derived_from = format_derived_from(original_type, unavailable_arguments, original)
    raise_error = format_raise_errors(original_type, name, unavailable_arguments, original)
    # NOTE(review): the template literal below was flattened by dataset
    # extraction; its internal whitespace is preserved exactly as found.
    return (""" {0} def {1}({2}):{3}""".format(derived_from, name, arguments, raise_error))
35263518c9edf9a710f66c6432ef9fb1a85df3fa
3,644,607
import pathlib
import multiprocessing
from functools import reduce


def fetch_tiles(server, tile_def_generator, output=pathlib.Path('.'), force=False):
    """ fetch and store tiles
    @param server server definition object (must provide "concurrency")
    @param tile_def_generator generator of tile definitions consisting of
        [x, y, z, bbox] tuples
    @param output output folder path
    @param force flag to force to overwrite
    """
    input_queue = multiprocessing.JoinableQueue()
    stop_event = multiprocessing.Event()
    # Shared dict: each worker records its own counters under its index.
    statistic = multiprocessing.Manager().dict()
    workers = []
    for i in range(server["concurrency"]):
        # fetch_tile_worker is defined elsewhere in this module.
        p = multiprocessing.Process(target=fetch_tile_worker,
                                    args=(i, input_queue, stop_event, server, output, force, statistic))
        workers.append(p)
        p.start()
    for [x, y, z, *bbox] in tile_def_generator:
        input_queue.put([x, y, z, bbox])
    # Wait until every queued tile has been processed, then stop workers.
    input_queue.join()
    stop_event.set()
    for w in workers:
        w.join()

    def collect_result(s1, s2):
        # Reduce step: sum per-worker counters; s1 is None on the first call.
        if s1:
            return {
                "counter_total": s1["counter_total"] + s2["counter_total"],
                "counter_attempt": s1["counter_attempt"] + s2["counter_attempt"],
                "counter_ok": s1["counter_ok"] + s2["counter_ok"]
            }
        else:
            return s2

    result = reduce(collect_result, statistic.values(), None)
    print ("Total: {}, Ok: {}, Failed: {}, Skipped: {}".format(
        result["counter_total"],
        result["counter_ok"],
        result["counter_attempt"] - result["counter_ok"],
        result["counter_total"] - result["counter_attempt"]))
6e42da71bff389b36485dc34222fa4a1b5d5abf3
3,644,608
def _UTMLetterDesignator(lat): """ This routine determines the correct UTM letter designator for the given latitude returns 'Z' if latitude is outside the UTM limits of 84N to 80S. Written by Chuck Gantz- chuck.gantz@globalstar.com """ if 84 >= lat >= 72: return 'X' elif 72 > lat >= 64: return 'W' elif 64 > lat >= 56: return 'V' elif 56 > lat >= 48: return 'U' elif 48 > lat >= 40: return 'T' elif 40 > lat >= 32: return 'S' elif 32 > lat >= 24: return 'R' elif 24 > lat >= 16: return 'Q' elif 16 > lat >= 8: return 'P' elif 8 > lat >= 0: return 'N' elif 0 > lat >= -8: return 'M' elif -8> lat >= -16: return 'L' elif -16 > lat >= -24: return 'K' elif -24 > lat >= -32: return 'J' elif -32 > lat >= -40: return 'H' elif -40 > lat >= -48: return 'G' elif -48 > lat >= -56: return 'F' elif -56 > lat >= -64: return 'E' elif -64 > lat >= -72: return 'D' elif -72 > lat >= -80: return 'C' else: return 'Z' # if the latitude is outside the UTM limits
3c5b9a54a9824d6755937aeaece8fa53483045fc
3,644,609
import re


def is_ignored(file: str) -> bool:
    """Check if the given file matches any configured ignore pattern.

    :param file: the file path to test
    :return: True if any regex under config input.ignored matches, else False
    """
    patterns = config.get('input').get('ignored')
    return any(re.compile(pattern).match(file) for pattern in patterns)
22b038b17435b2a2a9c79d24136de8f6322d8093
3,644,610
def enumerate_phone_column_index_from_row(row):
    """Find the index of the phone column in a row (regex-based detection).

    Parameters
    ----------
    row : list
        list of cell values from row

    Returns
    -------
    int
        index of the first cell that looks like a phone identifier
        ("[Pp]hone" header or a 10-digit number), or -1 if none match
    """
    for index, cell in enumerate(get_cell_values_from_row(row)):
        if is_value_phone_identifier(cell):
            return index
    return -1
bbc5f1abceb51d5a7385d235c7dae9ed07ffcc1f
3,644,612
def fetch_words(url):
    """Fetch a list of words from a URL.

    Args:
        url: The URL of a UTF-8 text document.

    Returns:
        A list of strings containing the words from the document.
    """
    story_words = []
    # Context manager guarantees the connection is closed even if reading
    # or decoding raises; the original leaked the handle on error.
    with urlopen(url) as story:
        for line in story:
            story_words.extend(line.decode('utf-8').split())
    return story_words
939bc5409b4ae824a3671e555d9ebdf8454c6358
3,644,613
from collections import defaultdict  # was missing: defaultdict is used below
from typing import Dict


def dot_keys_to_nested(data: Dict) -> Dict:
    """Convert dotted keys into one level of nesting:
    old['aaaa.bbbb'] -> d['aaaa']['bbbb']

    Only the first dot is split ('a.b.c' -> d['a']['b.c']), and keys
    without a dot are dropped from the result.

    Args:
        data (Dict): flat mapping with dotted keys

    Returns:
        Dict: mapping of prefix -> {suffix: value}
    """
    rules = defaultdict(dict)
    for key, val in data.items():
        if '.' in key:
            key, _, param = key.partition('.')
            rules[key][param] = val
    return rules
06bbf2b154c1cfb840d556c082bcb0b9282c44be
3,644,615
def vec2adjmat(source, target, weight=None, symmetric=True):
    """Convert source and target into adjacency matrix.

    Parameters
    ----------
    source : list
        The source node.
    target : list
        The target node.
    weight : list of int
        The Weights between the source-target values
    symmetric : bool, optional
        Make the adjacency matrix symmetric with the same number of rows as
        columns. The default is True.

    Returns
    -------
    pd.DataFrame
        adjacency matrix.

    Examples
    --------
    >>> source=['Cloudy','Cloudy','Sprinkler','Rain']
    >>> target=['Sprinkler','Rain','Wet_Grass','Wet_Grass']
    >>> vec2adjmat(source, target)
    >>>
    >>> weight=[1,2,1,3]
    >>> vec2adjmat(source, target, weight=weight)
    """
    if len(source)!=len(target): raise Exception('[hnet] >Source and Target should have equal elements.')
    if weight is None: weight = [1]*len(source)
    # Pair up the edge list; duplicate edges are summed by the crosstab below.
    df = pd.DataFrame(np.c_[source, target], columns=['source','target'])
    # Make adjacency matrix
    adjmat = pd.crosstab(df['source'], df['target'], values=weight, aggfunc='sum').fillna(0)
    # Get all unique nodes
    nodes = np.unique(list(adjmat.columns.values)+list(adjmat.index.values))
    # nodes = np.unique(np.c_[adjmat.columns.values, adjmat.index.values].flatten())

    # Make the adjacency matrix symmetric
    if symmetric:
        # Add missing columns
        node_columns = np.setdiff1d(nodes, adjmat.columns.values)
        for node in node_columns:
            adjmat[node]=0

        # Add missing rows (by transposing, adding as columns, transposing back)
        node_rows = np.setdiff1d(nodes, adjmat.index.values)
        adjmat=adjmat.T
        for node in node_rows:
            adjmat[node]=0
        adjmat=adjmat.T

        # Sort to make ordering of columns and rows similar
        # NOTE: ismember is a project-local helper (row index permutation).
        [IA, IB] = ismember(adjmat.columns.values, adjmat.index.values)
        adjmat = adjmat.iloc[IB,:]
        adjmat.index.name='source'
        adjmat.columns.name='target'

    return(adjmat)
fa5e8370557b8e1f37cbe2957ec16c614c4ad70d
3,644,616
def is_valid(number):
    """Check if the number provided is a valid PAN. This checks the length
    and formatting."""
    try:
        result = validate(number)
    except ValidationError:
        return False
    return bool(result)
2a2c99c29072e402cc046fa1eef8eefd2c4ee0af
3,644,617
def remove_spaces(string: str):
    """Removes all whitespaces from the given string.

    None is treated as an empty string; other inputs are coerced via str().
    """
    if string is None:
        return ""
    kept = [ch for ch in str(string) if ch not in WHITESPACES]
    return "".join(kept)
5dc928124c0a080f8e283450dc9754b4b301f047
3,644,618
def decode_event_to_internal(abi, log_event): """ Enforce the binary for internal usage. """ # Note: All addresses inside the event_data must be decoded. decoded_event = decode_event(abi, log_event) if not decoded_event: raise UnknownEventType() # copy the attribute dict because that data structure is immutable data = dict(decoded_event) args = dict(data['args']) data['args'] = args # translate from web3's to raiden's name convention data['block_number'] = log_event.pop('blockNumber') data['transaction_hash'] = log_event.pop('transactionHash') assert data['block_number'], 'The event must have the block_number' assert data['transaction_hash'], 'The event must have the transaction hash field' event = data['event'] if event == EVENT_TOKEN_NETWORK_CREATED: args['token_network_address'] = to_canonical_address(args['token_network_address']) args['token_address'] = to_canonical_address(args['token_address']) elif event == ChannelEvent.OPENED: args['participant1'] = to_canonical_address(args['participant1']) args['participant2'] = to_canonical_address(args['participant2']) elif event == ChannelEvent.DEPOSIT: args['participant'] = to_canonical_address(args['participant']) elif event == ChannelEvent.BALANCE_PROOF_UPDATED: args['closing_participant'] = to_canonical_address(args['closing_participant']) elif event == ChannelEvent.CLOSED: args['closing_participant'] = to_canonical_address(args['closing_participant']) elif event == ChannelEvent.UNLOCKED: args['participant'] = to_canonical_address(args['participant']) args['partner'] = to_canonical_address(args['partner']) return Event( originating_contract=to_canonical_address(log_event['address']), event_data=data, )
1005507ea129be88462b75685d5b8a993fd6f0cd
3,644,619
def run_ode_solver(system, slope_func, **options): """Computes a numerical solution to a differential equation. `system` must contain `init` with initial conditions, `t_0` with the start time, and `t_end` with the end time. It can contain any other parameters required by the slope function. `options` can be any legal options of `scipy.integrate.solve_ivp` system: System object slope_func: function that computes slopes returns: TimeFrame """ # make sure `system` contains `init` if not hasattr(system, 'init'): msg = """It looks like `system` does not contain `init` as a system variable. `init` should be a State object that specifies the initial condition:""" raise ValueError(msg) # make sure `system` contains `t_end` if not hasattr(system, 't_end'): msg = """It looks like `system` does not contain `t_end` as a system variable. `t_end` should be the final time:""" raise ValueError(msg) # make the system parameters available as globals unpack(system) # the default value for t_0 is 0 t_0 = getattr(system, 't_0', 0) # try running the slope function with the initial conditions # try: # slope_func(init, t_0, system) # except Exception as e: # msg = """Before running scipy.integrate.solve_ivp, I tried # running the slope function you provided with the # initial conditions in `system` and `t=t_0` and I got # the following error:""" # logger.error(msg) # raise(e) # wrap the slope function to reverse the arguments and add `system` f = lambda t, y: slope_func(y, t, system) def wrap_event(event): """Wrap the event functions. Make events terminal by default. """ wrapped = lambda t, y: event(y, t, system) wrapped.terminal = getattr(event, 'terminal', True) wrapped.direction = getattr(event, 'direction', 0) return wrapped # wrap the event functions so they take the right arguments events = options.pop('events', []) try: events = [wrap_event(event) for event in events] except TypeError: events = wrap_event(events) # remove dimensions from the initial conditions. 
# we need this because otherwise `init` gets copied into the # results array along with its units init_no_dim = [getattr(x, 'magnitude', x) for x in init] # if the user did not provide t_eval or events, return # equally spaced points if 't_eval' not in options: if not events: options['t_eval'] = linspace(t_0, t_end, 51) # run the solver with units_off(): bunch = solve_ivp(f, [t_0, t_end], init_no_dim, events=events, **options) # separate the results from the details y = bunch.pop('y') t = bunch.pop('t') details = ModSimSeries(bunch) # pack the results into a TimeFrame results = TimeFrame(np.transpose(y), index=t, columns=init.index) return results, details
ae2994aeaca5590d61921a25877807a1611841cd
3,644,620
def case_configuration(group_id, det_obj, edges):
    """ Get all the needed information of the detectors for the chosen edges,
    as well as only those trajectories that map onto one of the edges.

    Parameters
    ----------
    group_id : identifier passed through to td.get_lt — TODO confirm type
    det_obj : detector container exposing detector_selection() and a
        MultiIndexed `features` DataFrame (edge id at index level 0)
    edges : iterable of edge ids to keep

    Returns
    -------
    tuple of (detector selection, filtered features, trajectories GeoDataFrame)
    """
    ds = det_obj.detector_selection(edges)
    # Keep only edge ids that actually occur in the features index.
    id_ft_pan = list(set(det_obj.features.index.get_level_values(0)) & set(edges))
    id_ft_pan.sort()
    ds_ft = det_obj.features.loc[(id_ft_pan,)]
    # .loc drops DataFrame.attrs; restore the metadata explicitly.
    ds_ft.attrs = det_obj.features.attrs
    lt = td.get_lt(group_id=group_id, edges=edges, gdf=True)
    return ds, ds_ft, lt
8e6910943e705fa952992ef5e2551140e578e8ef
3,644,621
def _remarks(item: str) -> str: """Returns the remarks. Reserved for later parsing""" return item
d515837e52ee88edeb5bdb5e8f2d37ed28789362
3,644,622
from datetime import datetime def _date_to_datetime(value): """Convert a date to a datetime for datastore storage. Args: value: A datetime.date object. Returns: A datetime object with time set to 0:00. """ assert isinstance(value, datetime.date) return datetime.datetime(value.year, value.month, value.day)
872efa93fd256f38c2c82f979d0942114c6254b9
3,644,624
def get_posted_float(key):
    """
    Retrieve a named float value from a POSTed form

    :param key: Value key
    :return: Value or None if not specified
    """
    raw = request.form[key]
    if not raw:
        return None
    return float(raw)
1d88d4b0fc09f8e0d323bb8cc0aba83ac5fa9d9f
3,644,626
def te_ds(mass, norm_vel, x_ratios, source_distance, te_einstein, gamma, sigma_total, \
          xval, val):
    """Returns the probability of a sampled value T_E by weighting from the
    T_E probability distribution of the data.

    xval/val are the tabulated T_E distribution; te_einstein outside the
    tabulated range yields probability 0.
    """
    if min(xval) < te_einstein <= max(xval):
        # Characteristic angular frequency of the lens event.
        omegac = gamma*norm_vel*np.sqrt(x_ratios*(1.-x_ratios))*(mass)**(-1./2.)
        # Interpolate the data-derived probability of this T_E.
        pte = np.interp(te_einstein, xval, val)
        # print(pte)
        # print(probulge_ds(omegac,source_distance,mass,norm_vel,\
        #                   x_ratios,sigma_total,var = "unfixed"))
        # print(probdisk_ds(omegac,source_distance,mass,norm_vel,\
        #                   x_ratios,sigma_total,var = "unfixed"))
        # print(Big_phi_source(source_distance,SIGMA_SOURCE_T))
        # Bulge + disk lens-population densities from the galaxy model.
        prob = galfunc.probulge_ds(omegac, source_distance, mass, norm_vel, x_ratios, \
                                   sigma_total, var="unfixed")+\
               galfunc.probdisk_ds(omegac, source_distance, mass, norm_vel, x_ratios, \
                                   sigma_total, var="unfixed")
        # Source density weighted by the tabulated T_E probability.
        prob2 = galfunc.big_phi_source(source_distance, SIGMA_SOURCE_T)*pte
        # print('interal' , prob)
        # print('interal2' , prob2)
        return prob*prob2
    else:
        return 0.
4322a1266610f047e2efef023d8f0cccf80e697a
3,644,627
def roulette(fitness_values, return_size, elite=0):
    """
    Perform a roulette wheel selection.

    BUGFIX: the original used Python-2-only `xrange`, a NameError on Python 3.

    Args:
        fitness_values: array-like of (float) fitness scores.
        return_size: number of indices to sample.
        elite: number of top-fitness indices to return separately.

    Returns:
        (sampled, elites): sampled item indices (with replacement, probability
        proportional to fitness) and the `elite` best indices.
    """
    sorted_indices = np.argsort(fitness_values)
    c_sorted = np.sort(fitness_values).cumsum()
    c_sorted /= np.max(c_sorted)
    # np.sum(rand > c_sorted) counts how many cumulative bins the draw
    # exceeds, i.e. picks an index with fitness-proportional probability.
    sampled = [sorted_indices[np.sum(np.random.rand() > c_sorted)] for _ in
               range(return_size)]
    elites = sorted_indices[::-1][:elite].tolist()
    return sampled, elites
6ac06a28a6ce22ef8828597985332913aa0987e2
3,644,628
def create_sample_tree():
    """Build and return this fixed sample tree:

        1
       / \
      2   3
         / \
        4   5
    """
    right_subtree = TreeNode(3)
    right_subtree.left = TreeNode(4)
    right_subtree.right = TreeNode(5)
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = right_subtree
    return root
7e5c252cf66df3fed236e0891185b545e0c9c861
3,644,631
import copy


def sp_normalize(adj_def, device='cpu'):
    """ Symmetrically normalize an adjacency matrix with self-loops:
    D^{-1/2} (A + I) D^{-1/2}.

    :param adj_def: scipy.sparse-compatible adjacency matrix
    :param device: default as cpu
        NOTE(review): currently unused in this code path — confirm.
    :return: normalized_adj (converted via coo_to_csp) and the degree
        vector `rowsum`.  Zero-degree rows produce inf in the scaling —
        presumably inputs always have self-loops covering that; verify.
    """
    adj_ = sp.coo_matrix(adj_def)
    # Add self-loops (A + I).
    adj_ = adj_ + sp.coo_matrix(sp.eye(adj_def.shape[0]), dtype=np.float32)
    rowsum = np.array(adj_.sum(axis=1)).reshape(-1)
    # D^{-1/2} entries.
    norm_unit = np.float_power(rowsum, -0.5).astype(np.float32)
    degree_mat_inv_sqrt = sp.diags(norm_unit)
    degree_mat_sqrt = copy.copy(degree_mat_inv_sqrt)
    # degree_mat_sqrt = degree_mat_inv_sqrt.to_dense()
    support = adj_.__matmul__(degree_mat_sqrt)
    # support = coo_to_csp(support.tocoo())
    # degree_mat_inv_sqrt = coo_to_csp(degree_mat_inv_sqrt.tocoo())
    adj_normalized = degree_mat_inv_sqrt.__matmul__(support)
    # coo_to_csp is a project-local converter (scipy COO -> sparse tensor).
    adj_normalized = coo_to_csp(adj_normalized.tocoo())
    return adj_normalized, rowsum
    # coo_adj = sp.coo_matrix(adj_normalized.to('cpu').numpy())
    # return coo_to_csp(coo_adj).to(device), rowsum
a4ef96d439c27047def02234c3308e45dc934067
3,644,632
def isTileEvent(x: int, y: int):
    """
    checks if a given tile is an event or not
    quicker than generateTileAt

    x: the x value of the target tile
    y: the y value of the target tile
    """
    noise = getPerlin(x, y, s=2.501)
    # Rare noise buckets mark events; the tile type must also match.
    if Math.floor(noise * 3400) == 421 and deriveTile(x, y) == 'H':
        return True
    if Math.floor(noise * 9000) == 4203 and deriveTile(x, y) == 'C':
        return True
    return False
34efeef5e05830faef886f69fb95287817ced1c6
3,644,633
def to_onehot_sym(ind, dim):
    """Return a matrix with one hot encoding of each element in ind."""
    assert ind.ndim == 1
    encoded = theano.tensor.extra_ops.to_one_hot(ind, dim)
    return encoded
db4871b3108aef0cbf2b2a63497dd30c22e2805e
3,644,634
def generate_potential_grasp(object_cloud): """ The object_cloud needs to be in table coordinates. """ # https://www.cs.princeton.edu/~funk/tog02.pdf picking points in triangle nrmls = object_cloud.normals.copy() # if object_cloud.points[:,2].max()<0.11: # nrmls[nrmls[:,2]>0] *= -1 # direction_bias = np.max( np.vstack( [ nrmls @ np.array([0,0,-1]), np.zeros(nrmls.shape[0])] ), axis=0 ) # else: # direction_bias = np.ones(nrmls.shape) area_bias = object_cloud.facet_areas/np.sum(object_cloud.facet_areas) probability = area_bias probability /= np.sum(probability) sample = np.random.choice(np.arange(object_cloud.hull.simplices.shape[0]), p=probability) simplex = object_cloud.hull.simplices[sample] r1,r2 = np.random.uniform(0,1,2) sqrt_r1 = r1**0.5 A,B,C = object_cloud.points[simplex] point = (1-sqrt_r1)*A + sqrt_r1*(1-r2)*B + sqrt_r1*r2*C direction = nrmls[sample] # this is pointing inwards distance = np.random.uniform(0.01, 0.15) # in cm p = point - direction*distance if p[2] < 0.07: n = (point[2] - 0.07)/distance - direction[2] direction[2] = direction[2]+n direction = direction/np.linalg.norm(direction) p = point - direction*distance y_axis = np.random.uniform(-1,1,3) y_axis = y_axis - (y_axis@direction)*direction y_axis /= np.linalg.norm(y_axis) x_axis = np.cross(y_axis, direction) x_axis /= np.linalg.norm(x_axis) R = np.zeros((3,3)) R[:,0] = x_axis R[:,1] = y_axis R[:,2] = direction return R, p[...,np.newaxis]
169552dfea38c9f4a15a67f23ca40cda2e824769
3,644,635
import torch
import time


def ACP_O(model, Xtrain, Ytrain, Xtest, labels = [0,1], out_file = None, seed = 42,
          damp = 10**-3, batches = 1):
    """Runs ACP (ordinary) CP-IF to make a prediction for all points in Xtest.

    Trains *model* once on the full training set, then approximates
    leave-one-in conformity scores with influence functions (gradient /
    damped-inverse-Hessian products) instead of retraining per test point.

    NOTE(review): `labels` is a mutable default argument (shared across
    calls) — harmless here since it is only iterated, but worth fixing.
    Relies on module-level `device`, `Tensor`, `np`, and `log_to_file`.

    Returns (pvals, prediction_times): one {label: p-value} dict and one
    wall-clock duration per test point.
    """
    N = len(Xtrain)

    # Train model on D.
    model_D = model
    model_D = model_D.to(device)
    model_D.fit(Xtrain, Ytrain, seed = seed)
    torch.cuda.empty_cache()

    # Estimate influence: cache per-sample loss gradients (on CPU).
    gradients = []
    for x, y in zip(Xtrain, Ytrain):
        gradients.append(model_D.grad_z(x, y, flatten = True).cpu().detach())
        torch.cuda.empty_cache()
    gradients.append(None)  # for the test point

    # Accumulate the (damped) Hessian over training batches.
    H_D = torch.zeros(model_D.count_params(), model_D.count_params()).to(device)
    Xtrain_splitted = np.array_split(Xtrain, batches)
    Ytrain_splitted = np.array_split(Ytrain, batches)
    for batch_X, batch_Y in zip(Xtrain_splitted, Ytrain_splitted):
        H_D += model_D.hessian_all_points(batch_X, batch_Y)
    H_D = H_D/batches
    H_D += torch.diag(Tensor([damp]*len(H_D))).to(device)  # damping for invertibility
    torch.cuda.empty_cache()
    H_inv = torch.inverse(H_D)
    del H_D

    # Preliminary scores: per-sample training losses (test slot appended).
    losses = [np.float64(model_D.compute_loss(x, y).cpu().detach()) for x, y in zip(Xtrain, Ytrain)]
    losses.append(None)

    pvals = []
    prediction_times = []
    for k, xtest in enumerate(Xtest):
        print("TEST: " +str(k+1))
        pvals_xtest = {}
        scores = {}
        start = time.perf_counter()
        for yhat in labels:
            # Extended dataset: training points plus (xtest, yhat).
            Xtmp = np.row_stack((Xtrain, [xtest]))
            Ytmp = np.concatenate((Ytrain, [yhat]))
            alphas = np.zeros(len(Xtmp))

            # Obtain gradient on test point
            g_test = model_D.grad_z(Xtmp[-1,:], Ytmp[-1], flatten = True)
            # Obtain loss on test point
            loss_test = np.float64(model_D.compute_loss(Xtmp[-1,:], Ytmp[-1]).cpu().detach())
            gradients[-1] = g_test
            losses[-1] = loss_test

            # Conformity score: loss plus first-order influence correction.
            for j, (x,y) in enumerate(zip(Xtmp, Ytmp)):
                gradient = gradients[j].to(device)
                # Compute influence
                est = - gradient.T@H_inv@g_test/N
                alphas[j] = losses[j] + np.array(est.cpu().detach())
            torch.cuda.empty_cache()

            # Conformal p-value: fraction of scores at least as large as
            # the test point's own score.
            pval = sum(alphas >= alphas[-1])/(N+1)
            print(pval)
            pvals_xtest[yhat], scores[yhat] = pval, list(alphas)
        prediction_times.append(time.perf_counter() - start)
        pvals.append(pvals_xtest)
        if out_file:
            log_to_file(out_file, {"N": len(Xtrain),
                                   "prediction-times": prediction_times[-1],
                                   "p-values": pvals_xtest })
    return pvals, prediction_times
6205e62c153fac5fe58529882d232bba4a83ce8a
3,644,636
import math


def func(x):
    """ sinc (x) """
    # sin(x)/x has a removable singularity at 0 with limit 1.
    return 1.0 if x == 0 else math.sin(x) / x
c91242e360547107f7767e442f40f4bf3f2b53e8
3,644,637
def grad_norm(model=None, parameters=None):
    """Compute the L2 norm over all parameter gradients.

    Either an explicit parameter list or a model must be given; when
    `parameters` is provided, `model` is ignored (matching the original).
    """
    assert parameters is not None or model is not None
    if parameters is None:
        parameters = []
        if model is not None:
            parameters.extend(model.parameters())
    squared = [p.grad.detach().data.norm(2).item() ** 2
               for p in parameters
               if p.grad is not None and p.requires_grad]
    return sum(squared) ** 0.5
ff471715a72f0d2afbafa60d19eb802a748a2419
3,644,638
import copy


def db_entry_trim_empty_fields(entry):
    """
    Remove empty fields from an internal-format entry dict.

    Only 'url', 'title' and 'extended' are considered; a field is dropped
    when it is None or an empty string.  The input dict is not modified.
    """
    entry_trim = copy.deepcopy(entry)  # never mutate the caller's entry
    for field in ('url', 'title', 'extended'):
        if field not in entry:
            continue
        value = entry[field]
        # isinstance replaces the non-idiomatic `type(...) is str` check.
        if value is None or (isinstance(value, str) and len(value) == 0):
            del entry_trim[field]
    return entry_trim
d5b31c823f4e8091872f64445ab603bcbf6a2bef
3,644,639
def loadconfig(PATH):
    """Load Latte's repo configuration from the PATH.

    A dictionary of the config data is returned, otherwise None."""
    # BUGFIX: the original opened the file without ever closing it; the
    # with-statement guarantees the handle is released.
    try:
        with open(PATH, "r") as f:
            contents = f.read()
    except FileNotFoundError:
        return None
    confobj = SWConfig(contents)
    # Defensive check kept from the original (a constructor normally
    # cannot return None).
    if confobj is None:
        return None
    return confobj.data
cd912b1e9d7fddbecf72165b42d1eb8b47687838
3,644,640
import logging


def _call_token_server(method, request):
    """Sends an RPC to tokenserver.minter.TokenMinter service.

    Args:
        method: name of the method to call.
        request: dict with request fields.

    Returns:
        Dict with response fields.

    Raises:
        auth.AuthorizationError on HTTP 403 reply.
        InternalError if the RPC fails unexpectedly.
    """
    # Double check token server URL looks sane ('https://....'). This is checked
    # when it's imported from the config. This check should never fail.
    ts_url = auth.get_request_auth_db().token_server_url
    try:
        utils.validate_root_service_url(ts_url)
    except ValueError as exc:
        raise InternalError('Invalid token server URL %s: %s' % (ts_url, exc))

    # See TokenMinter in
    # https://chromium.googlesource.com/infra/luci/luci-go/+/master/tokenserver/api/minter/v1/token_minter.proto
    # But beware that proto JSON serialization uses camelCase, not snake_case.
    try:
        return net.json_request(
            url='%s/prpc/tokenserver.minter.TokenMinter/%s' % (ts_url, method),
            method='POST',
            payload=request,
            headers={'Accept': 'application/json; charset=utf-8'},
            scopes=[net.EMAIL_SCOPE])
    except net.Error as exc:
        logging.error(
            'Error calling %s (HTTP %s: %s):\n%s',
            method, exc.status_code, exc.message, exc.response)
        if exc.status_code == 403:
            raise auth.AuthorizationError(exc.response)
        # BUGFIX: the message previously hard-coded "MintOAuthTokenGrant"
        # even when a different method failed.
        raise InternalError('Failed to call %s, see server logs' % method)
a81a6b0f8a42517bb2b7e6fafc762f2620c18dbb
3,644,641
import requests


def get_keywords(text):
    """Get keywords that relate to this article (from NLP service).

    Args:
        text (string): text to extract keywords from

    Returns:
        list: list of extracted keyword lemmas

    Raises:
        Exception: when the NLP service reports an error, or
        requests.HTTPError on a non-2xx response.
    """
    response = requests.post(env.get_keywords_endpoint(), json={'text': text})
    response.raise_for_status()
    payload = response.json()
    if 'error' in payload:
        raise Exception(payload['error']['message'])
    return [token["lemma"] for token in payload["tokens"]]
56caaa6af416c425eb54ef9e85460cd5921e9d74
3,644,642
def weather_config() -> str:
    """Return the weather API key from the application configuration.

    Loads the parsed configuration via config_handle() and extracts the
    key stored under ["api_keys"]["weather_key"]."""
    settings = config_handle()
    return settings["api_keys"]["weather_key"]
6eb35a876eb73160d39417a213195b39e68ea568
3,644,643
import warnings


def interpret_bit_flags(bit_flags, flip_bits=None, flag_name_map=None):
    """
    Converts input bit flags to a single integer value (bit mask) or `None`.

    When input is a list of flags (either a Python list of integer flags or a
    string of comma-, ``'|'``-, or ``'+'``-separated list of flags), the
    returned bit mask is obtained by summing input flags.

    .. note::
        In order to flip the bits of the returned bit mask,
        for input of `str` type, prepend '~' to the input string. '~' must
        be prepended to the *entire string* and not to each bit flag! For
        input that is already a bit mask or a Python list of bit flags, set
        ``flip_bits`` for `True` in order to flip the bits of the returned
        bit mask.

    Parameters
    ----------
    bit_flags : int, str, list, None
        An integer bit mask or flag, `None`, a string of comma-, ``'|'``- or
        ``'+'``-separated list of integer bit flags or mnemonic flag names,
        or a Python list of integer bit flags. If ``bit_flags`` is a `str`
        and if it is prepended with '~', then the output bit mask will have
        its bits flipped (compared to simple sum of input flags).
        For input ``bit_flags`` that is already a bit mask or a Python list
        of bit flags, bit-flipping can be controlled through ``flip_bits``
        parameter.

        .. note::
            When ``bit_flags`` is a list of flag names, the ``flag_name_map``
            parameter must be provided.

        .. note::
            Only one flag separator is supported at a time. ``bit_flags``
            string should not mix ``','``, ``'+'``, and ``'|'`` separators.

    flip_bits : bool, None
        Indicates whether or not to flip the bits of the returned bit mask
        obtained from input bit flags. This parameter must be set to `None`
        when input ``bit_flags`` is either `None` or a Python list of flags.

    flag_name_map : BitFlagNameMap
        A `BitFlagNameMap` object that provides mapping from mnemonic
        bit flag names to integer bit values in order to translate mnemonic
        flags to numeric values when ``bit_flags`` that are comma- or
        '+'-separated list of menmonic bit flag names.

    Returns
    -------
    bitmask : int or None
        Returns an integer bit mask formed from the input bit value or `None`
        if input ``bit_flags`` parameter is `None` or an empty string.
        If input string value was prepended with '~' (or ``flip_bits`` was set
        to `True`), then returned value will have its bits flipped
        (inverse mask).

    Examples
    --------
        >>> from astropy.nddata.bitmask import interpret_bit_flags, extend_bit_flag_map
        >>> ST_DQ = extend_bit_flag_map('ST_DQ', CR=1, CLOUDY=4, RAINY=8, HOT=16, DEAD=32)
        >>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28))
        '0000000000011100'
        >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16'))
        '0000000000011100'
        >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('CLOUDY,RAINY,HOT', flag_name_map=ST_DQ))
        '0000000000011100'
        >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16'))
        '1111111111100011'
        >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)'))
        '1111111111100011'
        >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(CLOUDY+RAINY+HOT)',
        ... flag_name_map=ST_DQ))
        '1111111111100011'
        >>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16]))
        '0000000000011100'
        >>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True))
        '1111111111100011'

    """
    # Remember whether the caller explicitly passed flip_bits: it is only
    # legal for int/list inputs; for strings the '~' prefix must be used.
    has_flip_bits = flip_bits is not None
    flip_bits = bool(flip_bits)
    # allow_non_flags permits a single non-power-of-two value (i.e. an
    # already-combined mask) to pass the final power-of-two validation.
    allow_non_flags = False

    if _is_int(bit_flags):
        return (~int(bit_flags) if flip_bits else int(bit_flags))

    elif bit_flags is None:
        if has_flip_bits:
            raise TypeError(
                "Keyword argument 'flip_bits' must be set to 'None' when "
                "input 'bit_flags' is None."
            )
        return None

    elif isinstance(bit_flags, str):
        if has_flip_bits:
            raise TypeError(
                "Keyword argument 'flip_bits' is not permitted for "
                "comma-separated string lists of bit flags. Prepend '~' to "
                "the string to indicate bit-flipping."
            )

        bit_flags = str(bit_flags).strip()

        if bit_flags.upper() in ['', 'NONE', 'INDEF']:
            return None

        # check whether bitwise-NOT is present and if it is, check that it is
        # in the first position:
        bitflip_pos = bit_flags.find('~')
        if bitflip_pos == 0:
            flip_bits = True
            bit_flags = bit_flags[1:].lstrip()
        else:
            if bitflip_pos > 0:
                raise ValueError("Bitwise-NOT must precede bit flag list.")
            flip_bits = False

        # basic check for correct use of parenthesis:
        # peel off one balanced outer pair per iteration until none remain.
        while True:
            nlpar = bit_flags.count('(')
            nrpar = bit_flags.count(')')

            if nlpar == 0 and nrpar == 0:
                break

            if nlpar != nrpar:
                raise ValueError("Unbalanced parantheses in bit flag list.")

            lpar_pos = bit_flags.find('(')
            rpar_pos = bit_flags.rfind(')')

            if lpar_pos > 0 or rpar_pos < (len(bit_flags) - 1):
                raise ValueError("Incorrect syntax (incorrect use of "
                                 "parenthesis) in bit flag list.")

            bit_flags = bit_flags[1:-1].strip()

        # only one separator style may appear in a single expression:
        if sum(k in bit_flags for k in '+,|') > 1:
            raise ValueError(
                "Only one type of bit flag separator may be used in one "
                "expression. Allowed separators are: '+', '|', or ','."
            )

        if ',' in bit_flags:
            bit_flags = bit_flags.split(',')

        elif '+' in bit_flags:
            bit_flags = bit_flags.split('+')

        elif '|' in bit_flags:
            bit_flags = bit_flags.split('|')

        else:
            if bit_flags == '':
                raise ValueError(
                    "Empty bit flag lists not allowed when either bitwise-NOT "
                    "or parenthesis are present."
                )
            bit_flags = [bit_flags]

        if flag_name_map is not None:
            try:
                # if the first entry parses as an int, assume numeric flags;
                # otherwise translate every mnemonic name through the map.
                int(bit_flags[0])
            except ValueError:
                bit_flags = [flag_name_map[f] for f in bit_flags]

        # a single string value may be a pre-combined mask, not a power of 2:
        allow_non_flags = len(bit_flags) == 1

    elif hasattr(bit_flags, '__iter__'):
        if not all([_is_int(flag) for flag in bit_flags]):
            if (flag_name_map is not None and
                    all([isinstance(flag, str) for flag in bit_flags])):
                bit_flags = [flag_name_map[f] for f in bit_flags]
            else:
                raise TypeError("Every bit flag in a list must be either an "
                                "integer flag value or a 'str' flag name.")

    else:
        raise TypeError("Unsupported type for argument 'bit_flags'.")

    # de-duplicate before summing so repeated flags are counted once:
    bitset = set(map(int, bit_flags))
    if len(bitset) != len(bit_flags):
        warnings.warn("Duplicate bit flags will be ignored")

    bitmask = 0
    for v in bitset:
        if not _is_bit_flag(v) and not allow_non_flags:
            raise ValueError("Input list contains invalid (not powers of two) "
                             "bit flag: {:d}".format(v))
        bitmask += v

    if flip_bits:
        bitmask = ~bitmask

    return bitmask
ea657c9f4abfe8503d7ea120ea7c5ff039f82634
3,644,644
import torch


def reparam(mu, std, do_sample=True, cuda=True):
    """Reparametrization trick for a Normal distribution.

    Args:
        mu: mean tensor.
        std: standard-deviation tensor, same shape as ``mu``.
        do_sample: if True, return ``mu + eps * std`` with ``eps ~ N(0, I)``;
            otherwise return ``mu`` (the distribution mode).
        cuda: if True, place the noise tensor on the GPU.

    Returns:
        A sample from N(mu, std^2) when ``do_sample`` is True, else ``mu``.
    """
    if not do_sample:
        return mu
    # Bug fix / modernization: the original wrapped the noise in `Variable`,
    # a name that was never imported in this module (NameError on this path)
    # and that is deprecated since PyTorch 0.4. torch.randn_like draws the
    # same standard-normal noise with std's shape and dtype directly.
    eps = torch.randn_like(std)
    if cuda:
        eps = eps.cuda()
    return mu + eps * std
dc959b0f1f3972ae612d44d90694480270b42a3e
3,644,645
import random


def simu_grid_graph(width, height, rand_weight=False):
    """Generate a grid graph.

    To generate a grid graph. Each node has 4-neighbors. Please see more
    details in https://en.wikipedia.org/wiki/Lattice_graph. For example,
    we can generate 5x3(width x height) grid graph
                0---1---2---3---4
                |   |   |   |   |
                5---6---7---8---9
                |   |   |   |   |
                10--11--12--13--14
    by using simu_grid_graph(5, 3)
    We can also generate a 1x5 chain graph
                0---1---2---3---4
    by using simu_grid_graph(5, 1)

    :param width: width of this grid graph.
    :param height: height of this grid graph.
    :param rand_weight: generate weights from U(1., 2.) if it is True.
    :return: edges and corresponding edge costs.
        return two empty [],[] list if there was any error occurring.
    """
    # Bug fix: the original used `and`, so the error path only triggered when
    # BOTH dimensions were negative; a single negative dimension silently
    # produced an empty graph.
    if width < 0 or height < 0:
        print('Error: width and height should be positive.')
        return [], []
    width, height = int(width), int(height)
    edges = []
    index = 0
    for i in range(height):
        for j in range(width):
            if (index % width) != (width - 1):
                # Edge to the right-hand neighbor.
                edges.append((index, index + 1))
                if index + width < int(width * height):
                    # Edge to the neighbor below.
                    edges.append((index, index + width))
            else:
                # Last column: only the downward edge is possible.
                if index + width < int(width * height):
                    edges.append((index, index + width))
            index += 1
    edges = np.asarray(edges, dtype=int)
    # random generate costs of the graph
    if rand_weight:
        # Edge costs drawn i.i.d. from U(1., 2.).
        weights = np.asarray(
            [random.uniform(1., 2.0) for _ in range(len(edges))],
            dtype=np.float64)
    else:
        # set unit weights for edge costs.
        weights = np.ones(len(edges), dtype=np.float64)
    return edges, weights
139be873014c96f05b3fb391ea1352fab68b8357
3,644,646
def calculate_sensitivity_to_weighting(jac, weights, moments_cov, params_cov):
    """calculate the sensitivity to weighting.

    The sensitivity measure is calculated for each parameter wrt each moment.

    It answers the following question: How would the precision change if the
    weight of the kth moment is increased a little?

    Args:
        jac (np.ndarray or pandas.DataFrame): Jacobian of the moment
            conditions with respect to the parameters, shape
            (n_moments, n_params).
        weights (np.ndarray or pandas.DataFrame): The weighting matrix used
            for msm estimation.
        moments_cov (numpy.ndarray or pandas.DataFrame): The covariance
            matrix of the empirical moments.
        params_cov (numpy.ndarray or pandas.DataFrame): The covariance
            matrix of the parameter estimates.

    Returns:
        np.ndarray or pd.DataFrame: Sensitivity measure with shape
            (n_params, n_moments)

    """
    _jac, _weights, _moments_cov, _params_cov, names = process_pandas_arguments(
        jac=jac, weights=weights, moments_cov=moments_cov, params_cov=params_cov
    )

    # (G'WG)^{-1}, inverted robustly to survive near-singular designs.
    gwg_inverse = _sandwich(_jac, _weights)
    gwg_inverse = robust_inverse(gwg_inverse, INVALID_SENSITIVITY_MSG)

    m6 = []

    # Perturb each moment's weight in turn via a one-hot "mask" matrix and
    # accumulate the derivative of the parameter variances.
    for k in range(len(_weights)):
        mask_matrix_o = np.zeros(shape=_weights.shape)
        mask_matrix_o[k, k] = 1

        # NOTE(review): the four terms below implement the analytic
        # derivative of the sandwich variance wrt the kth diagonal weight;
        # verify against the reference derivation before refactoring.
        m6k_1 = gwg_inverse @ _sandwich(_jac, mask_matrix_o) @ _params_cov
        m6k_2 = (
            gwg_inverse
            @ _jac.T
            @ mask_matrix_o
            @ _moments_cov
            @ _weights
            @ _jac
            @ gwg_inverse
        )
        m6k_3 = (
            gwg_inverse
            @ _jac.T
            @ _weights
            @ _moments_cov
            @ mask_matrix_o
            @ _jac
            @ gwg_inverse
        )
        m6k_4 = _params_cov @ _sandwich(_jac, mask_matrix_o) @ gwg_inverse

        m6k = -m6k_1 + m6k_2 + m6k_3 - m6k_4
        # only the own-variance response of each parameter is kept
        m6k = m6k.diagonal()

        m6.append(m6k)

    m6 = np.array(m6).T

    # Normalize to an elasticity: scale by the weight and divide by the
    # parameter variance.
    weights_diagonal = np.diagonal(_weights)
    params_variances = np.diagonal(_params_cov)

    e6 = m6 / params_variances.reshape(-1, 1)
    e6 = e6 * weights_diagonal

    if names:
        e6 = pd.DataFrame(e6, index=names.get("params"), columns=names.get("moments"))

    return e6
ed41e5e369446144e10bb7b7edfc59d2c3f8621e
3,644,648
def subword(w):
    """
    Function used in the Key Expansion routine that takes a four-byte
    input word and applies an S-box to each of the four bytes to produce
    an output word.
    """
    rows = w.reshape(4, 8)
    # Substitute each byte through the S-box and concatenate the results
    # in order (left-associative +, same as the original expression).
    out = SBOX[rows[0]]
    for byte in rows[1:]:
        out = out + SBOX[byte]
    return out
36dfd4c82484fda342629c94fc66454723e371f6
3,644,649
import multiprocessing
from typing import Counter


def deaScranDESeq2(counts, conds, comparisons, alpha, scran_clusters=False):
    """Makes a call to DESeq2 with SCRAN to perform D.E.A. in the given
    counts matrix with the given conditions and comparisons.
    Returns a list of DESeq2 results for each comparison

    Args:
        counts: pandas DataFrame of counts (cells as columns).
        conds: per-cell condition labels, same order as the columns.
        comparisons: iterable of (A, B) condition pairs to contrast.
        alpha: significance level passed to DESeq2's results().
        scran_clusters: if True, pre-cluster cells with scran::quickCluster
            before computing size factors.

    Returns:
        list of pandas DataFrames, one DESeq2 result table per comparison.
    """
    results = list()
    n_cells = len(counts.columns)
    try:
        pandas2ri.activate()
        deseq2 = RimportLibrary("DESeq2")
        scran = RimportLibrary("scran")
        multicore = RimportLibrary("BiocParallel")
        # Leave one core free for the main process.
        multicore.register(multicore.MulticoreParam(multiprocessing.cpu_count()-1))
        as_matrix = r["as.matrix"]
        # Create the R conditions and counts data
        r_counts = pandas2ri.py2ri(counts)
        cond = robjects.StrVector(conds)
        # Wrap the counts into a SingleCellExperiment on the R side.
        r_call = """
            function(r_counts) {
                sce = SingleCellExperiment(assays=list(counts=r_counts))
                return(sce)
            }
        """
        r_func = r(r_call)
        sce = r_func(as_matrix(r_counts))
        if scran_clusters:
            # Cluster first, then compute pooled size factors within clusters.
            r_clusters = scran.quickCluster(as_matrix(r_counts), max(n_cells/10, 10))
            min_cluster_size = min(Counter(r_clusters).values())
            sizes = list(set([round((min_cluster_size/2) / i) for i in [5,4,3,2,1]]))
            sce = scran.computeSumFactors(sce, clusters=r_clusters, sizes=sizes, positive=True)
        else:
            sizes = list(set([round((n_cells/2) * i) for i in [0.1,0.2,0.3,0.4,0.5]]))
            sce = scran.computeSumFactors(sce, sizes=sizes, positive=True)
        sce = r.normalize(sce)
        # Hand the normalized object over to DESeq2.
        dds = r.convertTo(sce, type="DESeq2")
        r_call = """
            function(dds, conditions){
                colData(dds)$conditions = as.factor(conditions)
                design(dds) = formula(~ conditions)
                return(dds)
            }
        """
        r_func = r(r_call)
        dds = r_func(dds, cond)
        dds = r.DESeq(dds)
        # Perform the comparisons and store results in list
        for A,B in comparisons:
            result = r.results(dds, contrast=r.c("conditions", A, B), alpha=alpha)
            result = r['as.data.frame'](result)
            genes = r['rownames'](result)
            result = pandas2ri.ri2py_dataframe(result)
            # There seems to be a problem parsing the rownames from R to pandas
            # so we do it manually
            result.index = genes
            results.append(result)
        pandas2ri.deactivate()
    # NOTE(review): catching Exception only to re-raise it is a no-op; it
    # also leaves pandas2ri activated on failure. Consider try/finally.
    except Exception as e:
        raise e
    return results
de6c6a1640819c72ef72275f3205cf24d7a8ce0a
3,644,650
def cylindrical_to_cartesian(a: ArrayLike) -> NDArray:
    """
    Transform given cylindrical coordinates array :math:`\\rho\\phi z`
    (radial distance, azimuth and height) to cartesian coordinates array
    :math:`xyz`.

    Parameters
    ----------
    a
        Cylindrical coordinates array :math:`\\rho\\phi z` to transform,
        :math:`\\rho` is in range [0, +inf], :math:`\\phi` is in range
        [-pi, pi] radians i.e. [-180, 180] degrees, :math:`z` is in range
        [0, +inf].

    Returns
    -------
    :class:`numpy.ndarray`
        Cartesian coordinates array :math:`xyz`.

    References
    ----------
    :cite:`Wikipedia2006`, :cite:`Wikipedia2005a`

    Examples
    --------
    >>> a = np.array([3.16227766, 0.32175055, 6.00000000])
    >>> cylindrical_to_cartesian(a)  # doctest: +ELLIPSIS
    array([ 3.        ,  0.9999999...,  6.        ])
    """

    a = as_float_array(a)

    # Convert the leading (rho, phi) pair via the 2-D polar transform; the
    # height component passes through unchanged.
    x, y = tsplit(polar_to_cartesian(a[..., 0:2]))

    return tstack([x, y, a[..., -1]])
27b8d95c2c09b8b78069a4ad0c4cf351de93db13
3,644,651
def eigenarray_to_array( array ):
    """Convert an Eigen::ArrayXd to a 1-D numpy array of doubles.

    The conversion is zero-copy on the numpy side: frombuffer wraps the
    memory exposed by the Eigen object's data() pointer.
    """
    buffer = array.data()
    count = array.size()
    return N.frombuffer(buffer, dtype='d', count=count)
7caa9ab71ca86b6a8afa45879d0f29c8122973fe
3,644,652
from typing import List def _ncon_to_adjmat(labels: List[List[int]]): """ Generate an adjacency matrix from the network connections. """ # process inputs N = len(labels) ranks = [len(labels[i]) for i in range(N)] flat_labels = np.hstack([labels[i] for i in range(N)]) tensor_counter = np.hstack( [i * np.ones(ranks[i], dtype=int) for i in range(N)]) index_counter = np.hstack([np.arange(ranks[i]) for i in range(N)]) # build log-adjacency index-by-index adjmat = np.zeros([N, N], dtype=int) unique_labels = np.unique(flat_labels) for ele in unique_labels: # identify tensor/index location of each edge tnr = tensor_counter[flat_labels == ele] ind = index_counter[flat_labels == ele] if len(ind) == 1: # external index adjmat[tnr[0], tnr[0]] += 1 elif len(ind) == 2: # internal index if tnr[0] != tnr[1]: # ignore partial traces adjmat[tnr[0], tnr[1]] += 1 adjmat[tnr[1], tnr[0]] += 1 return adjmat
8e7171321f547084bdaff4cefa1bd9e00b453cc9
3,644,653
def _is_install_requirement(requirement): """ return True iff setup should install requirement :param requirement: (str) line of requirements.txt file :return: (bool) """ return not (requirement.startswith('-e') or 'git+' in requirement)
339f6a8a573f33157a46193216e90d62475d2dea
3,644,655
def confusion_matrices_runs_thresholds(
    y, scores, thresholds, n_obs=None, fill=0.0, obs_axis=0
):
    """Compute confusion matrices over runs and thresholds.

    `conf_mats_runs_thresh` is an alias for this function.

    Parameters
    ----------
    y : np.ndarray[bool, int32, int64, float32, float64]
        the ground truth labels, if different runs have different number of
        observations the n_obs parameter must be set to avoid computing
        metrics of the filled values. If ``y`` is one dimensional and
        ``scores`` is not the ``y`` values are assumed to be the same for
        each run.
    scores : np.array[float32, float64]
        the classifier scoress, if different runs have different number of
        observations the n_obs parameter must be set to avoid computing
        metrics of the filled values.
    thresholds : np.array[float32, float64]
        classification thresholds
    n_obs : np.array[int64], default=None
        the number of observations per run, if None the same number of
        observations are assumed exist for each run.
    fill : double
        value to fill when a metric is not defined, e.g. divide by zero.
        NOTE(review): this parameter is never used in the function body --
        confirm whether it should be forwarded to the core routine.
    obs_axis : {0, 1}, default=0
        0 if the observations for a single run is a column (e.g. from
        pd.DataFrame) and 1 otherwhise

    Returns
    -------
    conf_mat : np.ndarray[int64]
        3D array where the rows contain the counts for a threshold, the
        columns the confusion matrix entries and the slices the counts for
        a run
    """
    thresholds = check_array(
        thresholds,
        max_dim=1,
        dtype_check=_convert_to_float,
    )
    scores = check_array(
        scores,
        axis=obs_axis,
        target_axis=obs_axis,
        target_order=1-obs_axis,
        max_dim=2,
        dtype_check=_convert_to_float,
    )
    n_runs = scores.shape[1 - obs_axis]
    max_obs = scores.shape[obs_axis]

    # Broadcast a single label vector/column to all runs.
    if y.ndim == 1:
        y = np.tile(y[:, None], n_runs)
    elif y.shape[1] == 1 and y.shape[0] >= 2:
        y = np.tile(y, n_runs)
    y = check_array(
        y,
        axis=obs_axis,
        target_axis=obs_axis,
        target_order=1-obs_axis,
        max_dim=2,
        dtype_check=_convert_to_ext_types,
    )

    n_thresholds = thresholds.size
    if n_obs is None:
        # every run is assumed to be fully populated
        n_obs = np.repeat(max_obs, n_runs)

    cm = _core.confusion_matrix_runs_thresholds(
        y, scores, thresholds, n_obs
    )
    # cm and mtr are both flat arrays with order conf_mat, thresholds, runs
    # as this is fastest to create. However, how the cubes will be sliced
    # later doesn't align with this. So we incur a copy such that the cubes
    # have the optimal strides for further processing
    if n_thresholds == 1:
        # create cube from flat array
        cm = cm.reshape(n_runs, 4, order='C')
    else:
        # create cube from flat array
        cm = cm.reshape(n_runs, n_thresholds, 4, order='C')
        # reorder such that with F-order we get from smallest to largest
        # strides: conf_mat, runs, thresholds
        cm = np.swapaxes(np.swapaxes(cm, 0, 2), 1, 2)
        # make values over the confusion matrix and runs contiguous
        cm = np.asarray(cm, order='F')
        # change order s.t. we have thresholds, conf_mat, runs
        cm = np.swapaxes(cm.T, 1, 2)
    return cm
3,644,656
def new_project(request):
    """
    Django view that lets a user upload a new project.

    NOTE(review): this iterates over *all* Profile rows but returns inside
    the first loop iteration, so the project is always attached to the
    first profile in the table regardless of who is logged in -- presumably
    it should look up the profile belonging to request.user instead; verify
    the Profile/User relation before fixing. Also, when no Profile exists
    the loop body never runs and the view returns None, which Django
    rejects with an error.
    """
    profile = Profile.objects.all()
    for profile in profile:
        if request.method == 'POST':
            form = ProjectForm(request.POST, request.FILES)
            if form.is_valid():
                pro = form.save(commit=False)
                # attach ownership before saving
                pro.profile = profile
                pro.user = request.user
                pro.save()
            return redirect('landing')
        else:
            form = ProjectForm()
        return render(request, 'new_pro.html', {"form": form})
da4adb286e6b972b9ac37019cd4ff0ed4c82dd3f
3,644,657
def move_to_next_pixel(fdr, row, col):
    """
    Given fdr (flow direction array), row (current row index), col (current
    col index), return the next downstream neighbor as a (row, col) pair.

    See How Flow Direction works
    http://desktop.arcgis.com/en/arcmap/latest/tools/spatial-analyst-toolbox/how-flow-direction-works.htm

    D8 flow direction grid
    | 32 | 64 | 128 |
    | 16 | X  | 1   |
    | 8  | 4  | 2   |
    """
    # Map each D8 code to its (row delta, col delta). Any other value is an
    # indeterminate direction / sink: stay in place.
    d8_offsets = {
        1: (0, 1),
        2: (1, 1),
        4: (1, 0),
        8: (1, -1),
        16: (0, -1),
        32: (-1, -1),
        64: (-1, 0),
        128: (-1, 1),
    }
    d_row, d_col = d8_offsets.get(fdr[row, col], (0, 0))
    return (row + d_row, col + d_col)
d134bb35ed4962945c86c0ac2c6af1aff5acd06b
3,644,658
import re
import string


def clean_text(post):
    """
    Filter basic greetings and clean the input text.

    :param post: raw post (any type; it is stringified first)
    :return: cleaned post, or '' if the string is empty/NaN after cleaning
    """
    post = str(post)

    # Strip known greeting templates. NOTE: membership is tested on the
    # lower-cased text while the replacement is case-sensitive -- this
    # mirrors the original behavior exactly.
    for template in TEMPLATES:
        if template in post.lower():
            post = post.replace(template, '')

    # Drop apostrophes, turn punctuation into spaces, collapse runs of
    # spaces, and trim.
    raw_text = post.replace('\'', ' ')
    translator = re.compile('[%s]' % re.escape(string.punctuation))
    without_punct = translator.sub(' ', raw_text)
    # Fix: renamed the local so it no longer shadows the function name, and
    # dropped the dead `is None` check (re.sub never returns None).
    cleaned = re.sub(' +', ' ', without_punct).strip()

    # str(float('nan')) style inputs survive cleaning as the literal 'nan'.
    return '' if cleaned == 'nan' else cleaned
1896cc991e0c061bad16e0a2ae9768c06f2b0029
3,644,659
def turnIsLegal(speed, unitVelocity, velocity2):
    """
    Return True when the turn from unitVelocity towards velocity2 is within
    the maximum allowed turn angle.

    Assumes all velocities have equal magnitude and only need their relative
    angle checked.

    :param speed: magnitude of velocity2, used to normalize the dot product
    :param unitVelocity: current direction of travel as a unit vector
    :param velocity2: candidate next velocity (assumed magnitude == speed)
    :return: True iff cos(angle between the vectors) > MAX_TURN_ANGLE_COS
    """
    # cosine of the angle between the unit direction and velocity2/|velocity2|
    cosAngle = np.dot(unitVelocity, velocity2) / speed
    return cosAngle > MAX_TURN_ANGLE_COS
e598630d47ca77ed953199852a08ce376cdc0e0f
3,644,660
def convert_selection_vars_to_common_effects(G: ADMG) -> nx.DiGraph:
    """Convert all undirected edges to unobserved common effects.

    Each bidirected/undirected edge in ``G`` is replaced by a distinct
    latent node ``U{idx}`` with directed edges into both endpoints.

    Parameters
    ----------
    G : ADMG
        A causal graph with undirected edges.

    Returns
    -------
    G_copy : nx.DiGraph
        A causal graph that is a fully specified DAG with unobserved
        selection variables added in place of undirected edges.
    """
    uc_label = "Unobserved Confounders"
    G_copy = nx.DiGraph(G.dag)

    # for every bidirected edge, add a new node
    for idx, latent_edge in enumerate(G.c_component_graph.edges):
        uc_node = f"U{idx}"
        G_copy.add_node(uc_node, label=uc_label, observed="no")

        # Bug fix: the edges must originate from this edge's own latent
        # node f"U{idx}" -- the original attached them to a single literal
        # node "U" that was never created, collapsing all latents into one.
        G_copy.add_edge(uc_node, latent_edge[0])
        G_copy.add_edge(uc_node, latent_edge[1])
    return G_copy
00f4530e1262bac92b31b8855efdd453de035c57
3,644,661
def mean_binary_proto_to_np_array(caffe, mean_binproto):
    """Load a Caffe image-mean .binaryproto file as a numpy array.

    :param caffe: caffe instance from import_caffe() method
    :param mean_binproto: full path to the model's image-mean .binaryproto
        created from train.lmdb.
    :return: the image mean as a squeezed numpy array
    """
    # I don't have my image mean in .npy file but in binaryproto. I'm
    # converting it to a numpy array.
    blob = caffe.proto.caffe_pb2.BlobProto()
    # Bug fix: use a context manager so the file handle is closed (the
    # original `open(...).read()` leaked it).
    with open(mean_binproto, 'rb') as fh:
        blob.ParseFromString(fh.read())
    mu = np.array(caffe.io.blobproto_to_array(blob))
    mu = mu.squeeze()  # The output array had one redundant dimension.
    return mu
e79488051f26c9e3781c1fbeeb9f42683e8e2df0
3,644,663
def delete_channel(u, p, cid):
    """
    Delete an existing service hook.

    :param u: owning user (used for the post-delete redirect)
    :param p: project the channel belongs to
    :param cid: primary key of the channel to delete
    """
    c = Channel.query.filter_by(
        id=cid,
        project_id=p.id
    ).first()
    if not c:
        # Project or channel doesn't exist (404 Not Found)
        return abort(404)

    # NOTE(review): the second condition is redundant -- the query above
    # already filtered on project_id == p.id.
    if c.project.owner.id != g.user.id or c.project.id != p.id:
        # Project isn't public and the viewer isn't the project owner.
        # (403 Forbidden)
        return abort(403)

    # Only delete on an explicit POST confirmation ('do' == 'd');
    # a plain GET renders the confirmation page below.
    if request.method == 'POST' and request.form.get('do') == 'd':
        c.project.channels.remove(c)
        db.session.delete(c)
        db.session.commit()

        return redirect(url_for('.details', p=p.name, u=u.username))

    return render_template('delete_channel.html',
        project=c.project,
        channel=c
    )
31f6b7640bb222a6b7c7ae62af24e6da170fbaeb
3,644,664
def joint_img_freq_loss(output_domain, loss, loss_lambda):
    """Build a loss combining image-space and Fourier-space terms.

    Args:
        output_domain(str): Network output domain ('FREQ' or 'IMAGE')
        loss(str): Loss type ('L1' or 'L2')
        loss_lambda(float): Weighting of the frequency loss relative to the
            image loss

    Returns:
        Function computing image_loss + loss_lambda * fourier_loss from a
        true and predicted input.
    """
    def joint_loss(y_true, y_pred):
        # Both factories are invoked per call, exactly as before.
        img_term = image_loss(output_domain, loss)(y_true, y_pred)
        freq_term = fourier_loss(output_domain, loss)(y_true, y_pred)
        return img_term + loss_lambda * freq_term

    return joint_loss
9f9a95314c109d6d331c494c77cae8a3578e659d
3,644,665
import requests
import json


def verify_auth_token(untrusted_message):
    """
    Verifies a Auth Token. Returns a django.contrib.auth.models.User
    instance if successful or False.

    :param untrusted_message: serialized, signed token payload received
        from the client (at most 300 seconds old).
    """
    # decrypt the message
    untrusted = URLSafeTimedSerializer(settings.SSO_SECRET).loads(
        untrusted_message, max_age=300)

    # do some extra validation
    if 'auth_token' not in untrusted:
        return False
    if 'request_token' not in untrusted:
        return False

    # call the SSO server to verify the token
    params = {
        'auth_token': untrusted['auth_token'],
        'key': settings.SSO_KEY
    }
    # the request parameters are themselves signed with the shared secret
    message = URLSafeTimedSerializer(settings.SSO_SECRET).dumps(params)
    url = urljoin(settings.SSO_SERVER_PRIVATE_URL, 'sso/api/verify') + '/'
    response = requests.get(
        url,
        params={
            'key': settings.SSO_KEY,
            'message': message
        },
        timeout=10
    )

    # ensure the response is sane
    if response.status_code != 200:
        return False

    # build a User object from the message
    data = URLSafeTimedSerializer(settings.SSO_SECRET).loads(
        response.content, max_age=300)
    user_data = json.loads(data['user'])
    user = client.construct_user(user_data)
    # roles are optional in the SSO reply; synchronize them when present
    if 'roles' in data:
        role_data = json.loads(data['roles'])
        client.synchronize_roles(user, role_data)
    return user
f470d96fea2561e5d754f5e39dad5cf816bb4e69
3,644,666
def get_requirements():
    """Return the list of dependencies parsed from requirements.txt.

    Comment-only lines are skipped and trailing inline comments are
    stripped from each requirement line.

    :return: list of requirement strings
    """
    requirements = []
    with open(REQUIREMENTS_TXT, encoding="utf-8") as handle:
        for line in handle:
            line = line.strip()
            if line.startswith("#"):
                # whole-line comment
                continue
            if "#" in line:
                # drop a trailing inline comment
                line = line.split("#")[0]
            if line:
                requirements.append(line)
    return requirements
68d73653818b9ede5f7be00cc79842d9ca4fa59a
3,644,667
import math as m
from math import sin, cos, atan, asin, floor


def equ2gal(ra, dec):
    """Converts Equatorial J2000d coordinates to the Galactic frame.

    Note: it is better to use AstroPy's SkyCoord API for this.

    Parameters
    ----------
    ra, dec : float, float [degrees]
        Input J2000 coordinates (Right Ascension and Declination).

    Returns
    -------
    glon, glat: float, float [degrees]

    Notes
    -----
    Raises ZeroDivisionError where the longitude arctangent's denominator
    vanishes (e.g. exactly at the galactic poles), as the original did.
    """
    dec = m.radians(dec)
    ra = m.radians(ra)

    # North Galactic Pole / node constants (the original comments had the
    # RA and declination labels swapped).
    a = 27.128251   # The declination of the North Galactic Pole [deg]
    d = 192.859481  # The RA of the North Galactic Pole [deg]
    l = 32.931918   # The ascending node of the Galactic plane on the equator [deg]

    sdec = sin(dec)
    cdec = cos(dec)
    sa = sin(m.radians(a))
    ca = cos(m.radians(a))

    GT = asin(cdec * ca * cos(ra - m.radians(d)) + sdec * sa)
    GL = m.degrees(atan((sdec - sin(GT) * sa) /
                        (cdec * sin(ra - m.radians(d)) * ca)))

    # Resolve the arctangent quadrant ambiguity.
    TP = sdec - sin(GT) * sa
    BT = cdec * sin(ra - m.radians(d)) * ca
    if BT < 0:
        GL += 180
    elif TP < 0:
        GL += 360

    GL += l
    if GL > 360:
        GL -= 360

    # Cleanup: the original also computed degree/minute/second breakdowns
    # (LG/LM/LS, BG/BM/BS) and an unused obliquity constant that never
    # contributed to the return value; that dead code is removed.
    return (GL, m.degrees(GT))
ebed665e798a00b649367bc389747f046659d9af
3,644,668
import functools
import unittest


def require_pyoptsparse(optimizer=None):
    """
    Decorate test to raise a skiptest if a required pyoptsparse optimizer
    cannot be imported.

    Parameters
    ----------
    optimizer : str or None
        Pyoptsparse optimizer string. Default is None, which just checks
        for pyoptsparse.

    Returns
    -------
    TestCase or TestCase.method
        The decorated TestCase class or method.
    """
    def decorator(obj):
        # Bug fix: the pyoptsparse import must live INSIDE the try block.
        # The original had an empty try body (a syntax error) because the
        # import had drifted to module level, where a missing pyoptsparse
        # would crash this module's import instead of skipping the test.
        try:
            from pyoptsparse import OPT
        except Exception:
            msg = "pyoptsparse is not installed."

            if not isinstance(obj, type):
                @functools.wraps(obj)
                def skip_wrapper(*args, **kwargs):
                    raise unittest.SkipTest(msg)
                obj = skip_wrapper
            obj.__unittest_skip__ = True
            obj.__unittest_skip_why__ = msg
            return obj

        # Per the docstring, None means "only check for pyoptsparse", so
        # only probe a specific optimizer when one was requested.
        if optimizer is not None:
            try:
                OPT(optimizer)
            except Exception:
                msg = "pyoptsparse is not providing %s" % optimizer

                if not isinstance(obj, type):
                    @functools.wraps(obj)
                    def skip_wrapper(*args, **kwargs):
                        raise unittest.SkipTest(msg)
                    obj = skip_wrapper
                obj.__unittest_skip__ = True
                obj.__unittest_skip_why__ = msg

        return obj

    return decorator
2df011c38f9a44047a5a259be983ba23bf9ebe92
3,644,669
from typing import Union


def addition(a: Union[int, float], b: Union[int, float]) -> Union[int, float]:
    """
    A simple addition function. Add `a` to `b` and return the sum.
    """
    return a + b
b9adaf3bea178e23bd4c02bdda3f286b6ca8f3ab
3,644,670
def print_version():
    """
    Print the module version information.

    :return: returns 1 for exit code purposes
    :rtype: int
    """
    # Bug fix: the original format string had a stray trailing '"' after
    # the release field, which was printed verbatim.
    print("""
    %s version %s - released %s
    """ % (__docname__, __version__, __release__))
    return 1
28f5ce2a922fe66de5dce2ed2bfab6241835c759
3,644,671
def splitpath(path):
    """ Split a path into a drive specifier and a list of path elements. """
    drive, path = '', _os.path.normpath(path)

    # On platforms that provide splitunc (older Windows Pythons), use it to
    # peel off a UNC share first; otherwise fall back to splitdrive.
    splitunc = getattr(_os.path, 'splitunc', None)
    if splitunc is not None:
        drive, path = splitunc(path)
    if not drive:
        drive, path = _os.path.splitdrive(path)

    # Determine the separator, deriving it from join() if the platform
    # module does not expose `sep` directly.
    sep = getattr(_os.path, 'sep', None)
    if sep is None:
        sep = _os.path.join('1', '2')[1:-1]

    # Repeatedly strip the last component until only the root (or nothing)
    # remains, then fold that root into the drive.
    elems = []
    while True:
        prefix, path = _os.path.split(path)
        elems.append(path)
        if prefix in ('', sep):
            drive = _os.path.join(drive, prefix)
            break
        path = prefix

    elems.reverse()
    return drive, elems
830c7aa2e825bb57d819bc014afe7cb0ba31aaf5
3,644,672
def vmf1_zenith_wet_delay(dset): """Calculates zenith wet delay based on gridded zenith wet delays from VMF1 Uses gridded zenith wet delays from VMF1, which are rescaled from the gridded height to actual station height by using Equation(5) described in Kouba :cite:`kouba2007`. Args: dset (Dataset): Model data. Returns: numpy.ndarray: Zenith wet delay for each observation in [m] """ # Get gridded VMF1 data vmf1 = apriori.get("vmf1", time=dset.time) lat, lon, height = dset.site_pos.pos.llh.T grid_zwd = vmf1["zw"](dset.time, lon, lat) # Interpolation in time and space in VMF1 grid grid_height = vmf1["ell"](lon, lat, grid=False) # Zenith Wet delay. Eq. (5) in Kouba :cite:`kouba2007` zwd = grid_zwd * np.exp(-(height - grid_height) / 2000) return zwd
defa135bac27c1540caceee4ca12f6741c5e6475
3,644,673
from functools import reduce
import random
import hashlib


def get_hash(dictionary):
    """Return a 24-character hex digest derived from a dictionary's values.

    All values are converted to strings, concatenated, randomly shuffled,
    and hashed with SHAKE-128. NOTE: because of the random shuffle the
    output is NOT reproducible across calls for the same input -- it is a
    unique token derived from the values, not a stable content hash.

    :param dictionary: A python dictionary
    :return: A HEX hash

    Credit: https://gitlab.com/calledbymountains/cvdatasetmanagement/blob/master/utils/gen_utils.py
    """
    if not isinstance(dictionary, dict):
        # Bug fix: corrected the garbled 'ap ython' wording.
        raise ValueError('The argument must be a Python dictionary.')
    # Concatenate the stringified values, then shuffle the characters.
    str_input = reduce(lambda x, y: str(x) + str(y), list(dictionary.values()))
    str_input = ''.join(random.sample(str_input, len(str_input)))
    hash_object = hashlib.shake_128(str_input.encode())
    # 12 bytes -> 24 hex characters.
    return hash_object.hexdigest(12)
2e69c397611510151996d152c4fc0b5573d62fdc
3,644,674
def convert_to_tensor(value, dtype=None, device = None):
    """
    Converts the given value to a Tensor.

    Parameters
    ----------
    value : object
        An object whose type has a registered Tensor conversion function.
    dtype : optional
        Optional element type for the returned tensor. If missing, the type
        is inferred from the type of value.
    device : optional
        NOTE(review): accepted but currently ignored -- it is never
        forwarded to ``pd.to_tensor``. Confirm whether device placement
        should be wired through.

    Returns
    -------
    A Tensor based on value.
    """
    return pd.to_tensor(value, dtype=dtype)
fbae4561f3a38b8f72146f584ce07faf5096cdc1
3,644,675
def aggregate_extrema(features, Th, percentage = True) :
    """
    Summary:
        Function that tries to remove false minima by aggregating close-by
        extrema.

    Arguments:
        features - pandas series containing the extrema to be aggregated.
            The series is of the form: Max, Min, Max, Max, Min, ...
            i.e. each minimum (index ii) is flanked by maxima, and pairs of
            coincident maxima separate consecutive min/max triplets.
        Th - threshold used to remove 'false' minima
        percentage - tells if the threshold is expressed as a fraction of the
            distance between adjacent maxima and minima (True) or as an
            absolute value (False)

    Returns:
        aggregatedFeat - pandas vector with the retained (aggregated) features
    """
    # Keep the first maximum and minimum
    ind = [0]
    # Factor used to scale the threshold; stays 1 when the threshold is
    # absolute (percentage == False)
    d = 1
    skipNext = False
    # For each minimum (every 3rd entry) check if it can be merged with a
    # neighbouring node
    for ii in range(1, len(features), 3) :
        if skipNext :
            # This minimum was already consumed by a right-side aggregation
            skipNext = False
            continue
        # check if we are at the end of the feature vector
        if ii + 2 >= len( features ) :
            # Record the point which is the last in the list
            ind.append(ii) # Current minima
            ind.append(ii + 1) # Following maxima
            break
        aggregate = False
        # check if the next two maxima coincide
        if features[ii+1] == features[ii+2] :
            # find which is the lowest minimum
            if features[ ii ] > features[ii + 3] :
                # try to aggregate on the left
                if percentage :
                    d = features[ii - 1] - features[ii + 3]
                if (features[ii-1] > features[ii+1]) and (features[ii+1] - features[ii] < Th * d):
                    # The point and the next 2 coincident maxima are dropped
                    # from the output list
                    aggregate = True
            else :
                # try to aggregate on the right
                if percentage :
                    d = features[ii + 4] - features[ii]
                if (features[ii+4] > features[ii+2]) and (features[ii+2] - features[ii+3] < Th * d):
                    aggregate = True
                    # In this case the point is kept but the next minimum is
                    # absorbed: jump straight to the maximum after it
                    ind.append(ii) # Current minima
                    ind.append(ii+4)
                    if ii + 5 < len(features) :
                        ind.append(ii+5)
                    skipNext = True # skip the next minima that has already been processed
        if not aggregate:
            # Record the point
            ind.append(ii) # Current minima
            ind.append(ii + 1) # Following maxima
            ind.append(ii + 2) # Maxima of the next minima
    # check if the last max was copied twice
    if features[ind[-1]] == features[ind[-2]]:
        ind.pop()
    return features[ind].copy()
6eeed204de4c39f8b66353595cbc04800bb1b176
3,644,676
from pathlib import Path


def load_embedded_frame_data(session_path, camera: str, raw=False):
    """Load the embedded frame counter and GPIO pin state for one camera.

    :param session_path: session folder containing ``raw_video_data``;
        ``None`` short-circuits to ``(None, None)``
    :param camera: The specific camera to load, one of ('left', 'right', 'body')
    :param raw: If True the raw data are returned without preprocessing
        (thresholding, etc.)
    :return: The frame counter, the pin state (each ``None`` if its binary
        file is absent)
    """
    if session_path is None:
        return None, None
    video_dir = Path(session_path).joinpath('raw_video_data')

    def _read_bin(suffix):
        # Both embedded streams are stored as float64 and used as ints.
        path = video_dir / f'_iblrig_{camera}Camera.{suffix}.bin'
        if not path.exists():
            return None
        return np.fromfile(path, dtype=np.float64).astype(int)

    count = _read_bin('frame_counter')
    if count is not None and not raw:
        count -= count[0]  # start from zero

    pin_state = _read_bin('GPIO')
    if pin_state is not None and not raw:
        pin_state = pin_state > PIN_STATE_THRESHOLD

    return count, pin_state
865d5151680b901fb0941cf487bd01836748f2c4
3,644,677
def reconstruct(edata, mwm=80.4, cme=1000):
    """
    Reconstructs the momentum of the neutrino and anti-neutrino, given the
    momentum of the muons and bottom quarks.

    INPUT:
    edata: A list containing the x, y, and z momentum in GeV of the charged
        leptons and bottom quarks, in the following order:
        edata := [amux, amuy, amuz, b1x, b1y, b1z, mux, muy, muz, b2x, b2y, b2z]
        with notation,
        amu := anti-muon
        b1 := bottom quark 1*
        mu := muon
        b2 := bottom quark 2*
        * The charge of the bottom quark is assumed to be unknown.
    mwm(default=80.4): The constrained mass of the W boson in GeV.
    cme(default=1000): The center of mass energy.

    OUTPUT:
    solutions: A list of the reconstructed neutrino and anti-neutrino x, y,
        and z-momenta as a tuple, for each possible solution of p2z,
        [(nux, nuy, nuz, anux, anuy, anuz), ...].  Empty if the kinematic
        domain is complex or neither interpolation fits well enough.
    """
    assert len(edata) == 12, 'edata should have length 12.'
    degree = 4  # The degree of the interpolating polynomial.
    # Interpolations with adjusted R-squared below this threshold are
    # considered a poor fit and contribute no solutions.
    rbar_threshold = 0.95
    mwm2 = mwm**2
    domain_func, func1s, func2s = _getFuncs(edata, mwm2, cme)
    # Index 2 holds the p2z component of each solution family (see _getFuncs).
    p2z_func1 = func1s[2]
    p2z_func2 = func2s[2]
    solutions = []
    # Find domain by finding the two roots of domain_func (quadratic).
    domain = solve(domain_func, rational=False, simplify=False, minimal=True, quadratics=True)
    # Check for complex domain bounds: no real kinematic region exists.
    if not any([d.is_real for d in domain]):
        return []
    domain = [float(d) for d in domain]
    # Interpolate function 1 and calculate adjusted R-squared (rbar).
    poly1, rbar1 = dividedDiffernce(p2z_func1, domain[0], domain[1], deg=degree, var_name='p2z')
    # Add solutions only if interpolation is a good fit.
    if rbar1 > rbar_threshold:
        solutions = _getSols(poly1, domain, func1s)
    # Interpolate function 2 and calculate adjusted R-squared.
    poly2, rbar2 = dividedDiffernce(p2z_func2, domain[0], domain[1], deg=degree, var_name='p2z')
    if rbar2 > rbar_threshold:
        solutions += _getSols(poly2, domain, func2s)
    # rbars = (rbar1, rbar2)
    return solutions
31ecb3f8982f8026a4d0052778b53f1e76912252
3,644,678
import contextlib
import sqlite3


def run_sql_command(query: str, database_file_path: str, unique_items=False) -> list:
    """
    Returns the output of an SQL query performed on a specified SQLite database

    Parameters:
        query (str): An SQL query
        database_file_path (str): absolute path of the SQLite database file
        unique_items (bool): whether the function should return a list of items
            instead of a list of tuples with one value

    Returns:
        records (list): The output of the SQLite database
    """
    # closing() guarantees the connection/cursor are released; the bare
    # `with conn` block commits (or rolls back) the implicit transaction.
    with contextlib.closing(sqlite3.connect(database_file_path)) as conn:
        with conn:
            with contextlib.closing(conn.cursor()) as cursor:
                cursor.execute(query)
                rows = cursor.fetchall()
    if unique_items:
        # Flatten single-column rows into plain values.
        return [row[0] for row in rows]
    return rows
705584db31fd270d4127e7d1b371a24a8a9dd22e
3,644,679
def zeta_nbi_nvi_ode(t, y, nb, C, nv, nb0, nbi_ss, f, g, c0, alpha, B, pv, e, R, eta, mu, nbi_norm = True):
    """
    Solving the regular P0 equation using the ODE solver (changing s > 0)

    t : time to solve at, in minutes
    y : y[0] = nvi, y[1] = nbi, y[2] = zeta(t)
    nbi_norm : if True, the nbi value entering the selection coefficient is
        normalized by p_0 = 1 - zeta; in both cases it is capped at nbi_ss.

    Returns the derivatives (dnvi, dnbi, dzeta).
    """
    F = f*g*c0
    r = R*g*c0
    beta = alpha*pv*nb
    delta = F + alpha*nb*(1-pv)

    L = 30  # protospacer length
    P0 = np.exp(-mu*L)

    dnvi = (-(F + alpha*nb)*y[0] + alpha*B*P0*pv*y[0]*(nb - e*y[1]))
    dnbi = ((g*C - F - r)*y[1] - alpha*pv*y[1]*(nv - e*y[0]) + alpha*eta*nb0*y[0]*(1 - pv))

    # Effective nbi used in the selection coefficient, capped at nbi_ss.
    # Using if/else instead of the original `== True` / `== False` pair
    # guarantees nbi_val is always bound (the original raised NameError for
    # any nbi_norm value that was neither exactly True nor False).
    if nbi_norm:
        # nbi normalized by p_0 = 1 - zeta
        nbi_val = min(y[1] / (1 - y[2]), nbi_ss)
    else:
        # nbi not normalized by p_0, capped at nbi_ss
        nbi_val = min(y[1], nbi_ss)

    # straight deterministic nbi prediction
    #nbi_val = y[1]

    s = (beta - delta - 2*alpha*pv*e*nbi_val) / (delta + alpha*pv*e*nbi_val)
    dzeta = (beta + delta)*(1/(s + 2) + y[2]**B * (s + 1)/(s + 2) - y[2])

    return dnvi, dnbi, dzeta
5c30846bbb1bbf4c9a698dcec3627edece63e4bc
3,644,680
from astroquery.irsa import Irsa
def get_irsa_catalog(ra=165.86, dec=34.829694, radius=3, catalog='allwise_p3as_psd', wise=False, twomass=False):
    """Query for objects in the `AllWISE <http://wise2.ipac.caltech.edu/docs/release/allwise/>`_ source catalog

    Parameters
    ----------
    ra, dec : float
        Center of the query region, decimal degrees

    radius : float
        Radius of the query, in arcmin

    catalog : str
        IRSA catalog identifier; overridden when `wise` or `twomass` is set.

    wise : bool
        Shortcut: query the AllWISE point-source catalog ('allwise_p3as_psd').

    twomass : bool
        Shortcut: query the 2MASS point-source catalog ('fp_psc').
        NOTE: `wise` takes precedence if both flags are True.

    Returns
    -------
    table : `~astropy.table.Table`
        Result of the query

    """
    if wise:
        catalog = 'allwise_p3as_psd'
    elif twomass:
        catalog = 'fp_psc'

    coo = coord.SkyCoord(ra*u.deg, dec*u.deg)

    # Cone search around the given coordinates via the IRSA service.
    table = Irsa.query_region(coo, catalog=catalog, spatial="Cone",
                              radius=radius*u.arcmin, get_query_payload=False)

    return table
13308f9ae6ffd61c70588eb7ee7980cc1cd71578
3,644,681
import platform


def os_kernel():
    """
    Get the operating system's kernel version

    Returns "Unknown" when none of the platform flags (LINUX, WIN32, MACOS)
    is set.
    """
    if LINUX:
        return platform.release()
    if WIN32 or MACOS:
        return platform.version()
    return "Unknown"
1f528ae3710485726736fd9596efbc84b8bc76fd
3,644,683
def make_nested_pairs_from_seq(args):
    """
    Given a list of arguments, creates a list in Scheme representation
    (nested Pairs).
    """
    # Fold from the right: the innermost cdr is Nil, and each argument
    # wraps the structure built so far.
    result = Nil()
    for item in args[::-1]:
        result = Pair(item, result)
    return result
74aeeb54b648ccf231b6619800d1727fc7504cd4
3,644,686
import ast


def get_aug_assign_symbols(code):
    """Given an AST or code string return the symbols that are augmented assign.

    Parameters
    ----------

    code: A code string or the result of an ast.parse.
    """
    # Accept either source text or an already-parsed tree.
    tree = ast.parse(code) if isinstance(code, str) else code
    lister = AugAssignLister()
    lister.visit(tree)
    return lister.names
d7ee8834b5d1aa0ac93369815206c95919907ae1
3,644,687
from typing import Dict
from typing import Any
def stack_dict(state: Dict[Any, tf.Tensor]) -> tf.Tensor:
  """Stack a dict of tensors along its last axis.

  Values are ordered by ``sorted_values`` before stacking — presumably
  sorted by key, so the output layout is independent of dict insertion
  order (confirm against ``sorted_values``).
  """
  return tf.stack(sorted_values(state), axis=-1)
d50b774b708715934f2c9998d7a477839c73593a
3,644,688
def compact_float(n, max_decimals=None):
    """Reduce a float to a more compact value.

    Args:
        n: Floating point number.
        max_decimals: Maximum decimals to keep; defaults to None.

    Returns:
        An integer if `n` is essentially an integer, or a string
        representation of `n` reduced to `max_decimals` numbers after the
        decimal point. Otherwise, simply returns `n`.
    """
    # Integral values collapse to int regardless of max_decimals.
    if float(n).is_integer():
        return int(n)
    if max_decimals is not None:
        return "{0:.{1}f}".format(n, max_decimals)
    return n
827e49e05aaca31d497f84c2a8c8dd52cfad73d9
3,644,689
def resample_2d(X, resolution):
    """Resample input data for efficient plotting.

    Bins the 2-D points into a histogram, keeps only the occupied bins, and
    returns jittered bin-centre coordinates so the downsampled scatter does
    not look like a regular grid.

    Parameters:
    -----------
    X : array_like
        Input data for clustering, shape (n_samples, 2).
    resolution : int
        Approximate size of a bin in data units along each axis.

    Returns:
    --------
    xx[mask] : array_like
        Rescaled x coordinates of occupied bins.
    yy[mask] : array_like
        Rescaled y coordinates of occupied bins.
    """
    x, y = X[:, 0], X[:, 1]
    nbins = np.ptp(X, axis=0) / resolution
    hh, locx, locy = np.histogram2d(x, y, bins=np.ceil(nbins).astype('int'))
    xwidth, ywidth = np.diff(locx).mean(), np.diff(locy).mean()
    mask = hh != 0  # keep only occupied bins
    # Shift bin edges by one bin width.
    # NOTE(review): +width (not +width/2) lands on the right edge rather
    # than the bin centre — confirm whether that offset is intended.
    locx = locx[:-1] + xwidth
    locy = locy[:-1] + ywidth
    yy, xx = np.meshgrid(locy, locx)
    np.random.seed(0)  # NOTE: reseeds the *global* NumPy RNG for reproducible jitter
    # Jitter each coordinate by up to half of its own axis' bin width.
    # (Bug fix: the original applied the x-width jitter to yy and the
    # y-width jitter to xx — swapped.)
    xx = xx + np.random.uniform(-xwidth / 2, xwidth / 2, size=hh.shape)
    yy = yy + np.random.uniform(-ywidth / 2, ywidth / 2, size=hh.shape)
    return xx[mask], yy[mask]
88cf98f82e0bf538df6e8a00b49f07eab75da1fe
3,644,690
def is_mocked(metric_resource: MetricResource) -> bool:
    """
    Is this metrics a mocked metric or a real metric?
    """
    # A metric is considered mocked whenever its spec carries a mock section.
    mock_section = metric_resource.spec.mock
    return mock_section is not None
d76f6a0a04026245605176799346092d4a8eb994
3,644,691
def negative_embedding_subtraction(
    embedding: np.ndarray,
    negative_embeddings: np.ndarray,
    faiss_index: faiss.IndexFlatIP,
    num_iter: int = 3,
    k: int = 10,
    beta: float = 0.35,
) -> np.ndarray:
    """
    Post-process function to obtain more discriminative image descriptor.

    WARNING: ``embedding`` is modified *in place* (the ``-=`` and ``/=``
    below mutate the caller's array); the returned value is additionally
    cast to float32.

    Parameters
    ----------
    embedding : np.ndarray of shape (n, d)
        Embedding to be subtracted.
    negative_embeddings : np.ndarray of shape (m, d)
        Negative embeddings to be subtracted.
    faiss_index : faiss.IndexFlatIP
        Index to be used for nearest neighbor search.  Presumably built over
        ``negative_embeddings`` so that the returned indices address that
        array — confirm at the call site.
    num_iter : int, optional
        Number of iterations. The default is 3.
    k : int, optional
        Number of nearest neighbors to be used for each iteration. The default is 10.
    beta : float, optional
        Parameter for the weighting of the negative embeddings. The default is 0.35.

    Returns
    -------
    np.ndarray of shape (n, d)
        Subtracted, L2-normalized embedding as float32.
    """
    for _ in range(num_iter):
        _, topk_indexes = faiss_index.search(embedding, k=k)  # search for hard negatives
        topk_negative_embeddings = negative_embeddings[topk_indexes]

        embedding -= (topk_negative_embeddings.mean(axis=1) * beta)  # subtract by hard negative embeddings
        embedding /= np.linalg.norm(embedding, axis=1, keepdims=True)  # L2-normalize

    return embedding.astype('float32')
da20719f1b0b46fcfa4b7b11deb81aa2731abc5f
3,644,692
def op_conv_map(operator):
    """Convert operator or return same operator"""
    # EAFP lookup in the conversion table; unknown operators pass through.
    try:
        return OPERATOR_CONVERSION[operator]
    except KeyError:
        return operator
91f6fea341b473e4965495af23cf957fef39234a
3,644,693
def getGMapKey():
    """Return value for <gmapKey/> configuration parameter."""
    # Re-reads the sciflo configuration on every call (no caching).
    return sciflo.utils.ScifloConfigParser().getParameter('gmapKey')
0d4e36e39672b698b07d4be4d2cb0b6eff8a26c9
3,644,694
def _ro_hmac(msg, h=None):
    """Implements random oracle H as HMAC-SHA256 with the all-zero key.

    Input is message string and output is a 32-byte sequence containing the
    HMAC value.

    Args:
        msg: Input message string.
        h: An optional instance of HMAC to use.  If None, a copy of the
           zero-keyed template HMAC is used.

    Returns:
        bytes: Random Oracle output (32 bytes).
    """
    # Copying the pre-built zero-key HMAC avoids re-deriving the key state.
    hmac_obj = ZERO_HMAC.copy() if h is None else h
    hmac_obj.update(msg)
    return hmac_obj.digest()
97aa326be593096a6441546a5d449b052ccc603a
3,644,696
def get_unique_slug(model_instance, sluggable_field_name, slug_field_name):
    """
    Takes a model instance, sluggable field name (such as 'title') of that
    model as string, slug field name (such as 'slug') of the model as string;
    returns a unique slug as string.
    """
    base_slug = slugify(getattr(model_instance, sluggable_field_name))
    model_cls = model_instance.__class__

    # Append an incrementing numeric suffix until no existing row of this
    # model uses the candidate slug.
    candidate = base_slug
    suffix = 1
    while model_cls._default_manager.filter(**{slug_field_name: candidate}).exists():
        candidate = '{}-{}'.format(base_slug, suffix)
        suffix += 1
    return candidate
bbf1aa6108670f45b8a5b1a112f9b42a07344763
3,644,697
def get_precomputed_features(source_dataset, experts):
    """Get precomputed features from a set of experts and a dataset.

    Arguments:
        source_dataset: the source dataset as an instance of Base Dataset.
        experts: a list of experts to use precomputed features from.

    Returns:
        A list of dicts, one per expert (same order as `experts`).  Each dict
        maps video id -> (features, missing_modalities) where features is a
        float32 array and missing_modalities is True when the expert had no
        value for that video (features are then all zeros).
    """
    precomputed_features = []

    for expert in experts:
        processed_expert_features = {}

        # Cached features for this (dataset, expert) pair.
        expert_features = cache.get_cached_features_by_expert_and_dataset(
            source_dataset, expert)

        for video_id, expert_value in expert_features.items():
            video_expert_features = None
            missing_modalities = False

            expert_value = expert.feature_transformation(expert_value)

            if is_expert_value_missing(expert_value):
                # Substitute an all-zero placeholder of the expected shape.
                video_expert_features = np.zeros(
                    expert.embedding_shape, np.float32)
                missing_modalities = True
            else:
                expert_value = expert_value.astype(np.float32)

                if expert.constant_length:
                    video_expert_features = expert_value
                else:
                    # Variable-length experts are padded to a fixed shape.
                    video_expert_features = zero_pad_expert_features(
                        expert, expert_value)

            processed_expert_features[video_id] = (
                video_expert_features,
                missing_modalities
            )

        precomputed_features.append(processed_expert_features)

    return precomputed_features
d89a932490f3c3ef4aca31301586ce628a720233
3,644,698
def _local_distribution():
    """
    获取地区分布 — build an embedded pyecharts Map of active positions
    grouped by province.

    Returns the rendered HTML (string) of the map.
    """
    data = {}
    # Province names of all active positions (status=1).
    all_city = Position.objects.filter(status=1).values_list("district__province__name", flat=True)
    value_count = pd.value_counts(list(all_city)).to_frame()
    df_value_counts = pd.DataFrame(value_count).reset_index()
    df_value_counts.columns = ['name', 'counts']
    # Truncate to the first two characters so names match the pyecharts
    # China-map region labels.
    df_value_counts["name"] = df_value_counts['name'].str.slice(0, 2)
    data["count"] = all_city.__len__()
    all_data = df_value_counts.values
    # Visual-map ceiling: round the largest count up to the next-but-one
    # multiple of 100 so the colour scale has headroom.
    range_max = (df_value_counts["counts"].values[0] // 100 + 2) * 100
    render_data = [(item[0], item[1]) for item in all_data]
    maps = Map(init_opts=PYECHARTS_INIT_OPTS)
    maps.add(series_name="总览", data_pair=render_data, zoom=1.2, is_map_symbol_show=False,
             itemstyle_opts=opts.series_options.ItemStyleOpts(area_color="#ddd", border_color="#eee", border_width=.5),
             ).set_global_opts(
        visualmap_opts=opts.VisualMapOpts(max_=int(range_max)))
    return maps.render_embed()
2f81365c66680c1b04805869e45118601ed769df
3,644,699
def paradigm_filler(shared_datadir) -> ParadigmFiller:
    """
    These layout, paradigm, and hfstol files are **pinned** test data; the
    real files in use are hosted under the res/ folder, and should not be
    used in tests!
    """
    return ParadigmFiller(
        shared_datadir / "layouts",
        shared_datadir / "crk-normative-generator.hfstol",
    )
e20ca1eebfeca8b4d54d87a02f723e21fd54c3bf
3,644,700
def apply_scaling(data, dicom_headers): """ Rescale the data based on the RescaleSlope and RescaleOffset Based on the scaling from pydicomseries :param dicom_headers: dicom headers to use to retreive the scaling factors :param data: the input data """ # Apply the rescaling if needed private_scale_slope_tag = Tag(0x2005, 0x100E) private_scale_intercept_tag = Tag(0x2005, 0x100D) if 'RescaleSlope' in dicom_headers or 'RescaleIntercept' in dicom_headers \ or private_scale_slope_tag in dicom_headers or private_scale_intercept_tag in dicom_headers: rescale_slope = 1 rescale_intercept = 0 if 'RescaleSlope' in dicom_headers: rescale_slope = dicom_headers.RescaleSlope if 'RescaleIntercept' in dicom_headers: rescale_intercept = dicom_headers.RescaleIntercept # try: # # this section can sometimes fail due to unknown private fields # if private_scale_slope_tag in dicom_headers: # private_scale_slope = float(dicom_headers[private_scale_slope_tag].value) # if private_scale_slope_tag in dicom_headers: # private_scale_slope = float(dicom_headers[private_scale_slope_tag].value) # except: # pass return do_scaling(data, rescale_slope, rescale_intercept) else: return data
13bc94058aa725b59f017fc069408a9d279cd933
3,644,701
from itertools import product
def allowed_couplings(coupling, flow, free_id, symmetries):
    """Iterator over all the allowed Irreps for free_id in coupling if the
    other two couplings are fixed.

    Args:
        coupling: length-3 sequence of couplings; each element is a tuple of
            irrep labels, one per symmetry in `symmetries`.
        flow: length-3 sequence of booleans giving the direction of each leg.
        free_id: index (0, 1 or 2) of the coupling to enumerate.
        symmetries: sequence of symmetry names, one per irrep component.

    Yields:
        Tuples of irrep labels allowed at position `free_id`, built as the
        Cartesian product of the per-symmetry constraints.
    """
    if len(coupling) != 3:
        raise ValueError(f'len(coupling) [{len(coupling)}] != 3')

    if len(flow) != 3:
        raise ValueError(f'len(flow) [{len(flow)}] != 3')

    # The two fixed legs; `this_f` is the direction of the free leg.
    other_ids = [0, 1, 2]
    other_ids.remove(free_id)
    other_c = [coupling[o] for o in other_ids]
    other_f = [flow[o] for o in other_ids]
    this_f = flow[free_id]

    # Each constraint yields the allowed irrep value(s) for one symmetry,
    # given the two fixed irreps (oirr), their flows and the free-leg flow.
    def fermionic_constraint(oirr, oflow, tflow):
        # Parity: total fermion parity must be even.
        yield sum(oirr) % 2

    def U1_constraint(oirr, oflow, tflow):
        # Charge conservation with sign set by leg direction.
        sign = {True: 1, False: -1}
        yield sign[not tflow] * sum(sign[f] * x for x, f in zip(oirr, oflow))

    def pg_constraint(oirr, oflow, tflow):
        # Abelian point groups: XOR of the two fixed irreps.
        yield oirr[0] ^ oirr[1]

    def SU2_constraint(oirr, oflow, tflow):
        # Triangle rule: |j1 - j2| .. j1 + j2 in steps of 2 (doubled spins).
        return range(abs(oirr[0] - oirr[1]), oirr[0] + oirr[1] + 1, 2)

    constraint = {
        'fermionic': fermionic_constraint,
        'U(1)': U1_constraint,
        'SU(2)': SU2_constraint,
        'seniority': U1_constraint,
        'C1': pg_constraint,
        'Ci': pg_constraint,
        'C2': pg_constraint,
        'Cs': pg_constraint,
        'D2': pg_constraint,
        'C2v': pg_constraint,
        'C2h': pg_constraint,
        'D2h': pg_constraint
    }

    for ncoupling in product(*[constraint[s](c, other_f, this_f)
                               for *c, s in zip(*other_c, symmetries)]):
        yield ncoupling
1e2d71edc68b8ecebfa3e09eae17e17a381d82b4
3,644,702
def poly_iou(poly1, poly2, thresh=None):
    """Compute intersection-over-union for two GDAL/OGR geometries.

    Parameters
    ----------
    poly1:
        First polygon (WKB) used in IOU calc.
    poly2:
        Second polygon (WKB) used in IOU calc.
    thresh: float or None
        If not provided (default), returns the float IOU for the two
        polygons. If provided, return True if the IOU met this threshold.
        Otherwise, False.

    Returns
    -------
    IOU: float or bool
        Return the IOU value if `thresh` is None, otherwise boolean if the
        threshold value was met.
    """
    poly1 = ogr.CreateGeometryFromWkb(poly1)
    poly2 = ogr.CreateGeometryFromWkb(poly2)
    if not poly1.Intersects(poly2):
        # Fix: the original returned bool False even when a float IOU was
        # requested (thresh is None); keep the documented numeric contract.
        # (Backward compatible: False == 0.0 in Python.)
        return False if thresh is not None else 0.0
    intersection_area = poly1.Intersection(poly2).Area()
    union_area = poly1.Union(poly2).Area()
    iou = intersection_area / union_area
    # If threshold was provided, return whether IOU met the threshold
    if thresh is not None:
        return iou >= thresh
    return iou
c033e654144bb89093edac049b0dfcd4cdec3d1a
3,644,703
def score_file(filename): """Score each line in a file and return the scores.""" # Prepare model. hparams = create_hparams() encoders = registry.problem(FLAGS.problem).feature_encoders(FLAGS.data_dir) has_inputs = "inputs" in encoders # Prepare features for feeding into the model. if has_inputs: inputs_ph = tf.placeholder(dtype=tf.int32) # Just length dimension. batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1]) # Make it 4D. targets_ph = tf.placeholder(dtype=tf.int32) # Just length dimension. batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1]) # Make it 4D. if has_inputs: features = {"inputs": batch_inputs, "targets": batch_targets} else: features = {"targets": batch_targets} # Prepare the model and the graph when model runs on features. model = registry.model(FLAGS.model)(hparams, tf.estimator.ModeKeys.EVAL) _, losses = model(features) saver = tf.train.Saver() with tf.Session() as sess: # Load weights from checkpoint. if FLAGS.checkpoint_path is None: ckpts = tf.train.get_checkpoint_state(FLAGS.output_dir) ckpt = ckpts.model_checkpoint_path else: ckpt = FLAGS.checkpoint_path saver.restore(sess, ckpt) # Run on each line. with tf.gfile.Open(filename) as f: lines = f.readlines() results = [] for line in lines: tab_split = line.split("\t") if len(tab_split) > 2: raise ValueError("Each line must have at most one tab separator.") if len(tab_split) == 1: targets = tab_split[0].strip() else: targets = tab_split[1].strip() inputs = tab_split[0].strip() # Run encoders and append EOS symbol. targets_numpy = encoders["targets"].encode( targets) + [text_encoder.EOS_ID] if has_inputs: inputs_numpy = encoders["inputs"].encode(inputs) + [text_encoder.EOS_ID] # Prepare the feed. if has_inputs: feed = {inputs_ph: inputs_numpy, targets_ph: targets_numpy} else: feed = {targets_ph: targets_numpy} # Get the score. np_loss = sess.run(losses["training"], feed) results.append(np_loss) return results
62cac29e72bf5e7651cd8a258fb4151b6d7e1ff4
3,644,704
import responses
from renku.core.commands.providers.dataverse import DATAVERSE_API_PATH, DATAVERSE_VERSION_API
from renku.core.commands.providers.doi import DOI_BASE_URL
import json
import re
import urllib
import pathlib
def doi_responses():
    """Responses for doi.org requests.

    Registers two mocked endpoints and yields the active RequestsMock:
    * any GET under DOI_BASE_URL returns a fixed citeproc-style JSON record
      (pointing at either a Dataverse or a Zenodo landing URL), and
    * the Dataverse version API returns a fixed server-version payload.
    """
    with responses.RequestsMock(assert_all_requests_are_fired=False) as response:

        def doi_callback(request):
            # Zenodo DOIs resolve to a Zenodo record; everything else to the
            # Harvard Dataverse citation page.
            response_url = "https://dataverse.harvard.edu/citation" "?persistentId=doi:10.11588/data/yyxx1122"
            if "zenodo" in request.url:
                response_url = "https://zenodo.org/record/3363060"
            return (
                200,
                {"Content-Type": "application/json"},
                json.dumps(
                    {
                        "type": "dataset",
                        "id": request.url,
                        "author": [{"family": "Doe", "given": "John"}],
                        "contributor": [{"contributorType": "ContactPerson", "family": "Doe", "given": "John"}],
                        "issued": {"date-parts": [[2019]]},
                        "abstract": "Test Dataset",
                        "DOI": "10.11588/data/yyxx1122",
                        "publisher": "heiDATA",
                        "title": "dataset",
                        "URL": response_url,
                    }
                ),
            )

        response.add_callback(
            method="GET", url=re.compile("{base_url}/.*".format(base_url=DOI_BASE_URL)), callback=doi_callback
        )

        def version_callback(request):
            return (
                200,
                {"Content-Type": "application/json"},
                json.dumps({"status": "OK", "data": {"version": "4.1.3", "build": "abcdefg"}}),
            )

        # Build the Dataverse version-API URL pattern from its base URL.
        base_url = "https://dataverse.harvard.edu"

        url_parts = list(urllib.parse.urlparse(base_url))
        url_parts[2] = pathlib.posixpath.join(DATAVERSE_API_PATH, DATAVERSE_VERSION_API)
        pattern = "{url}.*".format(url=urllib.parse.urlunparse(url_parts))

        response.add_callback(method="GET", url=re.compile(pattern), callback=version_callback)
        yield response
8e467910f2d9ad4df06ff0ecb11c0812e7dc3bb5
3,644,705
def lat_lng_to_tile_xy(latitude, longitude, level_of_detail):
    """gives you zxy tile coordinate for given latitude, longitude
    WGS-84 coordinates (in decimal degrees)
    """
    # Project to pixel space first, then map pixels onto the tile grid.
    pixel_x, pixel_y = lat_lng_to_pixel_xy(latitude, longitude, level_of_detail)
    return pixel_xy_to_tile_xy(pixel_x, pixel_y)
99e2a4b3d9dee41222b434768bed5501e7561a40
3,644,706
def update_inverse_jacobian(previous_inv_jac, dx, df, threshold=0, modify_in_place=True):
    """
    Use Broyden method (following Numerical Recipes in C, 9.7) to update inverse Jacobian
    current_inv_jac is previous inverse Jacobian (n x n)
    dx is delta x for last step (n)
    df is delta errors for last step (n)

    threshold: minimum |denominator| for the update to be applied; a value
        of 0 (the default) falls back to the module constant MIN_DENOM.
        NOTE(review): `abs(threshold) <= 0` only triggers for exactly 0, so
        a *negative* threshold silently disables the denominator guard.
    modify_in_place: when True the update is written into previous_inv_jac
        itself (the caller's array is mutated); when False a new array is
        returned and the input is left untouched.

    Returns:
        (inv_jac, updated) — the (possibly updated) inverse Jacobian and a
        bool telling whether the update was applied (False when the
        denominator was below threshold).
    """
    dot_dx_inv_j = np.dot(dx, previous_inv_jac)
    denom = np.dot(dot_dx_inv_j, df)
    if abs(threshold) <= 0:
        threshold = MIN_DENOM
    if abs(denom) < threshold:
        # Denominator too small: the rank-1 update would blow up, so skip it.
        return previous_inv_jac, False
    if modify_in_place:
        previous_inv_jac += np.outer((dx - np.dot(previous_inv_jac, df)), dot_dx_inv_j) / denom
        result = previous_inv_jac
    else:
        result = previous_inv_jac + np.outer((dx - np.dot(previous_inv_jac, df)), dot_dx_inv_j) / denom
    return result, True
4f6a0e3e3bdc25132fae2aa1df9d0bbcdd73c3b1
3,644,707
def _ConvertStack(postfix):
  """Convert postfix stack to infix string.

  NOTE: this code is Python 2 (it uses ``basestring``).

  Arguments:
    postfix: A stack in postfix notation. The postfix stack will be modified
    as elements are being popped from the top.

  Raises:
    ValueError: There are not enough arguments for functions/operators.
    (In practice a BigqueryInvalidQueryError is raised — see below.)

  Returns:
    A string of the infix represetation of the stack.
  """
  if not postfix:
    raise bigquery_client.BigqueryInvalidQueryError(
        'Not enough arguments.', None, None, None)

  top = postfix.pop()
  if isinstance(top, util.OperatorToken):
    # Operators: recurse for each operand (popped right-to-left).
    args = []
    for unused_i in range(top.num_args):
      args.append(_ConvertStack(postfix))
    args.reverse()
    if top.num_args == 1:
      return '%s %s' % (str(top), args[0])
    else:
      return '(%s %s %s)' % (args[0], str(top), args[1])
  elif isinstance(top, util.BuiltInFunctionToken):
    # Built-in functions: arity determined by membership tables.
    func_name = str(top)
    if func_name in _ZERO_ARGUMENT_FUNCTIONS:
      return '%s()' % func_name
    elif func_name in _ONE_ARGUMENT_FUNCTIONS:
      op = _ConvertStack(postfix)
      return '%s(%s)' % (func_name, op)
    elif func_name in _TWO_ARGUMENT_FUNCTIONS:
      op2 = _ConvertStack(postfix)
      op1 = _ConvertStack(postfix)
      return '%s(%s, %s)' % (top, op1, op2)
    elif func_name in _THREE_ARGUMENT_FUNCTIONS:
      op3 = _ConvertStack(postfix)
      op2 = _ConvertStack(postfix)
      op1 = _ConvertStack(postfix)
      return '%s(%s, %s, %s)' % (top, op1, op2, op3)
    else:
      raise bigquery_client.BigqueryInvalidQueryError(
          'Function %s does not exist.' % str(top), None, None, None)
  elif isinstance(top, util.AggregationFunctionToken):
    # Aggregations carry their own argument count.
    num_args = top.num_args
    func_name = str(top)
    ops = []
    for unused_i in range(int(num_args)):
      ops.append(_ConvertStack(postfix))
    ops.reverse()
    if func_name == 'DISTINCTCOUNT':
      # DISTINCTCOUNT(x) is rendered as COUNT(DISTINCT x).
      func_name = 'COUNT'
      ops[0] = 'DISTINCT ' + ops[0]
    ops = [str(op) for op in ops]
    return func_name + '(' + ', '.join(ops) + ')'
  elif not isinstance(top, basestring):
    return str(top)
  else:
    return top
d69f4a503a84efedbf623cca24c496f5540d1b77
3,644,708
import re
def count_repeats_for_motif(seq, motif, tally, intervals=None):
    """
    Record every occurrence of `motif` in `seq` and return the gaps between
    occurrences.

    seq --- plain sequence to search for the repeats (motifs)
    motif --- plain sequence of repeat, ex: CGG, AGG
    tally --- dict mapping motif -> list; each match's absolute start
        position is appended to tally[motif] (mutated in place)
    intervals --- Intervals (0-based start, exclusive end) to search motif in

    Returns the list of Intervals covering the regions *between* matches
    (intervals without any match are kept unchanged).
    """
    if intervals is None:  # use the whole sequence
        intervals = [Interval(0, len(seq))]

    new_intl = []
    for intl in intervals:
        cur = seq[intl.start:intl.end]
        prev_end = intl.start
        found_flag = False
        for m in re.finditer(motif, cur):
            # m.start() is relative to `cur`; store the absolute position.
            tally[motif].append(intl.start + m.start())
            if m.start() > prev_end:
                # Gap interval: [prev_end, match start), both in absolute
                # coordinates (start inclusive, end exclusive).
                new_intl.append(Interval(prev_end, intl.start + m.start()))
            prev_end = intl.start + m.end()
            found_flag = True
        if not found_flag:
            new_intl.append(intl)
    return new_intl
2a29339555374aaeb70ea07872a81a56050a9f36
3,644,709
def press_level(pressure, heights, plevels, no_time=False):
    """
    Calculates geopotential heights at a given pressure level

    Parameters
    ----------
    pressure : numpy.ndarray
        The pressure field: (time, z, y, x), or (z, y, x) with no_time=True
    heights : numpy.ndarray
        The 3-D array of gridbox heights, (z, y, x)
    plevels : list
        List of pressure levels to interpolate to
    no_time : bool
        Optional, set to `True` to indicate lack of time dimension.

    Returns
    -------
    press_height : numpy.ndarray
        The geopotential heights at the specified pressure levels
    """
    if no_time is False:
        try:
            tlen, zlen, ylen, xlen = pressure.shape
            press_height = np.zeros((tlen, ylen, xlen))
            for t in range(0, tlen):
                for x in range(0, xlen):
                    for y in range(0, ylen):
                        press_height[t, y, x] = \
                            log_interpolate_1d(plevels,
                                               pressure[t, :, y, x],
                                               heights[:, y, x])
        except ValueError:
            # Shape unpacking failed: retry assuming no time dimension.
            print("Error in dimensions, trying with no_time=True")
            no_time = True
    # Plain `if` (not elif) so the fallback above actually runs; the
    # original's `elif` made the retry unreachable.
    if no_time is True:
        try:
            # Fix: the original unpacked `xlen, ylen, xlen` (dropping zlen)
            # and indexed with an undefined `t`.
            zlen, ylen, xlen = pressure.shape
            press_height = np.zeros((ylen, xlen))
            for x in range(0, xlen):
                for y in range(0, ylen):
                    press_height[y, x] = \
                        log_interpolate_1d(plevels,
                                           pressure[:, y, x],
                                           heights[:, y, x])
        except ValueError:
            print("Error in dimensions")
    return press_height
7cae6fe91eb1f6ad4171006633d04744909849c5
3,644,711
def valtoindex(thearray, thevalue, evenspacing=True):
    """
    Parameters
    ----------
    thearray: array-like
        An ordered list of values (does not need to be equally spaced)
    thevalue: float
        The value to search for in the array
    evenspacing: boolean, optional
        If True (default), assume data is evenly spaced for faster calculation.

    Returns
    -------
    closestidx: int
        The index of the sample in thearray that is closest to val
    """
    if evenspacing:
        # Clamp the value into the array's range, then derive the index
        # directly from the (assumed constant) spacing.
        clamped = np.max([thearray[0], np.min([thearray[-1], thevalue])])
        step = thearray[1] - thearray[0]
        return int(np.round((clamped - thearray[0]) / step, 0))
    # General case: brute-force nearest neighbour.
    return (np.abs(thearray - thevalue)).argmin()
5540023c77b544fbd91a724badf467981a0e0a5c
3,644,712
def get_converter(result_format, converters=None):
    """
    Gets a converter, returns the class and a content-type.

    :param result_format: key identifying the desired converter.
    :param converters: optional mapping of format -> converter; defaults to
        the registry returned by :func:`get_default_converters`.
    :raises ValueError: if no converter is registered for ``result_format``.
    """
    converters = get_default_converters() if converters is None else converters
    try:
        # Single EAFP lookup instead of the membership-test-then-get
        # double lookup of the original.
        return converters[result_format]
    except KeyError:
        raise ValueError('No converter found for type {}'.format(result_format)) from None
79ce7a728fb801922d672716aeb77dc76e270194
3,644,713
def _to_bool(s): """Convert a value into a CSV bool.""" if s.lower() == 'true': return True elif s.lower() == 'false': return False else: raise ValueError('String cannot be converted to bool')
3f6c31a07e7ba054e5c52f9d3c09fdd2f004fec5
3,644,715