def prepare_create_user_db():
    """Clear a user from the database so that it can be created."""
    username = TEST_USERS[0][0]
    connection = connect_db()
    connection.cursor().execute('DELETE FROM Users WHERE username=%s', (username,))
    connection.commit()
    close_db(connection)
    return username
beb1fd7a7f6c571f9d5e57a79d3b15c62a215789
3,645,533
import os

def _getlocal(ui, rpath):
    """Return (path, local ui object) for the given target path.

    Takes paths in [cwd]/.hg/hgrc into account.
    """
    try:
        wd = os.getcwd()
    except OSError as e:
        raise util.Abort(_("error getting current working directory: %s")
                         % e.strerror)
    path = cmdutil.findrepo(wd) or ""
    if not path:
        lui = ui
    else:
        lui = ui.copy()
        lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)

    if rpath and rpath[-1]:
        path = lui.expandpath(rpath[-1])
        lui = ui.copy()
        lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)

    return path, lui
4dc90dc62084e13c22b1a602fa20c552557e258c
3,645,534
import hashlib

def get_size_and_sha256(infile):
    """
    Returns the size and SHA256 checksum (as hex) of the given file.
    """
    h = hashlib.sha256()
    size = 0
    while True:
        chunk = infile.read(8192)
        if not chunk:
            break
        h.update(chunk)
        size += len(chunk)
    return (size, h.hexdigest())
32c37ca6762f9c62d806e22c991b60f9d60947f4
3,645,535
def cmServiceAbort():
    """CM SERVICE ABORT Section 9.2.7"""
    a = TpPd(pd=0x5)
    b = MessageType(mesType=0x23)  # 00100011
    packet = a / b
    return packet
1ae9744fd21760775a45066ffeb11d7dea12c127
3,645,536
def get_distribution(distribution_id):
    """
    Lists information about a specific distribution by id.

    :param distribution_id: Id of CDN distribution
    """
    cloudfront = CloudFront()
    return cloudfront.get_distribution(distribution_id=distribution_id)
082c572341435423cb42ec895369af7822caee80
3,645,537
import sqlite3

import pandas as pd

def get_information_per_topic(db_path: str, topic: str, field: str):
    """ Query all alert data monitoring rows for a given topic

    Parameters
    ----------
    db_path: str
        Path to the monitoring database. The database will be created
        if it does not exist yet.
    topic: str
        Topic name of a stream
    field: str
        Field for which you want the data.

    Returns
    ----------
    alert_id: list
        Values of `field` for all matching alert rows.

    Examples
    ----------
    >>> df = get_information_per_topic(db_fn, "tutu", "objectId")
    >>> print(len(df))
    1
    """
    con = sqlite3.connect(db_path)

    statement = f"SELECT {field} FROM `{ALERT_TABLE}` WHERE topic = '{topic}';"

    # catch error if the DB is empty
    try:
        df = pd.read_sql_query(statement, con)
        alert_id = list(df[field])
    except pd.io.sql.DatabaseError as e:
        print(e)
        alert_id = [""]

    return alert_id
97e95942506d15f1604d026c2a9954408ea01c29
3,645,538
from requests import get as reqget  # assumed alias: the body uses resp.text, matching requests

def get_html_from_url(url, timeout=None):
    """Get HTML document from URL

    Parameters
        url (str) : URL to look for
        timeout (float) : Inactivity timeout in seconds

    Return
        The HTML document as a string
    """
    resp = reqget(url, timeout=timeout)
    return resp.text
f909db702c812be029f00dd73bfaef8ac48966ba
3,645,541
def clean(column, output_column=None, file_path=None, df=None,
          symbols='!@#$%^&*()+={}[]:;’\”/<>', replace_by_space=True,
          keep_original=False):
    """
    Cleans the cell values in a column, creating a new column with the clean
    values.

    Args:
        column: the column to be cleaned.
        output_column: the name of the column where cleaned column values are
            stored. If not provided, the name of the new column is the name of
            the input column with the suffix _clean.
        file_path: input file path
        df: or input dataframe
        symbols: a string containing the set of characters to be removed;
            default is "!@#$%^&*()+={}[]:;'\"/<>"
        replace_by_space: when True (default) all instances of the symbols are
            replaced by a space. In case of removal of multiple consecutive
            characters, they'll be replaced by a single space. The value False
            causes the symbols to be deleted.
        keep_original: when True, the output column will contain the original
            value and the clean value will be appended, separated by |.
            Default is False.

    Returns: a dataframe with the new output column containing clean values
    """
    if file_path is None and df is None:
        raise RequiredInputParameterMissingException(
            'One of the input parameters is required: {} or {}'.format(file_path, df))

    symbols = list(symbols)
    if output_column is None:
        output_column = '{}_clean'.format(column)

    if file_path:
        df = pd.read_csv(file_path)

    df[output_column] = df[column].map(
        lambda x: string_clean(x, symbols, replace_by_space, keep_original))
    return df
575d30a704c9ad37c027251ef609ef9c70445139
3,645,542
def len_lt(name, value):
    """
    Only succeed if the length of the given register location is less than
    the given value.

    USAGE:

    .. code-block:: yaml

        foo:
          check.len_lt:
            - value: 42

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping
            - require:
              - check: foo
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    if name not in __reg__:
        ret["result"] = False
        ret["comment"] = "Value {0} not in register".format(name)
        return ret
    if len(__reg__[name]["val"]) < value:
        ret["result"] = True
    return ret
fde2db2e73d7ac711677b33518b6d5342b5dcbdb
3,645,543
from collections.abc import Iterable

import numpy as np

def _ll_to_xy(latitude, longitude, wrfin=None, timeidx=0,
              stagger=None, method="cat", squeeze=True, cache=None,
              _key=None, as_int=True, **projparams):
    """Return the x,y coordinates for a specified latitude and longitude.

    The *latitude* and *longitude* arguments can be a single value or a
    sequence of values.

    The leftmost dimension of the returned array represents two different
    quantities:

        - return_val[0,...] will contain the X (west_east) values.
        - return_val[1,...] will contain the Y (south_north) values.

    Args:
        latitude (:obj:`float` or sequence): A single latitude or a sequence
            of latitude values to be converted.

        longitude (:obj:`float` or sequence): A single longitude or a
            sequence of longitude values to be converted.

        wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an
            iterable): WRF-ARW NetCDF data as a :class:`netCDF4.Dataset`,
            :class:`Nio.NioFile` or an iterable sequence of the
            aforementioned types.

        timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): The desired
            time index. This value can be a positive integer, negative
            integer, or :data:`wrf.ALL_TIMES` (an alias for None) to return
            all times in the file or sequence. The default is 0.

        stagger (:obj:`str`): By default, the latitude and longitude are
            returned on the mass grid, but a staggered grid can be chosen
            with the following options:

                - 'm': Use the mass grid (default).
                - 'u': Use the same staggered grid as the u wind component,
                  which has a staggered west_east (x) dimension.
                - 'v': Use the same staggered grid as the v wind component,
                  which has a staggered south_north (y) dimension.

        method (:obj:`str`, optional): The aggregation method to use for
            sequences. Must be either 'cat' or 'join'. 'cat' combines the
            data along the Time dimension. 'join' creates a new dimension
            for the file index. The default is 'cat'.

        squeeze (:obj:`bool`, optional): Set to False to prevent dimensions
            with a size of 1 from being automatically removed from the shape
            of the output. Default is True.

        cache (:obj:`dict`, optional): A dictionary of (varname, ndarray)
            that can be used to supply pre-extracted NetCDF variables to the
            computational routines. It is primarily used for internal
            purposes, but can also be used to improve performance by
            eliminating the need to repeatedly extract the same variables
            used in multiple diagnostics calculations, particularly when
            using large sequences of files. Default is None.

        _key (:obj:`int`, optional): A caching key. This is used for
            internal purposes only. Default is None.

        as_int (:obj:`bool`): Set to True to return the x,y values as
            :obj:`int`, otherwise they will be returned as :obj:`float`.

        **projparams: Map projection keyword arguments to set manually.

    Returns:
        :class:`xarray.DataArray` or :class:`numpy.ndarray`: The x,y
        coordinate value(s) whose leftmost dimension is 2 (0=X, 1=Y). If
        xarray is enabled and the *meta* parameter is True, then the result
        will be a :class:`xarray.DataArray` object. Otherwise, the result
        will be a :class:`numpy.ndarray` object with no metadata.
    """
    if wrfin is not None:
        (map_proj, truelat1, truelat2, stdlon, ref_lat, ref_lon,
         pole_lat, pole_lon, known_x, known_y, dx, dy, latinc,
         loninc) = _get_proj_params(wrfin, timeidx, stagger, method,
                                    squeeze, cache, _key)
    else:
        (map_proj, truelat1, truelat2, stdlon, ref_lat, ref_lon,
         pole_lat, pole_lon, known_x, known_y, dx, dy, latinc,
         loninc) = _kwarg_proj_params(**projparams)

    if isinstance(latitude, Iterable):
        lats = np.asarray(latitude)
        lons = np.asarray(longitude)

        # Note: For scalars, this will make a single element array
        lats = lats.ravel()
        lons = lons.ravel()

        if (lats.size != lons.size):
            raise ValueError("'latitude' and 'longitude' "
                             "must be the same length")

        if ref_lat.size == 1:
            outdim = [2, lats.size]
            extra_dims = [outdim[1]]
        else:
            # Moving domain will have moving ref_lats/ref_lons
            outdim = [2, ref_lat.size, lats.size]
            extra_dims = outdim[1:]

        result = np.empty(outdim, np.float64)

        for left_idxs in iter_left_indexes(extra_dims):
            # Left indexes is a misnomer, since these will be on the right
            x_idxs = (0,) + left_idxs
            y_idxs = (1,) + left_idxs
            if ref_lat.size == 1:
                ref_lat_val = ref_lat[0]
                ref_lon_val = ref_lon[0]
            else:
                ref_lat_val = ref_lat[left_idxs[-2]]
                ref_lon_val = ref_lon[left_idxs[-2]]

            lat = lats[left_idxs[-1]]
            lon = lons[left_idxs[-1]]

            xy = _lltoxy(map_proj, truelat1, truelat2, stdlon,
                         ref_lat_val, ref_lon_val, pole_lat, pole_lon,
                         known_x, known_y, dx, dy, latinc, loninc,
                         lat, lon)

            # Note: comes back from fortran as y,x
            result[x_idxs] = xy[1]
            result[y_idxs] = xy[0]
    else:
        result = np.empty((2,), np.float64)

        fort_out = _lltoxy(map_proj, truelat1, truelat2, stdlon,
                           ref_lat, ref_lon, pole_lat, pole_lon,
                           known_x, known_y, dx, dy, latinc, loninc,
                           latitude, longitude)

        # Note, comes back from fortran as y,x. So, need to swap them.
        result[0] = fort_out[1]
        result[1] = fort_out[0]

    # Make indexes 0-based
    result = result - 1

    if as_int:
        result = np.rint(result).astype(int)

    return result
9d96d0d6e520731f16079c69389eff0c47c70dce
3,645,544
import numpy as np

def onedsinusoid(x, H, A, omega, phi):
    """
    Returns a 1-dimensional sinusoid of form H + A*np.sin(omega*x + phi),
    where phi is given in degrees.
    """
    phi = np.pi / 180 * phi  # degrees -> radians
    return H + A * np.sin(omega * x + phi)
9917b462a6cd39c84a354d031ad8c6a09afcdec0
3,645,545
from math import ceil, log10

def number_in_english(number):
    """Returns the given number in words

    >>> number_in_english(0)
    'zero'
    >>> number_in_english(5)
    'five'
    >>> number_in_english(11)
    'eleven'
    >>> number_in_english(745)
    'seven hundred and fourty five'
    >>> number_in_english(1380)
    'one thousand three hundred and eighty'
    >>> number_in_english(3204000)
    'three million two hundred four thousand'
    >>> number_in_english(15000)
    'fifteen thousand'
    >>> number_in_english(1005)
    'one thousand and five'
    """
    if not number:
        return 'zero'

    # split number into blocks of 3
    # e.g. 1234567 -> ['567', '234', '1']
    numBlocks = int(ceil((log10(number) + 1) / 3))  # number of digits / 3
    number_split = [(number // 1000**x) % 1000 for x in range(numBlocks)]

    # translate each block individually and add the word for the power,
    # starting with the lowest power
    word = ''
    for n, p in zip(number_split, powers):
        if n:
            # only the tenner block can have an 'and' (e.g. 'one hundred and
            # five' but not 'one million and one thousand')
            word = _hundred_in_english(n, (p == '')) + ' ' + p + ' ' + word

    # remove 'and' that was added but is not preceded by a number
    # (e.g. 5 -> 'and five')
    if word.startswith('and'):
        word = word.replace('and', '')

    return word.strip()
b3d580ed843d5d4bf3c62662c831391536e7479e
3,645,547
def create_app(environment):
    """Factory method that creates an instance of the app with the given config.

    Args:
        environment (str): Specify the configuration to initialize the app with.

    Returns:
        app (Flask): it returns an instance of Flask.
    """
    app = Flask(__name__)
    app.config.from_object(env_configuration[environment])
    db.init_app(app)

    api = Api(
        app=app,
        default='Api',
        default_label="Available Endpoints",
        title='MovieBuff API',
        version='2.0.0',
        description="""MovieBuff Api Endpoint Documentation 📚"""
    )

    # enable cross origin resource sharing
    CORS(app)

    api.add_resource(Users, "/api/v2/auth/<string:operation>", endpoint="user")
    api.add_resource(Movies, "/api/v2/movie", endpoint="movie")
    api.add_resource(Categories,
                     "/api/v2/movie/category",
                     "/api/v2/movie/category/<string:category_id>",
                     endpoint="category")
    api.add_resource(UserMovieRatings, "/api/v2/movie/ratings", endpoint="ratings")
    api.add_resource(Search, "/api/v2/movie/search", endpoint="search")

    # handle default 404 exceptions
    @app.errorhandler(404)
    def resource_not_found(error):
        response = jsonify(dict(
            error='Not found',
            message='The requested URL was not found on the server.'))
        response.status_code = 404
        return response

    # handle default 500 exceptions
    @app.errorhandler(500)
    def internal_server_error(error):
        response = jsonify(dict(
            error='Internal server error',
            message="The server encountered an internal error."))
        response.status_code = 500
        return response

    return app
5cd5867a80ec696ee2a5647448c8e8b60fe2e023
3,645,548
def heating_design_temp(tmy_id):
    """Returns the heating design temperature (deg F) for the TMY3 site
    identified by 'tmy_id'.
    """
    return df_tmy_meta.loc[tmy_id].heating_design_temp
204e219840ed5d2e04e9bb53706883d0fc1c6cfa
3,645,549
def tonal_int(x):
    """
    >>> tonal_int((4,7))
    7
    >>> tonal_int((4,7,2))
    31
    >>> tonal_int((6,11,-1))
    -1
    >>> tonal_int((0,-1,-1))
    -13
    >>> tonal_int((6,0,0))
    12
    >>> tonal_int((0,11,0))
    -1
    >>> tonal_int((0,11))
    -1
    >>> tonal_int((2, 0))
    0
    """
    if len(x) == 2:
        x = _tonal_unmodulo(x)
        return x[1]

    d = x[0]
    c = x[1]
    base_c = MS[d].c

    # Example: Cb --- base=0 c=11 c-base=11   11 - 12 = -1
    if c - base_c > 3:
        c = c - C_LEN

    # Example: B# --- base=11 c=0 c-base=-11  c+C_LEN = 12
    if c - base_c < -3:
        c = c + C_LEN

    return c + x[2] * C_LEN
c7fb8dfd7ac5c82a81241efb807a7e45b877eee4
3,645,550
def DrtVariableExpression(variable):
    """
    This is a factory method that instantiates and returns a subtype of
    ``DrtAbstractVariableExpression`` appropriate for the given variable.
    """
    if is_indvar(variable.name):
        return DrtIndividualVariableExpression(variable)
    elif is_funcvar(variable.name):
        return DrtFunctionVariableExpression(variable)
    elif is_eventvar(variable.name):
        return DrtEventVariableExpression(variable)
    else:
        return DrtConstantExpression(variable)
a37b6e3f295e603d4ee78007dc4d4a22d22d1c3f
3,645,553
import pandas as pd

def process_cv_results(cv_results):
    """
    This function reformats the .cv_results_ attribute of a fitted randomized
    search (or grid search) into a dataframe with only the columns we care
    about.

    Args
    --------------
    cv_results : the .cv_results_ attribute of a fitted randomized search
        (or grid search) object

    Returns
    --------------
    a sorted dataframe with select information
    """
    results = pd.DataFrame(cv_results)
    cols = ['mean_test_score', 'mean_train_score', 'std_test_score']
    if 'mean_train_score' not in cv_results.keys():
        cols = ['mean_test_score', 'std_test_score']
    cols += [c for c in results.columns.values if c.startswith('param_')]
    return results[cols].sort_values(by='mean_test_score', ascending=False)
a47b9cbc3fcb00f782eb46269f55259995d4b73c
3,645,554
import numpy as np

def cbow(currentWord, C, contextWords, tokens, inputVectors, outputVectors,
         dataset, word2vecCostAndGradient=softmaxCostAndGradient):
    """ CBOW model in word2vec """
    # Implement the continuous bag-of-words model in this function.
    # Input/Output specifications: same as the skip-gram model
    # We will not provide starter code for this function, but feel
    # free to reference the code you previously wrote for this
    # assignment!
    #################################################################
    # IMPLEMENTING CBOW IS EXTRA CREDIT, DERIVATIONS IN THE WRITTEN #
    # ASSIGNMENT ARE NOT!                                           #
    #################################################################
    cost = 0
    gradIn = np.zeros(inputVectors.shape)
    gradOut = np.zeros(outputVectors.shape)

    ### YOUR CODE HERE
    # raise NotImplementedError
    ### END YOUR CODE

    return cost, gradIn, gradOut
5766b3c2facba8272431796b46da8abbd7264292
3,645,555
import yaml

def generate_dlf_yaml(in_yaml):
    """
    Generate DLF-compatible YAML configuration file using
    "templates/dlf_out.yaml" as template.

    :param in_yaml: dict representation of a YAML document defining
        placeholder values in "templates/dlf_out.yaml"
    :type in_yaml: dict
    :raises PlaceholderNotFoundError: a {{...}} placeholder referenced in
        "templates/dlf_out.yaml" was not found
    :raises ValueError: in_yaml is not of type dict
    :return: DLF-compatible YAML file
    :rtype: str
    """
    dlf_yaml_dict = generate_dlf_yaml_dict(in_yaml)
    dlf_yaml = yaml.safe_dump(dlf_yaml_dict,
                              default_flow_style=False,
                              allow_unicode=True,
                              sort_keys=False)
    return dlf_yaml
c3bdf86731eb26904cae95b65c5b6181cc130ae8
3,645,556
def dice_coefficient(x, target):
    """
    Dice loss: 1 - 2 * (intersection(A, B) / (A^2 + B^2))

    :param x: predicted masks, shape (n_inst, ...)
    :param target: ground-truth masks, same shape as x
    :return: per-instance dice loss, shape (n_inst,)
    """
    eps = 1e-5
    n_inst = x.size(0)
    x = x.reshape(n_inst, -1)
    target = target.reshape(n_inst, -1)
    intersection = (x * target).sum(dim=1)
    union = (x ** 2.0).sum(dim=1) + (target ** 2.0).sum(dim=1) + eps
    loss = 1. - (2 * intersection / union)
    return loss
c73cd86ed11bf89d94fb84db16186d6ace39d814
3,645,557
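A small usage sketch for dice_coefficient with illustrative shapes (two instances of 8x8 soft masks):

import torch

pred = torch.rand(2, 8, 8)                # predicted soft masks in [0, 1]
gt = (torch.rand(2, 8, 8) > 0.5).float()  # binary ground-truth masks
loss = dice_coefficient(pred, gt)
print(loss.shape)  # torch.Size([2]) -- one loss value per instance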
import numpy as np

def batch_intersection_union(output, target, nclass):
    """mIoU"""
    # inputs are numpy arrays; output is 4D, target is 3D
    predict = np.argmax(output, axis=1) + 1  # [N,H,W]
    target = target.astype(float) + 1        # [N,H,W]

    predict = predict.astype(float) * (target > 0).astype(float)
    intersection = predict * (predict == target).astype(float)
    # areas of intersection and union; unlike np.bincount, the histogram
    # range starting at 1 excludes the 0 entries in intersection
    area_inter, _ = np.histogram(intersection, bins=nclass, range=(1, nclass + 1))
    area_pred, _ = np.histogram(predict, bins=nclass, range=(1, nclass + 1))
    area_lab, _ = np.histogram(target, bins=nclass, range=(1, nclass + 1))
    area_all = area_pred + area_lab
    area_union = area_all - area_inter
    return area_inter, area_union
a62596ee500ec7525ceefeb6e6de0fd6673c522d
3,645,558
import torch

def convert(trainset, testset, seed=1, batch_size=128,
            num_workers=2, pin_memory=True):
    """ Converts DataSet objects to DataLoaders """
    cuda = torch.cuda.is_available()
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)

    # use the provided arguments instead of hard-coded values
    dataloader_args = (dict(shuffle=True, batch_size=batch_size,
                            num_workers=num_workers, pin_memory=pin_memory)
                       if cuda else dict(shuffle=True, batch_size=64))

    trainloader = torch.utils.data.DataLoader(trainset, **dataloader_args)
    testloader = torch.utils.data.DataLoader(testset, **dataloader_args)
    return trainloader, testloader
c380caa064b07ffc108ae33acc98361910b8f28f
3,645,559
import numpy as np
from matplotlib import cm
from tensorflow import keras

def build_gradcam(img_path, heatmap, color_map, original_image_colormap, alpha=0.5):
    """ Builds the Grad-CAM overlay.

    Args:
        img_path (str): Image path.
        heatmap (np.ndarray): Heatmap with values in [0, 1].
        color_map (str): Matplotlib color map name.
        original_image_colormap (str): Color mode used to load the original image.
        alpha (float, optional): Alpha. Defaults to 0.5.

    Returns:
        PIL.Image: Grad-CAM overlay image.
    """
    img = keras.preprocessing.image.load_img(img_path, color_mode=original_image_colormap)
    img = keras.preprocessing.image.img_to_array(img)
    heatmap = np.uint8(255 * heatmap)
    jet = cm.get_cmap(color_map)
    jet_colors = jet(np.arange(256))[:, :3]
    jet_heatmap = jet_colors[heatmap]
    jet_heatmap = keras.preprocessing.image.array_to_img(jet_heatmap)
    jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))
    jet_heatmap = keras.preprocessing.image.img_to_array(jet_heatmap)
    superimposed_img = jet_heatmap * alpha + img
    superimposed_img = keras.preprocessing.image.array_to_img(superimposed_img)
    return superimposed_img
c71c24cc3fccc962b1083c1491e8da0fae9464ed
3,645,560
def sample_joint_comorbidities(age, country):
    """
    Default country is China. For other countries pass a value for country
    from {us, Republic of Korea, japan, Spain, italy, uk, France}
    """
    return sample_joint(age,
                        p_comorbidity(country, 'diabetes'),
                        p_comorbidity(country, 'hypertension'))
8190e73ccd637b78270a974773259c9bb4367fd5
3,645,561
import pydoc

def locate(name):
    """ Locate the object for the given name """
    obj = pydoc.locate(name)
    if not obj:
        obj = globals().get(name, None)
    return obj
24f31b241ffcbd2e983889f209bff9a1ff8b1fc3
3,645,563
import numpy as np

from stentseg.utils import PointSet
from stentseg.utils.centerline import points_from_mesh

def get_mesh_deforms(mesh, deforms, origin, **kwargs):
    """ input : mesh object
                deforms forward for mesh?!
                origin (from volume)
        output: PointSet of mesh vertices (duplicates removed) and list with
                deforms (PointSets) of mesh vertices
    """
    # for vertice in mesh._vertices:
    #     vertice[-1] = vertice[-1]*-1  # x,y,z with z flipped
    # # Turn surface points into a pointset
    # pp = PointSet(3, dtype='float32')
    # [pp.append(*p) for p in mesh._vertices]
    pp = points_from_mesh(mesh, **kwargs)  # removes duplicates

    # Get deformation for all points
    pp_deforms = []
    samplePoints = pp - PointSet([o for o in reversed(origin)], dtype='float32')
    for deform in deforms:
        delta_z = deform.get_field_in_points(samplePoints, 0).reshape(-1, 1)
        delta_y = deform.get_field_in_points(samplePoints, 1).reshape(-1, 1)
        delta_x = deform.get_field_in_points(samplePoints, 2).reshape(-1, 1)
        delta = PointSet(np.concatenate((delta_x, delta_y, delta_z), axis=1))
        pp_deforms.append(delta)

    return pp, pp_deforms
710530c68d46e03d14eabc83db1fa448e76ebc2e
3,645,564
import numpy as np
from scipy import stats

def lnLikelihoodDouble(parameters, values, errors, weights=None):
    """
    Calculates the total log-likelihood of an ensemble of values, with
    uncertainties, for a double Gaussian distribution (two means and two
    dispersions).

    INPUTS
      parameters : model parameters (see below)
      values : data values
      errors : data uncertainties

    OPTIONS
      weights : weights on each data point [default: None, ie unweighted]

    PARAMETERS
      mean1 : model mean 1
      dispersion1 : model dispersion 1
      mean2 : model mean 2
      dispersion2 : model dispersion 2
      f : fraction of component 1
    """
    mean1, dispersion1, mean2, dispersion2, f = parameters

    # insist that mean1 is less than mean2 or solution is degenerate
    if mean1 > mean2:
        return -np.inf

    # check for unit consistency
    if getattr(mean1, "unit", None) is not None \
            and getattr(dispersion1, "unit", None) is not None \
            and getattr(mean2, "unit", None) is not None \
            and getattr(dispersion2, "unit", None) is not None \
            and getattr(values, "unit", None) is not None \
            and getattr(errors, "unit", None) is not None:
        mean1 = mean1.to(values.unit)
        dispersion1 = dispersion1.to(values.unit)
        mean2 = mean2.to(values.unit)
        dispersion2 = dispersion2.to(values.unit)
        errors = errors.to(values.unit)

    # require positive dispersions
    dispersion1 = np.abs(dispersion1)
    dispersion2 = np.abs(dispersion2)

    # likelihood of each data point
    conv_dispersion1 = np.sqrt(dispersion1**2 + errors**2)
    conv_dispersion2 = np.sqrt(dispersion2**2 + errors**2)
    likelihoods = f * stats.norm.pdf(values, mean1, conv_dispersion1) \
        + (1 - f) * stats.norm.pdf(values, mean2, conv_dispersion2)

    # check that all are positive (should be!) and non-zero
    if np.all(likelihoods <= 0):
        return -np.inf

    # set zeros (or negatives) to the lowest non-zero value
    likelihoods[likelihoods <= 0] = likelihoods[likelihoods > 0].min() * 1e-5

    # and take the log
    ln_likelihoods = np.log(likelihoods)

    # multiply by weights
    if weights is not None:
        ln_likelihoods *= weights

    # remove -infinities
    ln_likelihoods[ln_likelihoods == -np.inf] \
        = ln_likelihoods[ln_likelihoods > -np.inf].min()

    # total likelihood
    total_ln_likelihood = np.sum(ln_likelihoods)

    # renormalise by weights
    if weights is not None:
        total_ln_likelihood *= np.size(ln_likelihoods) / np.sum(weights)

    return total_ln_likelihood
a387d0f8c52b380c57c4cd86ba06111c187db7b8
3,645,566
import urllib.parse

def moleculeEntry(request, adjlist):
    """
    Returns an html page which includes the image of the molecule and its
    corresponding adjacency list/SMILES/InChI, as well as molecular weight
    info and a button to retrieve thermo data.

    Basically works as an equivalent of the molecule search function.
    """
    adjlist = urllib.parse.unquote(adjlist)

    try:
        molecule = Molecule().from_adjacency_list(adjlist)
    except Exception:
        return HttpResponseBadRequest('<h1>Bad Request (400)</h1><p>Invalid adjacency list.</p>')

    structure = getStructureInfo(molecule)
    mol_weight = molecule.get_molecular_weight()

    old_adjlist = ''
    try:
        old_adjlist = molecule.to_adjacency_list(remove_h=True, old_style=True)
    except Exception:
        pass

    smiles = ''
    try:
        smiles = molecule.to_smiles()
    except ValueError:
        pass

    inchi = ''
    try:
        inchi = molecule.to_inchi()
    except ValueError:
        pass

    return render(request, 'moleculeEntry.html',
                  {'structure': structure, 'smiles': smiles,
                   'adjlist': adjlist, 'mol_weight': mol_weight,
                   'old_adjlist': old_adjlist})
6a53812894b7150fc76444238597e8038f8ffa0c
3,645,567
from discord.ext import commands

def has_user_id(id: int):
    """Checks if the command author's ID is the same as the ID passed into
    the function"""
    def predicate(ctx) -> bool:
        if ctx.author.id == id:
            return True
        raise MissingID(id, "Author")
    return commands.check(predicate)
e83fff93f6ef3ebc06ebadc3470b1e74a18b3a39
3,645,568
def _polar_gbps(out, in_args, params, per_iter=False):
    """ `speed_function` for `benchmark` estimating the effective bandwidth
    of a polar decomposition in GB/s.

    The number of elements is estimated as 2 * the size of the input. For a
    matrix multiplication of dimensions `m, n, k` that took `dt` seconds, we
    define `GB/s := (GB of input + GB of output) / (1E9 * dt)`.
    """
    out_rows, out_cols, dtype = params[:3]
    if out_cols is None:
        out_cols = out_rows
    dt = out[0]
    n_elements = 2 * out_rows * out_cols
    result = benchmark_utils.gbps(n_elements, dtype, dt)
    header = "GB/s"
    return benchmark_utils.per_iter(per_iter, out[-1], result, header)
a0428dfc9df6c4dd7f9d25712c3894d71bcd1700
3,645,569
def is_FreeMonoid(x):
    """
    Return True if `x` is a free monoid.

    EXAMPLES::

        sage: from sage.monoids.free_monoid import is_FreeMonoid
        sage: is_FreeMonoid(5)
        False
        sage: is_FreeMonoid(FreeMonoid(7,'a'))
        True
        sage: is_FreeMonoid(FreeAbelianMonoid(7,'a'))
        False
        sage: is_FreeMonoid(FreeAbelianMonoid(0,''))
        False
    """
    return isinstance(x, FreeMonoid_class)
d4fae223bcdec1f365406b9fb3c546f56db38565
3,645,570
import requests

def get_quote_data(ticker):
    """Inputs: @ticker

    Returns a dictionary containing over 70 elements corresponding to the
    input ticker, including company name, book value, moving average data,
    pre-market / post-market price (when applicable), and more.
    """
    site = "https://query1.finance.yahoo.com/v7/finance/quote?symbols=" + ticker
    resp = requests.get(site)
    if not resp.ok:
        raise AssertionError(
            """Invalid response from server. Check if ticker is valid."""
        )
    json_result = resp.json()
    info = json_result["quoteResponse"]["result"]
    return info[0]
a23d7e091547ceca3c66f0ae90e84ea9f89d4e1c
3,645,571
from typing import Dict

import click

def _import_stack_component(
    component_type: StackComponentType, component_config: Dict[str, str]
) -> str:
    """Import a single stack component with the given type/config."""
    component_type = StackComponentType(component_type)
    component_name = component_config.pop("name")
    component_flavor = component_config.pop("flavor")

    # make sure component can be registered, otherwise ask for new name
    while True:
        # check if component already exists
        try:
            other_component = _get_component_as_dict(
                component_type, component_name
            )
        # component didn't exist yet, so we create it.
        except KeyError:
            break

        # check whether other component has exactly same config as export
        other_is_same = True
        for key, value in component_config.items():
            if key not in other_component or other_component[key] != value:
                other_is_same = False
                break

        # component already exists and is correctly configured -> done
        if other_is_same:
            return component_name

        # component already exists but with different config -> rename
        display_name = _component_display_name(component_type)
        component_name = click.prompt(
            f"A component of type '{display_name}' with the name "
            f"'{component_name}' already exists, "
            f"but is configured differently. "
            f"Please choose a different name.",
            type=str,
        )

    _register_stack_component(
        component_type=component_type,
        component_name=component_name,
        component_flavor=component_flavor,
        **component_config,
    )
    return component_name
ec03abab6b005f5047dd7963ab93b83f4f891140
3,645,572
def unpack_range(a_range):
    """Extract chromosome, start, end from a string or tuple.

    Examples::

        "chr1" -> ("chr1", None, None)
        "chr1:100-123" -> ("chr1", 99, 123)
        ("chr1", 100, 123) -> ("chr1", 100, 123)
    """
    if not a_range:
        return Region(None, None, None)
    if isinstance(a_range, str):
        if ':' in a_range and '-' in a_range:
            return from_label(a_range, keep_gene=False)
        return Region(a_range, None, None)
    if isinstance(a_range, (list, tuple)):
        if len(a_range) == 3:
            return Region(*a_range)
        elif len(a_range) == 4:
            return Region(*a_range[:3])
    raise ValueError("Not a range: %r" % a_range)
f44b6069eb5e0fc8c85f01d5cbe708667a09a005
3,645,573
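A usage sketch mirroring the docstring examples; Region is the module's (chromosome, start, end) tuple type and from_label converts a 1-based "chrom:start-end" label to 0-based half-open coordinates:

print(unpack_range("chr1"))              # Region with chromosome only, no bounds
print(unpack_range(("chr1", 100, 123)))  # passed through as Region("chr1", 100, 123)
print(unpack_range("chr1:100-123"))      # via from_label -> ("chr1", 99, 123)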
import torch

def _old_extract_roles(x, roles):
    """
    x is [N, B, R, *shape]
    roles is [N, B]
    """
    N, B, R, *shape = x.shape
    assert roles.shape == (N, B)
    parts = []
    for n in range(N):
        parts.append(x[n:n+1, range(B), roles[n]])
    return torch.cat(parts, dim=0)
07a7be138558baa28ab1a10e2be2c7f17501ae96
3,645,574
def setup(i):
    """
    See "install" API with skip_process=yes
    """
    i['skip_process'] = 'yes'
    return install(i)
d4478adc27e444ac43dc9b4c8cd999157555c831
3,645,576
import requests

def post_merge_request(profile, payload):
    """Do a POST request to Github's API to merge.

    Args:
        profile
            A profile generated from ``simplygithub.authentication.profile``.
            Such profiles tell this module (i) the ``repo`` to connect to,
            and (ii) the ``token`` to connect with.
        payload
            A dict of information to pass to Github's API as the payload for
            a merge request, something like this::

                { "base": <base>, "head": <head>, "commit_message": <mesg> }

    Returns:
        The response returned by the ``requests`` library when it does the
        POST request.
    """
    repo = profile["repo"]
    url = GITHUB_API_BASE_URL + "repos/" + repo + "/merges"
    headers = get_headers(profile)
    response = requests.post(url, json=payload, headers=headers)
    return response
26131ac3dc078a9e33b7b2b785a71c51ec1d9072
3,645,577
def is_valid_table_name(cur, table_name):
    """
    Checks whether a name is for a table in the database.

    Note: Copied from utils.database for use in testing, to avoid a circular
    dependency between tests and implementation.

    Args:
        cur: sqlite3 database cursor object
        table_name (str): name to check

    Returns:
        True if valid, False otherwise
    """
    query = """
        SELECT 1
        FROM sqlite_master
        WHERE type == 'table'
            AND name == ?
    """
    res = cur.execute(query, (table_name,))
    return res.fetchone() is not None
f1efc66220baa215a73f374da19842ab38c619be
3,645,578
def create_mssql_pyodbc(username, password, host, port, database, **kwargs):  # pragma: no cover
    """
    Create an engine connected to a mssql database using pyodbc.
    """
    return create_engine(
        _create_mssql_pyodbc(username, password, host, port, database),
        **kwargs
    )
dac74a0c32f1c693eb059d6a61f84d2288651969
3,645,579
def _wait_for_multiple(driver, locator_type, locator, timeout, wait_for_n, visible=False):
    """Waits until `wait_for_n` matching elements are present (or visible).

    Returns the located elements when found.

    Args:
        driver: Selenium web driver instance
        locator_type: type of locator (e.g. By.CSS_SELECTOR or By.TAG_NAME)
        locator: name of tag, class, etc. to wait for
        timeout: how long to wait for presence/visibility of element
        wait_for_n: wait until this number of matching elements are present/visible
        visible: if True, require that elements are not only present, but visible
    """
    wait = WebDriverWait(driver, timeout)

    def multiple_found(driver):
        elements = driver.find_elements(locator_type, locator)
        if visible:
            elements = [e for e in elements if e.is_displayed()]
        if len(elements) < wait_for_n:
            return False
        return elements

    return wait.until(multiple_found)
d96c10d95877d699f8b284ea41e8b8ef5aebbf3c
3,645,580
import numpy as np

def relu(x):
    """
    Arguments:
    x -- Output of the linear layer, of any shape

    Returns:
    Vec -- Post-activation parameter, of the same shape as x
    cash -- stored input, for computing the backward pass efficiently
    """
    Vec = np.maximum(0, x)
    assert Vec.shape == x.shape
    cash = x
    return Vec, cash
1d94d3008aca7ab613dfa92504061264111f1c28
3,645,581
def _declare_swiftdoc(
        *,
        actions,
        arch,
        label_name,
        output_discriminator,
        swiftdoc):
    """Declares the swiftdoc for this Swift framework.

    Args:
        actions: The actions provider from `ctx.actions`.
        arch: The cpu architecture that the generated swiftdoc belongs to.
        label_name: Name of the target being built.
        output_discriminator: A string to differentiate between different
            target intermediate files or `None`.
        swiftdoc: A File referencing the swiftdoc file from a SwiftInfo
            provider.

    Returns:
        A File referencing the intermediate swiftdoc.
    """
    bundle_doc = intermediates.file(
        actions = actions,
        target_name = label_name,
        output_discriminator = output_discriminator,
        file_name = "{}.swiftdoc".format(arch),
    )
    actions.symlink(
        target_file = swiftdoc,
        output = bundle_doc,
    )
    return bundle_doc
589828527b8fe775aafca8fb1bee677d716a88c6
3,645,582
import cv2

def threshold_image(gray_image, name_bw, threshold):
    """
    This computes the binary image of the input image using a threshold

    :param gray_image: input image
    :param threshold: input threshold
    :param name_bw: name of the binary image
    :return: BW image
    """
    # perform Gaussian blurring to remove unwanted noisy components
    blurred = cv2.GaussianBlur(gray_image, (5, 5), 0)
    # convert the smoothed image into a bw image
    thresh = cv2.threshold(blurred, threshold, 255, cv2.THRESH_BINARY)[1]
    # perform morphological operations to remove small components
    thresh = cv2.erode(thresh, None, iterations=1)
    thresh = cv2.dilate(thresh, None, iterations=1)
    # store the bw image
    cv2.imwrite("threshold_" + name_bw, thresh)
    return thresh
98c14281a322b110594e12a4e2b10016a8d6533f
3,645,583
import re

def sub_repeatedly(pattern, repl, term):
    """apply sub() repeatedly until no change"""
    while True:
        new_term = re.sub(pattern, repl, term)
        if new_term == term:
            return term
        term = new_term
e57c648fb057f81e35e0fc2d2dc57edd0b400baf
3,645,585
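A quick demonstration of why the iteration matters: a single re.sub pass only rewrites the matches found in the original string, so nested cases need repeated passes:

print(re.sub(r'\(\)', '', '((()))'))          # '(())' -- one pass is not enough
print(sub_repeatedly(r'\(\)', '', '((()))'))  # ''     -- iterated to a fixed point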
from typing import Tuple

import numpy as np

def _dtw(distance_matrix: np.ndarray, gully: float = 1.,
         additive_penalty: float = 0.,
         multiplicative_penalty: float = 1.) -> Tuple[np.ndarray, np.ndarray, float]:
    """
    Compute the dynamic time warping distance between two sequences given a
    distance matrix: the DTW score of the lowest cost path through the
    distance matrix, including penalties.

    :param distance_matrix: Distances between two sequences
    :param gully: Sequences must match up to this proportion of the shorter
        sequence. Default value is 1, which means that the entirety of the
        shorter sequence must be matched to a part of the longer sequence.
    :param additive_penalty: Additive penalty for non-diagonal moves.
        Default value is 0, which means no penalty.
    :param multiplicative_penalty: Multiplicative penalty for non-diagonal
        moves. Default value is 1, which means no penalty.
    :return: Lowest cost path through the distance matrix. Penalties are
        included, the score is not yet normalized.
    """
    if np.isnan(distance_matrix).any():
        raise ValueError('NaN values found in distance matrix.')
    distance_matrix = distance_matrix.copy()

    # Pre-allocate traceback matrix
    traceback = np.empty(distance_matrix.shape, np.uint8)

    # Populate distance matrix with lowest cost path
    _dtw_core(distance_matrix, additive_penalty, multiplicative_penalty, traceback)

    if gully < 1.:
        # Allow the end of the path to start within gully percentage of the
        # smaller distance matrix dimension
        gully = int(gully * min(distance_matrix.shape))
    else:
        # When gully is 1 require matching the entirety of the smaller sequence
        gully = min(distance_matrix.shape) - 1

    # Find the indices of the smallest costs on the bottom and right edges
    i = np.argmin(distance_matrix[gully:, -1]) + gully
    j = np.argmin(distance_matrix[-1, gully:]) + gully

    # Choose the smaller cost on the two edges
    if distance_matrix[-1, j] > distance_matrix[i, -1]:
        j = distance_matrix.shape[1] - 1
    else:
        i = distance_matrix.shape[0] - 1

    # Score is the final score of the best path
    score = float(distance_matrix[i, j])

    # Pre-allocate the x and y path index arrays
    # (np.int is a removed alias of the built-in int)
    x_indices = np.zeros(sum(traceback.shape), dtype=int)
    y_indices = np.zeros(sum(traceback.shape), dtype=int)
    # Start the arrays from the end of the path
    x_indices[0] = i
    y_indices[0] = j
    # Keep track of path length
    n = 1

    # Until we reach an edge
    while i > 0 and j > 0:
        # If the traceback matrix indicates a diagonal move...
        if traceback[i, j] == 0:
            i = i - 1
            j = j - 1
        # Horizontal move...
        elif traceback[i, j] == 1:
            i = i - 1
        # Vertical move...
        elif traceback[i, j] == 2:
            j = j - 1
        # Add these indices into the path arrays
        x_indices[n] = i
        y_indices[n] = j
        n += 1

    # Reverse and crop the path index arrays
    x_indices = x_indices[:n][::-1]
    y_indices = y_indices[:n][::-1]

    return x_indices, y_indices, score
388b070d4bd2bbca42371b85d27f0807f86ae09b
3,645,586
async def _ensure_meadowgrid_security_groups() -> str:
    """
    Creates the meadowgrid coordinator security group and meadowgrid agent
    security group if they don't exist. The coordinator security group allows
    meadowgrid agents and the current ip to access the coordinator, as well
    as allowing the current ip to ssh. See also
    _ensure_meadowgrid_agent_security_group.
    """
    current_ip_for_ssh = await _get_current_ip_for_ssh()

    # allow meadowgrid traffic from the meadowgrid agent security group
    agent_security_group_id = ensure_security_group(
        _MEADOWGRID_AGENT_SECURITY_GROUP, [(22, 22, f"{current_ip_for_ssh}/32")], []
    )

    return ensure_security_group(
        _MEADOWGRID_COORDINATOR_SECURITY_GROUP,
        [
            (22, 22, f"{current_ip_for_ssh}/32"),
            (
                DEFAULT_COORDINATOR_PORT,
                DEFAULT_COORDINATOR_PORT,
                f"{current_ip_for_ssh}/32",
            ),
        ],
        [(DEFAULT_COORDINATOR_PORT, DEFAULT_COORDINATOR_PORT, agent_security_group_id)],
    )
b0fc2c0e1fb767c5cfbb365d0c58cf39d327caf3
3,645,587
import numpy as np

def _create_pairs_numba(
    to_match, indexer, first_stage_cum_probs, group_codes_per_individual, seed
):
    """
    Args:
        to_match (np.ndarray): 2d boolean array with one row per individual
            and one column per sub-contact model.
        indexer (numba.List): Numba list that maps id of county to a numpy
            array with the row positions of all individuals from that county.
        first_stage_cum_probs (numpy.ndarray): Array of shape n_groups,
            n_groups. cum_probs[i, j] is the probability that an individual
            from group i meets someone from group j or lower.
        group_codes_per_individual (np.ndarray): 1d array with assortative
            matching group ids, coded as integers.

    Returns:
        out (np.ndarray): 2d integer array with meeting ids.
    """
    np.random.seed(seed)
    unique_group_codes = np.arange(len(first_stage_cum_probs))
    to_match = to_match.copy()
    out = np.full(to_match.shape, -1)
    n_obs, n_models = to_match.shape
    for m in range(n_models):
        meeting_id = 0
        for i in range(n_obs):
            if to_match[i, m]:
                group_i = group_codes_per_individual[i]
                group_j = choose_other_group(
                    unique_group_codes, first_stage_cum_probs[group_i]
                )
                group_j_indices = indexer[group_j]
                weights = to_match[group_j_indices, m].astype(np.float64)
                j = choose_other_individual(group_j_indices, weights)
                if j != -1:
                    to_match[i, m] = False
                    to_match[j, m] = False
                    out[i, m] = meeting_id
                    out[j, m] = meeting_id
                    meeting_id += 1
    return out
5c7bed67a644104dc7b22b79d3858fc5e27cf14d
3,645,588
import numpy as np

def filter_known_bad(orbit_points):
    """
    Filter some commands that are known to be incorrect.
    """
    ops = orbit_points
    bad = np.zeros(len(orbit_points), dtype=bool)
    bad |= (ops['name'] == 'OORMPEN') & (ops['date'] == '2002:253:10:08:52.239')
    bad |= (ops['name'] == 'OORMPEN') & (ops['date'] == '2004:010:10:00:00.000')
    return orbit_points[~bad]
c8f64b541be5d2f7fce3554dc83c7f36ee8bc0a4
3,645,590
def create_lexicon(word_tags):
    """
    Create a lexicon in the right format for nltk.CFG.fromString() from a
    list of tuples with words and their tag.
    """
    # dictionary to filter the double tags
    word_dict = {}
    for word, tag in word_tags:
        if tag not in word_dict:
            word_dict[tag] = {word}
        else:
            word_dict[tag].add(word)

    # POS is the tag for 's, but the 's is not removed on nouns.
    word_dict['NN'] = [x.replace('\'s', '') for x in word_dict['NN']]
    word_dict['JJ'] = [x.replace('\'s', '') for x in word_dict['JJ']]
    del word_dict[',']
    word_dict['PRP'].update(word_dict['PRP$'])
    del word_dict['PRP$']
    word_dict['POS'] = ['"s']

    # convert the dictionary to the right NLTK format
    lexicon = ''
    for key, val in word_dict.items():
        lexicon += key + ' -> '
        # add quotes around every word
        val = [f'\'{v}\'' for v in val]
        # the words are separated by a pipe
        lexicon += ' | '.join(val) + '\n'
    return lexicon
3a91671d559f5924ec9326520db6e11a1672fee4
3,645,591
def display_time(seconds, granularity=2):
    """Display time as a nicely formatted string"""
    result = []
    if seconds == 0:
        return "0 seconds"
    for name, count in intervals:
        value = seconds // count
        if value:
            seconds -= value * count
            if value == 1:
                name = name.rstrip('s')
            result.append("{} {}".format(value, name))
    return ', '.join(result[:granularity])
d8fe16585a66d085a08941b7b038d448fee23570
3,645,593
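display_time assumes a module-level intervals sequence ordered from largest to smallest unit; a plausible sketch (the exact set of units is an assumption):

# Assumed (name, seconds-per-unit) pairs, largest first.
intervals = (
    ('weeks', 604800),  # 60 * 60 * 24 * 7
    ('days', 86400),    # 60 * 60 * 24
    ('hours', 3600),    # 60 * 60
    ('minutes', 60),
    ('seconds', 1),
)

print(display_time(3661))     # '1 hour, 1 minute'
print(display_time(3661, 3))  # '1 hour, 1 minute, 1 second'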
def reciprocal(x):
    """
    Returns the reciprocal of x.

    Args:
        x (TensorOp): A tensor.

    Returns:
        TensorOp: The reciprocal of x.
    """
    return ReciprocalOp(x)
3678efa2d69948e85ccaae43f34492783a77cef9
3,645,594
from argparse import ArgumentParser

def csv_saver_parser():
    """
    Csv saver parser. Returns a tuple with args as a dictionary and the
    suffix that needs to be removed.

    :return: tuple
    """
    csv_saver_parser = ArgumentParser(description='Parser for saving data into CSV files.')
    csv_saver_parser.add_argument('--F-csvsave',
                                  help='The field separator to be used. \'\t\' can be used as well. (default: \',\')')
    csv_saver_parser.add_argument('--M-csvsave',
                                  help='The string representing a missing value. (default: ?)')
    csv_saver_parser.add_argument('--N-csvsave', action='store_const', const="",
                                  help='Don\'t write a header row.')
    csv_saver_parser.add_argument('--decimal-csvsave',
                                  help='The maximum number of digits to print after the decimal place for numeric values (default: 6)')
    csv_saver_parser.add_argument('--i-csvsave', help='The input file')
    csv_saver_parser.add_argument('--o-csvsave', help='The output file')

    return vars((csv_saver_parser.parse_known_args())[0]), '-csvsave'
d98fd53217eafa24826df56e114720a7881f17bb
3,645,596
def get_var(expr: Expression) -> Var:
    """
    Warning: this is only true for expressions captured by a match statement.
    Don't call it from anywhere else
    """
    assert isinstance(expr, NameExpr)
    node = expr.node
    assert isinstance(node, Var)
    return node
f8bec4c919858f6aaa5126fc4e55f825f2ca677c
3,645,597
def sight(unit_type: int):
    """Return the sight range of a unit, given its unit type ID

    :param unit_type: the unit type ID, according to :mod:`pysc2.lib.stats`
    :type unit_type: int
    :return: the unit's sight range
    :rtype: float
    """
    return __data['Sight'][unit_type]
84c3b8fdbfaaede81e7abc10cc190830df9e2c86
3,645,598
def decode_jwt(encoded_jwt):
    """ Decode a JWT """
    global key
    # Note: when the payload declares an 'aud' (audience) claim, the
    # audience must be specified when decoding
    decoded_jwt = jwt.decode(encoded_jwt, key, audience='dev', algorithms=["HS256"])
    return decoded_jwt
467bfcce7c5264813ab57f420da277b7674976db
3,645,599
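For context, a sketch of the encoding side this decoder expects, using PyJWT; the key and payload fields here are illustrative assumptions, but the 'aud' claim must match the audience='dev' passed to jwt.decode:

import jwt  # PyJWT

key = "secret"  # hypothetical shared HMAC key

# The 'aud' claim is what decode_jwt verifies via audience='dev'.
encoded = jwt.encode({"sub": "user-1", "aud": "dev"}, key, algorithm="HS256")
print(decode_jwt(encoded))  # {'sub': 'user-1', 'aud': 'dev'}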
from collections import OrderedDict
from typing import List

async def bulkget(ip, community, scalar_oids, repeating_oids, max_list_size=1,
                  port=161, timeout=DEFAULT_TIMEOUT):
    # type: (str, str, List[str], List[str], int, int, int) -> BulkResult
    """
    Delegates to :py:func:`~puresnmp.aio.api.raw.bulkget` but returns simple
    Python types.

    See the "raw" equivalent for detailed documentation & examples.
    """
    raw_output = await raw.bulkget(ip, community, scalar_oids, repeating_oids,
                                   max_list_size=max_list_size,
                                   port=port,
                                   timeout=timeout)
    pythonized_scalars = {oid: value.pythonize()
                          for oid, value in raw_output.scalars.items()}
    pythonized_list = OrderedDict(
        [(oid, value.pythonize()) for oid, value in raw_output.listing.items()])
    return BulkResult(pythonized_scalars, pythonized_list)
6795f7d9ff7ac1952406922395e2308346ff244d
3,645,600
def repo_version_db_key() -> bytes:
    """The db formatted key at which version information can be accessed

    Returns
    -------
    bytes
        db formatted key to use to get/set the repository software version.
    """
    db_key = c.K_VERSION.encode()
    return db_key
090a70e59d1a2c7d4a3f4589b9f4a2ef975e2585
3,645,601
import h5py

def retrieve_psd_cdf(path):
    """Interact with the hdf5 file format for marginal CDFs for a set of PSDs"""
    with h5py.File(path, 'r') as obj:
        group = obj['PSD_CDF']
        Npsd = group.attrs['num_psds']
        freqs = group['frequencies'][...]
        data = group['CDFs'][...]
        vals = data[:, 0, :]
        cdfs = data[:, 1, :]
    return freqs, vals, cdfs, Npsd
f0ee184d972ddcbedeb94345f15fcab9d08e8458
3,645,602
from heapq import nlargest

import mgp

def get(context: mgp.ProcCtx) -> mgp.Record(tracks=list):
    """Returns a list of track_ids of trendy songs.

    Calculates recently popular tracks by comparing the popularity of songs
    using the `followers`, `created_at`, and proximity to other popular songs
    (pagerank).

    Example usage:
        CALL trendy_tracks.get() YIELD tracks

    Equivalent cypher query:
        MATCH (track:Track)<--(playlist:Playlist)
        WITH track, count(playlist) AS popularity
        RETURN track
        ORDER BY popularity DESC
        LIMIT 10

    :return: List of track ids that are currently trendy.
    :rtype: mgp.Record(tracks=list[dict[str][Any]])
    """
    return mgp.Record(
        tracks=list(
            map(
                lambda vertex: dict(vertex.properties),
                nlargest(
                    10,
                    filter(
                        lambda vertex: "Track" in vertex.labels,
                        context.graph.vertices,
                    ),
                    key=lambda vertex: sum(1 for _ in vertex.in_edges),
                ),
            )
        )
    )
182d8a3d26028f472f1cc64bd993b6a29635daf5
3,645,603
import ipaddress

def ip_only(value):
    """
    Returns only the IP address string of the value provided. The value
    could be either an IP address, an IP network or an IP interface as
    defined by the ipaddress module.

    Parameters
    ----------
    value : str
        The value to use

    Returns
    -------
    str
        The IP address only value, if the value provided was valid

    None
        If the value provided is not an IP thing
    """
    for test in [lambda x: str(ipaddress.ip_address(x)),
                 lambda x: str(ipaddress.ip_interface(x).ip),
                 lambda x: str(ipaddress.ip_network(x).network_address)]:
        try:
            return test(value)
        except Exception:
            pass

    return None
149b202969c0ccb4e0c5e55417ce0231f1b5fc11
3,645,604
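A usage sketch covering the accepted forms plus an invalid value:

print(ip_only("192.0.2.10"))     # '192.0.2.10' (plain address)
print(ip_only("192.0.2.10/24"))  # '192.0.2.10' (interface -> its address)
print(ip_only("192.0.2.0/24"))   # '192.0.2.0'
print(ip_only("not-an-ip"))      # None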
import datetime
from typing import Optional, Tuple

import investpy
import numpy as np

def get_data_by_isin(isin: str, dates: Tuple[datetime.date],
                     is_etf: bool) -> Tuple[Optional[np.ndarray], str]:
    """Retrieves stock/ETF prices in EUR by ISIN for the given dates. Cached
    to make sure this is only queried once for a given currency & date-range."""
    from_date = dates[0].strftime("%d/%m/%Y")
    to_date = (dates[-1] + datetime.timedelta(days=7)).strftime("%d/%m/%Y")

    # Retrieve stock/ETF information based on the ISIN
    try:
        if is_etf:
            data = investpy.search_etfs(by="isin", value=isin)
        else:
            data = investpy.search_stocks(by="isin", value=isin)
    except RuntimeError:
        print(f"[DGPC] Warning, could not retrieve {'ETF' if is_etf else 'stock'} data for ISIN {isin}.")
        return None, ""

    # When a stock/ETF is listed in multiple countries, take one of the
    # preferred countries if found
    for country in PREFERRED_COUNTRIES:
        local_data = data[data["country"] == country]
        if local_data.shape[0] > 0:
            break
    else:
        # Take the first country from the results if none of the preferred
        # countries is found
        country = data["country"][0]
        local_data = data

    # Retrieve the actual historical prices for the stock/ETF
    currency = list(local_data["currency"])[0]
    symbol = list(local_data["symbol"])[0]
    if is_etf:
        name = list(local_data["name"])[0]
        history = investpy.get_etf_historical_data(name, country=country,
                                                   from_date=from_date, to_date=to_date)
    else:
        history = investpy.get_stock_historical_data(symbol, country=country,
                                                     from_date=from_date, to_date=to_date)
    history = history.reset_index()
    values = densify_history(history, dates)

    # Convert the results to euro
    if currency != "EUR":
        currency_modifier = to_euro_modifier(currency, tuple(dates))
        values *= currency_modifier

    return values, symbol
d4d46b45f480488fb67d3a6116a3b2e90c736efc
3,645,607
from typing import Any, Dict

def get_result_qiskit() -> Dict[str, Dict[str, Any]]:
    """Fixture for returning sample experiment result

    Returns
    -------
    Dict[str, Dict[str, Any]]
        A dictionary of results for physics simulation and perfect gates

        A result dictionary which looks something like::

            {
                "name": name of this experiment (obtained from qobj.experiment header)
                "seed": random seed used for simulation
                "shots": number of shots used in the simulation
                "data": {
                    "counts": {'0x9': 5, ...},
                    "memory": ['0x9', '0xF', '0x1D', ..., '0x9']
                },
                "status": status string for the simulation
                "success": boolean
                "time_taken": simulation time of this single experiment
            }
    """
    # Result of physics based sim for applying X on qubit 0 in 6 qubits
    perfect_counts = {"110000": 1000}

    counts_dict = {
        "c3_qasm_perfect_simulator": perfect_counts,
    }
    return counts_dict
7dc44fe110687b92f5e8b23c24798b06dd19e71e
3,645,608
def all_budgets_for_student(user_id):
    """Returns a queryset for all budgets that a student can view/edit,
    i.e. is the submitter, president, or treasurer for any of the
    organization's budgets"""
    query = (Q(budget__submitter=user_id)
             | Q(budget__president_crsid=user_id)
             | Q(budget__treasurer_crsid=user_id))
    orgs = Organization.objects.filter(query)
    budgets = Budget.objects.filter(organization__in=orgs)
    return budgets
2048f2b579c2e8903ca34c34990e9c2c5215f79c
3,645,609
import torch
from torch.autograd import Variable

def uniform_weights(x, x_mask):
    """Return uniform weights over non-masked x (a sequence of vectors).

    Args:
        x: batch * len * hdim
        x_mask: batch * len (1 for padding, 0 for true)
    Output:
        alpha: batch * len (uniform weights over non-masked positions)
    """
    alpha = Variable(torch.ones(x.size(0), x.size(1)))
    if x.data.is_cuda:
        alpha = alpha.cuda()
    alpha = alpha * x_mask.eq(0).float()
    alpha = alpha / alpha.sum(1, keepdim=True).expand(alpha.size())
    return alpha
a1b88fc88ac65886283159d077e9550dab95c8de
3,645,611
import numpy as np
from numpy.linalg import inv  # matrix inverse; operator.inv would be bitwise inversion

def mixed_estimator_2(T1, T2, verbose=False):
    """
    Based on the Lavancier and Rochet (2016) article. The method combines two
    series of estimates of the same quantity taking into account their
    correlations. The individual measurements are assumed independent. The
    current implementation works only for point estimates.

    The main result corresponds to Eq. (11) from the article. Its variance is
    the equation after Eq. (9). [equation not checked]
    """
    B = 1000  # bootstrap repetitions

    # Drop nans
    not_nans = np.logical_or(np.isnan(T1), np.isnan(T2))
    T1, T2 = T1[~not_nans], T2[~not_nans]
    n = len(T1)

    # Return nan if no samples.
    # If one sample, return simple average with no variance.
    if n == 0:
        return np.nan, np.nan, np.array([np.nan, np.nan])
    elif n == 1:
        return T1[0] / 2 + T2[0] / 2, np.nan, np.array([0.5, 0.5])

    # Calculate the estimators for the data set. This is the input data for the rest
    T1_data_median = np.median(T1)
    T2_data_median = np.median(T2)

    # Estimate the covariance sigma matrix with bootstrap (with replacement,
    # as described in the article)
    sigma = np.zeros((2, 2))
    for b in range(B):
        T1_sample = np.random.choice(T1, size=n, replace=True)
        T2_sample = np.random.choice(T2, size=n, replace=True)

        T1_sample_median = np.median(T1_sample)
        T2_sample_median = np.median(T2_sample)

        sigma += np.array(
            [[(T1_sample_median - T1_data_median)**2,
              (T1_sample_median - T1_data_median) * (T2_sample_median - T2_data_median)],
             [(T1_sample_median - T1_data_median) * (T2_sample_median - T2_data_median),
              (T2_sample_median - T2_data_median)**2]])
    sigma /= B

    # Calculate the mixed estimator
    I = np.array([[1, 1]]).T
    T = np.array([[T1_data_median, T2_data_median]]).T
    weights = inv(I.T @ inv(sigma) @ I) @ I.T @ inv(sigma)
    mixed_estimator = (weights @ T)[0, 0]
    mixedV = (inv(I.T @ inv(sigma) @ I))[0, 0]

    if verbose:
        print('weights', weights)
        print(mixed_estimator, '+-', np.sqrt(mixedV))

    return mixed_estimator, mixedV, np.squeeze(weights)
8f9ee282b0756dd41ff98e9ae596e46ddf6947a3
3,645,612
def e_add_const(pub, a, n):
    """Add constant n to an encrypted integer"""
    return a * modpow(pub.g, n, pub.n_sq) % pub.n_sq
37be82c71da3114f94d8b2ebe08f54a0726ec655
3,645,613
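e_add_const assumes a modpow helper that is not shown here; if it is plain modular exponentiation, Python's built-in three-argument pow covers it. A minimal sketch of the assumed helper:

def modpow(base, exponent, modulus):
    # Assumed helper: (base ** exponent) % modulus, i.e. built-in 3-arg pow.
    return pow(base, exponent, modulus)

The pub.g / pub.n_sq attributes suggest the Paillier cryptosystem, where multiplying a ciphertext by g^n mod n^2 adds the constant n to the underlying plaintext.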
def area_triangle(base, height):
    """Return the area of a triangle given its base and height."""
    return (base * height) / 2.0
474e1a090dc7af9d68eaab35e6b04e5e165b6777
3,645,614
from rdkit.Chem import rdMolDescriptors

def _getAtomInvariantsWithRadius(mol, radius):
    """ Helper function to calculate the atom invariants for each atom
        with a given radius

        Arguments:
        - mol:    the molecule of interest
        - radius: the radius for the Morgan fingerprint

        Return: list of atom invariants
    """
    inv = []
    for i in range(mol.GetNumAtoms()):
        info = {}
        fp = rdMolDescriptors.GetMorganFingerprint(mol, radius, fromAtoms=[i], bitInfo=info)
        for k in info.keys():
            if info[k][0][1] == radius:
                inv.append(k)
    return inv
8b8565a62af7f94c79604342077918a5b4261410
3,645,615
import numpy as np

def radcool(temp, zmetal):
    """ Cooling Function

    This version redefines Lambda_sd: (rho/m_p)^2 Lambda(T,z) is the cooling
    in erg/cm^3 s

    Args:
        temp  : temperature in the unit of K
        zmetal: metallicity in the unit of solar metallicity
    Return:
        in the unit of erg*s*cm^3
    """
    tshape = temp.shape
    tempflt = temp.flatten()
    qlog0 = np.zeros_like(tempflt)
    qlog1 = np.zeros_like(tempflt)
    for i, t in enumerate(tempflt):
        tlog = np.log10(t)
        # zero-metal cooling coefficient Lambda([Fe/H]=0)
        if tlog >= 6.1:
            qlog0[i] = -26.39 + 0.471 * (np.log10(t + 3.1623e6))
        elif tlog >= 4.9:
            arg = 10.**(-(tlog - 4.9) / .5) + 0.077302
            qlog0[i] = -22.16 + np.log10(arg)
        elif tlog >= 4.25:
            bump1rhs = -21.98 - ((tlog - 4.25) / 0.55)
            bump2lhs = -22.16 - ((tlog - 4.9) / 0.284)**2
            qlog0[i] = max(bump1rhs, bump2lhs)
        else:
            qlog0[i] = -21.98 - ((tlog - 4.25) / 0.2)**2
        # note: comparing against np.nan with == is always False,
        # so the NaN check must use np.isnan
        if np.isnan(qlog0[i]):
            mylog.warning('There is NaN.')
        # emission from metals alone at solar abundance
        if tlog >= 5.65:
            tlogc = 5.65
            qlogc = -21.566
            qloginfty = -23.1
            p = 0.8
            qlog1[i] = qlogc - p * (tlog - tlogc)
            qlog1[i] = max(qlog1[i], qloginfty)
        else:
            tlogm = 5.1
            qlogm = -20.85
            sig = 0.65
            qlog1[i] = qlogm - ((tlog - tlogm) / sig)**2
    qlambda0 = 10.**qlog0
    qlambda1 = 10.**qlog1
    # final cooling coefficient Lambda_sd:
    radcoolsd = qlambda0 + zmetal.flatten() * qlambda1
    radcoolsd = radcoolsd.reshape(tshape)
    return radcoolsd
720ed6625c9fe348ebe78aa80127c4bcc4e911a9
3,645,617
def L_model_forward(X, parameters):
    """
    Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation

    Arguments:
    X -- data, numpy array of shape (input size, number of examples)
    parameters -- output of initialize_parameters_deep()

    Returns:
    AL -- last post-activation value
    caches -- list of caches containing:
              every cache of linear_activation_forward()
              (there are L-1 of them, indexed from 0 to L-1)
    """
    caches = []
    A = X
    L = len(parameters) // 2  # number of layers in the neural network

    # Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list.
    for l in range(1, L):
        A_prev = A
        A, cache = linear_activation_forward(A_prev,
                                             parameters["W" + str(l)],
                                             parameters["b" + str(l)],
                                             activation="relu")
        caches.append(cache)

    # Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
    AL, cache = linear_activation_forward(A,
                                          parameters["W" + str(L)],
                                          parameters["b" + str(L)],
                                          activation="sigmoid")
    caches.append(cache)

    assert AL.shape == (1, X.shape[1])

    return AL, caches
b086f172e1fc0d8dad2353af1b35a8f6bd3f13dc
3,645,618
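L_model_forward depends on a linear_activation_forward helper that is not part of this snippet; the sketch below is one plausible shape for it, consistent with how it is called above (the exact cache layout is an assumption):

import numpy as np

def linear_activation_forward(A_prev, W, b, activation):
    # Assumed helper: one LINEAR -> ACTIVATION step returning (A, cache).
    Z = W @ A_prev + b
    linear_cache = (A_prev, W, b)
    if activation == "relu":
        A = np.maximum(0, Z)
    else:  # "sigmoid"
        A = 1 / (1 + np.exp(-Z))
    return A, (linear_cache, Z)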
import numpy as np

def linalg_multiply(a):
    """
    Multiply all elements in a vector or matrix

    Parameters:
    * a (array or matrix): The input to multiply

    Return (number): The product of all elements
    """
    return np.prod(a)
bac2457c61813cb5d662cef37fb2b48d8e65ba34
3,645,619
def lambda_handler(event, context):
    """
    This method selects 10% of the input manifest as validation and creates
    an s3 file containing the validation objects.
    """
    label_attribute_name = event['LabelAttributeName']
    meta_data = event['meta_data']
    s3_input_uri = meta_data['IntermediateManifestS3Uri']
    input_total = int(meta_data['counts']['input_total'])

    # 10% of the total input should be used for validation.
    validation_set_size = input_total // 10

    source = S3Ref.from_uri(s3_input_uri)
    validation_labeled_query = """select * from s3object[*] s where s."{}-metadata"."human-annotated" IN ('yes') LIMIT {}""".format(
        label_attribute_name, validation_set_size)
    dest = create_ref_at_parent_key(source, "validation_input.manifest")
    copy_with_query(source, dest, validation_labeled_query)
    logger.info("Uploaded validation set of size {} to {}.".format(
        validation_set_size, dest.get_uri()))

    meta_data['counts']['validation'] = validation_set_size
    meta_data['ValidationS3Uri'] = dest.get_uri()
    return meta_data
f6e0313155a47110e47567320e03e241bb6dde37
3,645,620
def get_table_6():
    """Table 6: Whether thermal storage can be adopted (表 6 蓄熱の採用の可否)

    Args:

    Returns:
        list: Table 6, whether thermal storage can be adopted
    """
    # '可' = allowed, '不可' = not allowed
    table_6 = [
        ('不可', '不可', '可', '可', '可'),
        ('不可', '不可', '可', '可', '可'),
        ('不可', '不可', '可', '可', '可'),
        ('不可', '不可', '可', '可', '可'),
        ('不可', '不可', '可', '可', '可'),
        ('不可', '不可', '不可', '可', '可'),
        ('不可', '不可', '不可', '可', '可')
    ]
    return table_6
4ecd4526ed9ce67b7a5d22b67dd804059807e94d
3,645,621
import random

def is_prime(number, num_trials=200):
    """Determines whether a number is prime.

    Runs the Miller-Rabin probabilistic primality test many times on the
    given number.

    Args:
        number (int): Number to perform primality test on.
        num_trials (int): Number of times to perform the Miller-Rabin test.

    Returns:
        True if number is prime, False otherwise.
    """
    if number < 2:
        return False
    if number != 2 and number % 2 == 0:
        return False

    # Find largest odd factor of n-1.
    exp = number - 1
    while exp % 2 == 0:
        exp //= 2

    for _ in range(num_trials):
        rand_val = int(random.SystemRandom().randrange(1, number))
        new_exp = exp
        power = pow(rand_val, new_exp, number)
        while new_exp != number - 1 and power != 1 and power != number - 1:
            power = (power * power) % number
            new_exp *= 2
        if power != number - 1 and new_exp % 2 == 0:
            return False

    return True
78478437c08bcbd5e4c690466e4fe51bb4fad5ce
3,645,622
from sklearn.preprocessing import LabelEncoder

def extract_labels(filenames):
    """
    Extract class labels of the images from an image path list.

    # Arguments
        filenames: List of paths to image files.

    # Returns
        List of integer-encoded image labels.
    """
    return LabelEncoder().fit_transform([extract_label(filename) for filename in filenames])
53f708a0abb105d3ffce0202117b6eae812a9ede
3,645,623
def reverseList(head):
    """
    :type head: ListNode
    :rtype: ListNode
    """
    # Standard iterative reversal: walk the list once, flipping each node's
    # next pointer to the node that came before it.
    prev = None
    current = head
    while current is not None:
        next_node = current.next  # remember the unreversed remainder
        current.next = prev       # point this node backwards
        prev = current            # advance the head of the reversed part
        current = next_node
    return prev
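A small demonstration, assuming the conventional singly linked ListNode with val and next attributes used in LeetCode-style problems:

class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

head = ListNode(1, ListNode(2, ListNode(3)))
node = reverseList(head)
vals = []
while node is not None:
    vals.append(node.val)
    node = node.next
print(vals)  # [3, 2, 1]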
06f07ad9c5dbb13d2e288ea2ff14ef31febf87b9
3,645,624
def validate(data: BuildParams):
    """
    Makes sure a valid combination of params has been provided.
    """
    git_repo = bool(data.source.git_repo)
    dockerfile = bool(data.source.dockerfile)
    build_context = bool(data.source.build_context)

    git_valid = git_repo and not dockerfile and not build_context
    dockerfile_valid = dockerfile and not git_repo

    if not (git_valid or dockerfile_valid):
        return False, "Only one of build sources (git_repo, dockerfile) can be used.\n" \
                      "build_context can only be used in combination with dockerfile"
    return True, ""
708335092018339aa4f64b58d5ec8d2cb09751c3
3,645,625
import pickle

def load_model(filepath=FILEPATH) -> TrainingParams:
    """
    Load a pickled TrainingParams model from disk.

    :param filepath: path to the pickle file (defaults to the module-level FILEPATH)
    :return: the deserialized TrainingParams object
    """
    with open(filepath, "rb") as handler:
        model = pickle.load(handler)
    return model
f2a1ed631bdb7b1f7e6fd372ca604ef4ef6890f2
3,645,626
import numpy as np
from scipy import sparse

def is_symmetric(m):
    """Check if a sparse matrix is symmetric

    https://mail.python.org/pipermail/scipy-dev/2014-October/020117.html

    Parameters
    ----------
    m : sparse matrix

    Returns
    -------
    check : bool
    """
    if m.shape[0] != m.shape[1]:
        raise ValueError('m must be a square matrix')

    if not isinstance(m, sparse.coo_matrix):
        m = sparse.coo_matrix(m)

    r, c, v = m.row, m.col, m.data
    tril_no_diag = r > c
    triu_no_diag = c > r

    # Different numbers of strictly-lower and strictly-upper entries means the
    # sparsity pattern itself is asymmetric. (The original returned a truthy
    # tuple here, contradicting the documented bool return.)
    if triu_no_diag.sum() != tril_no_diag.sum():
        return False

    rl = r[tril_no_diag]
    cl = c[tril_no_diag]
    vl = v[tril_no_diag]
    ru = r[triu_no_diag]
    cu = c[triu_no_diag]
    vu = v[triu_no_diag]

    sortl = np.lexsort((cl, rl))
    sortu = np.lexsort((ru, cu))
    vl = vl[sortl]
    vu = vu[sortu]

    check = np.allclose(vl, vu)

    return check
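A short check of the symmetry test using small scipy sparse matrices:

import numpy as np
from scipy import sparse

sym = sparse.coo_matrix(np.array([[1, 2], [2, 1]]))
asym = sparse.coo_matrix(np.array([[1, 2], [3, 1]]))
print(is_symmetric(sym))   # True
print(is_symmetric(asym))  # False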
84523c1c4bf0120025d6e7a0bcc9cf2e489b1ae8
3,645,627
def render_raster_map(bounds, scale, basemap_image, aoi_image, id, path, colors): """Render raster dataset map based on bounds. Merge this over basemap image and under aoi_image. Parameters ---------- bounds : list-like of [xmin, ymin, xmax, ymax] bounds of map scale : dict map scale info basemap_image : Image object aoi_image : Image object id : str map ID path : str path to raster dataset colors : list-like of colors colors to render map image based on values in raster Returns ------- id, Image object Image object is None if it could not be rendered or does not overlap bounds """ raster_img = render_raster(path, bounds, scale, WIDTH, HEIGHT, colors) map_image = merge_maps([basemap_image, raster_img, aoi_image]) map_image = to_base64(map_image) return id, map_image
f24c3b48911c7c322d3c02e9808f0013354c567d
3,645,629
def str2num(s): """Convert string to int or float number. Parameters ---------- s : string String representing a number. Returns ------- Number (int or float) Raises ------ TypeError If `s` is not a string. ValueError If the string does not represent a (float or int) number. """ try: x = float(s) if x.is_integer(): return int(x) else: return x except ValueError: raise ValueError("'s' does not represent a number (int or float)")
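Behaviour of the converter on a few representative inputs:

print(str2num("3"))    # 3 (int)
print(str2num("3.0"))  # 3 (integral floats are coerced to int)
print(str2num("3.5"))  # 3.5
# str2num("abc") raises ValueError; str2num(None) raises TypeError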
5dfaed567a66fc7d3ee46cbb70d9c408d38fcbfe
3,645,630
def new_log(infile_history=None, extra_notes=None, git_repo=None): """Create a new command line log/history. Kwargs: infile_history (dict): keys are input file names and values are the logs for those files extra_notes (list): List containing strings of extra information (output is one list item per line) git_repo (str): Location of git repository associated with script executed at command line Returns: str. Command line log """ log = '' current_entry = get_current_entry(git_repo=git_repo) log += current_entry + '\n' if extra_notes: log += 'Extra notes: \n' for line in extra_notes: log += line + '\n' if infile_history: assert type(infile_history) == dict nfiles = len(list(infile_history.keys())) for fname, history in infile_history.items(): if nfiles > 1: log += 'History of %s: \n %s \n' %(fname, history) else: log += '%s \n' %(history) return log
f1bbf4b9c84442d7abf700fec98277eb9e2283ea
3,645,632
from collections import OrderedDict  # instantiable, unlike the typing alias
import inspect

def _get_new_args_dict(func, args, kwargs):
    """Build one dict from args, kwargs and function default args

    The function signature is used to build one joint dict from args and
    kwargs, and additionally from the default arguments found in the function
    signature. The order of the args in this dict is the order of the args in
    the function signature, so the list of args can be used in cases where we
    can only supply *args but have to work with a mixture of args, kwargs and
    default args, as in xarray.apply_ufunc in the xarray wrapper.
    """
    new_args_dict = OrderedDict()
    for i, (arg, parameter) in enumerate(inspect.signature(func).parameters.items()):
        if i < len(args):
            new_args_dict[arg] = args[i]
        elif arg in kwargs.keys():
            new_args_dict[arg] = kwargs[arg]
        else:
            new_args_dict[arg] = parameter.default

    return new_args_dict
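An illustration of how positional arguments, keyword arguments and defaults merge into one mapping in signature order:

def f(a, b, c=3, d=4):
    return a + b + c + d

print(_get_new_args_dict(f, (1, 2), {"d": 40}))
# OrderedDict with a -> 1, b -> 2, c -> 3 (default), d -> 40, in signature order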
ad7553e7b778b8f7b499217c7ee4ad7328958809
3,645,633
import numpy as np

def hellinger_funct(x, P, Q):
    """Integrand sqrt(P(x) * Q(x)) of the Bhattacharyya coefficient, as used in
    computing the Hellinger distance between two densities.

    P, Q should be scipy.stats gaussian_kde objects (or any callables
    returning density values).
    """
    return np.sqrt(P(x) * Q(x))
198f0cf72ef75cece3c59248d8cd1215fa4299a1
3,645,634
import datetime

def human_date(date):
    """
    Return a string containing a nice human readable date/time.
    Miss out the year if it's this year.
    """
    today = datetime.datetime.today()
    if today.year == date.year:
        return date.strftime("%b %d, %I:%M%p")  # %p (AM/PM) is the portable strftime directive
    return date.strftime("%Y %b %d, %I:%M%p")
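A demonstration with a fixed timestamp; when the date falls in the current year, the year is omitted:

print(human_date(datetime.datetime(2020, 3, 14, 15, 9)))
# '2020 Mar 14, 03:09PM' when run in any later year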
7088617b58c0d3b3193e11885fcd7b7ef075f627
3,645,636
from scipy.spatial import distance

def compute_thickness(wmP, kdTreegm, kdTreewm):
    """
    Estimate cortical thickness at a white-matter surface point as the average
    of the wm->gm and gm->wm closest-point distances.

    :param wmP: (x, y, z) point on the white-matter surface
    :param kdTreegm: vtk kd-tree point locator built on the gray-matter surface
    :param kdTreewm: vtk kd-tree point locator built on the white-matter surface
    :return: estimated thickness
    """
    # Find the closest point on the gray-matter surface
    gmIndex = kdTreegm.FindClosestPoint(wmP)
    gmP = kdTreegm.GetDataSet().GetPoint(gmIndex)

    # distance from wm point to gm point
    dst1 = distance.euclidean(wmP, gmP)

    wmIndex = kdTreewm.FindClosestPoint(gmP)
    # fetch from the white-matter dataset, matching the index from kdTreewm
    wmP2 = kdTreewm.GetDataSet().GetPoint(wmIndex)

    # distance from gm point to the closest wm point
    dst2 = distance.euclidean(gmP, wmP2)

    # average the two distances
    thickness = (dst1 + dst2) / 2.0
    return thickness
c2c13a8c17eb997843c9e5752c6ae05f0854a7e5
3,645,637
def cytoband_interval(): """Create test fixture for Cytoband Interval.""" return CytobandInterval( start="q13.32", end="q13.32" )
d052e2dcf7276dc24c680d0b1168ebea6f779eac
3,645,639
def _get_proxy_class(request):
    """
    Return a class that is a subclass of the request's class.
    """
    cls = request.__class__
    if cls not in _proxy_classes:
        class RequestProxy(cls):
            def __init__(self, request):
                self.__dict__ = request.__dict__
                self.__request = request

            def __eq__(self, other):
                return self.__request == other

            # since we're overriding __eq__ we must override __hash__:
            def __hash__(self):
                return hash(self.__request)

            def finish(self):
                return self.__request.finish()

        _proxy_classes[cls] = RequestProxy
    return _proxy_classes[cls]
72113c9d38bdf91650fa88d4297a25457f34b9f8
3,645,640
import csv

def rebuilt_emoji_dictionaries(filename):
    """
    Rebuilds emoji dictionaries, given a csv file with labeled emojis.
    """
    emoji2unicode_name, emoji2sentiment = {}, {}
    with open(filename) as csvin:
        for emoji in csv.DictReader(csvin):
            # Coerce the numeric columns first ...
            for key, value in emoji.items():
                if key in ('Occurrences', 'Positive', 'Neutral', 'Negative'):
                    emoji[key] = int(value)
                elif key in ('Position',):
                    emoji[key] = float(value)
            # ... then derive the sentiment score after the loop, so the dict
            # is not resized while its items are being iterated.
            emoji['Sentiment'] = (emoji['Positive'] - emoji['Negative']) / \
                max(100, (emoji['Positive'] + emoji['Neutral'] + emoji['Negative']))
            emoji2unicode_name[emoji['Emoji']] = emoji['Unicode name']
            emoji2sentiment[emoji['Emoji']] = emoji['Sentiment']
    return emoji2unicode_name, emoji2sentiment
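A sketch of the expected CSV layout (columns as in emoji sentiment ranking datasets), exercised against a temporary file:

import csv
import tempfile

rows = [{"Emoji": "E", "Unicode name": "GRINNING FACE", "Occurrences": "100",
         "Position": "0.5", "Negative": "5", "Neutral": "10", "Positive": "85"}]
with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False, newline="") as f:
    writer = csv.DictWriter(f, fieldnames=list(rows[0]))
    writer.writeheader()
    writer.writerows(rows)
    path = f.name

names, sentiments = rebuilt_emoji_dictionaries(path)
print(names["E"])       # 'GRINNING FACE'
print(sentiments["E"])  # (85 - 5) / max(100, 100) = 0.8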
69bf1438a524ea54bd7bef2d4537a0c61cd0bc3d
3,645,641
def send_message(receiver, message):
    """
    Send a WhatsApp message to the receiver using the module-level Twilio client.

    :param receiver: Phone number of the receiver in E.164 format (e.g. +15551234567)
    :param message: Message text to be sent
    :return: The created Twilio message resource
    """
    # `client` is assumed to be a module-level twilio.rest.Client instance;
    # +14155238886 is Twilio's WhatsApp sandbox number.
    message = client.messages.create(
        from_="whatsapp:+14155238886",
        body=message,
        to=f"whatsapp:{receiver}"
    )
    return message
05022f40104d8b38ffe096ee01941ef04da5f076
3,645,642
from nltk.stem import PorterStemmer

def stem_list(tokens: list) -> list:
    """Stems all tokens in a given list

    Arguments:
    - tokens: List of tokens

    Returns: List of stemmed tokens
    """
    stem = PorterStemmer().stem
    return [stem(t) for t in tokens]
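A quick demonstration of token-wise Porter stemming:

print(stem_list(["running", "flies", "easily"]))
# ['run', 'fli', 'easili']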
6086bda0bce5ce042156a12617a2c09b4b8f9cc8
3,645,643
def unique(s):
    """Return a list of the elements in s, but without duplicates.

    For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
    unique("abcabc") some permutation of ["a", "b", "c"], and
    unique(([1, 2], [2, 3], [1, 2])) some permutation of
    [[2, 3], [1, 2]].

    For best speed, all sequence elements should be hashable.  Then
    unique() will usually work in linear time.

    If not possible, the sequence elements should enjoy a total
    ordering, and if list(s).sort() doesn't raise TypeError it's
    assumed that they do enjoy a total ordering.  Then unique() will
    usually work in O(N*log2(N)) time.

    If that's not possible either, the sequence elements must support
    equality-testing.  Then unique() will usually work in quadratic
    time.
    """

    n = len(s)
    if n == 0:
        return []

    # Try using a dict first, as that's the fastest and will usually
    # work.  If it doesn't work, it will usually fail quickly, so it
    # usually doesn't cost much to *try* it.  It requires that all the
    # sequence elements be hashable, and support equality comparison.
    u = {}
    try:
        for x in s:
            u[x] = 1
    except TypeError:
        del u  # move on to the next method
    else:
        return list(u.keys())  # list() so Python 3 callers get a list, not a view

    # We can't hash all the elements.  Second fastest is to sort,
    # which brings the equal elements together; then duplicates are
    # easy to weed out in a single pass.
    # NOTE:  Python's list.sort() was designed to be efficient in the
    # presence of many duplicate elements.  This isn't true of all
    # sort functions in all languages or libraries, so this approach
    # is more effective in Python than it may be elsewhere.
    try:
        t = list(s)
        t.sort()
    except TypeError:
        del t  # move on to the next method
    else:
        assert n > 0
        last = t[0]
        lasti = i = 1
        while i < n:
            if t[i] != last:
                t[lasti] = last = t[i]
                lasti += 1
            i += 1
        return t[:lasti]

    # Brute force is all that's left.
    u = []
    for x in s:
        if x not in u:
            u.append(x)
    return u
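The fallback chain in action -- hashable items take the dict path, unhashable-but-orderable items take the sort path:

print(unique([1, 2, 3, 1, 2, 3]))        # [1, 2, 3]
print(unique([[1, 2], [2, 3], [1, 2]]))  # [[1, 2], [2, 3]]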
055d2d6e748e1a4ee22057fcd3e73d4e8c8e8081
3,645,644
import jax.numpy as jnp

def get_ground_truth_assignments_for_zacharys_karate_club() -> jnp.ndarray:
    """Returns ground truth assignments for Zachary's karate club."""
    return jnp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0,
                      1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
fc2050072293d9857b50425a4f31137a0872096d
3,645,646
from typing import List
from typing import Tuple

def sort_places_versus_distance_from_coordinates(
    list_places: List[Place], gps_coord: Tuple[float, float]
) -> List[Place]:
    """Order a list of places according to their distance from reference coordinates.

    Note: this helper compensates for the unreliable ordering of the API.
    Results from the API are generally sorted, but many cases were identified
    where the order is inconsistent (example: Montréal).

    Args:
        list_places: List of Place instances to be ordered
        gps_coord: Tuple with latitude and longitude in degrees for the reference point

    Returns:
        List of Place instances ordered by distance to the reference point (nearest first)
    """
    sorted_places = sorted(
        list_places,
        key=lambda x: haversine((float(x.latitude), float(x.longitude)), gps_coord),
    )
    return sorted_places
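A hedged sketch of the expected call pattern; the Place below is a stand-in for the real class (only string latitude/longitude fields are needed here), and haversine is the PyPI haversine package imported by the module:

from dataclasses import dataclass

@dataclass
class Place:
    name: str
    latitude: str
    longitude: str

places = [Place("Paris", "48.8566", "2.3522"),
          Place("Montreal", "45.5019", "-73.5674")]
ref = (45.50, -73.57)  # near Montreal
ordered = sort_places_versus_distance_from_coordinates(places, ref)
print([p.name for p in ordered])  # ['Montreal', 'Paris']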
3089503406bf0959dd1caac5746693f812eb449c
3,645,648
from typing import List
from typing import Tuple

def solve_part2_coordinate_subdivision(boxes: List[Tuple[str, Box]]) -> int:
    """
    An alternative method of solving part 2 which uses coordinate subdivisions
    to build a new, compressed grid. On the real puzzle input this is roughly
    an 800x800x800 grid, which takes some time to work through (~3 min); it
    runs all the examples, however, in under 3 seconds.
    """
    # The boxes are in [a, b] form. Replace them with coordinate divisions that are [a, b)
    x_divisions = sorted({b.x0 for _, b in boxes} | {b.x1 + 1 for _, b in boxes})
    y_divisions = sorted({b.y0 for _, b in boxes} | {b.y1 + 1 for _, b in boxes})
    z_divisions = sorted({b.z0 for _, b in boxes} | {b.z1 + 1 for _, b in boxes})

    # Map of lower corner coordinates to index into the divisions
    x_index = {x: i for i, x in enumerate(x_divisions)}
    y_index = {y: i for i, y in enumerate(y_divisions)}
    z_index = {z: i for i, z in enumerate(z_divisions)}

    on = set()
    for step, box in boxes:
        points = {
            (x, y, z)
            for x in range(x_index[box.x0], x_index[box.x1 + 1])
            for y in range(y_index[box.y0], y_index[box.y1 + 1])
            for z in range(z_index[box.z0], z_index[box.z1 + 1])
        }
        if step == 'on':
            on |= points
        else:
            on -= points

    # Calculate the actual area held by all boxes
    def area(pos: Tuple[int, int, int]) -> int:
        x, y, z = pos
        return ((x_divisions[x + 1] - x_divisions[x]) *
                (y_divisions[y + 1] - y_divisions[y]) *
                (z_divisions[z + 1] - z_divisions[z]))

    return sum(map(area, on))
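A tiny reactor-reboot-style input for the subdivision solver; Box is assumed to be the record used above, with inclusive x0..x1, y0..y1, z0..z1 bounds:

from collections import namedtuple

Box = namedtuple("Box", ["x0", "x1", "y0", "y1", "z0", "z1"])
steps = [("on", Box(0, 1, 0, 1, 0, 1)),   # 2x2x2 = 8 cubes on
         ("off", Box(0, 0, 0, 0, 0, 0))]  # turn 1 cube back off
print(solve_part2_coordinate_subdivision(steps))  # 7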
5a283b0de558755f7ec95ff9cde091ca95b245de
3,645,649
# NOTE: the original relied on a star import (likely pylab) for zeros/find/unique;
# the numpy import and `find` helper below are assumptions added so the function
# is self-contained.
import numpy as np

def find(condition):
    """Return indices where condition is true (stand-in for the deprecated matplotlib.mlab.find)."""
    return np.nonzero(condition)[0]

def decodeSignal(y, t, fclk, nbits):
    """
    Read in digitized voltages output from the QCM Antenna Master Controller
    Board and return (time, logic code number) pairs.

    The encoding scheme is as follows:
    HEADER: 3 clock cycles HIGH, followed by 3 clock cycles LOW
    SIGNAL: 1 clock cycle LOW, followed by 1 clock cycle HIGH or LOW depending
            on the logic state of the bit, followed by another clock cycle LOW
    CLOSER: 1 clock cycle LOW, followed by 2 clock cycles HIGH

    Ex. USAGE:
        fclk = 4.E6
        nbits = 7
        timeVals, codeVals = decodeSignal(y, t, fclk, nbits)

    y     = array of double-precision numbers giving voltage signal with encoded numbers
    t     = array of double-precision numbers giving timebase of signal
    fclk  = clock speed of output, which is the master controller board's clock
            speed divided by 16, since the least significant bit of the counter
            is only toggled on clock positive edges [Hz]
    nbits = number of bits encoded in signal

    Begun on Tuesday, 17 January 2012 (my 28th Birthday!), Ted Golfinopoulos
    """
    tauc = 1. / fclk    # Period of master controller board clock [s]
    taus = t[1] - t[0]  # Sampling time
    fs = 1.E0 / taus    # Sampling frequency

    onThresh = 1.0E0  # Threshold voltage above which the signal is considered ON.

    # Duration of an encoded logic transmission, including header (6 clock cycles),
    # bit encoding, and closer (3 clock cycles) [s]
    dt = (9.E0 + nbits * 3.E0) * tauc
    tbin = 3.E0 * tauc

    # Find indices and times where board output is high
    onSamplesInHeader = int(3.E0 * tauc / taus)  # Digitizer samples expected to be high in header.
    onSamplesInCloser = int(2.E0 * tauc / taus)  # Digitizer samples expected to be high in closer.
    codeLength = int(dt / taus)                  # Samples expected in whole code.

    ### Nomenclature:
    # header = characteristic pattern at the start of an encoded signal:
    #          3 clock counts HIGH, followed by 3 clock counts LOW.
    # closer = characteristic pattern at the end of an encoded signal:
    #          1 clock count LOW, followed by 2 clock counts HIGH.

    # Find indices at which headers and closers start. The algorithm looks for
    # stretches of points where the signal is HIGH for a given duration - the
    # header is high for 3 counts, the closer for 2, and encoded signal bits for 1.
    # There may be some spread in the actual number of points registering as HIGH;
    # the algorithm returns the index of the first point for which the subsequent
    # sequence of points is HIGH for the expected time period, then advances the
    # index pointer past (a) the encoded stream less the closer if a header was
    # found, or (b) the closer itself. This avoids double-counting. The resulting
    # indices delimit the boundaries of encoded numbers.
    headInds = []
    closeInds = []
    bufferLength = 0
    i = 0  # Initialize index pointer
    while i < len(y):
        if y[i] > onThresh:  # First, check if y[i] is on - saves comparing whole slices.
            if all(y[(i + bufferLength):(i + onSamplesInHeader - bufferLength)] > onThresh):
                # Header found - store and jump to end of header ON code.
                headInds.append(i)
                i = i + codeLength - onSamplesInCloser
                continue
            # Don't start marking closers until a header has been found - this matters
            # if the MCB starts putting out codes before digitization starts.
            elif all(y[(i + bufferLength):(i + onSamplesInCloser - bufferLength)] > onThresh) and len(headInds) > 0:
                closeInds.append(i)  # Start index of closer found - the code is between these two indices.
                i = i + onSamplesInCloser
                continue
        i = i + 1  # Increment index

    print("Finished finding headers and closers.")

    def onBits2num(bitInds):
        # Takes an array listing which bits of a binary number are on (in any
        # order, least-significant bit = 0) and returns the decimal number.
        if len(bitInds) == 0:
            return 0
        else:
            return sum([pow(2, aa) for aa in bitInds])

    # Preallocate arrays.
    codeVals = np.zeros(len(closeInds))  # Array to store encoded numbers
    timeVals = np.zeros(len(closeInds))  # Array to store timepoints at which encoded numbers were sampled

    # Loop through all indices containing the start and end times for encoded bit patterns
    for i in range(0, len(closeInds)):
        # Within each encoded segment, divide the segment into bins of duration tbin.
        # The states of the bits are contained in each bin. Find and number the bins
        # for which the board output was high.
        try:
            tOnInBin = t[headInds[i] + find(y[headInds[i]:closeInds[i]] > onThresh)] - t[headInds[i]]
            codeInds = find([tOnInBin[jj] > 2.E0 * tbin and tOnInBin[jj] < (2.E0 + nbits) * tbin
                             for jj in range(0, len(tOnInBin))])
        except:
            print(i)
            print('headInds')
            print(len(headInds))
            print(headInds[i:i + 5])
            print('closeInds')
            print(len(closeInds))
            print(closeInds[i:i + 5])
            temp = find(y[headInds[i]:closeInds[i]] > onThresh)
            print('length of find( y[headInds[i]:closeInds[i]]>onThresh )')
            print(len(temp))
            print('First value')
            print(temp[0])
            raise  # Re-raise the exception.

        # Don't index into tOnInBin unless some bins were on. If none were, the
        # logic code is 0 and the signal is low for the entire code segment.
        if len(codeInds) > 0:
            tOnInBin = tOnInBin[codeInds]
            # Subtract the time equivalent to the first two bins - these are the header.
            tOnInBin = tOnInBin - 2.E0 * tbin
        else:
            tOnInBin = np.zeros(0)  # empty array, so the division below is well-defined

        onBins = np.unique([int(aa) for aa in tOnInBin / tbin])

        # The remaining internal bins comprise the logic signal, ordered
        # most-to-least significant bit; convert bin numbers into bit positions.
        onBits = (nbits - 1) - onBins

        # Convert the on-bit positions into a decimal number. Store.
        codeVals[i] = onBits2num(onBits)
        # On average, the timepoint is halfway between the data points around the edge.
        timeVals[i] = t[headInds[i]] - 0.5 * taus

    print("Finished calculating codes.")

    # Return vectors of time points and corresponding code values.
    return [timeVals, codeVals]
72b812267e172ee245cb5c7f59366105baad40dc
3,645,650
def find_dead_blocks(func, cfg):
    """Find all immediately dead blocks: non-entry blocks with no predecessors"""
    return [block for block in cfg
            if not cfg.predecessors(block) and block != func.startblock]
3f72e0a573b1ef617511f2b9ec3d2e30c7ba6554
3,645,653
def _mcse_sd(ary):
    """Compute the Markov Chain sd error."""
    _numba_flag = Numba.numba_flag
    ary = np.asarray(ary)
    if _not_valid(ary, shape_kwargs=dict(min_draws=4, min_chains=1)):
        return np.nan
    ess = _ess_sd(ary)
    if _numba_flag:
        # np.float was removed in NumPy 1.24; the builtin float is equivalent here
        sd = float(_sqrt(svar(np.ravel(ary), ddof=1), np.zeros(1)))
    else:
        sd = np.std(ary, ddof=1)
    fac_mcse_sd = np.sqrt(np.exp(1) * (1 - 1 / ess) ** (ess - 1) - 1)
    mcse_sd_value = sd * fac_mcse_sd
    return mcse_sd_value
4fcf966b7ec98cad193418f7e623c96154646b5f
3,645,654
def dfs_predecessors(G, source=None):
    """Return dictionary of predecessors in depth-first-search from source."""
    return {t: s for s, t in dfs_edges(G, source=source)}
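If a full networkx is available, the same helper ships as nx.dfs_predecessors; a quick check of the expected output:

import networkx as nx

G = nx.path_graph(4)  # 0-1-2-3
print(nx.dfs_predecessors(G, source=0))  # {1: 0, 2: 1, 3: 2}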
6929d25c981fec9b932c0f978b1ee45f37e0e565
3,645,655
def merge_all_sections(prnt_sctns, child_sctns, merge_within_sections=False):
    """Merge the doc-sections of the parent's and child's attribute into a single docstring.

    Parameters
    ----------
    prnt_sctns: OrderedDict[str, Union[None, str]]
        The parent's doc-sections, keyed by section name.
    child_sctns: OrderedDict[str, Union[None, str]]
        The child's doc-sections, keyed by section name.

    Returns
    -------
    Union[str, None]
        Output docstring of the merged docstrings; None if no sections merge.
    """
    doc = []

    # A parent Raises section without Returns/Yields is dropped when the child
    # documents a Returns or Yields section of its own.
    prnt_only_raises = prnt_sctns["Raises"] and not (
        prnt_sctns["Returns"] or prnt_sctns["Yields"]
    )
    if prnt_only_raises and (child_sctns["Returns"] or child_sctns["Yields"]):
        prnt_sctns["Raises"] = None

    for key in prnt_sctns:
        sect = merge_section(
            key, prnt_sctns[key], child_sctns[key],
            merge_within_sections=merge_within_sections
        )
        if sect is not None:
            doc.append(sect)
    return "\n\n".join(doc) if doc else None
c692d1b08db1a49545eb39e6385040fafc10e149
3,645,656