content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
import scipy.linalg


def lqr_ofb_cost(K, R, Q, X, ss_o):
    # type: (np.array, np.array, np.array, np.array, control.ss) -> np.array
    """
    Cost for LQR output feedback optimization.

    Computes J = trace(P X) where P solves the continuous Lyapunov equation
    A_c^T P + P A_c = -Q_c for the closed loop A_c = A - B K C.

    @param K: gain matrix (may arrive flattened from the optimizer)
    @param R: control weight matrix
    @param Q: process noise covariance matrix
    @param X: initial state covariance matrix
    @param ss_o: open loop state space system (provides A, B, C)
    @return: scalar cost J
    """
    # atleast_2d(...).T reproduces the original `np.matrix(K).T` reshaping
    # (a flat gain vector becomes a column) without the deprecated matrix class.
    K = np.atleast_2d(np.asarray(K, dtype=float)).T
    A = np.asarray(ss_o.A, dtype=float)
    B = np.asarray(ss_o.B, dtype=float)
    C = np.asarray(ss_o.C, dtype=float)
    R = np.asarray(R, dtype=float)
    Q = np.asarray(Q, dtype=float)
    X = np.asarray(X, dtype=float)
    # Closed-loop dynamics and the effective state-weighting under output feedback.
    A_c = A - B @ K @ C
    Q_c = C.T @ K.T @ R @ K @ C + Q
    # solve_continuous_lyapunov is the current name for the deprecated
    # solve_lyapunov alias; `import scipy.linalg` (not bare `import scipy`)
    # guarantees the submodule is loaded.
    P = scipy.linalg.solve_continuous_lyapunov(A_c.T, -Q_c)
    return np.trace(P @ X)
b0a22685c640c2970dad63628d1c87aac73b241a
3,642,722
def steadystate_floquet(H_0, c_ops, Op_t, w_d=1.0, n_it=3, sparse=False):
    """
    Calculates the effective steady state for a driven
    system with a time-dependent cosinusoidal term:

    .. math::

        \\mathcal{\\hat{H}}(t) = \\hat{H}_0 +
            \\mathcal{\\hat{O}} \\cos(\\omega_d t)

    Parameters
    ----------
    H_0 : :obj:`~Qobj`
        A Hamiltonian or Liouvillian operator.

    c_ops : list
        A list of collapse operators.

    Op_t : :obj:`~Qobj`
        The the interaction operator which is multiplied by the cosine

    w_d : float, default 1.0
        The frequency of the drive

    n_it : int, default 3
        The number of iterations for the solver

    sparse : bool, default False
        Solve for the steady state using sparse algorithms.
        Actually, dense seems to be faster.

    Returns
    -------
    dm : qobj
        Steady state density matrix.

    .. note::
        See: Sze Meng Tan,
        https://copilot.caltech.edu/documents/16743/qousersguide.pdf,
        Section (10.16)
    """
    # Both branches build the same matrix continued-fraction truncated at
    # harmonic n_it (per the Tan reference above), differing only in sparse
    # vs. dense linear algebra.
    if sparse:
        N = H_0.shape[0]
        L_0 = liouvillian(H_0, c_ops).data.tocsc()
        L_t = liouvillian(Op_t)
        L_p = (0.5 * L_t).data.tocsc()
        # L_p and L_m correspond to the positive and negative
        # frequency terms respectively.
        # They are independent in the model, so we keep both names.
        L_m = L_p
        L_p_array = L_p.todense()
        L_m_array = L_p_array

        Id = sp.eye(N ** 2, format="csc", dtype=np.complex128)
        # S and T accumulate the down- and up-ladder recursion terms.
        S = T = sp.csc_matrix((N ** 2, N ** 2), dtype=np.complex128)

        # Recurse from the highest harmonic down to 1.
        for n_i in np.arange(n_it, 0, -1):
            L = sp.csc_matrix(L_0 - 1j * n_i * w_d * Id + L_m.dot(S))
            L.sort_indices()  # splu requires sorted CSC indices
            LU = splu(L)
            S = - LU.solve(L_p_array)
            L = sp.csc_matrix(L_0 + 1j * n_i * w_d * Id + L_p.dot(T))
            L.sort_indices()
            LU = splu(L)
            T = - LU.solve(L_m_array)

        M_subs = L_0 + L_m.dot(S) + L_p.dot(T)
    else:
        N = H_0.shape[0]
        L_0 = liouvillian(H_0, c_ops).full()
        L_t = liouvillian(Op_t)
        L_p = (0.5 * L_t).full()
        L_m = L_p

        Id = np.eye(N ** 2)
        S, T = np.zeros((N ** 2, N ** 2)), np.zeros((N ** 2, N ** 2))

        for n_i in np.arange(n_it, 0, -1):
            L = L_0 - 1j * n_i * w_d * Id + np.matmul(L_m, S)
            lu, piv = la.lu_factor(L)
            S = - la.lu_solve((lu, piv), L_p)
            L = L_0 + 1j * n_i * w_d * Id + np.matmul(L_p, T)
            lu, piv = la.lu_factor(L)
            T = - la.lu_solve((lu, piv), L_m)

        M_subs = L_0 + np.matmul(L_m, S) + np.matmul(L_p, T)

    # M_subs is the effective (time-independent) Liouvillian; hand it to the
    # ordinary steady-state solver.
    return steadystate(Qobj(M_subs, type="super", dims=L_t.dims))
8e59e8f138116877678d7d203d4767c6fc6bd1fa
3,642,723
def gsl_blas_dtrmm(*args, **kwargs):
    """
    gsl_blas_dtrmm(CBLAS_SIDE_t Side, CBLAS_UPLO_t Uplo, CBLAS_TRANSPOSE_t TransA, CBLAS_DIAG_t Diag, double alpha, gsl_matrix A, gsl_matrix B) -> int
    """
    # SWIG-generated thin wrapper: forwards all arguments unchanged to the
    # compiled _gslwrap extension (GSL's triangular matrix-matrix multiply).
    return _gslwrap.gsl_blas_dtrmm(*args, **kwargs)
5efee7571f49afc20c3f33d010caccb199613315
3,642,724
def scale(value, upper, lower, min_, max_):
    """Linearly map ``value`` from the range [min_, max_] onto [upper, lower].

    ``value == min_`` maps to ``upper`` and ``value == max_`` maps to
    ``lower`` (note the deliberate inversion: the *upper* output bound
    corresponds to the *minimum* input).

    :param value: the value to rescale
    :param upper: output produced at ``value == min_``
    :param lower: output produced at ``value == max_``
    :param min_: minimum of the input range
    :param max_: maximum of the input range
    :return: the rescaled value as a float
    :raises ValueError: if ``max_ == min_`` (degenerate input range)
    """
    if max_ == min_:
        # Explicit error instead of an opaque ZeroDivisionError.
        raise ValueError("max_ and min_ must differ to define an input range")
    numerator = (lower - upper) * float(value - min_)
    denominator = float(max_ - min_)
    return numerator / denominator + upper
3e13c80b765cffb1e75a6856d343bd9a88c353e9
3,642,725
def conditional_response(view, video=None, **kwargs):
    """
    Render the video view, or deny/redirect based on the viewer's rights.

    Anonymous users without permission are redirected to the login page;
    authenticated users without permission get a PermissionDenied error;
    everyone else receives the normal template response.

    Args:
        view(TemplateView): a video-specific View object (ViewDetail,
            ViewEmbed, etc).
        video(ui.models.Video): a video to display with the view

    Returns:
        TemplateResponse: the template response to render
    """
    if ui_permissions.has_video_view_permission(video, view.request):
        # Permitted: build the context and render normally.
        context = view.get_context_data(video, **kwargs)
        return view.render_to_response(context)
    if view.request.user.is_authenticated:
        # Logged in but not allowed: hard failure.
        raise PermissionDenied
    # Anonymous: send them to the login page, preserving the target URL.
    return redirect_to_login(view.request.get_full_path())
ea8e8176a979fcd46c0c72d5201c6f85c7b4ed48
3,642,726
def Flatten(nmap_list):
    """Flattens every `.NestedMap` in `nmap_list` and concatenates the results."""
    return [entry for nmap in nmap_list for entry in nmap.Flatten()]
c630869b725d69338830e1a14ef920d6d1e87ade
3,642,727
def get_data_schema() -> "T.StructType":
    """
    Return the Kafka data schema: two string columns, ``key`` and ``message``.

    NOTE(review): the original ``from re import T`` imported the ``re.TEMPLATE``
    flag, so ``T.StructType`` raised AttributeError at call time; the intended
    module is ``pyspark.sql.types``.
    """
    # Imported locally (and the annotation kept as a string) so the module
    # stays importable in environments without pyspark installed.
    from pyspark.sql import types as T
    return T.StructType(
        [T.StructField('key', T.StringType()),
         T.StructField('message', T.StringType())]
    )
0cbc2fc6e7015c458e70b8d0ec6efb5fbc0d84f5
3,642,728
from typing import List
import random


def build_graph(num: int = 0) -> (int, List[int]):
    """Build a graph of num nodes.

    Starts from a fixed triangle on nodes 0-2, then attaches each further
    node to two distinct randomly chosen earlier nodes with random edge
    weights in [0, 5).

    :param num: total number of nodes; must be at least 3.
    :return: tuple of (num, list of (node, node, weight) edge triples).
    """
    # NOTE(review): `app` and `np` are not imported in this snippet; they are
    # presumably module-level imports elsewhere (absl app, numpy) — confirm.
    if num < 3:
        raise app.UsageError('Must request graph of at least 3 nodes.')
    weight = 5.0  # upper bound for the random edge weights
    # Seed triangle connecting the first three nodes.
    nodes = [(0, 1, 1.0), (1, 2, 2.0), (0, 2, 3.0)]
    for i in range(num-3):
        # Pick two distinct existing nodes to attach node 3+i to.
        # NOTE(review): range(0, 3 + i - 1) excludes node 2+i (the most
        # recently added node) — looks like an off-by-one; confirm intent.
        l = random.sample(range(0, 3 + i - 1), 2)
        nodes.append((3 + i, l[0], weight*np.random.random()))
        nodes.append((3 + i, l[1], weight*np.random.random()))
    return num, nodes
05efb60ae5cdcc561c93cf2faba172ca5a3ff0d7
3,642,729
from typing import List
from typing import Collection


def concatenate(boxes_list:List[Boxes], fields:Collection[str]=None) -> Boxes:
    """Merge several Boxes instances into a single one.

    Slicing a Boxes into consecutive pieces and concatenating them back
    must reproduce the original instance:

        B = A[:10]
        C = A[10:]
        D = concatenate([B, C])   # D equals A

    Only fields present in every input (or the explicitly requested
    ``fields``) are carried over.
    """
    if not boxes_list:
        # Nothing to merge: return an empty Boxes carrying the requested fields.
        return empty(*(fields if fields is not None else []))

    if fields is None:
        # Keep only the fields shared by every input box set.
        keep = set(boxes_list[0].get_fields())
        for box in boxes_list[1:]:
            keep &= set(box.get_fields())
    else:
        keep = fields

    merged_coords = np.concatenate([box.get() for box in boxes_list], axis=0)
    merged_fields = {
        name: np.concatenate([box.get_field(name) for box in boxes_list], axis=0)
        for name in keep
    }
    return Boxes(merged_coords, **merged_fields)
096067aea3d01e984befd2cadfce5a86c33580e9
3,642,730
def detect_peaks(array, freq=0, cthr=0.2, unprocessed_array=False, fs=44100):
    """
    Function detects the peaks in array, based from the mirpeaks algorithm.

    :param array: Array in which to detect peaks
    :param freq: Scale representing the x axis (sample length as array)
    :param cthr: Threshold for checking adjacent peaks
    :param unprocessed_array: Array that in unprocessed (normalised), if False
        will default to the same as array.
    :param fs: Sampe rate of the array
    :return: index of peaks, values of peaks, peak value on freq.
    """
    # flatten the array for correct processing
    array = array.flatten()

    if np.isscalar(freq):
        # calculate the frerquency scale - assuming a samplerate if none provided
        freq = np.linspace(0, fs / 2.0, len(array))

    if np.isscalar(unprocessed_array):
        # default: take peak values from the processed array itself
        unprocessed_array = array

    # add values to allow peaks at the first and last values
    # to allow peaks at start and end (default of mir)
    array_appended = np.insert(array, [0, len(array)], -2.0)
    # unprocessed array to get peak values
    array_unprocess_appended = np.insert(
        unprocessed_array, [0, len(unprocessed_array)], -2.0
    )
    # append the frequency scale for precise freq calculation
    freq_appended = np.insert(freq, [0, len(freq)], -1.0)

    # get the difference values
    diff_array = np.diff(array_appended)

    # find local maxima: rising slope on the left, non-rising on the right,
    # and above the contrast threshold. +1 shifts into appended-array indices.
    mx = (
        np.array(
            np.where((array >= cthr) & (
                diff_array[0:-1] > 0) & (diff_array[1:] <= 0))
        )
        + 1
    )

    # initialise arrays for output
    finalmx = []
    peak_value = []
    peak_x = []
    peak_idx = []

    if np.size(mx) > 0:
        # unpack the array if peaks found
        mx = mx[0]
        j = 0  # scans the peaks from beginning to end
        mxj = mx[j]  # the current peak under evaluation
        jj = j + 1
        bufmin = 2.0  # running minimum of the valley since the candidate peak
        bufmax = array_appended[mxj]  # height of the current candidate peak
        if mxj > 1:
            oldbufmin = min(array_appended[: mxj - 1])
        else:
            oldbufmin = array_appended[0]
        while jj < len(mx):
            # if adjacent mx values are too close, returns no array
            if mx[jj - 1] + 1 == mx[jj] - 1:
                bufmin = min([bufmin, array_appended[mx[jj - 1]]])
            else:
                bufmin = min(
                    [bufmin, min(array_appended[mx[jj - 1]: mx[jj] - 1])])
            if bufmax - bufmin < cthr:
                # There is no contrastive notch
                if array_appended[mx[jj]] > bufmax:
                    # new peak is significant;y higher than the old peak,
                    # the peak is transfered to the new position
                    j = jj
                    mxj = mx[j]  # the current peak
                    bufmax = array_appended[mxj]
                    oldbufmin = min([oldbufmin, bufmin])
                    bufmin = 2.0
                elif array_appended[mx[jj]] - bufmax <= 0:
                    bufmax = max([bufmax, array_appended[mx[jj]]])
                    oldbufmin = min([oldbufmin, bufmin])
            else:
                # There is a contrastive notch
                if bufmax - oldbufmin < cthr:
                    # But the previous peak candidate is too weak and therefore discarded
                    oldbufmin = min([oldbufmin, bufmin])
                else:
                    # The previous peak candidate is OK and therefore stored
                    finalmx.append(mxj)
                    oldbufmin = bufmin
                # either way, the current peak becomes the new candidate
                bufmax = array_appended[mx[jj]]
                j = jj
                mxj = mx[j]  # The current peak
                bufmin = 2.0
            jj += 1
        if bufmax - oldbufmin >= cthr and (
            bufmax - min(array_appended[mx[j] + 1:]) >= cthr
        ):
            # The last peak candidate is OK and stored
            finalmx.append(mx[j])

        """ Sort the values according to their level """
        finalmx = np.array(finalmx, dtype=np.int64)
        sort_idx = np.argsort(array_appended[finalmx])[::-1]  # descending sort
        finalmx = finalmx[sort_idx]

        # indexes were for the appended array, -1 to return to original array index
        peak_idx = finalmx - 1
        peak_value = array_unprocess_appended[finalmx]
        peak_x = freq_appended[finalmx]

        """ Interpolation for more precise peak location """
        # Parabolic (three-point) interpolation around each stored peak.
        corrected_value = []
        corrected_position = []
        for current_peak_idx in finalmx:
            # if there enough space to do the fitting
            if 1 < current_peak_idx < (len(array_unprocess_appended) - 2):
                y0 = array_unprocess_appended[current_peak_idx]
                ym = array_unprocess_appended[current_peak_idx - 1]
                yp = array_unprocess_appended[current_peak_idx + 1]
                # vertex offset p in (-1, 1) of the parabola through the
                # three samples
                p = (yp - ym) / (2 * (2 * y0 - yp - ym))
                corrected_value.append(y0 - (0.25 * (ym - yp) * p))
                if p >= 0:
                    correct_pos = ((1 - p) * freq_appended[current_peak_idx]) + (
                        p * freq_appended[current_peak_idx + 1]
                    )
                    corrected_position.append(correct_pos)
                elif p < 0:
                    correct_pos = ((1 + p) * freq_appended[current_peak_idx]) - (
                        p * freq_appended[current_peak_idx - 1]
                    )
                    corrected_position.append(correct_pos)
            else:
                # too close to the edge: keep the raw sample values
                corrected_value.append(
                    array_unprocess_appended[current_peak_idx])
                corrected_position.append(freq_appended[current_peak_idx])
        if corrected_position:
            peak_x = corrected_position
            peak_value = corrected_value
        peak_idx = peak_idx.astype(np.int64)
        return peak_idx, np.array(peak_value, dtype=np.float64), np.array(peak_x, np.float64)
    else:
        # no peaks found: return sentinel zero arrays
        return np.array([0], dtype=np.int64), np.array(
            [0], dtype=np.float64), np.array([0], np.float64)
c11a09624085d505d36a9e374954dd6ba5c1e05a
3,642,731
def left_index_iter(shape):
    """Iterate over the flat indices of a structured grid's left boundary.

    For an (n_rows, n_cols) grid stored row-major, the left boundary nodes
    are the first entry of each row: 0, n_cols, 2*n_cols, ...
    """
    n_rows, n_cols = shape[0], shape[1]
    return range(0, n_rows * n_cols, n_cols)
c7da6f5de48d0446cb0729593d3dc0eb95f5ab9a
3,642,732
import logging


def calculate_precision_recall(df_merged):
    """Calculates precision and recall arrays going through df_merged row-wise.

    Expects df_merged to be sorted by descending score (precision/recall are
    cumulative over the row order) and to carry 'label_groundtruth' and
    'label_prediction' columns; mutates df_merged by adding 'is_tp', 'tp',
    'precision' and 'recall' columns.

    Returns:
        Tuple of (precision, recall) numpy arrays, one entry per row.
    """
    # Total positives in the ground truth (helper defined elsewhere).
    all_positives = get_all_positives(df_merged)

    # Populates each row with 1 if this row is a true positive
    # (at its score level).
    df_merged["is_tp"] = np.where(
        (df_merged["label_groundtruth"] == "SPEAKING_AUDIBLE")
        & (df_merged["label_prediction"] == "SPEAKING_AUDIBLE"), 1, 0)

    # Counts true positives up to and including that row.
    df_merged["tp"] = df_merged["is_tp"].cumsum()

    # Calculates precision for every row counting true positives up to
    # and including that row over the index (1-based) of that row.
    # NOTE(review): assumes a 0-based RangeIndex; verify upstream reset_index.
    df_merged["precision"] = df_merged["tp"] / (df_merged.index + 1)

    # Calculates recall for every row counting true positives up to
    # and including that row over all positives in the groundtruth dataset.
    df_merged["recall"] = df_merged["tp"] / all_positives

    logging.info(
        "\n%s\n", df_merged.head(10)[[
            "uid", "score", "label_groundtruth", "is_tp", "tp", "precision",
            "recall"
        ]])

    return np.array(df_merged["precision"]), np.array(df_merged["recall"])
80d2c82c99e0bbbab8460ff997fc1358f758f2f6
3,642,733
def combine(shards, judo_file): """combine this class is passed the """ # Recombine the shards to create the kek combined_shares = Shamir.combine(shards) combined_shares_string = "{}".format(combined_shares) # decrypt the dek uysing the recombined kek decrypted_dek = decrypt( judo_file['wrappedKey'], unhexlify(combined_shares_string) ) # decrypt the data using the dek decrypted_data = decrypt( judo_file['data'], unhexlify(decrypted_dek) ) decrypted_text = unhexlify(decrypted_data) return(decrypted_data, decrypted_text)
3ba88307c3d0cb0a43473e89b731c61e9bbfe83d
3,642,734
def shiftRightUnsigned(e, numBits):
    """
    Unsigned (logical) right shift of column ``e`` by ``numBits`` bits.

    :rtype: Column

    >>> from pysparkling import Context
    >>> from pysparkling.sql.session import SparkSession
    >>> from pysparkling.sql.functions import shiftLeft, shiftRight, shiftRightUnsigned
    >>> spark = SparkSession(Context())
    >>> df = spark.range(-5, 4)
    >>> df.select("id", shiftRight("id", 1), shiftRightUnsigned("id", 1)).show()
    +---+-----------------+-------------------------+
    | id|shiftright(id, 1)|shiftrightunsigned(id, 1)|
    +---+-----------------+-------------------------+
    | -5|               -3|      9223372036854775805|
    | -4|               -2|      9223372036854775806|
    | -3|               -2|      9223372036854775806|
    | -2|               -1|      9223372036854775807|
    | -1|               -1|      9223372036854775807|
    |  0|                0|                        0|
    |  1|                0|                        0|
    |  2|                1|                        1|
    |  3|                1|                        1|
    +---+-----------------+-------------------------+
    """
    # Wrap numBits as a literal and delegate to the ShiftRightUnsigned
    # expression, mirroring pyspark.sql.functions.shiftRightUnsigned.
    return col(ShiftRightUnsigned(parse(e), lit(numBits)))
4f528609bb72a44a99581bca997fbde2f19af861
3,642,735
def change_wallpaper_job(profile, force=False):
    """Centralized wallpaper method that calls setter algorithm based on input
    prof settings. When force, skip the profile name check

    :param profile: profile object whose spanmode/ppimode select the setter.
    :param force: passed through to the setter thread.
    :return: the started worker Thread, or None for an unknown spanmode.
    """
    # The lock serializes the *launch* of wallpaper changes: it is released
    # once the daemon worker thread has been started, not when it finishes.
    with G_WALLPAPER_CHANGE_LOCK:
        if profile.spanmode.startswith("single") and profile.ppimode is False:
            # plain single-image span
            thrd = Thread(target=span_single_image_simple,
                          args=(profile, force), daemon=True)
            thrd.start()
        elif ((profile.spanmode.startswith("single") and profile.ppimode is True)
              or profile.spanmode.startswith("advanced")):
            # PPI-aware or advanced span uses the advanced setter
            thrd = Thread(target=span_single_image_advanced,
                          args=(profile, force), daemon=True)
            thrd.start()
        elif profile.spanmode.startswith("multi"):
            # one image per monitor
            thrd = Thread(target=set_multi_image_wallpaper,
                          args=(profile, force), daemon=True)
            thrd.start()
        else:
            sp_logging.G_LOGGER.info("Unkown profile spanmode: %s",
                                     profile.spanmode)
            # Nothing was launched; signal the caller with None.
            return None
    return thrd
b4013e847cae337f83af5f3282d5551a52b4a7b3
3,642,736
def sheets_from_excel(xlspath):
    """
    Reads in an xls(x) file, returns an array of arrays, like:
    Xijk, i = sheet, j = row, k = column
    (but it's not a np ndarray, just nested arrays)

    Sheets with no rows are skipped.

    :param xlspath: path to the workbook to read with xlrd.
    :return: list of sheets, each a list of row-value lists.
    """
    wb = xlrd.open_workbook(xlspath)
    sheet_data = []
    # range() instead of the Python-2-only xrange(), which raises NameError
    # on Python 3.
    for sheet_index in range(wb.nsheets):
        sheet = wb.sheet_by_index(sheet_index)
        rows = [sheet.row_values(i) for i in range(sheet.nrows)]
        if rows:  # drop completely empty sheets, as before
            sheet_data.append(rows)
    return sheet_data
11099d2929ef0078ae0e5b07a700bdb2021eaa56
3,642,738
import numpy
import logging


def fitStatmechPseudoRotors(Tlist, Cvlist, Nvib, Nrot, molecule=None):
    """
    Fit `Nvib` harmonic oscillator and `Nrot` hindered internal rotor modes to
    the provided dimensionless heat capacities `Cvlist` at temperatures
    `Tlist` in K. This method assumes that there are enough heat capacity
    points provided that the vibrational frequencies can be fit directly,
    but the hindered rotors must be combined into a single "pseudo-rotor".
    """
    # NOTE(review): the bound constants (hoFreqLowerBound, ...), maxIter,
    # PseudoRotorFit and StatmechFitError are module globals defined elsewhere.
    # Construct the lower and upper bounds for each variable
    bounds = []
    # Bounds for harmonic oscillator frequencies
    for i in range(Nvib):
        bounds.append((hoFreqLowerBound, hoFreqUpperBound))
    # Bounds for pseudo-hindered rotor frequency and barrier height
    bounds.append((hrFreqLowerBound, hrFreqUpperBound))
    bounds.append((hrBarrLowerBound, hrBarrUpperBound))

    # Construct the initial guess
    # Initial guesses within each mode type must be distinct or else the
    # optimization will fail
    x0 = numpy.zeros(Nvib + 2, numpy.float64)
    # Initial guess for harmonic oscillator frequencies
    if Nvib > 0:
        x0[0] = 200.0
        x0[1:Nvib] = numpy.linspace(800.0, 1600.0, Nvib-1)
    # Initial guess for hindered rotor frequencies and barrier heights
    x0[Nvib] = 100.0
    x0[Nvib+1] = 300.0

    # Execute the optimization
    fit = PseudoRotorFit(Tlist, Cvlist, Nvib, Nrot)
    fit.initialize(Neq=len(Tlist), Nvars=len(x0), Ncons=0, bounds=bounds,
                   maxIter=maxIter)
    x, igo = fit.solve(x0)

    # Check that the results of the optimization are valid
    if not numpy.isfinite(x).all():
        raise StatmechFitError('Returned solution vector is nonsensical: x = {0}.'.format(x))
    if igo == 8:
        logging.warning('Maximum number of iterations reached when fitting spectral data for {0}.'.format(molecule.toSMILES()))

    # Postprocess optimization results
    vib = list(x[0:Nvib])
    hind = []
    # All Nrot rotors share the single fitted pseudo-rotor (frequency,
    # barrier) pair — the same tuple is appended once per rotor.
    for i in range(Nrot):
        hind.append((x[Nvib], x[Nvib+1]))

    return vib, hind
eb110aab6a5ed35bd2ec1bdb2ca262524fe44dcf
3,642,739
def add_numbers(a, b):
    """Return the sum of ``a`` and ``b``.

    :param int a: The first number.
    :param int b: The second number.
    :return: The sum of the given numbers.

    >>> add_numbers(1, 2)
    3
    >>> add_numbers(50, -8)
    42
    """
    total = a + b
    return total
7d9a0c26618a2aee5a8bbff6a65e315c33594fde
3,642,740
def get_version(table_name):
    """Get the most recent version number held in a given table.

    :param table_name: table to query; must be a plain SQL identifier.
    :return: the 'version' field of the newest row (by entered_on).
    :raises ValueError: if table_name is not a valid identifier.
    """
    import re
    # Identifiers cannot be bound as SQL parameters, so table_name is
    # interpolated into the statement text; restrict it to a bare identifier
    # to rule out SQL injection.
    if not re.fullmatch(r"[A-Za-z_][A-Za-z0-9_]*", table_name):
        raise ValueError("invalid table name: {!r}".format(table_name))
    db = get_db()
    cur = db.cursor()
    cur.execute("select * from {} order by entered_on desc".format(table_name))
    return cur.fetchone()["version"]
7bc55bacf7aa84ccc9ba6f6bb51bbc51c1556395
3,642,741
def area(a, indices=(0, 1, 2, 3)):
    """Compute axis-aligned box areas from coordinate arrays.

    :param a: array whose last axis holds box coordinates
    :param indices: positions of (x0, y0, x1, y1) within the last axis
    :return: (x1 - x0) * (y1 - y0), broadcast over all leading axes
    """
    ix0, iy0, ix1, iy1 = indices
    width = a[..., ix1] - a[..., ix0]
    height = a[..., iy1] - a[..., iy0]
    return width * height
17df4d4f4ad818be0b2ed7a1fe65aaeccbe63638
3,642,742
def infer_tf_dtypes(image_array):
    """
    Choosing a suitable tf dtype based on the dtype of input numpy array.
    """
    # Delegates to dtype_casting (defined elsewhere) with as_tf=True.
    # NOTE(review): image_array.dtype[0] / interp_order[0] imply these are
    # per-modality sequences on a container object, not a plain ndarray —
    # confirm against the callers.
    return dtype_casting(
        image_array.dtype[0], image_array.interp_order[0], as_tf=True)
fd8fc353fd6a76a1dae2a693a9121415393b8d50
3,642,744
def get_cifar10_datasets(n_devices, batch_size=256, normalize=False):
    """Get CIFAR-10 dataset splits.

    Returns (train, val, test) tf.data datasets of grayscale images, plus
    the number of classes (10), vocab size placeholder (256), and the input
    shape tuple.
    """
    if batch_size % n_devices:
        raise ValueError("Batch size %d isn't divided evenly by n_devices %d" %
                         (batch_size, n_devices))
    # 90/10 split of the official training set for train/validation.
    train_dataset = tfds.load('cifar10', split='train[:90%]')
    val_dataset = tfds.load('cifar10', split='train[90%:]')
    test_dataset = tfds.load('cifar10', split='test')

    def decode(x):
        # Grayscale int32 pixels as 'inputs'; class id as 'targets'.
        decoded = {
            'inputs': tf.cast(tf.image.rgb_to_grayscale(x['image']),
                              dtype=tf.int32),
            'targets': x['label']
        }
        if normalize:
            decoded['inputs'] = decoded['inputs'] / 255
        return decoded

    train_dataset = train_dataset.map(decode, num_parallel_calls=AUTOTUNE)
    val_dataset = val_dataset.map(decode, num_parallel_calls=AUTOTUNE)
    test_dataset = test_dataset.map(decode, num_parallel_calls=AUTOTUNE)

    train_dataset = train_dataset.repeat()
    train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
    val_dataset = val_dataset.batch(batch_size, drop_remainder=True)
    test_dataset = test_dataset.batch(batch_size, drop_remainder=True)

    # NOTE(review): shuffle is applied after batch, so whole batches are
    # shuffled (buffer of 256 batches), not individual examples — confirm
    # this is intended.
    train_dataset = train_dataset.shuffle(
        buffer_size=256, reshuffle_each_iteration=True)

    return train_dataset, val_dataset, test_dataset, 10, 256, (batch_size, 32, 32, 1)
50dd1b02792ab13f4b6d42d52e6467503f319bb2
3,642,745
from disco.worker.pipeline.worker import Worker, Stage
from disco.core import Job, result_iterator


def predict(dataset, fitmodel_url, save_results=True, show=False):
    """
    Function starts a job that makes predictions to input data with a given model

    Parameters
    ----------
    input - dataset object with input urls and other parameters
    fitmodel_url - model created in fit phase
    save_results - save results to ddfs
    show - show info about job execution

    Returns
    -------
    Urls with predictions on ddfs
    """
    # Logistic regression needs a label mapping to decode predictions.
    if dataset.params["y_map"] == []:
        raise Exception("Logistic regression requires a target label mapping parameter.")
    if "logreg_fitmodel" not in fitmodel_url:
        raise Exception("Incorrect fit model.")

    job = Job(worker=Worker(save_results=save_results))
    # job parallelizes execution of mappers
    job.pipeline = [
        ("split", Stage("map", input_chain=dataset.params["input_chain"],
                        init=simple_init, process=map_predict))]

    job.params = dataset.params  # job parameters (dataset object)
    # thetas are loaded from ddfs; the fit phase stores exactly one
    # "thetas" entry, hence the [0].
    job.params["thetas"] = [
        v for k, v in result_iterator(fitmodel_url["logreg_fitmodel"])
        if k == "thetas"][0]

    job.run(name="logreg_predict", input=dataset.params["data_tag"])
    results = job.wait(show=show)
    return results
dbf56e82a3ff81a899cf2c33fa83f8c0f1b73947
3,642,746
def format_string_to_json(balance_info):
    """
    Format string to json.

    e.g: '''Working Account|KES|481000.00|481000.00|0.00|0.00'''
    => {'Working Account': {'current_balance': '481000.00',
        'available_balance': '481000.00', 'reserved_balance': '0.00',
        'uncleared_balance': '0.00'}}
    """
    balance_dict = frappe._dict()
    # Multiple accounts arrive joined by '&'; within each account the fields
    # are '|'-separated: name|currency|current|available|reserved|uncleared.
    # NOTE(review): index 1 (the currency code) is ignored and "KES" is
    # hardcoded — confirm this is intended.
    for account_info in balance_info.split("&"):
        account_info = account_info.split('|')
        balance_dict[account_info[0]] = dict(
            current_balance=fmt_money(account_info[2], currency="KES"),
            available_balance=fmt_money(account_info[3], currency="KES"),
            reserved_balance=fmt_money(account_info[4], currency="KES"),
            uncleared_balance=fmt_money(account_info[5], currency="KES")
        )
    return dumps(balance_dict)
1be0d4d8ad3c5373e18e6f78957e18d8f0c0c846
3,642,747
from typing import Tuple
from typing import List


def get_relevant_texts(subject: Synset, doc_threshold: float) -> Tuple[List[str], List[int], int, int]:
    """Get all lines from all relevant articles. Also return the number of
    retrieved documents and retained ones.

    A document is retained when its relevance score reaches doc_threshold,
    or when it is short (<= 200 words) and mentions the subject by name.
    """
    article_dir = get_article_dir(subject)
    rel_path = get_relevant_scores_path(subject)
    subject_name = get_concept_name(subject)
    with rel_path.open() as f:
        # read file to get the ids of relevant articles; line i holds the
        # score of document i
        scores = [float(line) for line in f if line.strip()]
    num_doc_retrieved = len(scores)

    line_list = []
    doc_id_list = []
    num_doc_retained = 0
    for doc_id, score in enumerate(scores):
        path = article_dir / "{}.txt".format(doc_id)
        try:
            with path.open() as f:
                lines = [line.strip() for line in f if line.strip()]
            if len(lines) > 500:
                # ignore huge files
                continue
            text = "\n".join(lines)
            if score >= doc_threshold or (len(text.split()) <= 200 and subject_name in text.lower()):
                line_list.extend(lines)
                # record the source document of every retained line
                doc_id_list.extend([doc_id] * len(lines))
                num_doc_retained += 1
        except FileNotFoundError:
            logger.warning(f"Subject {subject.name()} - {path} does not exist!")
            continue
    return line_list, doc_id_list, num_doc_retrieved, num_doc_retained
150dca990fe67ed3fb5381e6d4a6bce8656f2619
3,642,748
def plot_mae(X, y, model):
    """
    Scatter-plot predicted vs. actual trip durations and save the figure to
    ``MAE.png``.

    (Translated from the original French: "It is also relevant to log the
    plots as artifacts.")

    :param X: feature matrix passed to model.predict
    :param y: actual trip durations (x axis)
    :param model: fitted estimator with a predict() method
    :return: the (closed) matplotlib Figure, for logging as an artifact
    """
    fig = plt.figure()
    plt.scatter(y, model.predict(X))
    # Axis labels are user-facing French strings; left untouched.
    plt.xlabel("Durée réelle du trajet")
    plt.ylabel("Durée estimée du trajet")
    image = fig
    fig.savefig("MAE.png")
    # Close to free the figure from pyplot's registry; the object itself is
    # still returned.
    plt.close(fig)
    return image
3bc4225f530f7f80ea903d55963cb0a33fe1cb45
3,642,749
import types


def get_pure_function(method):
    """
    Retrieve the plain function backing a bound method.

    Works on both Python 2 (``im_func``) and Python 3 (``__func__``);
    the original relied on the Python-2-only ``im_func`` attribute.
    Depends on features specific to CPython.

    :param method: a bound method object.
    :return: the underlying function.
    """
    assert isinstance(method, types.MethodType)
    # Python 3 exposes the wrapped function as __func__, Python 2 as im_func.
    func = getattr(method, '__func__', None)
    if func is None:
        func = method.im_func
    return func
f0a7f25a38fd9da061f281f5c55453f8e7ae37d0
3,642,751
def _agg_samples_2d(sample_df: pd.DataFrame) -> pd.DataFrame: """Aggregate ENN samples for plotting.""" def pct_95(x): return np.percentile(x, 95) def pct_5(x): return np.percentile(x, 5) enn_df = (sample_df.groupby(['x0', 'x1'])['y'] .agg([np.mean, np.std, pct_5, pct_95]).reset_index()) enn_df = enn_df.rename({'mean': 'y'}, axis=1) enn_df['method'] = 'enn' return enn_df
d2decff9ae5224ad77ce6f133ac0cf0099dda89f
3,642,752
def get_np_num_array_str(data_frame_rows):
    """
    Get a complete code str that creates a np array with random values

    :param data_frame_rows: row count substituted into the randint size.
    :return: dedented Python source code as a string.
    """
    # cleandoc (inspect.cleandoc, imported elsewhere in this module) strips
    # the common leading indentation from the triple-quoted template after
    # the row count has been substituted in.
    test_code = cleandoc("""
        from sklearn.preprocessing import StandardScaler
        import pandas as pd
        from numpy.random import randint
        series = randint(0,100,size=({}))
        df = pd.DataFrame(series, columns=["num"])
        """.format(data_frame_rows))
    return test_code
66a81bba8666a02770f1de233e458a5067e08f62
3,642,753
from typing import Any


def get_config(name: str = None, default: Any = _MISSING) -> Any:
    """Gets the global configuration.

    Parameters
    ----------
    name : str, optional
        The name of the setting to get the value for. If no name is given
        then the whole :obj:`Configuration` object is returned.
    default : optional
        The default value to return if `name` is provided but the setting
        doesn't exist in the global configuration.

    Returns
    -------
    :obj:`Configuration` or :obj:`object`
        The global configuration object or the configuration setting
        requested.

    Raises
    ------
    KeyError
        If `name` is missing from the configuration and no `default` was
        supplied.
    """
    # Read-only access: no `global` statement needed.
    if not name:
        return _GLOBAL_CONFIG.copy()
    # Identity check against the sentinel (`is`, not `==`): a user-supplied
    # default that happens to compare equal to _MISSING must still be honored.
    if default is _MISSING:
        return _GLOBAL_CONFIG[name]
    return _GLOBAL_CONFIG.get(name, default)
da43dd18c3841489cf6c909acb12a95b34179135
3,642,754
def domain_domain_distance(ptg1, ptg2, pdb_struct, domain_distance_dict):
    """
    Return the distance between two domains, which will be defined as the
    distance between their two closest SSEs (using SSE distance defined in
    ptdistmatrix.py)

    Parameters:
       ptg1 - PTGraph2 object for one domain
       ptg2 - PTGraph2 object for the other domain
       pdb_struct - parsed PDB structure from Bio.PDB
       domain_distance_dict (In/Out) - dict { (dom1, dom2) : ret_tuple }
                  for memoizing domain-domain distances.
                  (dom1,dom2) is tuple of two PTGraph2 objects, note both
                  (dom1,dom2) and (dom2,dom1) are always added and
                  ret_tuple is the return value tuple as defined below.

    Return value:
       tuple (dist, closest_sse1, closest_sse2, closest_res1, closest_res2)
       distance in Angstroms between the two domains, as defined above and
       closest_sse1, closest_sse2 are PTNode objects for the closest SSEs
       in ptg1 and ptg2 domains respectively and closest_res1 and
       closest_res2 are the closest residues in closest_sse1 and
       closest_sse2 respectively.
    """
    # This function is memoized by the domain_distance_dict parameter,
    # to save recomputations of distances that are previously computed.
    # dict.has_key() was removed in Python 3; use the `in` operator.
    if (ptg1, ptg2) in domain_distance_dict:
        return domain_distance_dict[(ptg1, ptg2)]

    min_dist = float("inf")
    closest_sse1 = closest_sse2 = None
    closest_res1 = closest_res2 = None

    # exclude the terminus nodes
    ptg1_sses = [node for node in ptg1.iter_nodes()
                 if not isinstance(node, PTNodeTerminus)]
    ptg2_sses = [node for node in ptg2.iter_nodes()
                 if not isinstance(node, PTNodeTerminus)]

    # Exhaustive pairwise scan for the closest SSE pair across the domains.
    for sse1 in ptg1_sses:
        for sse2 in ptg2_sses:
            (dist, res1, res2) = calc_sse_sse_dist(sse1, sse2, pdb_struct)
            if dist < min_dist:
                min_dist = dist
                closest_sse1 = sse1
                closest_sse2 = sse2
                closest_res1 = res1
                closest_res2 = res2

    ret_tuple12 = (min_dist, closest_sse1, closest_sse2,
                   closest_res1, closest_res2)
    ret_tuple21 = (min_dist, closest_sse2, closest_sse1,
                   closest_res2, closest_res1)
    # Cache both orderings so a later (ptg2, ptg1) query also hits the cache.
    domain_distance_dict[(ptg1, ptg2)] = ret_tuple12
    domain_distance_dict[(ptg2, ptg1)] = ret_tuple21
    return ret_tuple12
6f2f68714717a32da0182db814629ac0e55b59e8
3,642,755
def pred_error(f_pred, prepare_data, data, iterator, max_len, n_words, filter_h):
    """
    Compute the prediction error (1 - accuracy) over the dataset.

    :param f_pred: compiled prediction function, batch -> predicted labels
    :param prepare_data: callable turning raw samples into a model batch
    :param data: (inputs, labels) pair of parallel sequences
    :param iterator: iterable of (_, index-list) minibatch index pairs
    :param max_len: maximum sequence length passed to prepare_data
    :param n_words: vocabulary size passed to prepare_data
    :param filter_h: filter height (context padding) passed to prepare_data
    :return: error rate in [0, 1]
    """
    valid_err = 0
    for _, valid_index in iterator:
        # gather and pad this minibatch's samples
        x = [data[0][t] for t in valid_index]
        x = prepare_data(x,max_len,n_words,filter_h)
        preds = f_pred(x)
        targets = np.array([data[1][t] for t in valid_index],dtype='int32')
        # count correct predictions in this minibatch
        valid_err += (preds == targets).sum()
    # NOTE(review): dividing by len(data[0]) assumes the iterator covers each
    # sample exactly once — confirm against the minibatch builder.
    valid_err = 1. - numpy_floatX(valid_err) / len(data[0])
    return valid_err
c8f667a2eb6b9cc67d96ea0b6848f27cd337a2f9
3,642,756
def standardize_10msample(frac: float=0.01):
    """Runs each data processing function in series to save a new .csv data
    file. Intended for Pandas DataFrame. For Dask DataFrames, use
    standardize_10msample_dask

    Args:
        frac (float, optional): Fraction of data file rows to sample.
            Defaults to 0.01.

    Returns:
        df_10msample(pd.core.Frame.DataFrame): Finished DataFrame\
            ,that should match the same when using .read_csv() method
    """
    # Tab-separated username/password pairs; everything is read as str so
    # numeric-looking passwords keep their leading zeros.
    sample_10m = '../data/10m_sample_common_passwords/10-million-combos.txt'
    df_10msample = pd.read_csv(sample_10m, header=None,
                               delimiter='\t').astype(str).sample(frac=frac)
    df_10msample.columns = ['username', 'password']
    # Usernames are not needed for password analysis.
    df_10msample.drop('username', axis=1, inplace=True)
    df_10msample['length'] = df_10msample['password'].apply(len)
    # strength_features / withPassClass / pass_class_expand / to_csv are
    # sibling helpers defined elsewhere in this module; the first and third
    # mutate the frame in place.
    strength_features(df_10msample)
    df_10msample['class'] = df_10msample['password'].apply(withPassClass)
    pass_class_expand(df_10msample)
    to_csv(df_10msample,
           filename='../data/10m_sample_common_passwords/10m_standardized.csv')
    return df_10msample
d834cc31220a34204966160bb72399a53b99ff5b
3,642,757
def is_ansible_managed(file_path):
    """
    Check whether the fail2ban configuration file at ``file_path`` is managed
    by Ansible.

    A managed file starts with the exact marker line ANSIBLE_MANAGED_LINE.

    :param file_path: the file to check if managed by Ansible
    :return: whether the file is managed by Ansible
    """
    with open(file_path, "r") as config_file:
        first_line = config_file.readline()
    return first_line.strip() == ANSIBLE_MANAGED_LINE
a8e70d242f598ad26a00cf0b3ccc1a1494475ba8
3,642,758
import ctypes


def sumai(array):
    """
    Return the sum of the elements of an integer array.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sumai_c.html

    :param array: Input Array.
    :type array: Array of ints
    :return: The sum of the array.
    :rtype: int
    """
    # Capture the length before converting; matches the CSPICE
    # sumai_c(array, n) signature. stypes/libspice are SpiceyPy-internal
    # helpers provided elsewhere in this module.
    n = ctypes.c_int(len(array))
    array = stypes.toIntVector(array)
    return libspice.sumai_c(array, n)
ece9b6a171dff66d4f66c7ce711b6a7a7b4c59a2
3,642,759
from typing import Union
from typing import Optional
from typing import Mapping
from typing import Any


def invoke(
    node: Union[DAG, Task],
    params: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, NodeOutput]:
    """
    Invoke a node with a series of parameters.

    Dispatches to the DAG or task runner depending on the node's type.

    Parameters
    ----------
    node
        Node to execute

    params
        Inputs to the task, indexed by input/parameter name.

    Returns
    -------
    Serialized outputs of the task, indexed by output name.

    Raises
    ------
    ValueError
        When any required parameters are missing

    TypeError
        When any of the outputs cannot be obtained from the return value of the task's function

    SerializationError
        When some of the outputs cannot be serialized with the specified Serializer
    """
    if not isinstance(node, DAG):
        return _invoke_task(node, params=params)
    return _invoke_dag(node, params=params)
f05a49996912a52db37a809d078faaa208942e7f
3,642,762
def convert_acl_to_iam_policy(acl):
    """Converts the legacy ACL format to an IAM Policy proto.

    Maps owners -> roles/owner, readers (plus 'allUsers' when
    all_users_can_read is set) -> roles/viewer, writers -> roles/editor.
    Roles with no members are omitted.
    """
    owners = acl.get('owners', [])
    readers = acl.get('readers', [])
    if acl.get('all_users_can_read', False):
        # Public readability is expressed as the special 'allUsers' member.
        readers.append('allUsers')
    writers = acl.get('writers', [])
    role_members = (
        ('roles/owner', owners),
        ('roles/viewer', readers),
        ('roles/editor', writers),
    )
    bindings = [
        {'role': role, 'members': members}
        for role, members in role_members
        if members
    ]
    return {'bindings': bindings}
990cdb6a51a696cf2b7825af94cf4265b2229be9
3,642,763
def get_valid_start_end(mask):
    """
    Locate the first and last valid (False) positions per row of an
    invalid-entry mask.

    Args:
        mask (ndarray of bool): invalid mask of shape (ns, nt); True marks
            invalid positions.

    Returns:
        tuple of ndarray: (start_idx, end_idx), each shape (ns,), dtype
        int32. start_idx[s] is the first t with mask[s][t] False and
        end_idx[s] is one past the last such t; both stay -1 for rows that
        are entirely invalid.
    """
    num_series, num_steps = mask.shape[0], mask.shape[1]
    start_idx = np.full(num_series, -1, dtype=np.int32)
    end_idx = np.full(num_series, -1, dtype=np.int32)
    for s in range(num_series):
        valid_times = [t for t in range(num_steps) if not mask[s][t]]
        if valid_times:
            start_idx[s] = valid_times[0]
            end_idx[s] = valid_times[-1] + 1
    return start_idx, end_idx
41520c051d25aed203e5db9f64497f75eaab4f6c
3,642,764
def pahrametahrize(*args, **kwargs) -> t.Callable:
    """Pass arguments straight through to `pytest.mark.parametrize`."""
    # Whimsically-named alias; callers use it exactly like
    # pytest.mark.parametrize.
    return pytest.mark.parametrize(*args, **kwargs)
43bbc1e8323956f1ed2e1da60abf23e5b35130ba
3,642,765
from datetime import datetime


def utcnow():
    """Return the current UTC time, truncated to whole seconds, with the UTC
    timezone attached."""
    # datetime.utcnow() is deprecated and returns a *naive* datetime;
    # asking for the current time in the UTC zone directly yields the same
    # instant already carrying tzinfo.
    return datetime.now(UTC).replace(microsecond=0)
496c80cfa4a2b00b514346705fc0709739e2d3c0
3,642,766
def default_to(default, value):
    """
    Ramda-style ``defaultTo``: prefer ``value``, falling back to ``default``.

    Note that *any* falsy ``value`` (None, 0, '', [], False, ...) triggers
    the fallback, mirroring the ``value or default`` semantics.

    :param default: value returned when ``value`` is falsy
    :param value: the preferred value
    :return: ``value`` if truthy, otherwise ``default``
    """
    if value:
        return value
    return default
58338f67332a0ff116cd2ff46d65ee92bf59c360
3,642,767
def insertGraph():
    """Create a new graph element under the document root.

    Returns:
        tuple of (name, (elem, dot_graph)): the generated graph name, the
        new XML element, and the freshly built dot-graph object.
    """
    root = Xref.getroot().elem
    # Allocate a fresh reference id and build the namespaced graph element.
    ref = getNewRef()
    elem = etree.Element(etree.QName(root, sgraph), reference=ref)
    name = makeNewName(sgraph, elem)
    root.append(elem)
    # Mark the cross-reference store modified so the change gets persisted.
    Xref.setDirty()
    return name, (elem, newDotGraph(name, ref, elem))
2a60fac192d6d3448c3e48637585af2d54bdf87f
3,642,768
from datetime import datetime


def get_line_notif(line_data: str):
    """
    Fetch reminder rows that are due right now for the given reminder type.

    :param line_data: reminder type ("Мобильная Связь", "Подписки", "ЖКХ",
        "Планер", "День Рождения" or "Приём Лекарств")
    :return: list of matching rows (empty when nothing is due or on error)
    """
    res = []
    connection = None
    cursor = None
    try:
        connection = psycopg2.connect(
            user=USER,
            password=PASSWORD,
            host="127.0.0.1",
            port="5432",
            database=DATABASE)
        cursor = connection.cursor(cursor_factory=extras.DictCursor)
        # `datetime` is the class here (`from datetime import datetime`);
        # the previous `datetime.datetime.now()` raised AttributeError.
        now = datetime.now()
        # Zero-padded components so they match the stored text columns.
        hhmm = f"{now.hour:02d}:{now.minute:02d}"
        day = f"{now.day:02d}"
        month = f"{now.month:02d}"
        # Parameterized queries: never interpolate values into SQL text.
        if line_data in ("Мобильная Связь", "Подписки", "ЖКХ"):
            cursor.execute(
                'SELECT * from reminders WHERE date = %s and time = %s and type = %s;',
                (day, hhmm, line_data))
        elif line_data == "Планер":
            # NOTE(review): stored planner dates assumed ISO (YYYY-MM-DD);
            # the old code interpolated the bound method repr by mistake.
            cursor.execute(
                'SELECT * from reminders WHERE date = %s and time = %s and type = %s;',
                (str(now.date()), hhmm, line_data))
        elif line_data == "День Рождения":
            cursor.execute(
                'SELECT * from reminders WHERE date = %s and time = %s and type = %s;',
                (f"{day}.{month}", hhmm, line_data))
        elif line_data == "Приём Лекарств":
            cursor.execute(
                'SELECT * from reminders WHERE time = %s and type = %s;',
                (hhmm, line_data))
        connection.commit()
        # Fetch inside the try: previously this ran in `finally` and crashed
        # with NameError/InterfaceError when the connection had failed.
        res = cursor.fetchall()
    except (Exception, Error) as error:
        print(ERROR_MESSAGE, error)
    finally:
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
    return res
8fbeb195faaa1f49928e3d0e49310cc3d4bcb37f
3,642,769
def bouts_per_minute(boutlist):
    """Takes list of times of bouts in seconds, returns bpm = total_bouts / total_time."""
    # Rate in bouts-per-second, scaled up to a per-minute figure.
    per_second = total_bouts(boutlist) / total_time(boutlist)
    return per_second * 60
949f0d8758d7fcc8a1e19d4772788504b5ba10a5
3,642,771
import re


def convert_to_snake_case(string: str) -> str:
    """Helper function to convert column names into snake case.  Takes a
    string of any sort and makes conversions to snake case, replacing
    double-underscores with single underscores."""
    # Break "XxxYyy" word boundaries first, then "x9X"-style boundaries.
    spaced = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string)
    spaced = re.sub('([a-z0-9])([A-Z])', r'\1_\2', spaced)
    return spaced.lower().replace('__', '_')
2a8de69a6915e87e46582a1af7a7897ff6fd97ce
3,642,772
def list_keys(client, keys):
    """
    :param client: datastore client
    :param keys: list of candidate keys
    :return: the fetched objects when any exist, None otherwise
    """
    # get_multi returns a (possibly empty) collection; normalize empty -> None.
    return client.get_multi(keys) or None
4370053b76ea526e1f43309112f85f968ce76b6b
3,642,773
def estimate_variance(ip_image: np.ndarray, x: int, y: int, nbr_size: int) -> float:
    """Estimates local variances as described in pg. 6, eqn. 20

    Computes the per-channel population variance over the pixel
    neighborhood of (x, y) and returns their mean.
    """
    nbrs = get_neighborhood(x, y, nbr_size, ip_image.shape[0], ip_image.shape[1])
    count = len(nbrs)
    channel_vars = []
    for channel in range(3):
        samples = [ip_image[i, j, channel] for i, j in nbrs]
        mean = sum(samples) / count
        # Population variance (divides by N, not N-1).
        variance = sum((s - mean) * (s - mean) for s in samples) / count
        channel_vars.append(variance)
    return np.average(channel_vars)
26932d333a50526f5f3bc4b10e5dd2b0bd15e871
3,642,775
def api_key_regenerate():
    """
    Generate a new API key for the currently logged-in user.

    Returns a (json, status) pair: the new key on success, or the generic
    undefined-failure payload on any application error.
    """
    try:
        return flask.jsonify({
            constants.api.RESULT: constants.api.RESULT_SUCCESS,
            constants.api.MESSAGE: None,
            'api_key': database.user.generate_new_api_key(current_user.user_id).api_key,
        }), constants.api.SUCCESS_CODE
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; any application error still maps to the
        # generic failure response as before.
        return flask.jsonify(constants.api.UNDEFINED_FAILURE), constants.api.UNDEFINED_FAILURE_CODE
59ccc904dc80386910370dae0752c4810107224c
3,642,776
def almost_equal_ignore_nan(a, b, rtol=None, atol=None):
    """Test that two NumPy arrays are almost equal (ignoring NaN in either array).
    Combines a relative and absolute measure of approximate eqality.
    If either the relative or absolute check passes, the arrays are considered equal.
    Including an absolute check resolves issues with the relative check where all
    array values are close to zero.

    Parameters
    ----------
    a : np.ndarray
    b : np.ndarray
    rtol : None or float
        The relative threshold. Default threshold will be used if set to ``None``.
    atol : None or float
        The absolute threshold. Default threshold will be used if set to ``None``.
    """
    # Work on copies so the callers' arrays are never modified.
    a_clean, b_clean = np.copy(a), np.copy(b)
    # Positions where either side is NaN are zeroed on both sides, so they
    # compare equal and never influence the result.
    either_nan = np.isnan(a_clean) | np.isnan(b_clean)
    a_clean[either_nan] = 0
    b_clean[either_nan] = 0
    return almost_equal(a_clean, b_clean, rtol, atol)
ca364b23e5a6106a98ba52629ccb152dc0d95214
3,642,777
def make_commands(manager): """Prototype""" # pylint: disable=no-member return (cmd_t(manager) for cmd_t in AbstractTwitterFollowersCommand.__subclasses__())
54443970dc69b06c530b746cb42b418bc5a7ee42
3,642,778
import logging


def copy_rds_snapshot(
    target_snapshot_identifier: str,
    source_snapshot_identifier: str,
    target_kms: str,
    wait: bool,
    rds,
):
    """Copy snapshot from source_snapshot_identifier to target_snapshot_identifier and encrypt using target_kms

    Parameters
    ----------
    target_snapshot_identifier : identifier for the new cluster snapshot
    source_snapshot_identifier : identifier of the snapshot to copy
    target_kms : KMS key id used to encrypt the copy
    wait : when True, block until the snapshot becomes available
    rds : boto3 RDS client

    Returns
    -------
    dict or None
        The ``DBClusterSnapshot`` description, or None when waiting failed.
    """
    logger = logging.getLogger("copy_rds_snapshot")
    snapshot = rds.copy_db_cluster_snapshot(
        SourceDBClusterSnapshotIdentifier=source_snapshot_identifier,
        TargetDBClusterSnapshotIdentifier=target_snapshot_identifier,
        KmsKeyId=target_kms,
    )["DBClusterSnapshot"]
    if not wait:
        return snapshot
    # Give the API a moment to register the new snapshot before polling.
    sleep(5)
    waiter = rds.get_waiter("db_cluster_snapshot_available")
    logger.warning(
        "Waiting for snapshot {} to be created...".format(
            snapshot["DBClusterSnapshotIdentifier"]
        )
    )
    try:
        waiter.wait(
            DBClusterSnapshotIdentifier=snapshot["DBClusterSnapshotIdentifier"],
            SnapshotType="manual",
            Filters=[
                {
                    "Name": "db-cluster-id",
                    "Values": [
                        snapshot["DBClusterIdentifier"],
                    ],
                },
            ],
            WaiterConfig={"Delay": 10, "MaxAttempts": 100},
        )
    except Exception:
        # Narrowed from a bare `except:` (which also caught SystemExit and
        # KeyboardInterrupt); on waiter timeout/API failure we log and fall
        # through, returning None exactly as before.
        logger.exception(
            "Unable to wait for snapshot {} to be created for cluster {}".format(
                snapshot["DBClusterSnapshotIdentifier"], snapshot["DBClusterIdentifier"]
            )
        )
    else:
        return snapshot
f7d3c3b9b5588afb9dd1b6e65fc3a51f6411e997
3,642,779
def get_other_menuitems():
    """
    returns other menu items
    each menu pk will be dict key
    {0: QuerySet, 1: QuerySet, ..}
    """
    # One related-item queryset per menu, keyed by that menu's primary key.
    return {menu.pk: menu.menuitem_set.all() for menu in Menu.objects.all()}
7e868e3d434dd168dfe6d9938093044e97e2bc5c
3,642,780
import random


def create_deck(shuffle=False):
    """Create a new deck of 52 cards

    Cards are (suit, rank) tuples; pass shuffle=True to randomize order.
    """
    deck = []
    for rank in RANKS:
        for suit in SUITS:
            deck.append((suit, rank))
    if shuffle:
        random.shuffle(deck)
    return deck
92b828ce373c48a0a403c519a2e25b0cb1ab7409
3,642,782
def mock_gate_util_provider_oldest_namespace_feed_sync( monkeypatch, mock_distromapping_query ): """ Mocks for anchore_engine.services.policy_engine.engine.policy.gate_util_provider.GateUtilProvider.oldest_namespace_feed_sync """ # required for FeedOutOfDateTrigger.evaluate # setup for anchore_engine.services.policy_engine.engine.feeds.feeds.FeedRegistry.registered_vulnerability_feed_names init_feed_registry() @contextmanager def mock_session_scope(): """ Mock context manager for anchore_engine.db.session_scope. """ yield None def raise_no_active_grypedb(session): raise NoActiveGrypeDB def _setup_mocks(feed_group_metadata=None, grype_db_feed_metadata=None): # required for FeedOutOfDateTrigger.evaluate # mocks anchore_engine.services.policy_engine.engine.feeds.db.get_feed_group_detached monkeypatch.setattr( "anchore_engine.services.policy_engine.engine.policy.gate_util_provider.session_scope", mock_session_scope, ) if grype_db_feed_metadata: monkeypatch.setattr( "anchore_engine.services.policy_engine.engine.policy.gate_util_provider.get_most_recent_active_grypedb", lambda x: grype_db_feed_metadata, ) else: monkeypatch.setattr( "anchore_engine.services.policy_engine.engine.policy.gate_util_provider.get_most_recent_active_grypedb", raise_no_active_grypedb, ) # mocks anchore_engine.db.db_grype_db_feed_metadata.get_most_recent_active_grypedb # if feed_group_metadata: monkeypatch.setattr( "anchore_engine.services.policy_engine.engine.policy.gate_util_provider.get_feed_group_detached", lambda x, y: feed_group_metadata, ) return _setup_mocks
c6cf043b49574be44114110f5c1092d06fe531a0
3,642,783
def ESMP_LocStreamGetBounds(locstream, localDe=0):
    """
    Preconditions: An ESMP_LocStream has been created.\n
    Postconditions: The exclusive-region bounds of the requested local
                    decomposition element have been returned.\n
    Arguments:\n
        :RETURN: Numpy.array :: exclusive lower bounds (shifted to 0-based)\n
        :RETURN: Numpy.array :: exclusive upper bounds\n
        ESMP_LocStream :: locstream\n
        integer, optional :: localDe (default 0)\n
    """
    llde = ct.c_int(localDe)

    # locstream rank is always one
    locstreamrank = 1

    # Output buffers filled in-place by the C call.
    exLB = np.zeros(locstreamrank, dtype=np.int32)
    exUB = np.zeros(locstreamrank, dtype=np.int32)

    rc = _ESMF.ESMC_LocStreamGetBounds(locstream.ptr, llde, exLB, exUB)

    # adjust bounds to be 0 based
    # NOTE(review): only the lower bound is shifted; exUB is returned
    # unmodified — confirm that is the intended convention.
    exLB = exLB - 1

    if rc != constants._ESMP_SUCCESS:
        raise ValueError('ESMC_LocStreamGetBounds() failed with rc = '+str(rc)+'. '+
                         constants._errmsg)

    return exLB, exUB
179b24463cd8dd5f70ad63530a50b6fe4dd4dfb8
3,642,784
def reverse(collection):
    """
    Reverses a collection.

    Args:
      collection: `dict|list|depset` - The collection to reverse

    Returns:
      `dict|list|depset` - A new collection of the same type, with items in
      the reverse order of the input collection.
    """

    # Starlark's type() returns a string name, hence the string comparisons.
    forward_list = None
    collection_type = type(collection)
    if collection_type == "dict":
        # A dict reverses via its (key, value) pairs.
        forward_list = collection.items()
    elif collection_type == "list":
        forward_list = collection
    elif collection_type == "depset":
        forward_list = collection.to_list()
    else:
        fail("Unsupported collection type: " + collection_type)

    # Build the reversed order by prepending each element in turn.
    reverse_list = []
    for value in forward_list:
        reverse_list.insert(0, value)

    # Re-wrap the reversed items in the original collection type.
    ret = None
    if collection_type == "dict":
        ret = dict(reverse_list)
    elif collection_type == "list":
        ret = reverse_list
    elif collection_type == "depset":
        ret = depset(reverse_list)
    else:
        fail("Unsupported collection type: " + collection_type)
    return ret
587bf847028f485783e74633b1aa2ed0ef003daa
3,642,785
def A_fast_full5(S, phase_factors, r, r_min, MY, MX):
    """
    Fastest version, takes precomputed phase factors, assumes S-matrix with beam tilt included
    :param S: B x NY x NX
    :param phase_factors: K x B
    :param r: K x 2
    :param out: K x MY x MX
    :return: exit waves in out
    """
    # NOTE(review): `r_min` is accepted but unused in this implementation.
    B = S.shape[0]
    K, _ = r.shape
    # Scratch buffer: one cropped S-matrix patch per (scan position, beam).
    out = th.zeros((K, B, MY, MX), dtype=th.complex64, device=S.device)
    K, B, MY, MX = out.shape
    gpu = cuda.get_current_device()
    stream = th.cuda.current_stream().cuda_stream
    threadsperblock = gpu.MAX_THREADS_PER_BLOCK // 2
    # One thread per output element of the (K, B, MY, MX) buffer.
    blockspergrid = m.ceil(np.prod(np.array((K, B, MY, MX))) / threadsperblock)
    # 1 - get crops from S-matrix
    split_kernel4[blockspergrid, threadsperblock, stream](th.view_as_real(S), r, th.view_as_real(out))
    # threadsperblock = 128  # gpu.MAX_THREADS_PER_BLOCK
    # blockspergrid = m.ceil(np.prod(np.array((K, B))) / threadsperblock)
    # # 1 - get crops from S-matrix
    # split_kernel2[blockspergrid, threadsperblock, stream](th.view_as_real(S), r, out)
    out = out.view((K, B, MY * MX))
    # 1.5 - convert to cupy
    # 2 - complex batched matmul: K x 1 x B @ K x B x MY*MX --> K x 1 x MY * MX
    # print(out.shape)
    # print(phase_factors2.shape)
    # print(out.dtype)
    # print(phase_factors2.dtype)
    phase_factors2 = phase_factors.unsqueeze(1)
    exitwaves = phase_factors2 @ out
    # 3 - reshape to one (MY, MX) exit wave per scan position
    exitwaves = exitwaves.view((K, MY, MX))
    # 4 convert to pytorch
    return exitwaves
3bffd01037f317c88328a751958aca67bc90b2dd
3,642,786
from pathlib import Path
import requests
import logging


def get_metadata_for_druid(druid, redownload_mods):
    """Obtains a .mods metadata file for the roll specified by DRUID either
    from the local mods/ folder or the Stanford Digital Repository, then
    parses the XML to build the metadata dictionary for the roll.

    Args:
        druid: Stanford digital repository identifier for the roll.
        redownload_mods: when True, refetch the .mods file even if cached.

    Returns:
        dict or None: roll metadata, or None when the MODS record cannot
        be parsed.
    """

    def get_value_by_xpath(xpath):
        # Single-value lookup; None when the xpath has no match.
        try:
            return xml_tree.xpath(
                xpath,
                namespaces=NS,
            )[0]
        except IndexError:
            return None

    # Takes an array of potential xpaths, returns the first one that matches,
    # or None
    def get_value_by_xpaths(xpaths):
        value = None  # fix: previously NameError'd when xpaths was empty
        for xpath in xpaths:
            value = get_value_by_xpath(xpath)
            if value is not None:
                return value
        return value

    mods_filepath = Path(f"input/mods/{druid}.mods")

    if not mods_filepath.exists() or redownload_mods:
        response = requests.get(f"{PURL_BASE}{druid}.mods")
        try:
            xml_tree = etree.fromstring(response.content)
        except etree.XMLSyntaxError:
            logging.error(
                f"Unable to parse MODS metadata for {druid} - record is likely missing."
            )
            return None
        # Cache the pretty-printed record for later runs.
        with mods_filepath.open("w") as _fh:
            _fh.write(etree.tostring(xml_tree, encoding="unicode", pretty_print=True))
    else:
        xml_tree = etree.parse(mods_filepath.open())

    # The representation of the roll type in the MODS metadata continues to
    # evolve. Hopefully this logic covers all cases.
    roll_type = "NA"
    type_note = get_value_by_xpath(
        "x:physicalDescription/x:note[@displayLabel='Roll type']/text()"
    )
    scale_note = get_value_by_xpath(
        "x:physicalDescription/x:note[@displayLabel='Scale']/text()"
    )
    if type_note is not None and type_note in ROLL_TYPES:
        roll_type = ROLL_TYPES[type_note]
    if (
        scale_note is not None
        and scale_note in ROLL_TYPES
        and (roll_type == "NA" or type_note == "standard")
    ):
        roll_type = ROLL_TYPES[scale_note]
    # Last resort: scan all top-level notes for a recognized roll type.
    if roll_type == "NA" or type_note == "standard":
        for note in xml_tree.xpath("(x:note)", namespaces=NS):
            if note is not None and note.text in ROLL_TYPES:
                roll_type = ROLL_TYPES[note.text]

    metadata = {
        "title_prefix": get_value_by_xpath(
            "(x:titleInfo[@usage='primary']/x:nonSort)[1]/text()"
        ),
        "title": get_value_by_xpath(
            "(x:titleInfo[@usage='primary']/x:title)[1]/text()"
        ),
        "title_part_number": get_value_by_xpath(
            "(x:titleInfo[@usage='primary']/x:partNumber)[1]/text()"
        ),
        "title_part_name": get_value_by_xpath(
            "(x:titleInfo[@usage='primary']/x:partName)[1]/text()"
        ),
        "subtitle": get_value_by_xpath("(x:titleInfo/x:subTitle)[1]/text()"),
        "composer": get_value_by_xpaths(
            [
                "x:name[descendant::x:roleTerm[text()='composer']]/x:namePart[not(@type='date')]/text()",
                "x:name[descendant::x:roleTerm[text()='Composer']]/x:namePart[not(@type='date')]/text()",
                "x:name[descendant::x:roleTerm[text()='composer.']]/x:namePart[not(@type='date')]/text()",
                "x:name[descendant::x:roleTerm[text()='cmp']]/x:namePart[not(@type='date')]/text()",
            ]
        ),
        "performer": get_value_by_xpaths(
            [
                "x:name[descendant::x:roleTerm[text()='instrumentalist']]/x:namePart[not(@type='date')]/text()",
                "x:name[descendant::x:roleTerm[text()='instrumentalist.']]/x:namePart[not(@type='date')]/text()",
            ]
        ),
        "arranger": get_value_by_xpaths(
            [
                "x:name[descendant::x:roleTerm[text()='arranger of music']]/x:namePart[not(@type='date')]/text()",
                "x:name[descendant::x:roleTerm[text()='arranger']]/x:namePart[not(@type='date')]/text()",
            ]
        ),
        "original_composer": get_value_by_xpaths(
            [
                "x:relatedItem[@displayLabel='Based on (work) :']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
                "x:relatedItem[@displayLabel='Based on']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
                # fix: attribute name was misspelled 'displayLabele', so this
                # variant could never match.
                "x:relatedItem[@displayLabel='Adaptation of (work) :']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
                "x:relatedItem[@displayLabel='Adaptation of']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
                "x:relatedItem[@displayLabel='Arrangement of :']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
                "x:relatedItem[@displayLabel='Arrangement of']/x:name[@type='personal']/x:namePart[not(@type='date')]/text()",
            ]
        ),
        "label": get_value_by_xpaths(
            [
                "x:identifier[@type='issue number' and @displayLabel='Roll number']/text()",
                "x:identifier[@type='issue number']/text()",
            ]
        ),
        "publisher": get_value_by_xpaths(
            [
                "x:identifier[@type='publisher']/text()",
                "x:originInfo[@eventType='publication']/x:publisher/text()",
                "x:name[@type='corporate']/x:nameType/text()",
                "x:name[descendant::x:roleTerm[text()='publisher.']]/x:namePart/text()",
            ]
        ),
        "number": get_value_by_xpath("x:identifier[@type='publisher number']/text()"),
        "publish_date": get_value_by_xpaths(
            [
                "x:originInfo[@eventType='publication']/x:dateIssued[@keyDate='yes']/text()",
                "x:originInfo[@eventType='publication']/x:dateIssued/text()",
                "x:originInfo/x:dateIssued[@point='start']/text()",
                "x:originInfo[@displayLabel='publisher']/x:dateIssued/text()",
            ]
        ),
        "publish_place": get_value_by_xpaths(
            [
                "x:originInfo[@eventType='publication']/x:place/x:placeTerm[@type='text']/text()",
                "x:originInfo[@displayLabel='publisher']/x:place/x:placeTerm/text()",
            ]
        ),
        "recording_date": get_value_by_xpaths(
            [
                "x:note[@type='venue']/text()",
                "x:originInfo[@eventType='publication']/x:dateCaptured/text()",
            ]
        ),
        # The call number is not consistently available in all MODS variants
        # "call_number": get_value_by_xpath("x:location/x:shelfLocator/text()"),
        "type": roll_type,
        "PURL": PURL_BASE + druid,
    }

    return metadata
982c2a89e85b07692901f1452a62c144ab1181b7
3,642,787
def logistic_dataset_gen_data(num, w, dim, temp, rng_key):
    """Samples data from a standard Gaussian with binary noisy labels.

    Args:
      num: An integer denoting the number of data points.
      w: An array of size dim x odim, the weight vector used to generate labels.
      dim: An integer denoting the number of input dimensions.
      temp: A float denoting the temperature parameter controlling label noise.
      rng_key: JAX random number generator key.

    Returns:
      x: An array of size dim x num denoting data points.
      y_pm: An array of size num x odim denoting +/-1 labels.
    """
    data_key, label_key, _ = jax.random.split(rng_key, 3)
    x = jax.random.normal(data_key, (dim, num))
    # Label-flip probability follows a tempered logistic model of w.x.
    prob = jax.nn.sigmoid(-(1 / temp) * w.T.dot(x))
    labels = jax.random.bernoulli(label_key, prob)
    # Map {0, 1} Bernoulli draws onto {-1, +1}.
    y_pm = 2. * labels - 1
    return x, y_pm
99fed2fd2cdb1250a444a986dd182ab846477890
3,642,788
def sech(x):
    """Computes the hyperbolic secant of the input"""
    # sech(x) is the reciprocal of cosh(x).
    reciprocal_base = cosh(x)
    return 1 / reciprocal_base
1cded1fbf37070dbecba0f8518990c3eef8e6406
3,642,789
import torch


def _map_triples_elements_to_ids(
    triples: LabeledTriples,
    entity_to_id: EntityMapping,
    relation_to_id: RelationMapping,
) -> MappedTriples:
    """Map entities and relations to pre-defined ids.

    Triples containing entities or relations that are absent from the
    mappings are dropped (with a warning), and the surviving triples are
    deduplicated before being returned as a LongTensor.
    """
    if triples.size == 0:
        logger.warning('Provided empty triples to map.')
        return torch.empty(0, 3, dtype=torch.long)

    heads, relations, tails = slice_triples(triples)

    # When triples that don't exist are trying to be mapped, they get the id "-1"
    entity_getter = np.vectorize(entity_to_id.get)
    head_column = entity_getter(heads, [-1])
    tail_column = entity_getter(tails, [-1])
    relation_getter = np.vectorize(relation_to_id.get)
    relation_column = relation_getter(relations, [-1])

    # Filter all non-existent triples
    head_filter = head_column < 0
    relation_filter = relation_column < 0
    tail_filter = tail_column < 0
    num_no_head = head_filter.sum()
    num_no_relation = relation_filter.sum()
    num_no_tail = tail_filter.sum()

    if (num_no_head > 0) or (num_no_relation > 0) or (num_no_tail > 0):
        logger.warning(
            f"You're trying to map triples with {num_no_head + num_no_tail} entities and {num_no_relation} relations"
            f" that are not in the training set. These triples will be excluded from the mapping.",
        )
        non_mappable_triples = (head_filter | relation_filter | tail_filter)
        head_column = head_column[~non_mappable_triples, None]
        relation_column = relation_column[~non_mappable_triples, None]
        tail_column = tail_column[~non_mappable_triples, None]
        logger.warning(
            f"In total {non_mappable_triples.sum():.0f} from {triples.shape[0]:.0f} triples were filtered out",
        )

    triples_of_ids = np.concatenate([head_column, relation_column, tail_column], axis=1)

    # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
    triples_of_ids = np.array(triples_of_ids, dtype=np.int64)
    # Note: Unique changes the order of the triples
    # Note: Using unique means implicit balancing of training samples
    unique_mapped_triples = np.unique(ar=triples_of_ids, axis=0)
    return torch.tensor(unique_mapped_triples, dtype=torch.long)
5d4db571e9b9d37329df7689b7e7629559580522
3,642,790
from typing import Tuple


def pinf_two_networks(grgd: Tuple[float, float], k: Tuple[float, float] = (3, 3),
                      alpha_i: Tuple[float, float] = (1, 1), solpoints: int = 10,
                      eps: float = 1e-5, method: str = "hybr"):
    """Find the fixed points for two recovery coupled ER networks (not-symmetric)

    Args:
        grgd (Tuple[float, float]): gamma_r / gamma_d ratio in each network
        k (Tuple[float, float], optional): avg degree in each network. Defaults to (3, 3).
        alpha_i (Tuple[float, float], optional): coupling strength in each network. Defaults to (1, 1).
        solpoints (int, optional): number of guesses to feed solver. Defaults to 10.
        eps (float, optional): precision of solution. Defaults to 1e-5.
        method (str, optional): method to pass to `root`. Defaults to "hybr".

    Returns:
        List[np.ndarray]: a list of all solutions found
    """
    # Per-network generating functions derived from the average degrees.
    g = list(map(u_factory, k))
    # NOTE: `mu` is a *tuple* of two lambdas (one per network), indexed below.
    mu = lambda p: (1 - alpha_i[0] * g[0] (1 - p)), lambda p: (1 - alpha_i[1] * g[1](1 - p))

    def two_networks_self_consistent(f1f2):
        # Self-consistency residuals: each network's fixed point depends on
        # the other's through the cross-coupling term.
        cond1 = 1 / (1 + (grgd[0] * mu[1](f1f2[1]))) - f1f2[0]
        cond2 = 1 / (1 + (grgd[1] * mu[0](f1f2[0]))) - f1f2[1]
        return np.array([cond1, cond2], dtype=float).squeeze()

    return get_all_sols_two_networks(
        two_networks_self_consistent,
        eps=eps,
        method=method,
        solpoints=solpoints,
    )
c90db1bb6d9d314086887e4f5b98f422731b3853
3,642,791
def uncapped_flatprice_goal_reached(chain, uncapped_flatprice, uncapped_flatprice_finalizer, preico_funding_goal, preico_starts_at, customer) -> Contract:
    """A ICO contract where the minimum funding goal has been reached.

    Fixture: fast-forwards the chain past the sale start, then has
    ``customer`` buy exactly the funding-goal amount of wei so the goal
    is met before the contract is returned.
    """
    # Move chain time just past the pre-ICO start so buying is allowed.
    time_travel(chain, preico_starts_at + 1)
    wei_value = preico_funding_goal
    uncapped_flatprice.functions.buy().transact({"from": customer, "value": wei_value})
    return uncapped_flatprice
20a6a10b4cb1318e2be7fd1995d025b582ee4768
3,642,792
def depfile_name(request, tmp_path_factory):
    """A fixture for a temporary doit database file(s) that will be removed after running"""
    # Unique temp path so parallel tests never share a database.
    depfile_name = str(tmp_path_factory.mktemp('x', True) / 'testdb')

    def remove_depfile():
        # Clean up the doit DB once the requesting test finishes.
        remove_db(depfile_name)
    request.addfinalizer(remove_depfile)

    return depfile_name
cbe99e664abeea52a038898f3e76547795bca30a
3,642,793
from typing import OrderedDict
import collections
import warnings


def calculate(dbf, comps, phases, mode=None, output='GM', fake_points=False,
              broadcast=True, parameters=None, **kwargs):
    """
    Sample the property surface of 'output' containing the specified
    components and phases. Model parameters are taken from 'dbf' and any
    state variables (T, P, etc.) can be specified as keyword arguments.

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : str or sequence
        Names of components to consider in the calculation.
    phases : str or sequence
        Names of phases to consider in the calculation.
    mode : string, optional
        See 'make_callable' docstring for details.
    output : string, optional
        Model attribute to sample.
    fake_points : bool, optional (Default: False)
        If True, the first few points of the output surface will be fictitious
        points used to define an equilibrium hyperplane guaranteed to be above
        all the other points. This is used for convex hull computations.
    broadcast : bool, optional
        If True, broadcast given state variable lists against each other to
        create a grid. If False, assume state variables are given as
        equal-length lists.
    points : ndarray or a dict of phase names to ndarray, optional
        Columns of ndarrays must be internal degrees of freedom (site
        fractions), sorted. If this is not specified, points will be
        generated automatically.
    pdens : int, a dict of phase names to int, or a seq of both, optional
        Number of points to sample per degree of freedom.
        Default: 2000; Default when called from equilibrium(): 500
    model : Model, a dict of phase names to Model, or a seq of both, optional
        Model class to use for each phase.
    sampler : callable, a dict of phase names to callable, or a seq of both, optional
        Function to sample phase constitution space.
        Must have same signature as 'pycalphad.core.utils.point_sample'
    grid_points : bool, a dict of phase names to bool, or a seq of both, optional (Default: True)
        Whether to add evenly spaced points between end-members.
        The density of points is determined by 'pdens'
    parameters : dict, optional
        Maps SymPy Symbol to numbers, for overriding the values of parameters
        in the Database.

    Returns
    -------
    Dataset of the sampled attribute as a function of state variables

    Examples
    --------
    None yet.
    """
    # Here we check for any keyword arguments that are special, i.e.,
    # there may be keyword arguments that aren't state variables
    pdens_dict = unpack_kwarg(kwargs.pop('pdens', 2000), default_arg=2000)
    points_dict = unpack_kwarg(kwargs.pop('points', None), default_arg=None)
    model_dict = unpack_kwarg(kwargs.pop('model', FallbackModel), default_arg=FallbackModel)
    callable_dict = unpack_kwarg(kwargs.pop('callables', None), default_arg=None)
    sampler_dict = unpack_kwarg(kwargs.pop('sampler', None), default_arg=None)
    fixedgrid_dict = unpack_kwarg(kwargs.pop('grid_points', True), default_arg=True)
    parameters = parameters or dict()
    if isinstance(parameters, dict):
        # Sort by symbol name so parameter ordering is deterministic.
        parameters = OrderedDict(sorted(parameters.items(), key=str))
    param_symbols = tuple(parameters.keys())
    # NOTE(review): `np.float` was removed in NumPy 1.24; this line requires
    # an older NumPy (or should read `float`) — confirm pinned version.
    param_values = np.atleast_1d(np.array(list(parameters.values()), dtype=np.float))
    if isinstance(phases, str):
        phases = [phases]
    if isinstance(comps, str):
        comps = [comps]
    if points_dict is None and broadcast is False:
        raise ValueError('The \'points\' keyword argument must be specified if broadcast=False is also given.')
    # 'VA' (vacancy) is excluded from the component list.
    components = [x for x in sorted(comps) if not x.startswith('VA')]
    # Convert keyword strings to proper state variable objects
    # If we don't do this, sympy will get confused during substitution
    statevar_dict = collections.OrderedDict((v.StateVariable(key), unpack_condition(value)) \
                                            for (key, value) in sorted(kwargs.items()))
    # XXX: CompiledModel assumes P, T are the only state variables
    if statevar_dict.get(v.P, None) is None:
        statevar_dict[v.P] = 101325
    if statevar_dict.get(v.T, None) is None:
        statevar_dict[v.T] = 300
    str_statevar_dict = collections.OrderedDict((str(key), unpack_condition(value)) \
                                                for (key, value) in statevar_dict.items())
    all_phase_data = []
    comp_sets = {}
    largest_energy = 1e30
    maximum_internal_dof = 0

    # Consider only the active phases
    active_phases = dict((name.upper(), dbf.phases[name.upper()]) \
                         for name in unpack_phases(phases))

    # First pass: construct phase models and find the largest internal
    # degree-of-freedom count across all phases.
    for phase_name, phase_obj in sorted(active_phases.items()):
        # Build the symbolic representation of the energy
        mod = model_dict[phase_name]
        # if this is an object type, we need to construct it
        if isinstance(mod, type):
            try:
                model_dict[phase_name] = mod = mod(dbf, comps, phase_name, parameters=parameters)
            except DofError:
                # we can't build the specified phase because the
                # specified components aren't found in every sublattice
                # we'll just skip it
                warnings.warn("""Suspending specified phase {} due to some sublattices containing only unspecified components""".format(phase_name))
                continue
        if points_dict[phase_name] is None:
            maximum_internal_dof = max(maximum_internal_dof, sum(len(x) for x in mod.constituents))
        else:
            maximum_internal_dof = max(maximum_internal_dof, np.asarray(points_dict[phase_name]).shape[-1])

    # Second pass: sample each phase and compute the requested property.
    for phase_name, phase_obj in sorted(active_phases.items()):
        try:
            mod = model_dict[phase_name]
        except KeyError:
            # this is a phase model we couldn't construct for whatever reason; skip it
            continue
        if isinstance(mod, type):
            continue
        if (not isinstance(mod, CompiledModel)) or (output != 'GM'):
            if isinstance(mod, CompiledModel):
                mod = Model(dbf, comps, phase_name, parameters=parameters)
            # Construct an ordered list of the variables
            variables, sublattice_dof = generate_dof(phase_obj, mod.components)
            # Build the "fast" representation of that model
            if callable_dict[phase_name] is None:
                try:
                    out = getattr(mod, output)
                except AttributeError:
                    raise AttributeError('Missing Model attribute {0} specified for {1}'
                                         .format(output, mod.__class__))
                # As a last resort, treat undefined symbols as zero
                # But warn the user when we do this
                # This is consistent with TC's behavior
                undefs = list(out.atoms(Symbol) - out.atoms(v.StateVariable))
                for undef in undefs:
                    out = out.xreplace({undef: float(0)})
                    warnings.warn('Setting undefined symbol {0} for phase {1} to zero'.format(undef, phase_name))
                comp_sets[phase_name] = build_functions(out, list(statevar_dict.keys()) + variables,
                                                        include_obj=True, include_grad=False,
                                                        include_hess=False, parameters=param_symbols)
            else:
                comp_sets[phase_name] = callable_dict[phase_name]
            phase_record = PhaseRecord_from_cython(comps, list(statevar_dict.keys()) + variables,
                                                   np.array(dbf.phases[phase_name].sublattices, dtype=np.float),
                                                   param_values, comp_sets[phase_name], None, None)
        else:
            variables = sorted(set(mod.variables) - {v.T, v.P}, key=str)
            sublattice_dof = mod.sublattice_dof
            phase_record = PhaseRecord_from_compiledmodel(mod, param_values)
        points = points_dict[phase_name]
        if points is None:
            points = _sample_phase_constitution(phase_name, phase_obj.constituents, sublattice_dof, comps,
                                                tuple(variables), sampler_dict[phase_name] or point_sample,
                                                fixedgrid_dict[phase_name], pdens_dict[phase_name])
        points = np.atleast_2d(points)

        # Fake hyperplane points are only prepended to the first phase.
        fp = fake_points and (phase_name == sorted(active_phases.keys())[0])
        phase_ds = _compute_phase_values(phase_obj, components, variables, str_statevar_dict,
                                         points, phase_record, output,
                                         maximum_internal_dof, broadcast=broadcast,
                                         largest_energy=float(largest_energy), fake_points=fp)
        all_phase_data.append(phase_ds)

    # speedup for single-phase case (found by profiling)
    if len(all_phase_data) > 1:
        final_ds = _fast_concat(all_phase_data, dim='points')
        final_ds['points'].values = np.arange(len(final_ds['points']))
        final_ds.coords['points'].values = np.arange(len(final_ds['points']))
    else:
        final_ds = all_phase_data[0]
    return final_ds
c69769fa322831cc021db497a329de816541a20a
3,642,795
def is_negative(value):
    """Checks if `value` is negative.

    Args:
        value (mixed): Value to check.

    Returns:
        bool: Whether `value` is negative.

    Example:

        >>> is_negative(-1)
        True
        >>> is_negative(0)
        False
        >>> is_negative(1)
        False

    .. versionadded:: 2.0.0
    """
    # Non-numeric inputs are never considered negative.
    if not is_number(value):
        return False
    return value < 0
ce0183d95a2394db18904f0ca7f1225e43cf671d
3,642,796
import torch


def get_optimizer_noun(lr, decay, mode, cnn_features, role_features):
    """ To get the optimizer
    mode 0: training from scratch
    mode 1: cnn fix, verb fix, role training
    mode 2: cnn fix, verb fine tune, role training
    mode 3: cnn finetune, verb finetune, role training

    :param lr: base learning rate (ignored in mode 2, see note below)
    :param decay: weight decay (ignored in mode 2)
    :param mode: training regime selector, 0-3
    :param cnn_features: CNN parameter collection
    :param role_features: role-classifier parameter collection
    :return: configured torch.optim.Adam optimizer
    """
    if mode == 0:
        # Everything trainable; optimize CNN and role parameters together.
        set_trainable_param(cnn_features, True)
        set_trainable_param(role_features, True)
        optimizer = torch.optim.Adam([
            {'params': cnn_features},
            {'params': role_features}
        ], lr=lr, weight_decay=decay)
    elif mode == 1:
        # Only role parameters are updated.
        set_trainable_param(role_features, True)
        optimizer = torch.optim.Adam([
            {'params': role_features}
        ], lr=lr, weight_decay=decay)
    elif mode == 2:
        set_trainable_param(role_features, True)
        # NOTE(review): this branch hard-codes lr=1e-3 and omits weight
        # decay, ignoring the `lr`/`decay` arguments — confirm intentional.
        optimizer = torch.optim.Adam([
            {'params': role_features}], lr=1e-3)
    elif mode == 3:
        set_trainable_param(cnn_features, True)
        set_trainable_param(role_features, True)
        optimizer = torch.optim.Adam([
            {'params': cnn_features},
            {'params': role_features}
        ], lr=lr, weight_decay=decay)
    # NOTE(review): `optimizer` is unbound (NameError) for any other mode value.
    return optimizer
6ac2df23f6a50d3488302cfe2da6189a995c0d85
3,642,797
def _loc_str_to_pars(loc, x=None, y=None, halign=None, valign=None, pad=_PAD):
    """Convert from a string location specification to the specifying parameters.

    If any of the specifying parameters: {x, y, halign, valign}, are 'None',
    they are set to default values.

    Returns
    -------
    x : float
    y : float
    halign : str
    valign : str

    """
    _valid_loc = [['t', 'u', 'b', 'l', 'c'], ['l', 'r', 'c']]
    # Validate both characters of the location spec before using them.
    for ii, (ll, vv) in enumerate(zip(loc, _valid_loc)):
        if ll not in vv:
            err = "Unrecognized `loc`[{}] = '{}' (`loc` = '{}').".format(ii, ll, loc)
            err += "\n\t`loc`[{}] must be one of '{}'".format(ii, vv)
            raise ValueError(err)

    # Normalize padding to an (x_pad, y_pad) pair.
    pad = np.atleast_1d(pad)
    if pad.size == 1:
        pad = np.concatenate([pad, pad])

    # Vertical defaults keyed on the first location character.
    vert = loc[0]
    if vert in ('t', 'u'):
        valign = 'top' if valign is None else valign
        y = 1 - pad[1] if y is None else y
    elif vert in ('b', 'l'):
        valign = 'bottom' if valign is None else valign
        y = pad[1] if y is None else y
    elif vert == 'c':
        valign = 'center' if valign is None else valign
        y = 0.5 if y is None else y

    # Horizontal defaults keyed on the second location character.
    horz = loc[1]
    if horz == 'l':
        halign = 'left' if halign is None else halign
        x = pad[0] if x is None else x
    elif horz == 'r':
        halign = 'right' if halign is None else halign
        x = 1 - pad[0] if x is None else x
    elif horz == 'c':
        halign = 'center' if halign is None else halign
        x = 0.5 if x is None else x

    return x, y, halign, valign
84094b2eaf39390a1d30fd26d8ae36ecd32a7665
3,642,799
def skeda_from_skedadict(line_dict, filing_number, line_sequence, is_amended):
    """Annotate a Schedule A line dict in place and return it.

    Truncates the transaction id to 20 characters, records the filing
    metadata, and — when a contribution date is present — best-effort
    parses it into ``contribution_date_formatted`` (parse failures are
    silently ignored).
    """
    line_dict['transaction_id'] = line_dict['transaction_id'][:20]
    line_dict.update(
        line_sequence=line_sequence,
        superseded_by_amendment=is_amended,
        filing_number=filing_number,
    )

    raw_date = line_dict['contribution_date']
    if raw_date:
        try:
            line_dict['contribution_date_formatted'] = parser.parse(raw_date)
        except ValueError:
            # Unparseable dates are left without a formatted counterpart.
            pass
    return line_dict
2e07efa96f93ef777185e48bb07787774d4e5180
3,642,801
from datetime import datetime
from functools import lru_cache


@lru_cache(maxsize=256)
def oracle_to_date(string2convert, fmt, nlsparam=None):
    """Emulate Oracle's TO_DATE for SQLite.

    https://docs.oracle.com/cd/B19306_01/server.102/b14200/functions183.htm
    TO_DATE(char [, fmt [, 'nlsparam' ] ])

    TO_DATE converts char of CHAR, VARCHAR2, NCHAR, or NVARCHAR2 datatype
    to a value of DATE datatype.  The fmt is a datetime model format
    specifying the format of char.

    On SQLite dates are in iso-8601 format: 'YYYY-MM-DD HH:MM:SS'.
    The supported format is the C standard (1989 version).

    :param string2convert: the date string to parse.
    :param fmt: a C-standard ``strptime`` format string.
    :param nlsparam: accepted for Oracle signature compatibility; unused.
    :return: ISO-8601 date string with a space separator, second precision.

    The function is cached for performance reasons.
    """
    # Bug fix: the module imports the *class* (`from datetime import datetime`),
    # so the original `datetime.datetime.strptime(...)` raised AttributeError.
    dobj = datetime.strptime(string2convert, fmt)
    # Return a nice SQLite date string.
    return dobj.isoformat(sep=" ", timespec="seconds")
eeaee6289d43bd446fbf27ce25ed87555a116ae4
3,642,802
import re


def replace_whitespace(s, rep=' '):
    """Collapse every maximal run of whitespace in ``s`` into ``rep``.

    Parameters
    ----------
    s : str
        The string whose whitespace runs should be replaced.
    rep : Optional[str]
        Replacement for each whitespace run; defaults to a single ASCII
        space.

    Returns
    -------
    str
        The string with all whitespace runs replaced.
    """
    return re.sub(r'\s+', rep, s)
b583a627dda830275822f6276af33b58afb55f1e
3,642,803
import aiohttp


async def handle_xml_response(request):
    """Test double: answer any request with canned equipment XML.

    :param request: the incoming aiohttp request (unused).
    :return: an ``aiohttp.web.Response`` with content type ``text/xml``.
    """
    # `load_data` is a project helper reading the fixture file; presumably
    # it returns the raw file contents (bytes or str) -- TODO confirm.
    response = load_data("equipment_data.xml")
    return aiohttp.web.Response(
        content_type="text/xml",
        body=response
    )
d56526414469424483fc8461c29f3b9c9963e698
3,642,805
import this


def plugin_prefs(parent, cmdr, is_beta):
    """ Return a TK Frame for adding to the EDMC settings dialog. """
    global listbox
    frame = nb.Frame(parent)
    # Column headers for the two lookup widgets.
    nb.Label(frame, text="Faction Name:").grid(row=0,column=0)
    nb.Label(frame, text="System Name").grid(row=0,column=1)
    # Faction column: entry field plus a listbox of candidate completions,
    # backed by the faction names stored in the local database.
    faction_entry = nb.Entry(frame,width=35)
    faction_entry.grid(row=2,column=0)
    faction_listbox = tk.Listbox(frame,width=35)
    faction_listbox.grid(row=3,column=0)
    # `this` is the EDMC plugin-module idiom for storing plugin state.
    this.faction_el = entry_lookup.EntryLookup(faction_entry,faction_listbox, db_connection.get_faction_names(),this.faction_name.get())
    # System column: same entry/listbox pair, backed by known system names.
    system_entry = nb.Entry(frame,width=35)
    system_entry.grid(row=2,column=1)
    system_listbox = tk.Listbox(frame,width=35)
    system_listbox.grid(row=3,column=1)
    this.system_el = entry_lookup.EntryLookup(system_entry,system_listbox, db_connection.get_system_names(),this.system_name.get())
    # Long-running history scrape, triggered explicitly by the user.
    b = nb.Button(frame, text="Scrape history", command=scrape_history)
    b.grid(row=4, column=1)
    nb.Label(frame,text="Warning, this will take a while. Shut down ED before running").grid(row=4,column=0)
    return frame
25df93343750cdac60604e6f5f91f84b3d105a12
3,642,806
from typing import Concatenate


def Conv1D_positive_r(x, kernel_size):
    """Apply three parallel 1-channel Conv1D heads and concatenate them.

    The third head uses a ``relu`` activation so its output is non-negative;
    per the original note, "index of r is hard-coded to 2!" -- i.e. channel 2
    of the concatenated output is the constrained-positive one.

    NOTE(review): `from typing import Concatenate` looks like a mis-resolved
    import -- `Concatenate()` is used here as a callable layer, presumably
    `keras.layers.Concatenate`; verify the module's real imports.

    :param x: input tensor fed to each Conv1D head.
    :param kernel_size: kernel size shared by all three heads.
    :return: the channel-wise concatenation of the three head outputs.
    """
    out1 = Conv1D(1, kernel_size=kernel_size, padding='valid', activation='linear')(x)
    out2 = Conv1D(1, kernel_size=kernel_size, padding='valid', activation='linear')(x)
    out3 = Conv1D(1, kernel_size=kernel_size, padding='valid', activation='relu')(x)
    return Concatenate()([out1, out2, out3])
b66db8e65007d6044ad711b4ac9e7e9f967ecd91
3,642,807
def decrypt(text, key):
    """Decrypt the supplied text and return the result.

    Args:
        text (str): The text to decrypt.
        key (str): The key with which to perform the decryption.

    Returns:
        The decrypted text.
    """
    # Thin wrapper: `transform` does the real work; the trailing True flag
    # presumably selects decryption mode -- TODO confirm against `transform`.
    return transform(text, key, True)
bb7fb87622a38c3eba9156d9a8678357e40adcb3
3,642,809
def psi_gauss_1d(x, a: float = 1.0, x_0: float = 0.0, k_0: float = 0.0):
    """Normalized 1D Gaussian wave packet.

    Width ``a``, momentum ``k_0``, centered at ``x_0``.

    :param x: mathematical variable
    :param a: Amplitude (width) of the pulse
    :param x_0: Mean spatial x of the pulse
    :param k_0: Group velocity of the pulse
    :return: complex amplitude of the wave packet at ``x``
    """
    # L2 normalization prefactor for a Gaussian of width `a`.
    norm = (a * np.sqrt(np.pi)) ** (-0.5)
    # Real Gaussian envelope plus imaginary plane-wave phase.
    envelope = -0.5 * ((x - x_0) * 1. / a) ** 2
    phase = 1j * x * k_0
    return norm * np.exp(envelope + phase)
278ffa7f15fd8c52346f5b232a89d40ee48c8843
3,642,810
def get(address, limit=LIMIT):
    """
    Recursively dereferences an address.

    Walks the pointer chain starting at ``address``, reading each target
    through GDB, and stops on a read failure or when a cycle repeats.

    Returns:
        A list containing ``address``, followed by up to ``limit`` valid
        pointers.
    """
    result = []
    for i in range(limit):
        # Don't follow cycles, except to stop at the second occurrence.
        if result.count(address) >= 2:
            break
        result.append(address)
        try:
            # Dereference: read a pointer-sized value at `address`.
            address = int(pwndbg.memory.poi(pwndbg.typeinfo.ppvoid, address))
        except gdb.MemoryError:
            # Unreadable memory terminates the chain.
            break
    return result
1a3b7122ede440ddee773d7e260430517181909d
3,642,811
def find_availability_by_year(park, campground, year, months=range(1, 13)):
    """Collect weekend availability for a campground over several months.

    Parameters
    ----------
    park : str
    campground : str
    year : str
    months : list
        list of months as str or int. Default is `range(1, 13)`

    Returns
    -------
    list
        flat list of availability entries at the given park's campground
        across the requested months; collection stops at the first month
        whose lookup fails (best-effort behaviour).
    """
    yearly_availability = []
    for month in months:
        # The monthly lookup expects a string month.
        if isinstance(month, int):
            month = str(month)
        try:
            monthly_availability = find_availability_by_month(park, campground,
                                                             year, month)
            yearly_availability.append(monthly_availability)
        except Exception:
            # Bug fix: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit.  Keep the original behaviour of
            # stopping at the first failing month, but only for ordinary
            # exceptions.
            break
    # Flatten the list of per-month lists.
    return [item for sublist in yearly_availability for item in sublist]
28e81b2382f2733d1cc024a221c11feaa5ae5653
3,642,814
def seconds(value=None, utc=True, **kwargs):
    """
    Converts value to seconds.

    If value is timedelta or struct_time, it will be just converted to
    seconds.  If value is a datetime instance it will be converted to
    seconds since epoch (UTC).  If value is a number, it's assumed to be in
    milliseconds and is simply divided by 1000.  You can also provide named
    arguments, same as for the timedelta function.
    """
    # Fast path: a plain number is taken to be milliseconds.
    if isinstance(value, (int, float)):
        return int(float(value) / 1000.0)
    # Everything else (timedelta, datetime, struct_time, kwargs) is
    # delegated to the shared conversion helper.
    return _convert_time(value, utc, **kwargs)
aced764fc038b316ca0b772254b6c6a44f333d9e
3,642,815
def fix_mocov2_state_dict(state_dict):
    """Strip the MoCo-v2 ``model.encoder_q.`` prefix from checkpoint keys.

    Keys without the prefix are kept unchanged; values are never touched.
    Ref: https://bit.ly/3cDfGVA
    """
    prefix = "model.encoder_q."
    return {
        (key.replace(prefix, "") if key.startswith(prefix) else key): value
        for key, value in state_dict.items()
    }
13471d6863eb14eb3248f6d6e1d6b5882c341ed0
3,642,816
def get_perspective(image, contours, ratio):
    """
    This function takes image and contours and returns perspective of this
    contours.

    :param image: image, numpy array
    :param contours: quadrilateral contour with exactly 4 points, numpy array
    :param ratio: rescaling parameter to the original image
    :return: warped image
    """
    # Reorder the 4 corners into a canonical order:
    # [top-left, top-right, bottom-right, bottom-left].
    points = contours.reshape(4, 2)
    points = points * ratio
    rectangle = np.zeros(shape=(4, 2), dtype='float32')
    # Top-left has the smallest x+y sum, bottom-right the largest.
    total = points.sum(axis=1)
    rectangle[0] = points[np.argmin(total)]
    rectangle[2] = points[np.argmax(total)]
    # Top-right has the smallest y-x difference, bottom-left the largest.
    difference = np.diff(points, axis=1)
    rectangle[1] = points[np.argmin(difference)]
    rectangle[3] = points[np.argmax(difference)]
    # rectangle *= ratio
    (a, b, c, d) = rectangle
    # Output size: the longest opposing edge pair in each direction.
    width1 = norm(c - d)
    width2 = norm(b - a)
    height1 = norm(b - c)
    height2 = norm(a - d)
    max_width = max(int(width1), int(width2))
    max_height = max(int(height1), int(height2))
    destination = np.array([[0, 0],
                            [max_width - 1, 0],
                            [max_width - 1, max_height - 1],
                            [0, max_height - 1]], dtype='float32')
    # Map the source quadrilateral onto the axis-aligned destination.
    M = cv2.getPerspectiveTransform(src=rectangle, dst=destination)
    warped_image = cv2.warpPerspective(src=image, M=M, dsize=(max_width, max_height))
    return warped_image
237db75baa8b72314e095f435075e75b8aa126b0
3,642,817
from pathlib import Path


def load_model_selector(folder_path):
    """Load information about stored model selection

    Parameters
    ----------
    folder_path : str
        path where .model_selector_result files are stored

    Returns
    -------
    ModelSelector
        Information about model selection for each partition
    """
    # One result per partition file; the file stem is the partition hash.
    results = [
        load_model_selector_result(path=r.parent, partition_hash=r.stem)
        for r in Path(folder_path).glob("*.model_selector_result")
    ]
    # NOTE(review): an empty folder makes `results[0]` raise IndexError
    # with no helpful message -- consider validating upstream.
    # Horizon/frequency/country column are assumed identical across
    # partitions, so the first result is taken as representative.
    model_selector = ModelSelector(
        horizon=results[0].horizon,
        frequency=results[0].frequency,
        country_code_column=results[0].country_code_column,
    )
    model_selector.results = results
    return model_selector
1e977ca422c5004e510f4989f7778bd0ca95f4c0
3,642,819
def generate_expired_date():
    """Generate a datetime object NB_DAYS_BEFORE_DELETING_LIVE_RECORDINGS days in the past."""
    # Uses Django's timezone-aware "now"; the retention window comes from
    # project settings.
    return timezone.now() - timedelta(
        days=settings.NB_DAYS_BEFORE_DELETING_LIVE_RECORDINGS
    )
8d6fb9aae4cd5065416ccea4ba17d11080d8ccbc
3,642,820
from typing import Dict


def make_dummy_authentication_request_args() -> Dict[str, bytes]:
    """Create a factory for fake login-request arguments.

    NOTE(review): despite the annotation, this returns the inner *factory
    function*, not the dict itself -- callers must invoke the result.  The
    dict values are single-element lists of bytes (HTTP request-args style),
    not bare bytes.  Consider correcting the annotation.

    Returns:
        A zero-argument callable producing the dummy args dict.
    """
    def _make_dummy_authentication_request_args():
        args = {
            "username": ["foobar".encode()],
            "password": ["mypassword".encode()],
            "assignment_name": ["lab101".encode()],
            "course_id": ["intro101".encode()],
            "lms_user_id": ["abc123".encode()],
            "user_role": ["Student".encode()],
        }
        return args

    return _make_dummy_authentication_request_args
2e0919bac46a5140a72c02ee09c1ce3b1cb9269a
3,642,821
def add_experiment_images_to_image_info_csv(image_info_df, experiment_xml_file):
    """
    Goes through the xml file of the experiment and adds the info of its
    images to the image info dataframe.  If the gene name is missing in the
    experiment, then this experiment is considered invalid.

    :param image_info_df: the image info dataframe to append the new images
    :param experiment_xml_file: the xml file of the experiment that we want
        to add its images
    :return: the image info dataframe and also a boolean which determines
        whether this experiment is invalid.
    """
    invalid = False
    # Parse the experiment XML and walk down to the single section data set.
    tree = et.parse(experiment_xml_file)
    root = tree.getroot()
    section_data_sets = root.find('section-data-sets')
    section_data_set = section_data_sets.find('section-data-set')
    experiment_id = section_data_set.find('id').text
    specimen_id = section_data_set.find('specimen-id').text
    section_images = section_data_set.find('section-images')
    genes = section_data_set.find('genes')
    specimen = section_data_set.find('specimen')
    # Donor demographics and clinical metadata.
    donor = specimen.find('donor')
    structure = specimen.find('structure')
    donor_id = donor.find('name').text
    donor_sex = donor.find('sex').text
    donor_age = donor.find('age-id').text
    pmi = donor.find('pmi').text
    donor_race = donor.find('race-only').text
    smoker = donor.find('smoker').text
    chemotherapy = donor.find('chemotherapy').text
    radiation_therapy = donor.find('radiation-therapy').text
    tumor_status = donor.find('tumor-status').text
    conditions = donor.find('conditions')
    condition = conditions.find('condition')
    description = condition.find('description').text
    # Brain region and tissue metadata.
    region_name = structure.find('name').text
    region_acronym = structure.find('acronym').text
    tissue_ph = specimen.find('tissue-ph').text
    gene = genes.find('gene')
    # NOTE(review): `gene == None` should idiomatically be `gene is None`
    # (and for Element objects truthiness is unreliable, so `is None` is the
    # only safe test).
    if gene == None:
        print ("experiment " + experiment_id + " is invalid")
        invalid = True
    else:
        gene_symbol = gene.find('acronym').text
        gene_alias_tags = gene.find('alias-tags').text
        entrez_id = gene.find('entrez-id').text
        gene_original_name = gene.find('original-name').text
        gene_original_symbol = gene.find('original-symbol').text
        # Collect the ids of every section image in this experiment.
        all_section_images = section_images.findall('section-image')
        image_id_list = []
        for item in all_section_images:
            image_id_list.append(item.find('id').text)
        # One dataframe row per image, all sharing the experiment metadata.
        # NOTE(review): DataFrame.append was deprecated in pandas 1.4 and
        # removed in 2.0 -- consider collecting rows and using pd.concat.
        for image_id in image_id_list:
            new_row = pd.Series({'image_id': image_id, 'gene_symbol': gene_symbol,
                                 'entrez_id': entrez_id, 'alias_tags': gene_alias_tags,
                                 'original_name': gene_original_name,
                                 'original_symbol': gene_original_symbol,
                                 'experiment_id':experiment_id,'specimen_id': specimen_id,
                                 'description': description, 'donor_id': donor_id,
                                 'donor_sex': donor_sex, 'donor_age':donor_age,
                                 'donor_race':donor_race, 'smoker' : smoker,
                                 'chemotherapy': chemotherapy,
                                 'radiation_therapy': radiation_therapy,
                                 'tumor_status' : tumor_status, 'region':region_name,
                                 'region_acronym': region_acronym,
                                 'tissue_ph': tissue_ph, 'pmi': pmi })
            image_info_df = image_info_df.append(new_row, ignore_index=True)
    return image_info_df, invalid
99b545cba5aeb53f9ba2af2a1a5bf3acb72c6fa7
3,642,822
from typing import Iterable
from re import T
from typing import Optional
from typing import Callable
from re import U
from typing import Iterator


# NOTE(review): `from re import T` / `from re import U` look like
# mis-resolved imports -- T and U were presumably TypeVars for the
# annotations below; verify against the original module.
def dedup(iterable: Iterable[T], key: Optional[Callable[[T], U]] = None) -> Iterator[T]:
    """
    List unique elements.

    Sorts the input (optionally by ``key``) and delegates to ``uniq``, so
    the output is in sorted order, not input order.

    >>> tuple(dedup([5, 4, 3, 5, 3, 3]))
    (3, 4, 5)
    """
    return uniq(sorted(iterable, key=key), key)
8334d08f926584b1c976c24bde180930124b78ba
3,642,823
def get_product(barcode):
    """
    Return information of a given product.

    :param barcode: the product barcode used as the API lookup key.
    :return: whatever `utils.fetch` yields for the product endpoint
        (presumably the decoded JSON payload -- TODO confirm).
    """
    return utils.fetch('api/v0/product/%s' % barcode)
2cc298cf640b4aa742c51b5d076f0021660fe0d5
3,642,824
def knn_matcher(arr2, arr1, neighbours=2, img_id=0, ratio_threshold=0.75):
    """Computes the inlier matches for given descriptor ararys arr1 and arr2

    Arguments:
        arr2 {np.ndarray} -- Image used for finding the matches (train image)
        arr1 {[type]} -- Image in which matches are found (test image)

    Keyword Arguments:
        neighbours {int} -- Number of neighbours to consider while matching.
            Should be 2 (default: {2})
        img_id {int} -- Id of the train image (default: {0})
        ratio_threshold {float} -- Ratio threshold for the ratio test
            (default: {0.75}). If 0 or None, the matches are not filtered.

    Returns:
        list(matches) -- List of cv2.DMatch objects
    """
    # Lowe's ratio test below assumes exactly two neighbours.
    assert neighbours == 2
    # Compute L2 distance for all the descriptors arr1 and arr2 using the
    # expansion ||a-b||^2 = ||a||^2 + ||b||^2 - 2*a.b (vectorized, no loop).
    all_distances = np.sqrt(np.square(arr2).sum(
        axis=1)[:, np.newaxis] + np.square(arr1).sum(axis=1) - 2 * arr2.dot(arr1.T))
    # Take top K closest neighbours for each descriptor.
    closest_indices = np.argsort(all_distances, axis=1)[:, :neighbours]
    # Create a list of "K" match pairs.
    matches = []
    for i in range(closest_indices.shape[0]):
        match_list = [cv2.DMatch(
            _trainIdx=n, _queryIdx=i, _distance=all_distances[i, n],
            _imgIdx=img_id) for n in closest_indices[i]]
        matches.append(match_list)
    # Perform ratio test to get inliers (skipped when threshold is falsy).
    if ratio_threshold:
        matches = filter_matches(matches, ratio_threshold)
    return matches
6397938b3624e1f32426b429f809e60e6bb72b49
3,642,825
from typing import Optional


def get_virtual_network_gateway_bgp_peer_status(peer: Optional[str] = None,
                                                resource_group_name: Optional[str] = None,
                                                virtual_network_gateway_name: Optional[str] = None,
                                                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult:
    """
    Response for list BGP peer status API service call.

    :param str peer: The IP address of the peer to retrieve the status of.
    :param str resource_group_name: The name of the resource group.
    :param str virtual_network_gateway_name: The name of the virtual network gateway.
    """
    # Pulumi invoke arguments use the Azure API's camelCase names.
    __args__ = dict()
    __args__['peer'] = peer
    __args__['resourceGroupName'] = resource_group_name
    __args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the azure-native provider; the typed result
    # is unwrapped and rewrapped in its awaitable counterpart.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20200601:getVirtualNetworkGatewayBgpPeerStatus', __args__, opts=opts, typ=GetVirtualNetworkGatewayBgpPeerStatusResult).value

    return AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(
        value=__ret__.value)
ade144cd6ce8c6827c0631a5c795d4ef2fbcaf7f
3,642,826
import pathlib


def cat(file_path: str) -> str:
    """Read a text file, a shortcut for ``pathlib.Path(...).read_text``.

    Args:
        file_path (str): path to the file to read.

    Returns:
        str: the file's contents, or ``None`` when the path is not an
        existing regular file.
    """
    path = pathlib.Path(file_path)
    if not path.is_file():
        return None
    return path.read_text()
17eef15686a97e62380d077d678f2993e02e6d5c
3,642,828
def _get_role_by_name(role_name):
    """
    Get application membership role

    Args:
        role_name (str): role name.

    Returns:
        int: application membership role id.

    Raises:
        BalenaApplicationMembershipRoleNotFound: when no role matches.
    """
    base_request = BaseRequest()
    settings = Settings()
    # Pine filter: application_membership_role where name == role_name.
    params = {
        'filter': 'name',
        'eq': role_name
    }
    roles = base_request.request(
        'application_membership_role', 'GET', params=params,
        endpoint=settings.get('pine_endpoint')
    )['d']
    if not roles:
        raise exceptions.BalenaApplicationMembershipRoleNotFound(role_name=role_name)
    else:
        # Role names are unique, so the first (only) match is returned.
        return roles[0]['id']
0599f6c9571345318be71b9f453f89d1439c64fa
3,642,829
def parse_filename(filename, is_adversarial=False, **kwargs):
    """Parse an experiment-result filename into a dict of settings.

    Args:
        filename: a string of filename
        is_adversarial: whether the file is from
            experiments/GIB_node_adversarial_attack.
    """
    # Dispatch to the matching format-specific parser; only the adversarial
    # variant accepts extra keyword options.
    if not is_adversarial:
        return parse_filename_standard(filename)
    return parse_filename_adversarial(filename, **kwargs)
1972de5803a8eb0ff50438adbe0adee1597199a9
3,642,830
def WHo_mt(dist, sigma):
    """
    Speed-accuracy (WHo) model for generating finger movement time.

    :param dist: Euclidean distance between points.
    :param sigma: speed-accuracy trade-off variance.
    :return: mt: movement time.
    """
    # WHo model constants.
    x0 = 0.092       # minimum movement-time offset
    y0 = 0.0018      # minimum variance offset
    alpha = 0.6      # trade-off exponent
    k_alpha = 0.12   # gain
    # (Removed unused locals x_min/x_max that were never referenced.)

    # Guard against a zero distance, which would divide by zero below.
    if dist == 0:
        dist = 0.0000001
    mt = pow((k_alpha * pow(((sigma - y0) / dist), (alpha - 1))), 1 / alpha) + x0
    return mt
36d8b7e913df658b52f1f03617d0b9817091d0ef
3,642,831
def find_next_sibling_position(element, tag_type):
    """
    Gets current element's next sibling's (chosen by provided tag_type)
    actual character position in the html document.

    :param element: Whose sibling to look for, type: An object of class bs4.Tag
    :param tag_type: sibling tag's type (e.g. p, h2, div, span etc.), type: A string
    :return: An Integer specifying character pos. in html, infinite when no
        sibling is found
    """
    sibling = element.find_next_sibling(tag_type)
    if sibling is None:
        # No such sibling: infinity keeps "comes after everything" semantics.
        return float("inf")
    return sibling.sourcepos
9b912fd9b7d30e81d6b4c2fec0e0573017b51a83
3,642,832
def one_hot(arr, n_class=0):
    """Change labels to one-hot expression.

    Args:
        arr [np.array]: numpy array (or list) of non-negative integer labels
        n_class [int]: number of classes; 0 means infer as max label + 1

    Returns:
        oh [np.array]: array with a trailing one-hot dimension appended, or
        None when `arr` is None.
    """
    if arr is None:
        return None
    if isinstance(arr, (list, np.ndarray)):
        arr = np.array(arr)
    # Remember the input shape; encoding is done on the flattened labels.
    orig_shape = arr.shape
    flat = arr.flatten()
    n_class = flat.max() + 1 if n_class == 0 else n_class
    assert n_class >= flat.max() + 1, ValueError("Value of 'n_class' is too small.")
    encoded = np.zeros((flat.size, n_class), dtype=int)
    encoded[np.arange(flat.size), flat] = 1
    # Restore the original shape with the one-hot axis appended.
    return np.reshape(encoded, (*orig_shape, -1))
ba22f7f1f7d97d5d3989eff69c42bdce2ca34e87
3,642,834
def boost_nfw_at_R(R, B0, R_scale):
    """NFW boost factor model.

    Args:
        R (float or array like): Distances on the sky in the same units
            as R_scale. Mpc/h comoving suggested for consistency with
            other modules.
        B0 (float): NFW profile amplitude.
        R_scale (float): NFW profile scale radius.

    Returns:
        float or array like: NFW boost factor profile; B = (1-fcl)^-1.
    """
    # Wrap the input so scalars and arrays share one code path, then let the
    # compiled C routine fill the output buffer in place.
    R = _ArrayWrapper(R, 'R')
    boost = _ArrayWrapper.zeros_like(R)
    cluster_toolkit._lib.boost_nfw_at_R_arr(R.cast(), len(R), B0, R_scale,
                                            boost.cast())
    # finish() unwraps back to the caller's scalar/array form.
    return boost.finish()
a7e13f5309fa663b41c5eec1c8518f444ba86b5f
3,642,835
def get_swatches(root):
    """Get swatch elements in the SVG.

    Walks every descendant node of `root`, keeps those that carry an ``id``
    attribute whose value maps to a class name, and indexes them by that
    class name.  Later duplicates overwrite earlier ones.
    """
    swatches = {}
    for node in descendants(root):
        # Skip non-element nodes (e.g. text nodes) that lack hasAttribute,
        # and elements without an id.
        if "hasAttribute" not in dir(node) or not node.hasAttribute("id"):
            continue
        classname = extract_class_name(node.getAttribute("id"))
        if classname:
            swatches[classname] = node
    return swatches
2d9cd4ca2ff034d4200b242eaa5592311c250155
3,642,836
def chunks(l, n):
    """Split a sequence into consecutive chunks of at most ``n`` items.

    Useful for controlling memory usage.  A chunk size below 1 is clamped
    to 1; the final chunk may be shorter than ``n``.
    """
    size = max(n, 1)
    return [l[start:start + size] for start in range(0, len(l), size)]
d878aeb50bd42c9f5a2060f4bb2747aecb1a3b58
3,642,837
def UserLevelAuthEntry(val=None):
    """Provide a 2-tuple of user and level

    * user: string
    * level: oneof(ACCESS_LEVELS) currently: GUEST, USER, ADMIN

    NOTE(review): the default ``val=None`` makes ``len(val)`` raise
    TypeError rather than the friendlier ValueError below -- verify whether
    callers ever omit the argument.
    """
    if len(val) != 2:
        raise ValueError('UserLevelAuthEntry entry needs to be a 2-tuple '
                         '(name, accesslevel)')
    # Delegate validation to the 3-tuple variant with an empty password.
    # pylint: disable=unbalanced-tuple-unpacking
    user, _p, level = UserPassLevelAuthEntry((val[0], '', val[1]))
    return tuple((user, level))
e26c723a55d215c71d46d2e45e30b3a39d78723d
3,642,838
import tokenize


# NOTE(review): the stdlib `tokenize` import above looks wrong -- the call
# below uses a `tokenize(headers, foldCase=...)` helper (HTTP header
# tokenizer), which the stdlib module does not provide; verify the module's
# real imports.
def parseCookie(headers):
    """Parse Cookie request headers into a list of Cookie objects.

    Bleargh, the cookie spec sucks.  This surely needs interoperability
    testing.  There are two specs that are supported:
        Version 0) http://wp.netscape.com/newsref/std/cookie_spec.html
        Version 1) http://www.faqs.org/rfcs/rfc2965.html

    The RFC2965 branch is taken when the header starts with ``$Version``;
    otherwise the old Netscape format is assumed.
    """
    cookies = []
    # There can't really be multiple cookie headers according to RFC, because
    # if multiple headers are allowed, they must be joinable with ",".
    # Neither new RFC2965 cookies nor old netscape cookies are.
    header = ';'.join(headers)
    if header[0:8].lower() == "$version":
        # RFC2965 cookie: tokenize, split on ',' into cookies, then on ';'
        # into attribute/value pairs.
        h = tokenize([header], foldCase=False)
        r_cookies = split(h, Token(','))
        for r_cookie in r_cookies:
            last_cookie = None
            rr_cookies = split(r_cookie, Token(';'))
            for cookie in rr_cookies:
                nameval = tuple(split(cookie, Token('=')))
                if len(nameval) == 2:
                    (name,), (value,) = nameval
                else:
                    # Attribute without a value (e.g. bare "$Port").
                    (name,), = nameval
                    value = None

                name = name.lower()
                if name == '$version':
                    continue
                if name[0] == '$':
                    # $-attributes modify the most recently seen cookie.
                    if last_cookie is not None:
                        if name == '$path':
                            last_cookie.path = value
                        elif name == '$domain':
                            last_cookie.domain = value
                        elif name == '$port':
                            if value is None:
                                last_cookie.ports = ()
                            else:
                                last_cookie.ports = tuple([int(s) for s in value.split(',')])
                else:
                    last_cookie = Cookie(name, value, version=1)
                    cookies.append(last_cookie)
    else:
        # Oldstyle cookies don't do quoted strings or anything sensible.
        # All characters are valid for names except ';' and '=', and all
        # characters are valid for values except ';'. Spaces are stripped,
        # however.
        r_cookies = header.split(';')
        for r_cookie in r_cookies:
            name, value = r_cookie.split('=', 1)
            name = name.strip(' \t')
            value = value.strip(' \t')
            cookies.append(Cookie(name, value))
    return cookies
f12cfc5303f466eebe3f1b2731d22d02caf12b1d
3,642,839