content — string, lengths 35 to 762k
sha1 — string, length 40
id — int64, range 0 to 3.66M
from typing import List


def async_entries_for_config_entry(
    registry: DeviceRegistry, config_entry_id: str
) -> List[DeviceEntry]:
    """Return entries that match a config entry."""
    return [
        device
        for device in registry.devices.values()
        if config_entry_id in device.config_entries
    ]
17af1610631e6b0f407883fa8386082a694f9cd2
3,641,789
from pathlib import Path

import yaml


def load_material(name: str) -> Material:
    """Load a material from the materials library.

    Args:
        name (str): Name of material.

    Raises:
        FileNotFoundError: If the material is not found.

    Returns:
        Material: Loaded material.
    """
    try:
        with open(Path(__file__).parent.joinpath(f"resources/{name}.yaml"), "r") as f:
            matdict = yaml.full_load(f)
        try:
            matdict["properties"]["alpha"] = get_alpha(name)
        except FileNotFoundError:
            print(f"{name}.csv does not exist!")
        return create_material(**matdict["properties"])
    except FileNotFoundError:
        print(f"{name} is not an available material!")
        raise  # re-raise so the documented FileNotFoundError actually propagates
aa53eac4889d6c44e78d8f3f6e3b5c2099a7bb53
3,641,790
def reportBusniessModelSummaryView(request):
    """Report: number of applicants per business model status, at every step."""
    queryset = SmeCompetition.objects \
        .values('enterpise__business_model', 'enterpise__business_model__name') \
        .annotate(step_register=Count('enterpise__business_model')) \
        .annotate(step_screen=Count('enterpise__business_model',
                                    filter=Q(state__in=[2, 4, 5, 6, 7, 8, 9, 10, 11]))) \
        .annotate(step_interview=Count('enterpise__business_model',
                                       filter=Q(state__in=[4, 6, 7, 8, 9, 10, 11]))) \
        .annotate(step_sitevisit=Count('enterpise__business_model',
                                       filter=Q(state__in=[6, 8, 9, 10]))) \
        .order_by('enterpise__business_model__name')

    total_register = SmeCompetition.objects.filter(active=True).count()
    total_screen = SmeCompetition.objects.filter(
        active=True, state__in=[2, 4, 5, 6, 7, 8, 9, 10, 11]).count()
    total_interview = SmeCompetition.objects.filter(
        active=True, state__in=[4, 6, 7, 8, 9, 10, 11]).count()
    total_summary = SmeCompetition.objects.filter(
        active=True, state__in=[6, 8, 9, 10, 11]).count()

    context = {'queryset': queryset, 'total_register': total_register,
               'total_screen': total_screen, 'total_interview': total_interview,
               'total_summary': total_summary}
    return render(request, 'app_sme12/report/businessmodel_summary.html', context)
a0bcb8e0b9d51fc1ed9070857ef9db2c4641ddd6
3,641,791
def _get_announce_url(rjcode: str) -> str:
    """Get DLsite announce URL corresponding to an RJ code."""
    return _ANNOUNCE_URL.format(rjcode)
997b82270fcb115f8510d0ded1e1b6204e835e92
3,641,792
import torch
import torch.nn.functional as F


def get_r_adv(x, decoder, it=1, xi=1e-1, eps=10.0):
    """
    Virtual Adversarial Training
    https://arxiv.org/abs/1704.03976
    """
    x_detached = x.detach()
    with torch.no_grad():
        pred = F.softmax(decoder(x_detached), dim=1)

    d = torch.rand(x.shape).sub(0.5).to(x.device)
    d = _l2_normalize(d)

    for _ in range(it):
        d.requires_grad_()
        pred_hat = decoder(x_detached + xi * d)
        logp_hat = F.log_softmax(pred_hat, dim=1)
        adv_distance = F.kl_div(logp_hat, pred, reduction='batchmean')
        adv_distance.backward()
        d = _l2_normalize(d.grad)
        decoder.zero_grad()

    r_adv = d * eps
    return r_adv
7613e59d88117a8ed263aad76139b1e4d808582c
3,641,793
def get_factors(shoppers, n_components=4, random_state=903, **kwargs):
    """
    Find Factors to represent the shopper-level features in compressed space.

    These factors will be used to map simplified user input from the application
    to the full feature space used in modeling.

    Args:
        shoppers (pd.DataFrame): full set of shoppers in feature data (train + test)
        n_components (int): number of factors to mine. Defaults to 4 and should
            stay that way (the application UI is based on these 4 analyzed factors)
        random_state (int): sets random state for factor analysis algorithm.
            Defaults to 903 (and should stay that way)
        kwargs: additional keyword arguments for sklearn.decomposition.FactorAnalysis

    Returns:
        pd.DataFrame: will have n_components rows and n_features columns. The
            values of this matrix can be used to map factors to the full feature
            set (on std normal scale).
    """
    # Remove columns which should not be considered in factor analysis
    x = shoppers
    for col in ['user_id', 'n_orders', 'label']:
        if col in x.columns:
            x = x.drop(columns=col)

    # Need to scale data as columns are on incommensurate scales
    cols = x.columns
    x = preprocessing.scale(x)

    fa = FactorAnalysis(n_components, random_state=random_state, **kwargs)
    fa.fit(x)
    return pd.DataFrame(fa.components_, columns=cols)
966ca305b87b836d9caa5c857608bc6b16120e26
3,641,795
# Assumed imports: `F` and `Column` come from PySpark (F.filter requires PySpark >= 3.1).
from pyspark.sql import Column
import pyspark.sql.functions as F


def cols_to_array(*cols, remove_na: bool = True) -> Column:
    """
    Create a column of ArrayType() from a user-supplied column list.

    Args:
        cols: columns to convert into an array.
        remove_na (optional): Remove nulls from the array. Defaults to True.

    Returns:
        Column of ArrayType()
    """
    if remove_na:
        return F.filter(F.array(*cols), lambda x: x.isNotNull())
    else:
        return F.array(*cols)
a33e9b907d95fc767c2f247e12c22bdac9ad7585
3,641,796
def _git_repo_status(repo):
    """Get current git repo status.

    :param repo: Path to directory containing a git repo
    :type repo: :class:`pathlib.Path()`
    :return: Repo status
    :rtype: dict
    """
    repo_status = {'path': repo}
    options = ['git', '-C', str(repo), 'status', '-s']
    changes = _run_cmd(options).stdout.decode()
    repo_status['uncommited changes'] = bool(changes)
    local, remote = _git_get_branches(repo)
    repo_status['local only branches'] = bool(set(remote) - set(local))
    repo_status['ahead of origin'] = _git_get_ahead(repo)
    return repo_status
c35a20b7350dcf20bedcd9b201fd04a46c83449b
3,641,797
def _parseList(s):
    """Validation function. Parse a comma-separated list of strings."""
    return [item.strip() for item in s.split(",")]
5bf9ac50a44a18cc4798ed616532130890803bac
3,641,798
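A quick check of `_parseList` above (hypothetical input; the function is self-contained):

    parts = _parseList(" alpha, beta ,gamma ")
    assert parts == ["alpha", "beta", "gamma"]  # whitespace stripped per item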
def true_segments_1d(segments, mode=SegmentsMode.CENTERS, max_gap=0, min_length=0,
                     name=None):
  """Labels contiguous True runs in segments.

  Args:
    segments: 1D boolean tensor.
    mode: The SegmentsMode. Returns the start of each segment (STARTS), or the
      rounded center of each segment (CENTERS).
    max_gap: Fill gaps of length at most `max_gap` between true segments. int.
    min_length: Minimum length of a returned segment. int.
    name: Optional name for the op.

  Returns:
    run_centers: int32 tensor. Depending on `mode`, either the start of each
      True run, or the (rounded) center of each True run.
    run_lengths: int32; the lengths of each True run.
  """
  with tf.name_scope(name, "true_segments", [segments]):
    segments = tf.convert_to_tensor(segments, tf.bool)
    run_starts, run_lengths = _segments_1d(segments, mode=SegmentsMode.STARTS)
    # Take only the True runs. After whichever run is True first, the True runs
    # are every other run.
    first_run = tf.cond(
        # First value is False, or all values are False. Handles empty segments
        # correctly.
        tf.logical_or(tf.reduce_any(segments[0:1]), ~tf.reduce_any(segments)),
        lambda: tf.constant(0),
        lambda: tf.constant(1))
    num_runs = tf.shape(run_starts)[0]
    run_nums = tf.range(num_runs)
    is_true_run = tf.equal(run_nums % 2, first_run % 2)
    # Find gaps between True runs that can be merged.
    is_gap = tf.logical_and(
        tf.not_equal(run_nums % 2, first_run % 2),
        tf.logical_and(
            tf.greater(run_nums, first_run), tf.less(run_nums, num_runs - 1)))
    fill_gap = tf.logical_and(is_gap, tf.less_equal(run_lengths, max_gap))
    # Segment the consecutive runs of True or False values based on whether
    # they are True, or are a gap of False values that can be bridged. Then,
    # flatten the runs of runs.
    runs_to_merge = tf.logical_or(is_true_run, fill_gap)
    run_of_run_starts, _ = _segments_1d(runs_to_merge, mode=SegmentsMode.STARTS)
    # Get the start of every new run from the original run starts.
    merged_run_starts = tf.gather(run_starts, run_of_run_starts)
    # Make an array mapping the original runs to their run of runs. Increment
    # the number for every run of run start except for the first one, so that
    # the array has values from 0 to num_run_of_runs.
    merged_run_inds = tf.cumsum(
        tf.sparse_to_dense(
            sparse_indices=tf.cast(run_of_run_starts[1:, None], tf.int64),
            output_shape=tf.cast(num_runs[None], tf.int64),
            sparse_values=tf.ones_like(run_of_run_starts[1:])))
    # Sum the lengths of the original runs that were merged.
    merged_run_lengths = tf.segment_sum(run_lengths, merged_run_inds)

    if mode is SegmentsMode.CENTERS:
      merged_starts_or_centers = (
          merged_run_starts + tf.floordiv(merged_run_lengths - 1, 2))
    else:
      merged_starts_or_centers = merged_run_starts

    # If there are no true values, increment first_run to 1, so we will skip
    # the single (false) run.
    first_run += tf.to_int32(tf.logical_not(tf.reduce_any(segments)))

    merged_starts_or_centers = merged_starts_or_centers[first_run::2]
    merged_run_lengths = merged_run_lengths[first_run::2]

    # Only take segments at least min_length long.
    is_long_enough = tf.greater_equal(merged_run_lengths, min_length)
    is_long_enough.set_shape([None])
    merged_starts_or_centers = tf.boolean_mask(merged_starts_or_centers,
                                               is_long_enough)
    merged_run_lengths = tf.boolean_mask(merged_run_lengths, is_long_enough)

    return merged_starts_or_centers, merged_run_lengths
801541b7c3343fd59f79a3f3696c3cb17ab41c31
3,641,799
def get_user_language_keyboard(user):
    """Get user language picker keyboard."""
    buttons = []
    # Compile the possible options for user sorting
    for language in supported_languages:
        button = InlineKeyboardButton(
            language,
            callback_data=f'{CallbackType.user_change_language.value}:{user.id}:{language}'
        )
        buttons.append([button])

    github_url = 'https://github.com/Nukesor/ultimate-poll-bot/tree/master/i18n'
    new_language = i18n.t('keyboard.add_new_language', locale=user.locale)
    buttons.append([InlineKeyboardButton(text=new_language, url=github_url)])
    buttons.append([get_back_to_settings_button(user)])

    return InlineKeyboardMarkup(buttons)
b297f5f796f84d5a89849017d29f4a841e3fa2f3
3,641,800
def splitDataSet(dataSet, index, value):
    """
    Split the dataset: keep the rows whose value in column `index` equals `value`.

    dataSet: dataset to split
    index:   feature (column) to split on
    value:   feature value to select
    """
    retDataSet = []
    for featVec in dataSet:
        if featVec[index] == value:
            reducedFeatVec = featVec[:index]
            reducedFeatVec.extend(featVec[index + 1:])
            retDataSet.append(reducedFeatVec)
    # Return the rows where column `index` equals `value`, with that column removed.
    return retDataSet
814a54fe13d832e69d8df32af52d882d4a15c4ba
3,641,801
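A minimal worked example for the `splitDataSet` snippet above (hypothetical toy data):

    dataSet = [[1, 'yes', 'A'],
               [1, 'no', 'B'],
               [0, 'yes', 'C']]
    # Keep rows whose column 0 equals 1, and drop that column.
    subset = splitDataSet(dataSet, 0, 1)
    assert subset == [['yes', 'A'], ['no', 'B']]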
def spam(a, b, c):
    """The spam function.

    Returns a * b + c
    """
    return a * b + c
356a2bc2c108bc7458bfd67608695412f045035f
3,641,802
def in_skill_product_response(handler_input):
    """Get the In-skill product response from monetization service."""
    # type: (HandlerInput) -> Union[InSkillProductsResponse, Error]
    locale = handler_input.request_envelope.request.locale
    ms = handler_input.service_client_factory.get_monetization_service()
    return ms.get_in_skill_products(locale)
9452ac1498ff0e6601df9fc419df0cfdd6b9171e
3,641,803
import typing


def ipaddr(
    value: typing.Union[str, int],
    query: typing.Optional[str] = None,
) -> str:
    """Filter IP addresses and networks.

    .. versionadded:: 1.1

    Implements Ansible `ipaddr filter
    <https://docs.ansible.com/ansible/latest/user_guide/playbooks_filters_ipaddr.html>`_.
    """
    return _utils.ip_filter(value, query=query)
c0830db91a2ba6ffbb8ec8eab4da90136283110e
3,641,804
def npoints_between(lon1, lat1, depth1, lon2, lat2, depth2, npoints):
    """
    Find a list of a specified number of points between two given ones that
    are equally spaced along the great circle arc connecting the given points.

    :param float lon1, lat1, depth1:
        Coordinates of a point to start from. The first point in the resulting
        list has these coordinates.
    :param float lon2, lat2, depth2:
        Coordinates of a point to finish at. The last point in the resulting
        list has these coordinates.
    :param npoints:
        Integer number of points to return. First and last points count, so if
        there have to be two intervals, ``npoints`` should be 3.
    :returns:
        Tuple of three 1d numpy arrays: longitudes, latitudes and depths of
        the resulting points respectively.

    Finds the distance between the two reference points and calls
    :func:`npoints_towards`.
    """
    hdist = geodetic_distance(lon1, lat1, lon2, lat2)
    vdist = depth2 - depth1
    rlons, rlats, rdepths = npoints_towards(
        lon1, lat1, depth1, azimuth(lon1, lat1, lon2, lat2),
        hdist, vdist, npoints
    )
    # the last point should be left intact
    rlons[-1] = lon2
    rlats[-1] = lat2
    rdepths[-1] = depth2
    return rlons, rlats, rdepths
bd4032f655d6f296f372a89848e7b7ffa431e4ee
3,641,805
def maxPoolLayer(x, kHeight, kWidth, strideX, strideY, name, padding="SAME"):
    """max-pooling"""
    return tf.nn.max_pool(x, ksize=[1, kHeight, kWidth, 1],
                          strides=[1, strideX, strideY, 1],
                          padding=padding, name=name)
6fd68582db359a2c113925f0aef5d7b5a79e8fe4
3,641,806
import numpy as np
import torch


def coo2st(coo):
    """
    Transform a matrix in sparse coo_matrix format to a torch sparse tensor.

    INPUT
        coo - matrix in scipy sparse coo_matrix format
    OUTPUT
        coo matrix as a torch.sparse.FloatTensor
    """
    values = coo.data
    indices = np.vstack((coo.row, coo.col))

    i = torch.LongTensor(indices)
    v = torch.FloatTensor(values)
    shape = coo.shape

    return torch.sparse.FloatTensor(i, v, torch.Size(shape))
4b516f8df8829873c6160fdb8e35403f914ebc8b
3,641,807
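A small usage sketch for `coo2st` above (assumes scipy is available):

    import numpy as np
    from scipy.sparse import coo_matrix

    dense = np.array([[0.0, 1.0], [2.0, 0.0]], dtype=np.float32)
    sparse_t = coo2st(coo_matrix(dense))
    # Round-tripping through the sparse tensor recovers the dense matrix.
    assert (sparse_t.to_dense().numpy() == dense).all()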
def make_dataset(data, nafc):
    """Create a PsiData object from column-based input.

    Parameters
    ----------
    data : sequence of length-3 sequences
        Psychometric data in column-based input,
        e.g. [[1, 1, 5], [2, 3, 5], [3, 5, 5]].
    nafc : int
        Number of alternative choices in forced choice procedure.

    Returns
    -------
    data : PsiData
        Dataset object.
    """
    data = np.array(data).T
    x = sfr.vector_double(map(float, data[0]))
    k = sfr.vector_int(map(int, data[1]))
    N = sfr.vector_int(map(int, data[2]))
    return sfr.PsiData(x, N, k, nafc)
cdf88db4c04373aae6a4655aeba9b2fa53f9b394
3,641,809
def TorchFFTConv2d(a, K):
    """
    FFT tensor convolution of image a with kernel K.

    Args:
        a (torch.Tensor): 1-channel image as tensor with at least 2 dimensions.
            Dimensions -2 & -1 are spatial dimensions and all other dimensions
            are assumed to be batch dimensions.
        K (torch.Tensor): 1-channel kernel as tensor with at least 2 dimensions.

    Return:
        Absolute value of the convolution of image a with kernel K.
    """
    K = torch_fft.rfft2(K)
    a = torch_fft.rfft2(a)
    img_conv = TorchComplexMul(K, a)
    img_conv = torch_fft.irfft2(img_conv)
    return (img_conv**2).sqrt().cpu()
fefdfbb19d8d4cb5d785f80e8197cf19e05c3cb6
3,641,810
def pad(obj, pad_length):
    """
    Return a copy of the object with its piano-roll padded with zeros at the
    end along the time axis.

    Parameters
    ----------
    obj : pypianoroll.Track
        The track object to copy and pad.
    pad_length : int
        The length to pad along the time axis with zeros.
    """
    if not isinstance(obj, Track):
        raise TypeError("Support only `pypianoroll.Track` class objects")
    copied = deepcopy(obj)
    copied.pad(pad_length)
    return copied
9d124a06002941078e648707015b3698a290abe7
3,641,811
def stat_mtime(stat):
    """Returns the mtime field from the results returned by os.stat()."""
    return stat[8]
1f7fec9a54a97bb63141d63db706b2885913dadb
3,641,812
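A usage sketch for `stat_mtime` above; index 8 is `st_mtime` in the `os.stat()` result tuple (hypothetical path):

    import os

    st = os.stat(".")
    assert stat_mtime(st) == st.st_mtime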
def from_yaml_dictionary(yaml_gra_dct, one_indexed=True):
    """ read the graph from a yaml dictionary """
    atm_dct = yaml_gra_dct['atoms']
    bnd_dct = yaml_gra_dct['bonds']

    atm_dct = dict_.transform_values(
        atm_dct, lambda x: tuple(map(x.__getitem__, ATM_PROP_NAMES)))

    bnd_dct = dict_.transform_keys(
        bnd_dct, lambda x: frozenset(map(int, x.split('-'))))
    bnd_dct = dict_.transform_values(
        bnd_dct, lambda x: tuple(map(x.__getitem__, BND_PROP_NAMES)))

    gra = _create.from_atoms_and_bonds(atm_dct, bnd_dct)

    if one_indexed:
        # revert one-indexing if the input is one-indexed
        atm_key_dct = {atm_key: atm_key - 1 for atm_key in atom_keys(gra)}
        gra = relabel(gra, atm_key_dct)

    return gra
6d98ba4ff8821cb3072d42a7d672b70e69234fa8
3,641,813
def classifier_train(
    X_train,
    y_train,
    X_val,
    y_val,
    clf,
    k_fold_no=10,
    uo_sample_method=None,
    imbalance_ratio=1,
    print_results=False,
    train_on_all=False,
):
    """Trains a sklearn classifier using k-fold cross-validation.

    Returns the ROC_AUC score, with other parameters in a pandas df.

    To-Do:
    - When using SVM, only use under sampling when feature count is over a
      certain size, otherwise it will blow up
    """
    skfolds = StratifiedKFold(n_splits=k_fold_no, shuffle=True, random_state=42)

    # below code is modified from 'Hands on Machine Learning' by Geron (pg. 196)
    roc_auc_results = []
    auc_results = []
    precision_results = []
    recall_results = []
    f1_results = []

    if print_results:
        # print definitions of precision / recall
        print(
            "\033[1m", "Precision:", "\033[0m",
            "What proportion of positive identifications were actually correct?",
        )
        print(
            "\033[1m", "Recall:", "\033[0m",
            "What proportion of actual positives were identified correctly?",
        )

    # implement cross-validation
    for train_index, test_index in skfolds.split(X_train, y_train):
        # use clone to do a deep copy of model without copying attached data
        # https://scikit-learn.org/stable/modules/generated/sklearn.base.clone.html
        clone_clf = clone(clf)

        X_train_fold = X_train[train_index]
        y_train_fold = y_train[train_index]

        # get the test folds
        X_test_fold = X_train[test_index]
        y_test_fold = y_train[test_index]

        # # add over/under sampling (do this after scaling)
        # X_train_fold, y_train_fold = under_over_sampler(
        #     X_train_fold, y_train_fold, method=uo_sample_method, ratio=imbalance_ratio
        # )

        clone_clf.fit(X_train_fold, y_train_fold)
        final_clf = clone_clf

        (
            auc_score,
            roc_score,
            precision_result,
            recall_result,
            f1_result,
        ) = calculate_scores(clone_clf, X_test_fold, y_test_fold)

        auc_results.append(auc_score)
        precision_results.append(precision_result)
        recall_results.append(recall_result)
        f1_results.append(f1_result)
        roc_auc_results.append(roc_score)

        if print_results:
            print(
                "ROC: {:.3%} \t AUC: {:.3%} \t Pr: {:.3%} \t Re: {:.3%} \t F1: {:.3%}".format(
                    roc_score, auc_score, precision_result, recall_result, f1_result
                )
            )

    if print_results:
        print("\033[1m", "\nFinal Results:", "\033[0m")
        print(
            "ROC: {:.3%} \t AUC: {:.3%} \t Pr: {:.3%} \t Re: {:.3%} \t F1: {:.3%}".format(
                np.sum(roc_auc_results) / k_fold_no,
                np.sum(auc_results) / k_fold_no,
                np.sum(precision_results) / k_fold_no,
                np.sum(recall_results) / k_fold_no,
                np.sum(f1_results) / k_fold_no,
            )
        )
        # standard deviations
        print(
            "Std: {:.3%} \t Std: {:.3%} \t Std: {:.3%} \t Std: {:.3%} \t Std: {:.3%}".format(
                np.std(roc_auc_results),
                np.std(auc_results),
                np.std(precision_results),
                np.std(recall_results),
                np.std(f1_results),
            )
        )

    result_dict = {
        "roc_auc_score_train": np.sum(roc_auc_results) / k_fold_no,
        "roc_auc_std_train": np.std(roc_auc_results),
        "roc_auc_min_train": np.min(roc_auc_results),
        "roc_auc_max_train": np.max(roc_auc_results),
        "auc_score_train": np.sum(auc_results) / k_fold_no,
        "auc_std_train": np.std(auc_results),
        "auc_min_train": np.min(auc_results),
        "auc_max_train": np.max(auc_results),
        "f1_score_train": np.sum(f1_results) / k_fold_no,
        "f1_std_train": np.std(f1_results),
        "f1_min_train": np.min(f1_results),
        "f1_max_train": np.max(f1_results),
        "precision_train": np.sum(precision_results) / k_fold_no,
        "precision_std_train": np.std(precision_results),
        "precision_min_train": np.min(precision_results),
        "precision_max_train": np.max(precision_results),
        "recall_train": np.sum(recall_results) / k_fold_no,
        "recall_std_train": np.std(recall_results),
        "recall_min_train": np.min(recall_results),
        "recall_max_train": np.max(recall_results),
    }

    # when to use ROC vs. precision-recall curves, Jason Brownlee http://bit.ly/38vEgnW
    # https://stats.stackexchange.com/questions/113326/what-is-a-good-auc-for-a-precision-recall-curve

    if train_on_all:
        # now scale and fit the data on the entire training set
        new_clf = clone(clf)
        new_clf.fit(X_train, y_train)
        (
            auc_score_val,
            roc_score_val,
            precision_result_val,
            recall_result_val,
            f1_result_val,
        ) = calculate_scores(new_clf, X_val, y_val)
        result_dict['auc_score_val'] = auc_score_val
        result_dict['roc_auc_score_val'] = roc_score_val
        final_clf = new_clf

    return result_dict, final_clf
1dc33fc2e0c53f4c1e8a079ddb6c305f66888226
3,641,814
def html_code_envir(envir, envir_spec):
    """
    Return html tags that can be used to wrap formatted code.
    This method was created to enhance modularization of code.
    See latex_code_envir in latex.py.

    :param tuple[str, str, str] envir: code block arguments, e.g. ('py', 'cod', '-h')
    :param str envir_spec: optional typesetting of code blocks
    :return: tuple of html tags, e.g. ('<pre>', '</pre>')
    :rtype: tuple[str, str]
    """
    begin, end = '<pre>', '</pre>'
    if envir_spec.endswith('-out') and option('ignore_output'):
        begin, end = '', ''
    elif envir_spec.endswith('-e'):
        begin, end = '', ''
    return begin, end
1efa2f8e1a295a49b03241ac99e4c139c10b5f58
3,641,815
def calc_very_restricted_wage_distribution(df):
    """Compute per-period mean and std of wages for agents under two choice restrictions."""
    # Parenthesize the choice condition: `and` binds tighter than `or` in the
    # query string, so the unparenthesized original also matched rows with
    # Choice == 'b' under any policy.
    return (
        df.query("Policy == 'veryrestricted' and (Choice == 'a' or Choice == 'b')")
        .groupby(["Period"])["Wage"]
        .describe()[["mean", "std"]]
    )
3ca8a2f0061e456a3158b4ee8a128a5a7439af3f
3,641,819
from typing import Iterable

import anndata


def distrib_one_v_max(
    adata: anndata.AnnData,
    celltype: str,
    ax,
    gene_highlight: Iterable[str],
    partition_key: str = "CellType",
):
    """
    Parameters
    ----------
    adata
        The corrected expression data.
    celltype
        Celltype to be plotted.
    gene_highlight
        List of genes to highlight on the plot, usually marker genes.
    partition_key
        The key in adata.obs corresponding to the annotations to be used.

    Returns
    -------
    The seaborn strip plot of one_v_max values for the cell type.
    """
    one_v_max = one_v_max_matrix(adata, partition_key=partition_key)
    to_plot = one_v_max.loc[celltype]
    to_plot_trunc = to_plot[to_plot > 1]  # The only relevant one_v_max are the ones > 1
    gene_highlight = list(set(gene_highlight) & set(list(to_plot_trunc.index)))
    if gene_highlight:
        to_plot_highlight = to_plot_trunc[gene_highlight]
        to_plot_trunc = to_plot_trunc.drop(gene_highlight)
        out = sns.stripplot(y=to_plot_trunc, ax=ax, orient="v", s=3, linewidth=0.25)
        out = sns.stripplot(
            y=to_plot_highlight, ax=ax, orient="v", color="red", linewidth=0.5
        )
        out.axes.set_xticks([])
        if len(gene_highlight) == 1:
            spe_ovm = round(to_plot_highlight[0], 2)
            out.axes.set_title(
                label=f"{gene_highlight[0]} \n "
                f"{to_plot_trunc.name} onevmax={spe_ovm}",
                loc="center",
                fontsize="small",
            )
        else:
            out.axes.set_title(
                label=f"{to_plot_trunc.name}", loc="center", fontsize="small"
            )
        out.axes.set_ylabel(ylabel="")
    else:
        out = sns.stripplot(y=to_plot_trunc, ax=ax, orient="v", s=3, linewidth=0.25)
        out.axes.set_title(
            label=f"{to_plot_trunc.name}", loc="center", fontsize="small"
        )
        out.axes.set_ylabel(ylabel="")
    return out
af622c3cd7910691fe208420884bf7097b5807a2
3,641,821
def create_angler(request, report_a_tag=False):
    """This view is used to create a new tag reporter / angler.

    When we create a new angler, we do not want to duplicate entries with the
    same first name and last name by default. If there are already anglers
    with the same first and last name, add them to the response we return with
    the form and ask the user to confirm that this new user really does have
    the same name as an existing (but different) angler.
    """
    if request.method == "POST":
        form = CreateJoePublicForm(request.POST)
        if form.is_valid():
            angler = form.save()
            if report_a_tag:
                return redirect("tfat:report_a_tag_angler_reports", angler_id=angler.id)
            else:
                return redirect("tfat:angler_reports", angler_id=angler.id)
        else:
            first_name = form.cleaned_data.get("first_name")
            last_name = form.cleaned_data.get("last_name")
            anglers = JoePublic.objects.filter(
                first_name__iexact=first_name, last_name__iexact=last_name
            ).all()
            if len(anglers):
                return render(
                    request,
                    "tfat/angler_form.html",
                    {
                        "form": form,
                        "anglers": anglers,
                        "report_a_tag": report_a_tag,
                        "action": "Create New ",
                    },
                )
    else:
        form = CreateJoePublicForm()

    return render(
        request,
        "tfat/angler_form.html",
        {"form": form, "report_a_tag": report_a_tag, "action": "Create New "},
    )
8493f7d1f0bdc34ed16a7ca9270364bde2611d1a
3,641,822
def peak_1d_binary_search_iter(nums):
    """Find peak by iterative binary search algorithm.

    Time complexity: O(logn).
    Space complexity: O(1).
    """
    left, right = 0, len(nums) - 1

    while left < right:
        mid = left + (right - left) // 2
        if nums[mid] < nums[mid + 1]:
            # If mid < its right, search right part.
            left = mid + 1
        elif nums[mid] < nums[mid - 1]:
            # If mid < its left, search left part.
            right = mid - 1
        else:
            # Else, found peak.
            return mid

    # For left = right.
    return left
320ce5ae1c0dd1756a609639b3daaa6598a194b5
3,641,823
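A quick check of the peak finder above (toy input; any local-peak index is a valid answer):

    nums = [1, 3, 2, 5, 4]
    peak = peak_1d_binary_search_iter(nums)
    # Indices 1 and 3 are both local peaks; the search returns one of them.
    assert peak in (1, 3)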
def add_site_users_sheet(ws, cols, lnth):
    """Fill the site-users worksheet: users per site = (users/km2 * area) / total sites."""
    for col in cols:
        cell = "{}1".format(col)
        ws[cell] = "='Total_Sites_MNO'!{}".format(cell)

    for col in cols[:2]:
        for i in range(2, lnth):
            cell = "{}{}".format(col, i)
            ws[cell] = "='Total_Sites_MNO'!{}".format(cell)

    for col in cols[2:]:
        for i in range(2, lnth):
            cell = "{}{}".format(col, i)
            part1 = "=IFERROR((Users_km2!{}*Area!{})/Total_Sites_MNO!{},0)".format(
                cell, cell, cell)
            ws[cell] = part1
            ws.formula_attributes[cell] = {'t': 'array',
                                           'ref': "{}:{}".format(cell, cell)}

    columns = ['C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']
    ws = format_numbers(ws, columns, (1, 200), 'Comma [0]', 0)

    set_border(ws, 'A1:L{}'.format(lnth - 1), "thin", "000000")

    return ws
decd1b0c65c6962371826258d3b981483115e790
3,641,824
def process(document, rtype=None, api=None):
    """ Extracts named entities in the specified format from the given
    texterra-annotated text."""
    entities = []
    if annotationName in document['annotations']:
        if rtype == 'entity':
            for token in document['annotations'][annotationName]:
                entities.append((document['text'][token['start']:token['end']],
                                 token['value']['tag']))
        else:  # rtype == 'full'
            for token in document['annotations'][annotationName]:
                entities.append((token['start'], token['end'],
                                 document['text'][token['start']:token['end']],
                                 token['value']['tag']))
    return entities
b554f9c4166d7386d11a33b735033eb13de5bc89
3,641,825
def get_out_hmm_path(new_afa_path):
    """Define an hmm file path for a given aligned fasta file path."""
    new_exten = None
    old_exten = new_afa_path.rsplit('.', 1)[1]
    if old_exten == 'afaa':
        new_exten = 'hmm'
    elif old_exten == 'afna':
        new_exten = 'nhmm'

    # Check that it worked (the original message referenced an undefined
    # `query_file`; the in-scope name is `new_afa_path`).
    assert new_exten is not None, \
        "Error: HMM extension could not be determined for input file: %s" % new_afa_path

    # Get new path.
    hmm_file_path = new_afa_path.rsplit('.', 1)[0] + '.' + new_exten

    # Return the new path.
    return hmm_file_path
08a56612588cf756720c14f80d186cc815283b07
3,641,826
import copy


def transformer_decoder_block(name,
                              n_layers,
                              x,
                              x_mask,
                              output_size,
                              init,
                              **kwargs):
  """A transformation block composed of transformer decoder layers.

  Args:
    name: variable scope.
    n_layers: number of transformer layers.
    x: input to transformation.
    x_mask: mask.
    output_size: output dimensionality.
    init: data-dependent init for weightnorm parameters.
    **kwargs: Contains hparams, encoder_output,
      encoder_decoder_attention_bias and decoder_self_attention_bias.

  Returns:
    outputs: Tensor of shape [batch_size, length, output_size].
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    hparams = kwargs.pop("hparams")
    disable_dropout = kwargs.pop("disable_dropout")
    if disable_dropout:
      hparams = copy.deepcopy(hparams)
      hparams.attention_dropout = 0.0
      hparams.layer_prepostprocess_dropout = 0.0
      hparams.relu_dropout = 0.0
    n_channels = common_layers.shape_list(x)[-1]
    if n_channels != hparams.hidden_size:
      hparams = copy.deepcopy(hparams)
      hparams.hidden_size = n_channels
    outputs = common_attention.add_timing_signal_1d(x)
    with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
      for layer_idx in range(n_layers):
        outputs = transformer_decoder_layer(
            decoder_input=outputs,
            layer_idx=layer_idx,
            hparams=hparams,
            **kwargs)
      outputs = common_layers.layer_preprocess(outputs, hparams)
    outputs = dense_weightnorm(
        "h2o", outputs, output_size, x_mask, init_scale=0.0, init=init)
    return outputs
74203fa3282392e1c5a579ac242c857d7619c786
3,641,827
from typing import Union

import pandas as pd


def compute_rolling_norm(
    signal: Union[pd.DataFrame, pd.Series],
    tau: float,
    min_periods: int = 0,
    min_depth: int = 1,
    max_depth: int = 1,
    p_moment: float = 2,
) -> Union[pd.DataFrame, pd.Series]:
    """
    Implement smooth moving average norm (when p_moment >= 1).

    Moving average corresponds to compute_ema when min_depth = max_depth = 1.
    """
    signal_p = compute_rolling_moment(
        signal, tau, min_periods, min_depth, max_depth, p_moment
    )
    return signal_p ** (1.0 / p_moment)
ef6eabe3c63c22896ea17308fc54e11e734487f3
3,641,828
def msort(liste, indice):
    """
    Sort a list of rows by the values in column `indice` (0-based).
    """
    tmp = [[tbl[indice]] + [tbl] for tbl in liste]
    tmp.sort()
    liste = [cl[1] for cl in tmp]
    del tmp
    return liste
7f4caff9a74f4d118877e335513e68ecd54986d8
3,641,829
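A small check of `msort` above (toy rows, sorted by the first column):

    rows = [[3, 'c'], [1, 'a'], [2, 'b']]
    assert msort(rows, 0) == [[1, 'a'], [2, 'b'], [3, 'c']]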
def _Net_forward(self, blobs=None, start=None, end=None, **kwargs):
    """
    Forward pass: prepare inputs and run the net forward.

    Take
    blobs: list of blobs to return in addition to output blobs.
    kwargs: Keys are input blob names and values are blob ndarrays.
            For formatting inputs for Caffe, see Net.preprocess().
            If None, input is taken from data layers.
    start: optional name of layer at which to begin the forward pass
    end: optional name of layer at which to finish the forward pass (inclusive)

    Give
    outs: {blob name: blob ndarray} dict.
    """
    if blobs is None:
        blobs = []

    if start is not None:
        start_ind = list(self._layer_names).index(start)
    else:
        start_ind = 0

    if end is not None:
        end_ind = list(self._layer_names).index(end)
        outputs = set([end] + blobs)
    else:
        end_ind = len(self.layers) - 1
        outputs = set(self.outputs + blobs)

    if kwargs:
        if set(kwargs.keys()) != set(self.inputs):
            raise Exception('Input blob arguments do not match net inputs.')
        # Set input according to defined shapes and make arrays single and
        # C-contiguous as Caffe expects.
        for in_, blob in kwargs.items():
            if blob.ndim != 4:
                raise Exception('{} blob is not 4-d'.format(in_))
            if blob.shape[0] != self.blobs[in_].num:
                raise Exception('Input is not batch sized')
            self.blobs[in_].data[...] = blob

    self._forward(start_ind, end_ind)

    # Unpack blobs to extract
    return {out: self.blobs[out].data for out in outputs}
790baa0fc8529e3cad45bd8236060bad591ab4a4
3,641,830
import numpy as np


def mre(actual: np.ndarray, predicted: np.ndarray, benchmark: np.ndarray = None):
    """ Mean Relative Error """
    # return np.mean(np.abs(_error(actual, predicted)) / (actual + EPSILON))
    return np.mean(_relative_error(actual, predicted, benchmark))
c293bf49968feaa01823ea29a7e39a150338c06d
3,641,832
def _get_fusion_kernel(patch_size, fusion='gaussian', margin=0):
    """
    Return a 3D kernel with the same size as a patch that will be used to
    assign weights to each voxel of a patch during the patch-based
    predictions aggregation.

    :param patch_size: int or tuple; size of the patch
    :param fusion: string; type of fusion.
    :param margin: int; if margin > 0, mask the prediction at a distance less
        than margin to the border. Default value is 0. If you use the
        'gaussian' mode you should not need margin > 0.
    """
    if isinstance(patch_size, int):
        shape = (patch_size, patch_size, patch_size)
    else:
        shape = patch_size

    assert np.all(np.array(shape) > 2 * margin), \
        "Margin %d is too large for patch size %s" % (margin, str(shape))

    # Create the kernel
    if fusion == 'uniform':
        # Uniform kernel (default)
        kernel = np.ones(shape)
    elif fusion == 'gaussian':
        # Gaussian kernel
        sigma = 1.
        dist_border_center = 3 * sigma
        x, y, z = np.meshgrid(
            np.linspace(-dist_border_center, dist_border_center, shape[1]),
            np.linspace(-dist_border_center, dist_border_center, shape[0]),
            np.linspace(-dist_border_center, dist_border_center, shape[2]),
        )
        d = x * x + y * y + z * z
        kernel = np.exp(-d / (2. * sigma ** 2))
    else:
        error_msg = "Only the fusion strategies %s are supported. Received %s" % \
            (str(SUPPORTED_FUSION), fusion)
        raise ValueError(error_msg)  # ArgumentError is not a builtin; ValueError fits

    # (optional) Set the contribution of voxels at distance less than margin
    # to the border of the patch to 0
    if margin > 0:
        kernel[:margin, :, :] = 0.
        kernel[-margin:, :, :] = 0.
        kernel[:, :margin, :] = 0.
        kernel[:, -margin:, :] = 0.
        kernel[:, :, :margin] = 0.
        kernel[:, :, -margin:] = 0.

    return kernel
d57ddb880d31bc726d6a01e37867b91fa8acbbe5
3,641,833
def transforma(
    vetor: list,
    matriz_linha: bool = True,
    T: callable = lambda x: transposta(x)
) -> list:
    """Turn a vector into a row matrix or, if matriz_linha is False, a column matrix."""
    matriz = [vetor]
    if not matriz_linha:
        matriz = T(matriz)
    return matriz
7aefed2cda94767b9713c7ee71b9fc79f8065b04
3,641,834
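A usage sketch for `transforma` above; `transposta` is not shown in the snippet, so a plain transpose stand-in is assumed here:

    def transposta(m):  # hypothetical stand-in for the real transpose helper
        return [list(col) for col in zip(*m)]

    assert transforma([1, 2, 3]) == [[1, 2, 3]]                          # row matrix
    assert transforma([1, 2, 3], matriz_linha=False) == [[1], [2], [3]]  # column matrix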
import json
# Assumed import: Request/urlopen come from urllib.request in Python 3.
from urllib.request import Request, urlopen


def post(event, context):
    """
    :param event: AWS Log Event.
    :param context: Object to determine runtime info of the Lambda function.
        See http://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
        for more info on context.

    Process an AWS Log event and post it to a Slack Channel.
    """
    WEBHOOK = read_webhook()
    event_processed = process_subscription_notification(event)
    for log_event in event_processed['logEvents']:
        message = log_event['message']
        for entry in NO_ALERT:
            if entry in message:
                return "Success"
        headers = {"content-type": "application/json"}
        datastr = json.dumps({
            'attachments': [
                {
                    'color': determine_message_color(event_processed, message),
                    'pretext': get_account_alias(),
                    'author_name': determine_region(),
                    'text': message
                }
            ]})
        # urlopen expects bytes for the request body in Python 3.
        request = Request(WEBHOOK, headers=headers, data=datastr.encode('utf-8'))
        uopen = urlopen(request)
        rawresponse = uopen.read()
        uopen.close()
        assert uopen.code == 200
    return "Success"
a379c5801bb03d096660070194684a119b0243dd
3,641,835
def process_docstrings(docstrings):
    """Process the docstrings into a proper structure"""
    docs = {}

    # First we'll find all of the modules and prepare the docs structure
    for chunk in docstrings:
        if chunk[2].startswith("==="):
            # This is a module definition
            modulename = chunk[CHUNK_SIGN].strip("= ")
            dbg("process_docstrings: Module: %s at %s:%s" % (
                modulename, chunk[CHUNK_FILE], chunk[CHUNK_LINE]))
            docs[modulename] = {}
            docs[modulename]["header"] = chunk
            docs[modulename]["items"] = {}

    # Now we'll get all of the item definitions
    for chunk in docstrings:
        if not chunk[2].startswith("==="):
            # This is an item definition
            itemname = find_itemname_from_signature(chunk[CHUNK_SIGN])
            dbg("process_docstrings: Found item: %s at %s:%s" % (
                itemname, chunk[CHUNK_FILE], chunk[CHUNK_LINE]))
            modulename = find_module_for_item(list(docs.keys()), itemname)
            dbg("process_docstrings: Assigning item to module: %s" % modulename)
            if modulename is not None:
                docs[modulename]["items"][itemname] = chunk

    return docs
7f3179b06ba702a039ccf4e7f29a03ae20696cfe
3,641,836
def create_deck(request: HttpRequest):
    """Form for a deck code, plus display of the decoded deck."""
    deck, deckstring_form, deck_save_form = None, None, None
    title = _('Hearthstone | Decoding the deck code')

    if request.method == 'POST':
        if 'deckstring' in request.POST:
            # deck code was submitted from the DeckstringForm
            deckstring_form = DeckstringForm(request.POST)
            if deckstring_form.is_valid():
                try:
                    deckstring = deckstring_form.cleaned_data['deckstring']
                    deckstring = get_clean_deckstring(deckstring)
                    with transaction.atomic():
                        deck = Deck.create_from_deckstring(deckstring)
                    deck_name_init = f'{deck.deck_class}-{deck.pk}'
                    deck_save_form = DeckSaveForm(
                        initial={'string_to_save': deckstring,
                                 'deck_name': deck_name_init})
                    title = deck
                except DecodeError as de:
                    msg = _('Error: %(error)s') % {'error': de}
                    deckstring_form.add_error(None, msg)
                except UnsupportedCards as u:
                    msg = _('%(error)s. The database will be updated shortly.') % {'error': u}
                    deckstring_form.add_error(None, msg)
        if 'deck_name' in request.POST:
            # deck name was submitted from the DeckSaveForm
            deck = Deck.create_from_deckstring(request.POST['string_to_save'], named=True)
            deck.author = request.user.author
            deck.name = request.POST['deck_name']
            deck.save()
            return redirect(deck)
    else:
        deckstring_form = DeckstringForm()

    context = {'title': title,
               'deckstring_form': deckstring_form,
               'deck_save_form': deck_save_form,
               'deck': deck,
               'similar': find_similar_decks(deck)}
    return render(request, template_name='decks/deck_detail.html', context=context)
2268867d85c4a2e440e1e8a4dde9e931d924c069
3,641,837
def read_completed_flag(uarm, flag_type):
    """
    Read Complete Flag from EEPROM.

    :param uarm: uArm instance
    :param flag_type: protocol.CALIBRATION_FLAG, protocol.CALIBRATION_LINEAR_FLAG,
        protocol.CALIBRATION_SERVO_FLAG
    :return: True if the flag is set, False otherwise
    """
    # The three branches of the original all did the same comparison on their
    # own flag constant, so they collapse into a single check.
    if flag_type in (CALIBRATION_FLAG, CALIBRATION_LINEAR_FLAG, CALIBRATION_SERVO_FLAG):
        return uarm.get_rom_data(flag_type) == CONFIRM_FLAG
90bb0410b078a8b320697121854fbb163c962a12
3,641,838
def zpves(output_string):
    """ Reads the zero-point energies for each of the hindered rotors from a
        MESS output file string.

        :param output_string: string of lines of MESS output file
        :type output_string: str
        :return tors_zpes: zero-point energy for each of the rotors
        :rtype: list(float)
    """

    # Patterns for the ZPVE of a rotor
    num_patterns = (app.EXPONENTIAL_FLOAT, app.FLOAT)
    pattern1 = (app.escape('minimum energy[kcal/mol]') +
                app.one_or_more(app.SPACE) +
                '=' +
                app.one_or_more(app.SPACE) +
                app.capturing(app.one_of_these(num_patterns)))
    pattern2 = (app.escape('ground energy [kcal/mol]') +
                app.one_or_more(app.SPACE) +
                '=' +
                app.one_or_more(app.SPACE) +
                app.capturing(app.one_of_these(num_patterns)))

    # Obtain each ZPVE from the output string
    tmp1 = [-float(val) for val in apf.all_captures(pattern1, output_string)]
    tmp2 = [float(val) for val in apf.all_captures(pattern2, output_string)]
    tors_zpes = [sum(tmp) for tmp in zip(tmp1, tmp2)]

    return tors_zpes
16d43d7bc78573074b1d0f414957a31ed162bea6
3,641,839
from typing import Any
from typing import Callable
from typing import Optional


def _get_cast_type(field_type: type, value: Any) -> Optional[Callable]:
    """Get a casting callable for a field type/value."""
    if type(value) is dict:
        return _get_cast_type_for_dict(field_type)
    if type(value) is str:
        return _get_cast_type_for_str(field_type)
    return None
05e57bb93c154d77433db596c3659a190d7acd7a
3,641,840
import re

import pandas as pd


def read_data(filepath, strict_lang='en'):
    """Read data in csv format in order to preprocess.

    Args:
        filepath (str): a filepath to a csv file with twitter data.
        strict_lang (str, optional): whether to select only tweets with
            explicit language metadata. Defaults to 'en'.

    Returns:
        a pandas DataFrame: the cleaned, de-duplicated, anonymized tweets.
    """
    data = pd.read_csv(filepath,
                       names=["id", "user", "language", "text", "date", "favs"])

    # Apply language selection if specified.
    if strict_lang is not None:
        data = data.loc[data['language'] == strict_lang]

    # Drop duplicate tweets.
    data.drop_duplicates(subset=['text'], inplace=True)

    # Anonymize mentions in tweets.
    mention = re.compile(r"@\w+")
    data.text = data.text.str.replace(mention, '@USER')

    # Anonymize urls in tweets (URL is assumed to be a module-level pattern).
    data.text = data.text.str.replace(URL, 'URL')

    return data
93cfd820b42d3e2030d869f5317ff71491dc98d7
3,641,841
async def absent(hub, ctx, name, resource_group, connection_auth=None, **kwargs):
    """
    .. versionadded:: 4.0.0

    Ensure the specified disk does not exist in a resource group.

    :param name: The name of the disk.

    :param resource_group: The name of the resource group containing the disk.

    :param connection_auth: A dict with subscription and authentication parameters
        to be used in connecting to the Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure disk absent:
            azurerm.compute.disk.absent:
                - name: test_disk
                - resource_group: test_group

    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    if not isinstance(connection_auth, dict):
        if ctx["acct"]:
            connection_auth = ctx["acct"]
        else:
            ret[
                "comment"
            ] = "Connection information must be specified via acct or connection_auth dictionary!"
            return ret

    disk = await hub.exec.azurerm.compute.disk.get(
        ctx, name, resource_group, azurerm_log_level="info", **connection_auth
    )

    if "error" in disk:
        ret["result"] = True
        ret["comment"] = "Disk {0} was not found.".format(name)
        return ret

    if ctx["test"]:
        ret["comment"] = "Disk {0} would be deleted.".format(name)
        ret["result"] = None
        ret["changes"] = {
            "old": disk,
            "new": {},
        }
        return ret

    deleted = await hub.exec.azurerm.compute.disk.delete(
        ctx, name, resource_group, **connection_auth
    )

    if deleted:
        ret["result"] = True
        ret["comment"] = "Disk {0} has been deleted.".format(name)
        ret["changes"] = {"old": disk, "new": {}}
        return ret

    ret["comment"] = "Failed to delete disk {0}!".format(name)
    return ret
e71c0e536187556a4aec6746f45442a568a7d4aa
3,641,842
def _update_dicts(name_scope,
                  model_layer,
                  input_to_in_layer,
                  model_name_to_output,
                  prev_node_name):
  """Updates input_to_in_layer, model_name_to_output, and prev_node_name
  based on the model_layer.

  Args:
    name_scope: a string representing a scope name, similar to that of
      tf.name_scope.
    model_layer: a dict representing a Keras model configuration.
    input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.
    model_name_to_output: a dict mapping Keras Model name to output layer of
      the model.
    prev_node_name: a string representing a previous, in sequential model
      layout, node name.

  Returns:
    A tuple of (input_to_in_layer, model_name_to_output, prev_node_name).
    input_to_in_layer: a dict mapping Keras.layers.Input to inbound layer.
    model_name_to_output: a dict mapping Keras Model name to output layer of
      the model.
    prev_node_name: a string representing a previous, in sequential model
      layout, node name.
  """
  layer_config = model_layer.get('config')
  if not layer_config.get('layers'):
    raise ValueError('layer is not a model.')

  node_name = _scoped_name(name_scope, layer_config.get('name'))
  input_layers = layer_config.get('input_layers')
  output_layers = layer_config.get('output_layers')
  inbound_nodes = model_layer.get('inbound_nodes')

  is_functional_model = bool(input_layers and output_layers)
  # In case of [1] and the parent model is functional, current layer
  # will have the 'inbound_nodes' property.
  is_parent_functional_model = bool(inbound_nodes)

  if is_parent_functional_model and is_functional_model:
    for (input_layer, inbound_node) in zip(input_layers, inbound_nodes):
      input_layer_name = _scoped_name(node_name, input_layer)
      inbound_node_name = _scoped_name(name_scope, inbound_node[0])
      input_to_in_layer[input_layer_name] = inbound_node_name
  elif is_parent_functional_model and not is_functional_model:
    # Sequential model can take only one input. Make sure inbound to the
    # model is linked to the first layer in the Sequential model.
    prev_node_name = _scoped_name(name_scope, inbound_nodes[0][0])
  elif not is_parent_functional_model and prev_node_name and is_functional_model:
    assert len(input_layers) == 1, (
        'Cannot have multi-input Functional model when parent model '
        'is not Functional. Number of input layers: %d' % len(input_layers))
    input_layer = input_layers[0]
    input_layer_name = _scoped_name(node_name, input_layer)
    input_to_in_layer[input_layer_name] = prev_node_name

  if is_functional_model and output_layers:
    layers = _norm_to_list_of_layers(output_layers)
    layer_names = [_scoped_name(node_name, layer[0]) for layer in layers]
    model_name_to_output[node_name] = layer_names
  else:
    last_layer = layer_config.get('layers')[-1]
    last_layer_name = last_layer.get('config').get('name')
    output_node = _scoped_name(node_name, last_layer_name)
    model_name_to_output[node_name] = [output_node]

  return (input_to_in_layer, model_name_to_output, prev_node_name)
dbaee780b0f81caa72cb1ba84a2ec976fc50d180
3,641,845
def geomance_results(session_key):
    """Looks in the Redis queue to see if the worker has finished yet."""
    rv = DelayedResult(session_key)
    if rv.return_value is None:
        return jsonify(ready=False)
    redis.delete(session_key)
    result = rv.return_value
    return jsonify(ready=True, result=result['result'], status=result['status'])
2ab92d54a61c50bc10514a31aac6d80ca388db83
3,641,846
def _return_dataframe_type(dataframe, dataframe_type):
    """
    Helper method for returning the dataframe in spark/pandas/numpy/python,
    depending on user preferences.

    Args:
        :dataframe: the spark dataframe to convert
        :dataframe_type: the type to convert to (spark, pandas, numpy, python)

    Returns:
        The dataframe converted to either spark, pandas, numpy or python.
    """
    if dataframe_type == constants.FEATURE_STORE.DATAFRAME_TYPE_SPARK:
        return dataframe
    if dataframe_type == constants.FEATURE_STORE.DATAFRAME_TYPE_PANDAS:
        return dataframe.toPandas()
    if dataframe_type == constants.FEATURE_STORE.DATAFRAME_TYPE_NUMPY:
        return np.array(dataframe.collect())
    if dataframe_type == constants.FEATURE_STORE.DATAFRAME_TYPE_PYTHON:
        return dataframe.collect()
665353dc91e389da399eb90926a6114053514e92
3,641,847
from typing import Tuple


def split_dataset(
    raw: pd.DataFrame,
    train_ratio: float,
    val_ratio: float,
    lags: int,
    verbose: bool = False
) -> Tuple[np.ndarray, ...]:
    """
    Generate and split the prepared dataset for RNN training into training,
    testing and validation sets.

    Args:
        raw: A dataframe containing the prepared dataset returned from the
            prepare_dataset method.
        train_ratio: A float representing the ratio of the whole dataset to be
            used as the training set.
        val_ratio: A float denoting the ratio of the whole dataset to be used
            as the validation set.
            ** Note: the sum of train_ratio and val_ratio should be between 0 and 1.
        lags: An integer indicating the number of lagged observations included
            in each supervised-learning sample.

    Returns:
        A 6-tuple. Univariate case format:
            X: (num_samples, lags, 1)
            y: (num_samples, 1)
        Tuple format: (X_train, X_val, X_test, y_train, y_val, y_test)
    """
    # ======== Args Check ========
    assert isinstance(raw, pd.DataFrame), "Raw dataset should be a pandas dataframe."
    assert type(train_ratio) in [float, np.float_] \
        and 0 < train_ratio <= 1, \
        f"train_ratio should be a float within range (0,1], received: {train_ratio}"
    assert type(val_ratio) in [float, np.float_] \
        and 0 < val_ratio <= 1, \
        f"val_ratio should be a float within range (0,1], received: {val_ratio}"
    assert type(lags) in [int, np.int_] \
        and lags >= 1, \
        f"lags should be an integer at least 1, received: {lags}"

    # ======== Core ========
    test_ratio = 1 - train_ratio - val_ratio
    df = normalize(raw, train_ratio)
    X_raw, y_raw = gen_supervised_sequence(
        df, lags, df.columns[0], sequential_label=False)
    (X_train, X_test, y_train, y_test) = train_test_split(
        X_raw, y_raw, test_size=test_ratio, shuffle=False)
    (X_train, X_val, y_train, y_val) = train_test_split(
        X_train, y_train,
        test_size=val_ratio / (val_ratio + train_ratio),
        shuffle=False
    )

    def trans(x):
        return x.reshape(-1, 1)

    y_train = trans(y_train)
    y_test = trans(y_test)
    y_val = trans(y_val)

    if verbose:
        print(
            f"Training and testing set generated,"
            f"\nX_train shape: {X_train.shape}"
            f"\ny_train shape: {y_train.shape}"
            f"\nX_test shape: {X_test.shape}"
            f"\ny_test shape: {y_test.shape}"
            f"\nX_validation shape: {X_val.shape}"
            f"\ny_validation shape: {y_val.shape}")

    return (X_train, X_val, X_test, y_train, y_val, y_test)
d662f45ab3468814e2161f69ff8e253c864bf188
3,641,848
def create_ranking_model() -> tf.keras.Model:
  """Create ranking model using Functional API."""
  context_keras_inputs, example_keras_inputs, mask = create_keras_inputs()
  context_features, example_features = preprocess_keras_inputs(
      context_keras_inputs, example_keras_inputs, mask)

  (flattened_context_features,
   flattened_example_features) = tfr.keras.layers.FlattenList()(
       inputs=(context_features, example_features, mask))

  # Concatenate flattened context and example features along `list_size` dim.
  context_input = [
      tf.keras.layers.Flatten()(flattened_context_features[name])
      for name in sorted(flattened_context_features)
  ]
  example_input = [
      tf.keras.layers.Flatten()(flattened_example_features[name])
      for name in sorted(flattened_example_features)
  ]
  input_layer = tf.concat(context_input + example_input, 1)

  # User can create a custom scoring logic as a sequence of layers.
  dnn = tf.keras.Sequential()

  # Input batch normalization.
  if FLAGS.use_batch_norm:
    dnn.add(
        tf.keras.layers.BatchNormalization(
            momentum=FLAGS.batch_normalization_moment))

  for layer_size in FLAGS.hidden_layer_dims:
    dnn.add(tf.keras.layers.Dense(units=layer_size))
    if FLAGS.use_batch_norm:
      dnn.add(
          tf.keras.layers.BatchNormalization(
              momentum=FLAGS.batch_normalization_moment))
    dnn.add(tf.keras.layers.Activation(activation=tf.nn.relu))
    dnn.add(tf.keras.layers.Dropout(rate=FLAGS.dropout_rate))
  dnn.add(tf.keras.layers.Dense(units=1))

  logits = tfr.keras.layers.RestoreList()(inputs=(dnn(input_layer), mask))

  return tf.keras.Model(
      inputs=dict(
          list(context_keras_inputs.items()) +
          list(example_keras_inputs.items()) + [(_MASK, mask)]),
      outputs=logits,
      name="din_ranking_model")
94a2754b21ce8696262e1dc92e924ae1cdaf08df
3,641,849
import numpy as np
from scipy.sparse import coo_matrix


def to_count_matrix(pair_counts, vocab_size):
    """ transforms the counts into a sparse matrix """
    cols = []
    rows = []
    data = []
    for k, v in pair_counts.items():
        rows.append(k[0])
        cols.append(k[1])
        data.append(v)

    # setting to float is important, +1 for UNK
    # COO matrix is the fastest for constructing the matrix since we have all
    # the data already
    count_matrix = coo_matrix(
        (data, (rows, cols)),
        shape=(vocab_size + 1, vocab_size + 1),
        dtype=np.float32
    )
    # CSR matrices support more arithmetic operations and are more efficient
    return count_matrix.tocsr()
639957d8b280a0a84d1a64810e82b5d86deae14d
3,641,850
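A quick usage sketch for `to_count_matrix` above (toy co-occurrence counts):

    pair_counts = {(0, 1): 3, (1, 0): 3, (1, 2): 1}
    m = to_count_matrix(pair_counts, vocab_size=2)  # 3x3 CSR matrix (+1 row/col for UNK)
    assert m.shape == (3, 3) and m[0, 1] == 3.0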
from time import sleep, time
from typing import List, Optional

import pandas as pd


def create_dataset(genres_types: List[str],
                   sp: spotipy.Spotify,
                   genius: Genius,
                   limit: Optional[int] = 50,
                   how_many_in_genre: Optional[int] = 2_000,
                   sleep_time: Optional[int] = 30,
                   path_to_save: Optional[str] = None,
                   verbose: Optional[bool] = True,
                   save_progess: Optional[bool] = True) -> pd.DataFrame:
    """
    Scrape a fixed number of songs per genre from the Spotify API and their
    lyrics from the Genius API.

    Parameters
    ----------
    genres_types (list): list of selected genres to scrape.
    sp (spotipy.Spotify): connector to the Spotify API. It should be defined
        with credentials specified.
    genius (Genius): connector to the Genius API. It should be defined with
        credentials specified.
    limit (int): how many observations to get with a single request. The max
        is 50. Needs to be smaller than how_many_in_genre, and
        how_many_in_genre % limit == 0.
    how_many_in_genre (int): how many observations should be in a single genre
        class. The whole dataset then has
        len(genres_types) * how_many_in_genre observations.
    sleep_time (int): seconds of sleep after each spotipy request. Set this to
        avoid a too-many-requests error.
    path_to_save (str): path to save the resulting data frame.
    verbose (bool): if true, print the progress of the function.
    save_progess (bool): if true, save the current result after each spotipy
        request.

    Returns
    -------
    df (pd.DataFrame): scraped dataframe with author, title, popularity,
        genre and lyrics columns.
    """
    # iterator needed when verbose is true
    i_sample = 0
    max_sample = len(genres_types) * how_many_in_genre

    # Define empty lists to hold the scraped data
    artist_names = []
    track_names = []
    popularities = []
    genres = []
    lyrics = []

    # If verbose then print progress
    if verbose:
        print(f"Number of scraped samples: {i_sample}/{max_sample}")

    for genre in genres_types:
        # offset is the start index of the API results; the maximum limit of a
        # request is 50 observations, so we change the offset in each iteration
        for offset in range(0, how_many_in_genre, limit):
            t1 = time()
            # Scrape data from Spotify
            track_results = sp.search(q=f'genre:"{genre}"', type='track',
                                      limit=limit, offset=offset)
            # Iterate over the results
            for i, t in enumerate(track_results['tracks']['items']):
                # Extract artist name and title of the song
                artist_name = t['artists'][0]['name']
                track_name = t['name']
                # Save results
                artist_names.append(artist_name)
                track_names.append(track_name)
                popularities.append(t['popularity'])
                genres.append(genre)
                # We use try/except because Genius has a limited timeout for a
                # single request and raises an error on timeout. If that
                # happens, the affected lyrics will need to be re-scraped.
                try:
                    # Search for the lyrics of the scraped song
                    text = genius.search_song(track_name, artist_name).lyrics
                except:
                    text = f"Error in {artist_name} - {track_name}"
                lyrics.append(text)

            # After each iteration the function sleeps to avoid a
            # too-many-requests error
            sleep(sleep_time)

            # Monitoring progress
            i_sample += limit
            if verbose:
                t2 = time()
                print(f"Number of scraped samples: {i_sample}/{max_sample}. "
                      f"Time: {(t2 - t1) / 60:.2f} min")

            # We can save the result after each request
            if save_progess:
                df = pd.DataFrame({'artist_name': artist_names,
                                   'track_name': track_names,
                                   'popularity': popularities,
                                   'genre': genres,
                                   'lyrics': lyrics})
                if path_to_save:
                    df.to_csv(path_to_save)

    # Creating the data frame
    df = pd.DataFrame({'artist_name': artist_names,
                       'track_name': track_names,
                       'popularity': popularities,
                       'genre': genres,
                       'lyrics': lyrics})
    # Shuffle rows, reset the index and drop duplicates
    df = df.sample(frac=1, random_state=7).reset_index(drop=True).drop_duplicates()
    # Save to the specified path
    if path_to_save:
        df.to_csv(path_to_save)

    return df
49f887612e9ce69e312bf9d43da3db7764eb3b7d
3,641,851
def get_enum_type_definition(ua_graph: UAGraph, data_type_id: int):
    """Given a UAGraph object and an internal id of an enum type, produce the
    definition of the enumeration. The form of the definition may vary based
    on the enumeration.

    Args:
        ua_graph (UAGraph): UAGraph where the enum definition is found
        data_type_id (int): The internal id for the enum data type

    Return:
        The content of the 'Value' column in the node definition
    """
    enum_type_row = ua_graph.nodes[ua_graph.nodes["id"] == data_type_id]
    enum_type_id = enum_type_row["id"].values[0]

    # The enum type points to the enum definition via the HasProperty
    # ReferenceType. The enum type does not contain the enum definition
    # itself; it points to an EnumStrings or EnumValues node.
    enum_neighbors = ua_graph._get_neighboring_nodes_by_id(enum_type_id, "outgoing")

    # Ensure we are getting the HasProperty reference
    enum_neighbors_has_property = enum_neighbors[
        enum_neighbors["ReferenceType"] == "HasProperty"
    ]

    # Get the node which actually defines the enum
    enum_definition_id = enum_neighbors_has_property["Trg"].values[0]
    enum_definition_node = ua_graph.nodes[ua_graph.nodes["id"] == enum_definition_id]
    enum_ua_list_of = enum_definition_node["Value"].values[0]

    return enum_ua_list_of
7464b99f4140ed8908dcc6b966fad917856edd36
3,641,852
def create_classification_of_diseases():
    """Create a single ClassificationOfDiseases object with fake data."""
    fake = Factory.create()

    return ClassificationOfDiseases.objects.create(
        code=randint(1, 1000),
        description=fake.text(),
        abbreviated_description=fake.text(max_nb_chars=100),
        parent=None
    )
7e1cea3f80d9e5559e33ce10c702d3429f6dd31b
3,641,853
def cmpversion(a, b):
    """Compare versions the way chrome does."""

    def split_version(v):
        """Get major/minor of version."""
        if '.' in v:
            return v.split('.', 1)
        if '_' in v:
            return v.split('_', 1)
        return (v, '0')

    a_maj, a_min = split_version(a)
    b_maj, b_min = split_version(b)
    if a_maj == b_maj:
        return cmpversion(a_min, b_min)
    return int(a_maj) > int(b_maj)
226191f2a72d4cb65198ddcb779b130b7a524034
3,641,856
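A quick check of `cmpversion` above; it returns True when the first version is greater (toy inputs):

    assert cmpversion("10.2", "9.9") is True     # 10 > 9 on the major component
    assert cmpversion("10.2", "10.10") is False  # equal majors recurse on the minors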
import numpy as np


def get_dict_json(attr_name: str, possible_dict: dict) -> dict:
    """
    Returns a {key: item}. If the item is a np.ndarray then it is converted
    to a list.

    Parameters
    ----------
    attr_name
        Key to use in the returned dictionary.
    possible_dict
        Value to wrap; converted via dict_ndarray_to_list when its first
        element is a np.ndarray.

    Returns
    -------
    A single-entry dict mapping attr_name to the (possibly converted) value.
    """
    if type(possible_dict[0]) == np.ndarray:
        output_tail = dict_ndarray_to_list(possible_dict)
        return {attr_name: output_tail}
    else:
        return {attr_name: possible_dict}
0b883cd4153593036df4ced924ffc09f6f000177
3,641,857
def eulers_totient_phi(num):
    """
    Euler's totient (a.k.a. phi) function, φ(n).

    Count the number of positive integers less than or equal to "n" that are
    relatively prime (coprime) to "n". Coprime: the only positive integer that
    evenly divides both numbers is 1, i.e. their greatest common divisor is 1.
    https://secure.wikimedia.org/wikipedia/en/wiki/Totient_function
    """
    dpd = set(prime_divisors(num))  # distinct prime divisors
    phi = num
    for p in dpd:
        phi *= (1 - (1.0 / float(p)))
    return phi
ccf90dc5f0b3b73eeffff8cb78430f049128eb01
3,641,858
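A worked check of the product formula above: n = 12 has distinct prime divisors {2, 3}, so φ(12) = 12·(1 − 1/2)·(1 − 1/3) = 4. `prime_divisors` is not shown in the snippet, so a minimal trial-division stand-in is assumed here:

    def prime_divisors(n):  # hypothetical stand-in by trial division
        p, divs = 2, []
        while p * p <= n:
            while n % p == 0:
                divs.append(p)
                n //= p
            p += 1
        if n > 1:
            divs.append(n)
        return divs

    # Compare with a tolerance: the result is computed in floating point.
    assert abs(eulers_totient_phi(12) - 4) < 1e-9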
from datetime import datetime


def create_nav_btn(soup, date, text):
    """
    Helper function for month_calendar; generates a navigation button for the
    calendar.

    :param soup: BeautifulSoup parser of document
    :param date: Date to create nav button
    :param text: Text for button
    """
    nav_th = soup.new_tag('th', attrs=[('colspan', '2')])
    nav_th['class'] = 'month'
    nav_a = soup.new_tag('a', href='/apps/orders/%s/%s' % (date.year, date.month))
    nav_a.string = text
    if date > datetime.today():
        nav_a['class'] = "btn btn-mini btn-info disabled"
        nav_a['href'] = '#'
    else:
        nav_a['class'] = "btn btn-mini btn-info"
    nav_th.insert(0, nav_a)
    return nav_th
6f49e5173980a9da01e4d92e2f5adfeb73a4a4d0
3,641,859
def pad_with_dots(msg, length=PAD_TEXT):
    """Pad text with dots up to given length.

    >>> pad_with_dots("Hello world", 20)
    'Hello world ........'
    >>> pad_with_dots("Exceeding length", 10)
    'Exceeding length'
    """
    msg_length = len(msg)
    if msg_length >= length:
        return msg
    msg = msg + " "
    for i in range(msg_length + 1, length):
        msg += "."
    return msg
bfae02036c7773fba47576432f0c1f4f32a797e1
3,641,861
import itertools

import pandas as pd


def get_top_10_features(target_params, results, importance_type="weight"):
    """Gets the top 10 features of each XGBoost regressor.

    Parameters
    ----------
    target_params: dictionary
        Should contain a dict with params for each target label.
    results : dictionary
        Should contain a dict with the regression results.
    importance_type: string
        The score type to retrieve by. Either weight, gain or cover.
        Defaults to weight.
    """
    return_dict = {}
    for target in target_params:
        xg_reg = results[target[0]][2]
        ordered_feature = {
            k: v for k, v in sorted(
                xg_reg.get_booster().get_score(importance_type=importance_type).items(),
                key=lambda item: item[1],
                reverse=True)
        }
        return_dict[target[0]] = dict(itertools.islice(ordered_feature.items(), 10))
    return return_dict, pd.DataFrame.from_dict(return_dict, orient='index')
8735513250411e64ab787427727b32569c352a10
3,641,862
def index(): """ Class check list view """ new_class_check_form = NewClassCheckForm() upload_csv_form = UploadCSVForm() classes = ClassCheck.query.all() return render_template('index.html', new_class_check_form=new_class_check_form, upload_csv_form=upload_csv_form, classes=classes)
b5475177637e7e887f8505c5db7c81b0b0abc49e
3,641,863
import tempfile import shutil def main(args): """Main function called when run from command line or as part of pipeline.""" usage = """ Usage: split_by_taxa.py --genomes-a=FILE file with genome GenBank Project ID and Organism name on each line for taxon A --genomes-b=FILE file with genome GenBank Project ID and Organism name on each line for taxon B --orthologs-zip=FILE archive of aligned & trimmed single copy orthologous (SICO) genes --taxon-a-zip=FILE destination file path for archive of SICO genes belonging to taxon A --taxon-b-zip=FILE destination file path for archive of SICO genes belonging to taxon B """ options = ['genomes-a', 'genomes-b', 'orthologs-zip', 'taxon-a-zip', 'taxon-b-zip'] genome_a_ids_file, genome_b_ids_file, orthologs_zip, taxon_a_zip, taxon_b_zip = parse_options(usage, options, args) # Parse file containing RefSeq project IDs to extract RefSeq project IDs with open(genome_a_ids_file) as read_handle: lines = [line.split('\t') for line in read_handle] genome_ids_a = [line[0] for line in lines] common_prefix_a = _common_prefix([line[1] for line in lines], 'taxon_a') with open(genome_b_ids_file) as read_handle: lines = [line.split('\t') for line in read_handle] genome_ids_b = [line[0] for line in lines] common_prefix_b = _common_prefix([line[1] for line in lines], 'taxon_b') # Create run_dir to hold files related to this run run_dir = tempfile.mkdtemp(prefix='split_by_taxa_') # Extract files from zip archive ortholog_files = extract_archive_of_files(orthologs_zip, create_directory('alignments', inside_dir=run_dir)) # Actually split alignments per taxon taxon_a_files, taxon_b_files = split_alignment_by_taxa(run_dir, ortholog_files, (genome_ids_a, common_prefix_a), (genome_ids_b, common_prefix_b)) # Write the produced files to command line argument filenames create_archive_of_files(taxon_a_zip, taxon_a_files) create_archive_of_files(taxon_b_zip, taxon_b_files) # Remove unused files to free disk space shutil.rmtree(run_dir) # Exit after a comforting log message log.info("Produced: \n%s\n%s", taxon_a_zip, taxon_b_zip) return taxon_a_zip, taxon_b_zip
29f6251f2ebbc5ee7e05bd4a0859b61da74a6266
3,641,865
import numpy as np


def nrmse(y_true, y_pred, MEAN_OF_DATA):
    """
    Calculates the normalized root mean square error between y_true and
    y_pred, where MEAN_OF_DATA is the mean of the true data (it normalizes
    the squared errors by the total variance of y_true).
    """
    y_true = y_true.squeeze()
    y_pred = y_pred.squeeze()
    std = np.sum(np.square(y_true - MEAN_OF_DATA))
    errors = np.sum(np.square(y_true - y_pred))
    return np.sqrt(errors / (std + 1e-8))
641566ac6ed60543463b107f53d964a4864e4901
3,641,867
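A worked numeric check for nrmse with illustrative values:

import numpy as np

# With y_true = [1, 2, 3], y_pred = [1, 2, 4] and mean 2.0:
# errors = (3-4)^2 = 1, std = (1-2)^2 + (2-2)^2 + (3-2)^2 = 2,
# so nrmse = sqrt(1 / 2) ≈ 0.7071.
val = nrmse(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 4.0]), 2.0)
assert abs(val - 0.7071) < 1e-3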
def field_references( model_tuple, field, reference_model_tuple, reference_field_name=None, reference_field=None, ): """ Return either False or a FieldReference if `field` references provided context. False positives can be returned if `reference_field_name` is provided without `reference_field` because of the introspection limitation it incurs. This should not be an issue when this function is used to determine whether or not an optimization can take place. """ remote_field = field.remote_field if not remote_field: return False references_to = None references_through = None if resolve_relation(remote_field.model, *model_tuple) == reference_model_tuple: to_fields = getattr(field, 'to_fields', None) if ( reference_field_name is None or # Unspecified to_field(s). to_fields is None or # Reference to primary key. (None in to_fields and (reference_field is None or reference_field.primary_key)) or # Reference to field. reference_field_name in to_fields ): references_to = (remote_field, to_fields) through = getattr(remote_field, 'through', None) if through and resolve_relation(through, *model_tuple) == reference_model_tuple: through_fields = remote_field.through_fields if ( reference_field_name is None or # Unspecified through_fields. through_fields is None or # Reference to field. reference_field_name in through_fields ): references_through = (remote_field, through_fields) if not (references_to or references_through): return False return FieldReference(references_to, references_through)
89719c1aa2dc4065f10d952b0b7b42c30e49be72
3,641,868
def friendlist_embed(friendlist, guild):
    """
    :param friendlist: The friendlist of the source guild
    :param guild: Source guild
    :return: Embedded friendlist
    """
    embed = Embed()
    embed.set_author(
        name=guild.name,
        icon_url=guild.icon_url
    )
    friends_field = '\n'.join(
        [f'**{name}: {snowflake}**' for name, snowflake in friendlist.items()]
    )
    embed.add_field(
        name='Friends:',
        value=friends_field,
        inline=False
    )
    return embed
8f32fa75cdfd5d06ae83686e7df6c97587534e8e
3,641,870
import cv2


def find_contours(img):
    """ Find all contours in the image """
    _, thresh = cv2.threshold(img, 127, 255, 0)
    # Note: the 3-tuple return matches the OpenCV 3.x API;
    # OpenCV 4.x returns only (contours, hierarchy).
    _, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return contours
90064f0e7e3d5aece0e6dc9fd58a202a52e2bfaa
3,641,871
import numpy def _split_binline(binline): """Internal function to read the line of bin specs Returns bin width and array of bin lower edges """ bin_strings = tabmatch.split(binline.strip()) # Grab the first float from each binspec, we'll return lower edges # Note that commas must be stripped from numbers... try: bins = numpy.fromiter( (floatmatch.findall(x)[0].replace(",", "") for x in bin_strings), dtype=numpy.float64, ) except IndexError: bins = numpy.asarray(bin_strings) return bins
bfc89d5b32dfe19cd90cd90a8c04ae6fd9353419
3,641,872
import re def parse_name(content): """ Finds the name of the man page. """ # Create regular expression name_regex = re.compile(r"^([\w\.-]*)") # Get name of manual page just_name = name_regex.search(content) name_str = "" if just_name is not None: name_str = just_name.group(1) return name_str
c3a1f32beb96d39d4490681bf90d54115597ffe5
3,641,873
def albanian_input_normal(field, text): """ Prepare a string from one of the query fields for subsequent processing: replace common shortcuts with valid Albanian characters. """ if field not in ('wf', 'lex', 'lex2', 'trans_ru', 'trans_ru2'): return text text = text.replace('ё', 'ë') text = text.replace('e:', 'ë') return text
6bd4e7a1e764feada04ae5e95465fb4d7cbb29fb
3,641,874
import re def extract_share_id_from_url(public_base_url: str) -> str: """ Extracts the Airtable share id from the provided URL. :param public_base_url: The URL where the share id must be extracted from. :raises ValueError: If the provided URL doesn't match the publicly shared Airtable URL. :return: The extracted share id. """ result = re.search(r"https:\/\/airtable.com\/shr(.*)$", public_base_url) if not result: raise ValueError( f"Please provide a valid shared Airtable URL (e.g. " f"https://airtable.com/shrxxxxxxxxxxxxxx)" ) return f"shr{result.group(1)}"
5aad99b5bf022a2b957f10fcb09793188051340c
3,641,875
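A usage sketch for extract_share_id_from_url; the share id below is a made-up example value:

assert extract_share_id_from_url("https://airtable.com/shrAbCdEf123456") == "shrAbCdEf123456"
try:
    extract_share_id_from_url("https://example.com/not-airtable")
except ValueError as exc:
    print(exc)  # asks for a valid shared Airtable URL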
import pandas as pd


def load_data(file_path):
    """
    Read the place-name file, build deduplicated character sets for the
    foreign and Chinese names, and construct character/index mapping tables.
    Each place name is also preprocessed and wrapped with start and end
    tokens.

    :param file_path: path of the tab-separated place-name file
    :return: (place-name lists, token-to-index maps, index-to-token maps)
    """
    df = pd.read_table(file_path)
    df.columns = ['source', 'chinese']

    # Collect the deduplicated foreign and Chinese character sets
    characters_source = sorted(list(set(df.source.unique().sum())))
    characters_chinese = sorted(list(set(df.chinese.unique().sum())))
    # Special padding/start/end tokens prepended to both vocabularies
    special_characters = [PADDING_TOKEN, START_TOKEN, END_TOKEN]

    token_to_idx_source = dict([(char, i) for i, char in enumerate(special_characters + characters_source)])
    token_to_idx_zh = dict([(char, i) for i, char in enumerate(special_characters + characters_chinese)])
    idx_to_token_source = dict([(i, char) for i, char in enumerate(special_characters + characters_source)])
    idx_to_token_zh = dict([(i, char) for i, char in enumerate(special_characters + characters_chinese)])

    # Wrap each place name with the start and end tokens
    df['source'] = df['source'].apply(lambda x: START_TOKEN + preprocess_dm_source(x) + END_TOKEN)
    df['chinese'] = df['chinese'].apply(lambda x: START_TOKEN + preprocess_dm_chinese(x) + END_TOKEN)

    # Extract the place-name arrays (1-D lists)
    dm_text_source = df.source.values.tolist()
    dm_text_chinese = df.chinese.values.tolist()
    return (dm_text_source, dm_text_chinese), (token_to_idx_source, token_to_idx_zh), (idx_to_token_source, idx_to_token_zh)
490712dd2786d45497246f03a3b7d463faea7494
3,641,876
def verify_portchannel_member(dut, portchannel, members, flag='add', cli_type=""):
    """
    This API is used to verify the members of a portchannel
    Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com)
    :param dut:
    :param portchannel:
    :param members:
    :param flag: 'add' to verify the members are present, 'del' to verify they are absent
    :param cli_type:
    :return: True if the expected membership holds, False otherwise
    """
    st.log("Verifying port channel members ...", dut=dut)
    portchannel_members = get_portchannel_members(dut, portchannel, cli_type=cli_type)
    if flag == 'add':
        if not portchannel_members:
            st.error("ERROR in port channel members")
            return False
        for member in utils.make_list(members):
            if member not in portchannel_members:
                return False
        return True
    elif flag == 'del':
        for member in utils.make_list(members):
            if member in portchannel_members:
                return False
        return True
826d7df22bec748bd948329327920f302b67761f
3,641,877
def adjust_learning_rate(optimizer, epoch, args):
    """Set the learning rate for the given epoch according to args.lr_policy
    ('decay', 'poly', 'fix', 'fix_step', 'custom_step' or 'sgdr_step');
    updates the optimizer's param groups in place and returns the new rate,
    or None for an unknown policy."""
    if args.lr_policy == 'decay':
        lr = args.lr * (args.lr_decay ** epoch)
    elif args.lr_policy == 'poly':
        interval = len([x for x in args.lr_custom_step if epoch >= x])
        epoch = epoch if interval == 0 else epoch - args.lr_custom_step[interval-1]
        if interval == 0:
            step = args.lr_custom_step[0]
        elif interval >= len(args.lr_custom_step):
            step = args.epochs - args.lr_custom_step[interval-1]
        else:
            step = args.lr_custom_step[interval] - args.lr_custom_step[interval-1]
        lr = args.eta_min + (args.lr - args.eta_min) * (1 - epoch * 1.0 / step) ** args.lr_decay
    elif args.lr_policy == 'fix':
        lr = args.lr
    elif args.lr_policy == 'fix_step':
        lr = args.lr * (args.lr_decay ** (epoch // args.lr_fix_step))
    elif args.lr_policy in ['custom_step', 'sgdr_step']:
        interval = len([x for x in args.lr_custom_step if epoch >= x])
        lr = args.lr * (args.lr_decay ** interval)
    else:
        return None
    if optimizer is not None and args.lr_policy != 'sgdr_step':
        for param_group in optimizer.param_groups:
            if param_group.get('lr_constant', None) is not None:
                continue
            param_group['lr'] = lr
    return lr
c247c81a90476e737ff54c2a7ca45f5c42dccd38
3,641,878
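A sketch of the 'fix_step' policy above with illustrative hyperparameters; the Namespace fields mirror the attributes the function reads from args, and passing optimizer=None just returns the computed rate:

from argparse import Namespace

args = Namespace(lr_policy='fix_step', lr=0.1, lr_decay=0.1, lr_fix_step=30)
# lr = 0.1 * 0.1 ** (epoch // 30): epochs 0-29 use 0.1, epochs 30-59 use 0.01.
assert abs(adjust_learning_rate(None, 0, args) - 0.1) < 1e-12
assert abs(adjust_learning_rate(None, 30, args) - 0.01) < 1e-12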
def refresh_well_known_oidc(realm): """ Refresh Open ID Connect .well-known :param django_keycloak.models.Realm realm: :rtype django_keycloak.models.Realm """ server_url = realm.server.internal_url or realm.server.url # While fetching the well_known we should not use the prepared URL openid_api_client = KeycloakRealm( server_url=server_url, realm_name=realm.name ).open_id_connect(client_id='', client_secret='') realm.well_known_oidc = openid_api_client.well_known.contents realm.save(update_fields=['_well_known_oidc']) return realm
44861d479c6fbc737cb729c4b857b17bed86be7b
3,641,879
import requests def get_access_token(jwt_token: str) -> str: """ Gets an access token, used for fully-authenticated app actions """ installations = requests.get( "https://api.github.com/app/installations", headers=GH_JWT_HEADER(jwt_token) ) response = requests.post( installations.json()[0]["access_tokens_url"], headers=GH_JWT_HEADER(jwt_token) ) return response.json()["token"]
63fdcaa237be937924d3e0d4a941ce90bd7e1f1c
3,641,880
def donation_process_subscription_deleted(event):
    """
    Handle a Stripe 'customer.subscription.deleted' webhook event by marking
    the matching subscription as canceled or ended.

    :param event: the Stripe webhook event payload
    :return: None
    """
    donation_manager = DonationManager()
    data = event['data']
    subscription = data['object']
    subscription_ended_at = subscription['ended_at']
    subscription_canceled_at = subscription['canceled_at']
    customer_id = subscription['customer']
    subscription_id = subscription['id']

    # At this time we are only supporting the UI for canceling subscriptions
    if subscription_canceled_at is not None or subscription_ended_at is not None:
        donation_manager.mark_subscription_canceled_or_ended(subscription_id, customer_id,
                                                             subscription_ended_at,
                                                             subscription_canceled_at)
    return None
0b6de4695cf5c8eaa8699dc7fb7c3e1fd27c0659
3,641,881
from pathlib import Path

import pandas as pd


def get_most_probable_strand(filenames, tolerance, sample_name):
    """Return the most probable strand given 3 feature count files
    (for strand settings 0, 1, and 2).

    Returns the total counts by strand from the featureCounts matrix folder,
    the strandness ratio, and the most probable strand for a single sample
    (using a tolerance threshold on the strandness ratio). This assumes a
    single sample per featureCounts file.

    :param filenames: a list of 3 feature counts files for a given sample
        corresponding to the strand 0,1,2
    :param tolerance: a value below 0.5
    :param sample_name: the name of the sample corresponding to the list
        in filenames

    Possible values returned are:

    * 0: unstranded
    * 1: stranded
    * 2: reversely stranded

    We compute the number of counts in case 1 and 2 and compute the ratio
    strand as :math:`RS = stranded / (stranded + reversely stranded)`.
    Then we decide on the possible strandness with the following criteria:

    * if RS < tolerance, reversely stranded
    * if RS in 0.5+-tolerance: unstranded.
    * if RS > 1-tolerance, stranded
    * otherwise, we cannot decide.
    """
    fc_files = [Path(x) for x in filenames]

    res_dict = {}
    for f in fc_files:
        strand = str(f.parent)[-1]

        # Feature counts may have extra columns (not just a Series);
        # the count is the last column though, so
        # FeatureCount(f).df[df.columns[-1]] is the count series
        df = FeatureCount(f).df
        df = df[df.columns[-1]]
        res_dict[strand] = int(df.sum())

    strandness = res_dict["1"] / (res_dict["1"] + res_dict["2"])
    res_dict["strandness"] = strandness

    if strandness < tolerance:
        res_dict["strand"] = 2
    elif strandness > 1 - tolerance:
        res_dict["strand"] = 1
    elif 0.5 - tolerance < strandness and strandness < 0.5 + tolerance:
        res_dict["strand"] = 0
    else:
        res_dict["strand"] = None

    df = pd.DataFrame(res_dict, index=[sample_name])

    return df
72913c171a64d3639398db871a799d0cbf938522
3,641,882
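A worked example of the strandness decision rule above, using made-up read counts rather than real featureCounts files:

# 980 reads agree with the annotated strand and 20 are reversed.
stranded, reverse = 980, 20
strandness = stranded / (stranded + reverse)  # RS = 0.98
tolerance = 0.15
# RS > 1 - tolerance, so the sample is classified as stranded (strand = 1).
assert strandness > 1 - tolerance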
import xmlrpclib


def listBlogs(username, password, serverURL=None):
    """Get a list of your blogs

    Returns: list of dictionaries
        [{"blogid": ID_of_this_blog,
          "blogName": "name_of_this_blog",
          "url": "URL_of_this_blog"}, ...]

    Arguments:
    - username: your weblog username
    - password: your weblog password
    - serverURL: URL of remote server
      (optional, defaults to constants.xmlrpcServer)

    Example:
    >>> blogList = blogger.listBlogs("my_blogger_username", "my_secret_password")
    >>> for blog in blogList:
    ...     print "ID:", blog["blogid"]
    ...     print "Name:", blog["blogName"]
    ...     print "URL:", blog["url"]
    ...     print

    Manila notes:
    - Manila does not support this method, because it does not keep a
      centralized database of a user's blogs.
    """
    server = xmlrpclib.Server(serverURL or constants.xmlrpcServer, constants.transport)
    response = server.blogger.getUsersBlogs(constants.applicationKey, username, password)
    return response
573bb643bb808bef17d76e92a8ed5c8b12735c65
3,641,883
def find_lineup_no_optimization(set1, set2):
    """Find the approximate offset between two GPS data sets without range
    start optimization.

    This algorithm first identifies a primary data set and a secondary one
    based on which starts later. The offset is then applied to the secondary
    data set. After mapping the timestamps to the corresponding points from
    each set, it finds the optimal offset using the later start time of the
    two sets as the starting point. Requires checking of more values in the
    set to obtain an accurate offset, compared to the optimized version.

    Args:
        set1: GpsDataSet object
        set2: GpsDataSet object

    Returns:
        Tuple of two datetimes, the start time for set 1 and for set 2 given
        the calculated offset. If no lineup is found, it returns (None, None).
    """
    set1_start_time = set1.gps_data_list[0].time
    set2_start_time = set2.gps_data_list[0].time

    if set1_start_time > set2_start_time:
        later_start_time = utils.round_time(set1_start_time)
        primary_set = set1
        secondary_set = set2
    else:
        later_start_time = utils.round_time(set2_start_time)
        primary_set = set2
        secondary_set = set1

    # create dicts that map rounded times to points
    primary_time_points_mapping = create_time_to_points_mapping(primary_set)
    secondary_time_points_mapping = create_time_to_points_mapping(secondary_set)

    offset_range_length = 200  # number of offsets to check; for 200, offsets in (-100, 100)
    point_checking_range_length = 200  # points to check around each offset; for 200, points in (-100, 100)

    # find best offset
    optimal_offset = find_optimal_offset(primary_time_points_mapping,
                                         secondary_time_points_mapping,
                                         later_start_time, offset_range_length,
                                         point_checking_range_length)

    if optimal_offset is None:
        print("no optimal line-up for these two data sets; check if correct files are being used")
        return (None, None)

    if primary_set == set1:
        print("Optimal offset: set 2 is %s seconds from set 1" % optimal_offset)
        return (later_start_time, later_start_time + timedelta(seconds=optimal_offset))
    else:
        print("Optimal offset: set 1 is %s seconds from set 2" % optimal_offset)
        return (later_start_time + timedelta(seconds=optimal_offset), later_start_time)
46ae35c2a9c7bf8de22df044acc1fc184c5820a8
3,641,884
import numpy as np


def matching_plots_nn(plots_0, plots_1, K):
    """Match the K closest plot pairs by nearest-neighbour (L2) distance.

    :param plots_0: array of shape (M, D) with the first set of plot coordinates
    :param plots_1: array of shape (N, D) with the second set of plot coordinates
    :param K: number of lowest-cost pairs to keep
    :return: mapping dict from indices in plots_0 to indices in plots_1
    """
    M, N = plots_0.shape[0], plots_1.shape[0]
    mapping = {}
    cost_mat = np.zeros((M, N), dtype=np.float32)
    for i, plot_0 in enumerate(plots_0):
        for j, plot_1 in enumerate(plots_1):
            shift_vector = plot_0 - plot_1
            l2_dist = np.linalg.norm(shift_vector, ord=2)
            cost_mat[i][j] = l2_dist

    # Take the top K pairs with the smallest cost
    inds = np.argpartition(cost_mat.ravel(), K)[:K]
    inds_i = inds // N
    inds_j = inds % N
    for i, j in zip(inds_i, inds_j):  # i in range(M), j in range(N)
        mapping[i] = j
    return mapping
8d4baff1927591b84757f34b48070966c03a82b3
3,641,885
def find_file_start(chunks, pos): """Find a chunk before the one specified which is not a file block.""" pos = pos - 1 while pos > 0: if chunks[pos][0] != 0x100 and chunks[pos][0] != 0x102: # This is not a block return pos else: pos = pos - 1 return pos
b0fb280a847dea3cd589d59863888d1087d4982f
3,641,886
def navigation_children(parser, token): """Navigation""" args = token.contents.split() kwargs = extract_kwargs(args) if len(args) < 2: raise template.TemplateSyntaxError( _("navigation_children requires object as argument and optionally tree={{tree_name}}") ) return NavigationChildrenNode(args[1], **kwargs)
6057946c15c65f1d1f7887055c7b42f831b887cc
3,641,887
def get_all_ann_index(self): """ Retrieves all annotation ids """ return list(self.ann_infos.keys())
4375c9dbc14bf50575c8a5e42ce0ae8749820dfb
3,641,888
def file_to_list(file_path):
    """
    Read a file into a list of lines, with trailing newlines stripped.

    :param file_path: path of the file to read
    :returns: list of lines without trailing newlines

    @author: jhuang
    @time:1/22/2018
    """
    lists = []
    # file() is Python 2 only; open() works in both Python 2 and 3.
    with open(file_path, "r") as fd:
        for line in fd.readlines():
            lists.append(str(line).replace("\n", ""))
    return lists
a0aba732c121f4380d603f44d3cef5690df24d06
3,641,889
def reverse_words(str):
    """Reverses the letters in each word of a string."""
    words = str.split()
    if not words:
        # Avoid an IndexError on empty or whitespace-only input.
        return ''
    new_words = reverse(words[0])
    for word in words[1:]:
        new_words += ' ' + reverse(word)
    return new_words
6da77e9f214bdbfb3e20b7fe13d43fb63763a5b6
3,641,890
def parse_www_authenticate_header(value, on_update=None): """Parse an HTTP WWW-Authenticate header into a :class:`WWWAuthenticate` object. :param value: a WWW-Authenticate header to parse. :param on_update: an optional callable that is called every time a value on the :class:`WWWAuthenticate` object is changed. :return: a :class:`WWWAuthenticate` object. """ if not value: return WWWAuthenticate(on_update=on_update) try: auth_type, auth_info = value.split(None, 1) auth_type = auth_type.lower() except (ValueError, AttributeError): return WWWAuthenticate(value.lower(), on_update=on_update) return WWWAuthenticate(auth_type, parse_dict_header(auth_info), on_update)
69b5b9e5a1cf591eb761a58d62fc53ca48aefffc
3,641,891
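A usage sketch for the parser above, assuming the Werkzeug WWWAuthenticate data structure this helper normally returns (the header value is a made-up example):

auth = parse_www_authenticate_header('Digest realm="example", qop="auth"')
print(auth.type)   # 'digest'
print(auth.realm)  # 'example'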
import numpy as np
import pandas as pd


def get_image_feature_column(X: pd.DataFrame) -> str:
    """ Get only the image feature column name from X """
    X = X.select_dtypes(object)
    # "/9j/" is the base64 prefix of JPEG data, so a column whose values
    # start with it holds encoded images
    img_features_col_mask = [X[col].str.startswith("/9j/", na=False).any() for col in X]
    # should have just one image feature
    assert sum(img_features_col_mask) == 1, "expecting just one image feature column"
    img_col = X.columns[np.argmax(img_features_col_mask)]
    return img_col
9d178887c0b118c57379e91b7c1b7ed6d8c996ea
3,641,892
def merge_sort(sez):
    """Merge sort: split the list into two halves, sort each half
    recursively, then merge them by repeatedly taking the smaller front
    element of either half, producing a new sorted list."""
    n = len(sez)
    if n <= 1:
        return sez
    levo = merge_sort(sez[:n//2])   # left half
    desno = merge_sort(sez[n//2:])  # right half
    # Merge (zdruzi) the two sorted halves into one sorted list
    Nsez = zdruzi(levo, desno)
    return Nsez
024ae3362681f3e4e65cd083c86e3df0d52ea720
3,641,894
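merge_sort above leans on an undefined zdruzi (merge) helper; here is a minimal sketch consistent with the docstring, assuming the two functions share a module:

def zdruzi(levo, desno):
    # Merge two sorted lists by repeatedly taking the smaller front element.
    out, i, j = [], 0, 0
    while i < len(levo) and j < len(desno):
        if levo[i] <= desno[j]:
            out.append(levo[i])
            i += 1
        else:
            out.append(desno[j])
            j += 1
    # One of the two halves may still have leftovers; both extends are safe.
    out.extend(levo[i:])
    out.extend(desno[j:])
    return out

assert merge_sort([3, 1, 2]) == [1, 2, 3]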
def get_pairs(l, k): """ Given a list L of N unique positive integers, returns the count of the total pairs of numbers whose difference is K. First, each integer is stored into a dictionary along with its frequency. Then, for each integer I in the input list, the presence of the integer I+K is checked within the dictionary. The approach may be generalized to the case of non-unique positive integers. The computational time complexity of the algorithm is O(N). :param k: the given difference :type k: int :param l: the list of input integers :type l: list :return: the count of the total pairs of numbers whose difference is k :rtype: int """ hash_map = dict((i, 1) for i in l) return len([1 for i in l if hash_map.get(i + k)])
90fd199c75431c1d20076cea04358b3ca5872810
3,641,896
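A usage example for get_pairs:

# With k = 2, the qualifying pairs in [1, 5, 3, 4, 2] are
# (1, 3), (3, 5) and (2, 4), so the count is 3.
assert get_pairs([1, 5, 3, 4, 2], 2) == 3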
import collections def numba_to_jax(name: str, numba_fn, abstract_eval_fn, batching_fn=None): """Create a jittable JAX function for the given Numba function. Args: name: The name under which the primitive will be registered. numba_fn: The function that can be compiled with Numba. abstract_eval_fn: The abstract evaluation function. batching_fn: If set, this function will be used when vmap-ing the returned function. Returns: A jitable JAX function. """ primitive = jax.core.Primitive(name) primitive.multiple_results = True def abstract_eval_fn_always(*args, **kwargs): # Special-casing when only a single tensor is returned. shapes = abstract_eval_fn(*args, **kwargs) if not isinstance(shapes, collections.abc.Collection): return [shapes] else: return shapes primitive.def_abstract_eval(abstract_eval_fn_always) primitive.def_impl(partial(_np_evaluation_rule, numba_fn, abstract_eval_fn_always)) def _primitive_bind(*args): result = primitive.bind(*args) output_shapes = abstract_eval_fn(*args) # Special-casing when only a single tensor is returned. if not isinstance(output_shapes, collections.abc.Collection): assert len(result) == 1 return result[0] else: return result if batching_fn is not None: batching.primitive_batchers[primitive] = batching_fn else: batching.primitive_batchers[primitive] = partial( _naive_batching, _primitive_bind ) xla.backend_specific_translations["cpu"][primitive] = partial( _xla_translation_cpu, numba_fn, abstract_eval_fn_always ) xla.backend_specific_translations["gpu"][primitive] = partial( _xla_translation_gpu, numba_fn, abstract_eval_fn_always ) return _primitive_bind
9b56fe14c98a47746d51a6debb602e84066133b6
3,641,897
from typing import Any, Iterable, Iterator, TypeVar

from ..meta import with_metadata

# The original 'from re import T' was a bogus import; T is a generic
# type variable for the item type.
T = TypeVar('T')


def zip_metadata(iterable: Iterable[T], keys: Iterable[str], values: Iterable[Any]) -> Iterator[T]:
    """
    Adds meta-data to each object in an iterator.

    :param iterable: The object iterable.
    :param keys: The meta-data key iterable.
    :param values: The meta-data iterable.
    :return: An iterator over the objects with added meta-data.
    """
    return (with_metadata(obj, key, value) for obj, key, value in zip(iterable, keys, values))
18202ba46f143c76b387cb7fa99db33a8ff34655
3,641,898
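A standalone usage sketch of the pattern zip_metadata implements; the real with_metadata lives in an internal package, so this setattr-based stand-in is an assumption for illustration only:

def with_metadata(obj, key, value):
    # Attach a single metadata attribute and hand the object back.
    setattr(obj, key, value)
    return obj

class Record:
    pass

# This mirrors zip_metadata's generator expression over objects/keys/values.
records = [with_metadata(obj, key, value)
           for obj, key, value in zip([Record(), Record()], ['source', 'source'], ['a.csv', 'b.csv'])]
assert records[0].source == 'a.csv' and records[1].source == 'b.csv'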
import time def get_projects_query_flags(project_ids): """\ 1. Fetch `needs_final` for each Project 2. Fetch groups to exclude for each Project 3. Trim groups to exclude ZSET for each Project Returns (needs_final, group_ids_to_exclude) """ project_ids = set(project_ids) now = time.time() p = redis_client.pipeline() needs_final_keys = [get_project_needs_final_key(project_id) for project_id in project_ids] for needs_final_key in needs_final_keys: p.get(needs_final_key) exclude_groups_keys = [get_project_exclude_groups_key(project_id) for project_id in project_ids] for exclude_groups_key in exclude_groups_keys: p.zremrangebyscore(exclude_groups_key, float('-inf'), now - settings.REPLACER_KEY_TTL) p.zrevrangebyscore(exclude_groups_key, float('inf'), now - settings.REPLACER_KEY_TTL) results = p.execute() needs_final = any(results[:len(project_ids)]) exclude_groups = sorted({ int(group_id) for group_id in sum(results[(len(project_ids) + 1)::2], []) }) return (needs_final, exclude_groups)
f01ca44282ba211bf7ac1d16f6a5e07b3e905473
3,641,899
def openTypeNameVersionFallback(info): """ Fallback to *versionMajor.versionMinor* in the form 0.000. """ versionMajor = getAttrWithFallback(info, "versionMajor") versionMinor = getAttrWithFallback(info, "versionMinor") return "%d.%s" % (versionMajor, str(versionMinor).zfill(3))
370ab06aedd9909cc1b5d0e7f2211a554695c268
3,641,900
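For reference, the string this fallback produces, e.g. with versionMajor=1 and versionMinor=5:

# The minor part is zero-padded to three digits, OpenType style.
assert "%d.%s" % (1, str(5).zfill(3)) == "1.005"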
def get_quoted_text(text): """Method used to get quoted text. If body/title text contains a quote, the first quote is considered as the text. :param text: The replyable text :return: The first quote in the text. If no quotes are found, then the entire text is returned """ lines = text.split('\n\n') for line in lines: if line.startswith('>'): return line[1:] return text
3ac1801edcaf16af45d118918cb548f41d9a08fb
3,641,901
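A usage example for get_quoted_text; note that line[1:] keeps the space after '>':

text = "some reply\n\n> original comment\n\nmore reply"
# The first quoted paragraph wins, with the leading '>' stripped.
assert get_quoted_text(text) == " original comment"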
def pad_sequences(sequences, pad_tok):
    """
    Args:
        sequences: an iterable of lists or tuples
        pad_tok: the char to pad with

    Returns:
        a list of lists where each sublist has the same length
    """
    # Materialize first so a generator is not exhausted by max()
    # before it is passed on to _pad_sequences.
    sequences = list(sequences)
    max_length = max(len(x) for x in sequences)
    sequence_padded, sequence_length = _pad_sequences(sequences, pad_tok, max_length)

    return sequence_padded, sequence_length
077d80424607864d6e0fa63d3843f80b9c822d1e
3,641,902
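pad_sequences delegates to an undefined _pad_sequences helper; below is a minimal right-padding sketch of that helper (an assumption about its contract) plus a usage check, assuming both live in one module:

def _pad_sequences(sequences, pad_tok, max_length):
    # Right-pad each sequence to max_length and record the original lengths.
    sequence_padded, sequence_length = [], []
    for seq in sequences:
        seq = list(seq)
        sequence_padded.append(seq[:max_length] + [pad_tok] * max(max_length - len(seq), 0))
        sequence_length.append(min(len(seq), max_length))
    return sequence_padded, sequence_length

padded, lengths = pad_sequences([[1, 2], [3]], 0)
assert padded == [[1, 2], [3, 0]] and lengths == [2, 1]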
def get_username_for_os(os): """Return username for a given os.""" usernames = {"alinux2": "ec2-user", "centos7": "centos", "ubuntu1804": "ubuntu", "ubuntu2004": "ubuntu"} return usernames.get(os)
579ebfa4e76b6660d28afcc010419f32d74aa98c
3,641,903
import copy def stats_getter(context, core_plugin, ignore_list=None): """Update Octavia statistics for each listener (virtual server)""" stat_list = [] lb_service_client = core_plugin.nsxlib.load_balancer.service # Go over all the loadbalancers & services lb_bindings = nsx_db.get_nsx_lbaas_loadbalancer_bindings( context.session) for lb_binding in lb_bindings: if ignore_list and lb_binding['loadbalancer_id'] in ignore_list: continue lb_service_id = lb_binding.get('lb_service_id') try: # get the NSX statistics for this LB service # Since this is called periodically, silencing it at the logs rsp = lb_service_client.get_stats(lb_service_id, silent=True) if rsp and 'virtual_servers' in rsp: # Go over each virtual server in the response for vs in rsp['virtual_servers']: # look up the virtual server in the DB vs_bind = nsx_db.get_nsx_lbaas_listener_binding_by_vs_id( context.session, vs['virtual_server_id']) if vs_bind and 'statistics' in vs: vs_stats = vs['statistics'] stats = copy.copy(lb_const.LB_EMPTY_STATS) stats['id'] = vs_bind.listener_id stats['request_errors'] = 0 # currently unsupported for stat, stat_value in lb_const.LB_STATS_MAP.items(): lb_stat = stat_value stats[stat] += vs_stats[lb_stat] stat_list.append(stats) except nsxlib_exc.ManagerError: pass return stat_list
65ebac76b6543683103584c18a7c06d2ea453e0a
3,641,904
from typing import List def track_to_note_string_list( track: Track, ) -> List[str]: """Convert a mingus.containers.Track to a list of note strings""" final_note_list = [] for element in track.get_notes(): for note in element[-1]: final_note_list.append(note_to_string(note)) return final_note_list
71b4fab66d18242e67a4ea59998e94341531f77a
3,641,905