content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
import argparse import os, sys os.chdir("/home/jackie/ResearchArea/SkinCancerResearch/semi_skin_cancer") sys.path.append("/home/jackie/ResearchArea/SkinCancerResearch/semi_skin_cancer") print(os.getcwd()) import os.path as osp import torchvision import numpy as np import torch # import torch.nn as nn # import torch.optim as optim # from itertools import cycle from torchvision import transforms # import network, loss from torch.utils.data import DataLoader from data_list import ImageList, ImageList_idx import random, pdb, math, copy from evaluation.draw import draw_ROC, draw_TSNE, draw_cm from evaluation.metrics import get_metrics, get_metrics_sev_class, get_test_data import matplotlib.pyplot as plt from transforms import image_test import utils plt.rc('font', family='Times New Roman') if __name__ == "__main__": parser = argparse.ArgumentParser(description='oral_cancer') parser.add_argument('--gpu_id', type=str, nargs='?', default='0', help="device id to run") parser.add_argument('--batch_size', type=int, default=32, help="batch_size") parser.add_argument('--num_classes', type=int, default=7, help="number of classes") parser.add_argument('--worker', type=int, default=12, help="number of workers") parser.add_argument('--dir', type=str, default='./ckps/') parser.add_argument('--subDir', type=str, default='resnet50_sev_cates_2500_0.99_naive_0_afm_0.7_u_0.3') parser.add_argument('--dset_path', type=str, default='./data/semi_processed') parser.add_argument('--seed', type=int, default=2021, help="random seed") parser.add_argument('--which', type=str, default='one', choices=['one', 'all']) parser.add_argument('--img_dir', type=str, default=None) parser.add_argument('--save_dir', type=str, default=None) parser.add_argument('--bin_class', type=str, default=None) args = parser.parse_args() if args.num_classes == 2: args.label_names = [("not " + args.bin_class), args.bin_class] else: args.label_names = ['akiec', 'bcc', 'bkl', 'df', 'mel', 'nv', 'vasc'] if args.which == 'one': 
args.net = osp.basename(args.subDir).split('_')[0] # torch.backends.cudnn.deterministic = True print(args.dir) args.output_dir_train = os.path.join(args.dir, args.subDir) print(args.output_dir_train) args.output_dir = os.path.join('test', args.output_dir_train) if not osp.exists(args.output_dir): os.system('mkdir -p ' + args.output_dir) if not osp.exists(args.output_dir): os.makedirs(args.output_dir) args.out_file = open(osp.join(args.output_dir, 'log.txt'), 'w') args.out_file.write(print_args(args) + '\n') args.out_file.flush() test_target(args) if args.which == 'all': for dir in os.listdir(args.dir): args.net = dir.split('_')[0] # torch.backends.cudnn.deterministic = True args.output_dir_train = os.path.join(args.dir, dir) args.output_dir = os.path.join('./test', args.output_dir_train) if not osp.exists(args.output_dir): os.system('mkdir -p ' + args.output_dir) if not osp.exists(args.output_dir): os.makedirs(args.output_dir) args.out_file = open(osp.join(args.output_dir, 'log.txt'), 'w') args.out_file.write(print_args(args) + '\n') args.out_file.flush() test_target(args)
[ 11748, 1822, 29572, 201, 198, 11748, 28686, 11, 25064, 201, 198, 201, 198, 418, 13, 354, 15908, 7203, 14, 11195, 14, 19650, 494, 14, 25104, 30547, 14, 42455, 34, 8250, 25104, 14, 325, 11632, 62, 20407, 62, 48870, 4943, 201, 198, 17597...
2.28279
1,563
from django.urls import path, re_path from . import views urlpatterns = [ path('', views.index, name='index'), path('countdown/', views.countdown, name='countdown'), #re_path(r'.+', views.redir, name='redir'), ]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 11, 302, 62, 6978, 198, 198, 6738, 764, 1330, 5009, 198, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 3108, 10786, 3256, 5009, 13, 9630, 11, 1438, 11639, 9630, 33809, 198, 220, 22...
2.627907
86
from sys import argv script, first, second, third = argv print "This script called", script print "The first variable :", first print "The second variable :", second print "The third variable :", third
[ 6738, 25064, 1330, 1822, 85, 198, 198, 12048, 11, 717, 11, 1218, 11, 2368, 796, 1822, 85, 198, 198, 4798, 366, 1212, 4226, 1444, 1600, 4226, 198, 4798, 366, 464, 717, 7885, 1058, 1600, 717, 198, 4798, 366, 464, 1218, 7885, 1058, 160...
3.777778
54
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from flask import render_template, request import view_base
[ 2, 15069, 357, 34, 8, 2211, 399, 3974, 261, 21821, 290, 44735, 10501, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846...
3.895954
173
import numpy as np import pytest import torch from numpy.testing import assert_almost_equal from allennlp.common.checks import ConfigurationError from allennlp.common.testing import AllenNlpTestCase from allennlp.data import Vocabulary from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder
[ 11748, 299, 32152, 355, 45941, 198, 11748, 12972, 9288, 198, 11748, 28034, 198, 6738, 299, 32152, 13, 33407, 1330, 6818, 62, 28177, 62, 40496, 198, 198, 6738, 477, 1697, 34431, 13, 11321, 13, 42116, 1330, 28373, 12331, 198, 6738, 477, 1...
3.627907
86
import tensorflow as tf import numpy as np
[ 11748, 11192, 273, 11125, 355, 48700, 198, 11748, 299, 32152, 355, 45941, 628, 198 ]
3.214286
14
"""Cell parameter random initializations.""" from typing import Any, Dict import numpy as np from ..parameters import ( Height, NewCellBendLowerLower, NewCellBendLowerUpper, NewCellBendOverallLower, NewCellBendOverallUpper, NewCellBendUpperLower, NewCellBendUpperUpper, NewCellLength1Mean, NewCellLength1Std, NewCellLength2Mean, NewCellLength2Std, NewCellLengthAbsoluteMax, NewCellLengthAbsoluteMin, NewCellRadiusFromCenter, NewCellWidthAbsoluteMax, NewCellWidthAbsoluteMin, NewCellWidthMean, NewCellWidthStd, Width, ) from ..random import RRF, enforce_bounds RandomSequenceType = Dict[str, Any]
[ 37811, 28780, 11507, 4738, 4238, 4582, 526, 15931, 198, 6738, 19720, 1330, 4377, 11, 360, 713, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6738, 11485, 17143, 7307, 1330, 357, 198, 220, 220, 220, 27280, 11, 198, 220, 220, 220, ...
2.575758
264
import qiskit import numpy as np import matplotlib.pyplot as plt import json from graph import * # Random comment P =1 # measure all qubits in q_input register, return dictionary of samples ### gradient ascent optimizer # graph is graph to optimize over # epsilon controls how far out the delta is calculated # eta is learning rate # threshold is the average of gamma and beta that we will consider a max main()
[ 11748, 10662, 1984, 270, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 33918, 198, 6738, 4823, 1330, 1635, 198, 198, 2, 14534, 2912, 198, 47, 796, 16, 198, 198, 2, 3953, ...
3.747748
111
# Copyright (c) 2021 OpenCyphal # This software is distributed under the terms of the MIT License. # Author: Pavel Kirienko <pavel@opencyphal.org> from ._app_descriptor import AppDescriptor as AppDescriptor from ._cmd import file_server as file_server
[ 2, 15069, 357, 66, 8, 33448, 4946, 20418, 27451, 198, 2, 770, 3788, 318, 9387, 739, 262, 2846, 286, 262, 17168, 13789, 13, 198, 2, 6434, 25, 49612, 7385, 2013, 7204, 1279, 8957, 626, 31, 404, 1387, 27451, 13, 2398, 29, 198, 198, 6...
3.465753
73
"""Simple code for training an RNN for motion prediction.""" import os import sys import time import numpy as np import torch import torch.optim as optim from torch.autograd import Variable import mtfixb_model import mtfixb_model2 import parseopts def create_model(args, total_num_batches): """Create MT model and initialize or load parameters in session.""" if len(args.load) > 0: print("Loading model") model = torch.load(args.load, map_location="cpu") if args.use_cpu else torch.load(args.load) return model if args.k == 0: return create_model_k0(args, total_num_batches) if args.dynamicsdict: return create_model_DD(args, total_num_batches) if args.biasonly: return create_model_BiasOnly(args, total_num_batches) if args.nobias: return create_model_NoMTBias(args, total_num_batches) model = mtfixb_model.MTGRU( args.seq_length_out, args.decoder_size, args.decoder_size2, args.batch_size, total_num_batches, args.k, args.size_psi_hidden, args.size_psi_lowrank, args.bottleneck, output_dim=args.human_size, input_dim=args.input_size, dropout=args.dropout_p, residual_output=args.residual_velocities, init_state_noise=args.init_state_noise, mt_rnn=args.mt_rnn, psi_affine=args.psi_affine, ) if len(args.load) <= 0: if len(args.load_layer1) > 0: print("Loading GRU2 model") model = load_layer1(model, args.load_layer1, args.use_cpu) return model print("Loading model") model = torch.load(args.load, map_location="cpu") if args.use_cpu else torch.load(args.load) return model def create_model_k0(args, total_num_batches): """Create MT model and initialize or load parameters in session.""" model = mtfixb_model.OpenLoopGRU( args.seq_length_out, args.decoder_size, args.batch_size, args.human_size, args.input_size, args.dropout_p, args.residual_velocities, args.init_state_noise, ) return model def create_model_DD(args, total_num_batches): """Create MT model and initialize or load parameters in session.""" if len(args.load_layer1) > 0: NotImplementedError("Layer 1 load not yet implemented for 
Dynamics Dict.") model = mtfixb_model.DynamicsDict( args.seq_length_out, args.decoder_size, total_num_batches, args.batch_size, args.k, args.size_psi_hidden, args.size_psi_lowrank, args.human_size, args.input_size, args.dropout_p, args.residual_velocities, args.init_state_noise, ) return model def create_model_BiasOnly(args, total_num_batches): """Create MT model and initialize or load parameters in session.""" if len(args.load_layer1) > 0: NotImplementedError("Layer 1 load not yet implemented for MT Bias Only.") model = mtfixb_model.MTGRU_BiasOnly( args.seq_length_out, args.decoder_size, args.decoder_size2, args.batch_size, total_num_batches, args.k, args.size_psi_hidden, args.size_psi_lowrank, args.bottleneck, output_dim=args.human_size, input_dim=args.input_size, dropout=args.dropout_p, residual_output=args.residual_velocities, init_state_noise=args.init_state_noise, ) return model def create_model_NoMTBias(args, total_num_batches): """Create MT model and initialize or load parameters in session.""" if len(args.load_layer1) > 0: NotImplementedError("Layer 1 load not yet implemented for MT Bias Only.") model = mtfixb_model2.MTGRU_NoBias( args.seq_length_out, args.decoder_size, args.decoder_size2, args.batch_size, total_num_batches, args.k, args.size_psi_hidden, args.size_psi_lowrank, args.bottleneck, output_dim=args.human_size, input_dim=args.input_size, dropout=args.dropout_p, residual_output=args.residual_velocities, init_state_noise=args.init_state_noise, mt_rnn=args.mt_rnn, psi_affine=args.psi_affine, ) return model def train(args): """Train a MT model on human motion""" train_iter = read_all_data(args) train_iter.shuffle() total_num_batches = train_iter.total_length() model = create_model(args, total_num_batches) model = model if args.use_cpu else model.cuda() has_weight = not np.isclose(args.first3_prec, 1.0) is_hard_em = args.hard_em_iters > 0 is_MT = args.k > 0 current_step = 0 previous_losses = [] step_time, loss = 0, 0 mt_lr = args.learning_rate_mt if 
args.learning_rate_mt >= 0 else args.learning_rate z_lr = args.learning_rate_z if args.learning_rate_z >= 0 else args.learning_rate zls_lr = 0 if is_hard_em else z_lr pars_lrs, zls_ix = model.get_params_optim_dicts(mt_lr, args.learning_rate, z_lr, zls_lr=zls_lr) if args.optimiser.upper() == "SGD": optimiser = optim.SGD(pars_lrs, weight_decay=args.weight_decay) elif args.optimiser.upper() == "NESTEROV": optimiser = optim.SGD(pars_lrs, momentum=0.8, nesterov=True, weight_decay=args.weight_decay) elif args.optimiser.upper() == "ADAM": optimiser = optim.Adam(pars_lrs, betas=(0.9, 0.999), weight_decay=args.weight_decay) else: Exception("Unknown optimiser type: {:d}. Try 'SGD', 'Nesterov' or 'Adam'") has_ar_noise = args.ar_coef > 0 device = "cpu" if args.use_cpu else "cuda" if has_ar_noise: assert args.ar_coef < 1, "ar_coef must be in [0, 1)." # Construct banded AR precision matrix (fn def below) Prec = ar_prec_matrix(args.ar_coef, args.seq_length_out).float().to(device) for _ in range(args.iterations): optimiser.zero_grad() model.train() start_time = time.time() # ------------------------------------------------------- TRAINING inputs, outputs, c_ids = model.get_batch(train_iter) inputs, outputs = torchify(inputs, outputs, device=device) if is_MT: mu = model.mt_net.Z_mu[c_ids, :] sd = torch.sigmoid(3 * model.mt_net.Z_logit_s[c_ids, :]) preds, _state = model(inputs, mu, sd) else: preds, _state = model(inputs) err = preds - outputs if has_weight: err = err * torch.cat( (torch.ones(1, 1, 3) * np.sqrt(args.first3_prec), torch.ones(1, 1, args.human_size - 3)), dim=2 ).to(err.device) if not has_ar_noise: sqerr = err ** 2 else: sqerr = (Prec @ err) * err step_loss = args.human_size * args.seq_length_out * sqerr.mean() / 2 # assume \sigma is const. wrt optimisation, and hence normalising constant can be ignored. # Now for KL term. 
Since we're descending *negative* L.B., we need to *ADD* KL to loss: if is_MT: logstd = torch.log(sd) KLD = -0.5 * torch.sum(1 + 2 * logstd - mu.pow(2) - torch.exp(2 * logstd)) step_loss = step_loss + KLD # Actual backpropagation step_loss.backward() optimiser.step() # ------------------------------------------------------- # Reporting / admin step_loss = step_loss.cpu().data.numpy() if current_step % 10 == 0: if is_MT: KLD_part = KLD.cpu().data.numpy() print( "step {0:04d}; step_loss: {1:.4f} ({2:.4f})".format(current_step, step_loss, step_loss - KLD_part) ) else: print("step {0:04d}; step_loss: {1:.4f}".format(current_step, step_loss)) step_time += (time.time() - start_time) / args.test_every loss += step_loss / args.test_every current_step += 1 if current_step % 20 == 0: sys.stdout.flush() # Decay learning rate (if appl.) if current_step % args.learning_rate_step == 0: for param_group in optimiser.param_groups: param_group["lr"] *= args.learning_rate_decay_factor print("Decay learning rate. New value at " + str(optimiser.param_groups[0]["lr"])) # remove Hard EM spec (if appl.) if is_hard_em and zls_ix is not None and current_step == args.hard_em_iters: optimiser.param_groups[zls_ix]["lr"] = z_lr model.standardise_aggregate_posterior() # Once in a while, we save checkpoint, print statistics, and run evals. 
if current_step % args.test_every == 0: model.eval() # === CANNOT DO TEST SET EVALUATION SINCE DONT KNOW LATENT Z === # inputs, outputs = model.get_test_batch(test_set_Y, test_set_U, -1) # inputs, outputs = torchify(inputs, outputs, device=device) # # if is_MT: # preds, state = model(inputs, mu, sd) # else: # preds = model(inputs) # # err = (preds - outputs) # if has_weight: # err = err * torch.cat((torch.ones(1, 1, 3) * np.sqrt(args.first3_prec), # torch.ones(1, 1, args.human_size - 3)), dim=2).to(err.device) # # if not has_ar_noise: # sqerr = err ** 2 # else: # Prec_test = ar_prec_matrix(args.ar_coef, err.size(1)).float().to(device) # sqerr = (Prec_test @ err) * err # # val_loss = args.human_size * args.seq_length_out * sqerr.mean() / 2 # # if is_MT: # logstd = torch.log(sd) # KLD = -0.5 * torch.sum(1 + 2 * logstd - mu.pow(2) - torch.exp(2 * logstd)) # val_loss = val_loss + KLD # # print() # print("{0: <16} |".format("milliseconds"), end="") # for ms in [60, 240, 480, 750, 990, 1500, 2010]: # print(" {0:5d} |".format(ms), end="") # print() # # avg_mse_tt = sqerr.detach().cpu().mean(dim=0).numpy().mean(axis=1) # Pretty print of the results for 60, 240, 480, 750, 990, 1500, 2010 ms # print("{0: <16} |".format(" "), end="") # for ms in [1, 7, 15, 24, 32, 49, 66]: # if args.seq_length_out >= ms + 1: # print(" {0:.3f} |".format(avg_mse_tt[ms]), end="") # else: # print(" n/a |", end="") # print() # # print() # print("============================\n" # "Global step: %d\n" # "Learning rate: %.4f\n" # "Step-time (ms): %.4f\n" # "Train loss avg: %.4f\n" # "--------------------------\n" # "Test loss: %.4f\n" # "============================" % (current_step, # args.learning_rate, step_time * 1000, loss, # val_loss)) torch.save(model, args.train_dir + "/model_" + str(current_step)) # print() previous_losses.append(loss) # Reset global time and loss step_time, loss = 0, 0 sys.stdout.flush() def read_all_data(args): """ Loads data for training/testing and normalizes it. 
Args data_dir: directory to load the data from style_ix: style index of the test set (and leave out from the training set) njoints: number of joints to model (0 or -1 = all) Returns train_set: dictionary with normalized training data test_set: dictionary with test data data_mean: d-long vector with the mean of the training data data_std: d-long vector with the standard dev of the training data dim_to_ignore: dimensions that are not used becaused stdev is too small dim_to_use: dimensions that we are actually using in the model """ # === Read training data === print("Reading training data (test index {0:d}).".format(args.style_ix)) njoints = args.human_size if not args.train_set_size == -1: style_lkp = { str(i): range(1 + args.train_set_size * (i - 1), 1 + args.train_set_size * i) for i in range(1, 8 + 1) } else: style_lkp = np.load(os.path.join(args.data_dir, args.stylelkp_fname)) train_set_Y = np.load(os.path.join(args.data_dir, args.output_fname)) train_set_U = np.load(os.path.join(args.data_dir, args.input_fname)) njoints = train_set_Y[str(0)].shape[1] if njoints <= 0 else njoints if args.train_set_size != 0: train_ixs = np.concatenate( [ style_lkp[str(i)] for i in range(1, len(style_lkp.keys()) + 1) if i != args.style_ix ] # CAREFUL: jl is 1-based! ) train_set_Y = [train_set_Y[str(i)][:, :njoints] for i in train_ixs] train_set_U = [train_set_U[str(i)] for i in train_ixs] else: assert args.style_ix not in range(1, 9), "no support for LOO experiments with max MTL data yet. Use style_ix=9" train_set_Y = [train_set_Y[str(i + 1)][:, :njoints] for i in range(len(train_set_Y))] train_set_U = [train_set_U[str(i + 1)] for i in range(len(train_set_U))] print("Using files {:s}; {:s}".format(args.input_fname, args.output_fname)) print("done reading data.") return mtfixb_model.DataIterator(train_set_Y, train_set_U, 64, min_size=64, overlap2=args.overlap_windows) if __name__ == "__main__": main()
[ 37811, 26437, 2438, 329, 3047, 281, 371, 6144, 329, 6268, 17724, 526, 15931, 198, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 640, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 11748, 28034, 13, 40085, 355, 6436, ...
2.071545
6,723
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, unused-variable, unused-argument """Transposed 2D convolution operators (sometimes called Deconvolution).""" import collections import tvm from tvm import relay, te from ..utils import simplify from .dilate import dilate from .pad import pad from .utils import get_pad_tuple _single = _ntuple(1) _pair = _ntuple(2) _triple = _ntuple(3) _quadruple = _ntuple(4) def conv2d_transpose_nchw(Input, Filter, strides, padding, out_dtype, output_padding): """Transposed 2D convolution nchw forward operator. Parameters ---------- Input : tvm.te.Tensor 4-D with shape [batch, in_channel, in_height, in_width] Filter : tvm.te.Tensor 4-D with shape [in_channel, num_filter, filter_height, filter_width] strides : tuple of two ints The spatial stride along height and width padding : int or str Padding size, or ['VALID', 'SAME'] out_dtype : str The output data type. This is used for mixed precision. 
output_padding : tuple of ints Used to get the right output shape for gradients Returns ------- Output : tvm.te.Tensor 4-D with shape [batch, out_channel, out_height, out_width] """ return declaration_conv2d_transpose_impl( Input, Filter, strides, padding, out_dtype, output_padding=output_padding ) def conv2d_transpose_nchw_preprocess(data, kernel, strides, padding, out_dtype, output_padding): """Preprocess data and kernel to make the compute pattern of conv2d_transpose the same as conv2d""" batch, in_c, in_h, in_w = data.shape _, out_c, filter_h, filter_w = kernel.shape stride_h, stride_w = strides opad_h, opad_w = output_padding assert opad_h < stride_h and opad_w < stride_w # dilate data data_dilate = dilate(data, [1, 1, stride_h, stride_w], name="data_dilate") # pad data fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w)) bpad_top = filter_h - 1 - fpad_top bpad_bottom = filter_h - 1 - fpad_bottom + opad_h bpad_left = filter_w - 1 - fpad_left bpad_right = filter_w - 1 - fpad_right + opad_w data_pad = pad( data_dilate, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right], name="data_pad" ) # transform kernel layout from IOHW to OIHW, and rotate kernel by 180 degrees kernel_transform = te.compute( (out_c, in_c, filter_h, filter_w), lambda o, i, h, w: kernel[i][o][filter_h - 1 - h][filter_w - 1 - w], name="kernel_transform", ) return data_pad, kernel_transform def declaration_conv2d_transpose_impl(data, kernel, strides, padding, out_dtype, output_padding): """Implementation of conv2d transpose""" data_pad, kernel_transform = conv2d_transpose_nchw_preprocess( data, kernel, strides, padding, out_dtype, output_padding ) batch, in_c, in_h, in_w = data_pad.shape out_c, _, filter_h, filter_w = kernel_transform.shape # convolution stage out_c = simplify(out_c) out_h = simplify(in_h - filter_h + 1) out_w = simplify(in_w - filter_w + 1) dc = te.reduce_axis((0, in_c), name="dc") dh = te.reduce_axis((0, filter_h), name="dh") dw 
= te.reduce_axis((0, filter_w), name="dw") Output = te.compute( (batch, out_c, out_h, out_w), lambda b, c, h, w: te.sum( data_pad[b, dc, h + dh, w + dw].astype(out_dtype) * kernel_transform[c, dc, dh, dw].astype(out_dtype), axis=[dc, dh, dw], ), tag="conv2d_transpose_nchw", ) return Output def group_conv2d_transpose_nchw(data, kernel, stride, padding, out_dtype, output_padding, groups): """Group convolution operator in NCHW layout. Parameters ---------- data : tvm.te.Tensor 4-D with shape [batch, in_channel, in_height, in_width] kernel : tvm.te.Tensor 4-D with shape [in_channel, out_channel // groups, filter_height, filter_width] stride : int or a list/tuple of two ints Stride size, or [stride_height, stride_width] padding : int or a list/tuple of 2 or 4 ints padding size, or [pad_height, pad_width] for 2 ints, or [pad_top, pad_left, pad_bottom, pad_right] for 4 ints out_dtype : str The output data type. This is used for mixed precision. output_padding : tuple of ints Used to get the right output shape for gradients groups : int number of groups out_dtype : str The output type. This is used for mixed precision. 
Returns ------- Output : tvm.te.Tensor 4-D with shape [batch, out_channel, out_height, out_width] """ if groups == 1: return conv2d_transpose_nchw(data, kernel, stride, padding, out_dtype, output_padding) # some pre-processing and prelimnary checks if out_dtype is None: out_dtype = data.dtype batch, in_channels, in_h, in_w = data.shape _, out_c, filter_h, filter_w = kernel.shape assert ( in_channels % groups == 0 ), f"input channels {in_channels} must divide group size {groups}" # assert out_c % groups == 0, f"output channels {in_c} must divide group size {groups}" strides = _pair(stride) # padding = _pair(padding) # output_padding = _pair(output_padding) # dilation = _pair(dilation) stride_h, stride_w = strides opad_h, opad_w = output_padding assert ( opad_h < stride_h and opad_w < stride_w ), f"[{output_padding}] opad_h:{opad_h} < stride_h:{stride_h} \ and opad_w:{opad_w} < stride_w:{stride_w} does not satisfy." # dilate data data_dilate = dilate(data, [1, 1, stride_h, stride_w], name="data_dilate") # pad data fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w)) bpad_top = filter_h - 1 - fpad_top bpad_bottom = filter_h - 1 - fpad_bottom + opad_h bpad_left = filter_w - 1 - fpad_left bpad_right = filter_w - 1 - fpad_right + opad_w data_pad = pad( data_dilate, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right], name="data_pad" ) # transform kernel layout from IOHW to OIHW, and rotate kernel by 180 degrees kernel_transform = te.compute( (out_c, in_channels, filter_h, filter_w), lambda i, o, h, w: kernel[o][i][filter_h - 1 - h][filter_w - 1 - w], name="kernel_transform", ) batch, in_channels, in_h, in_w = data_pad.shape out_c, _, filter_h, filter_w = kernel_transform.shape # convolution stage out_channels = simplify(out_c * groups) out_h = simplify(in_h - filter_h + 1) out_w = simplify(in_w - filter_w + 1) dc = te.reduce_axis((0, in_channels // groups), name="dc") dh = te.reduce_axis((0, filter_h), name="dh") dw = 
te.reduce_axis((0, filter_w), name="dw") # data: batch, in_channels, out_h, out_w # weight: out_channels // G, in_channels, out_h, out_w return te.compute( (batch, out_channels, out_h, out_w), lambda b, c, h, w: te.sum( data_pad[ b, c // (out_channels // groups) * (in_channels // groups) + dc, h + dh, w + dw ].astype(out_dtype) * kernel_transform[ c % (out_channels // groups), c // (out_channels // groups) * (in_channels // groups) + dc, dh, dw, ].astype(out_dtype), axis=[dc, dh, dw], ), tag="group_conv2d_transpose_nchw", ) def layout_transform(tensor: "relay.Expr", current_layout: str, desired_layout: str): """Transform a tensor with the current layout to the desired layout. E.g. layout_transform(t, "NCHW", "CNHW") --> relay.transpose(t, [1, 0, 2, 3]) Parameters ---------- tensor: relay.Expr The Tensor to transpose current_layout: str The current layout e.g. NCHW or OIHW desired_layout: str The desired layout, must be compatible with current_layout Returns ------- The layout_transformed tensor. """ if sorted(current_layout) != sorted(desired_layout): raise ValueError(f"Incompatible layouts: {current_layout} vs {desired_layout}") if current_layout == desired_layout: return tensor current_layout_map = {c: i for i, c in enumerate(current_layout)} desired_layout_map = {c: i for i, c in enumerate(desired_layout)} axes = [None] * len(current_layout) for c, i in desired_layout_map.items(): axes[i] = current_layout_map[c] return relay.transpose(tensor, axes=axes)
[ 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 9387, 351, 428, 670, 329, 3224, 1321, 198, 2, 5115, 6634, 9238, 13, 220, 383, 7054,...
2.445974
3,850
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. """Module for assessing causal feature importance.""" import warnings from collections import OrderedDict, namedtuple import joblib import lightgbm as lgb from numba.core.utils import erase_traceback import numpy as np from numpy.lib.function_base import iterable import pandas as pd from sklearn.base import BaseEstimator, TransformerMixin from sklearn.compose import ColumnTransformer from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, RandomForestRegressor from sklearn.linear_model import Lasso, LassoCV, LogisticRegression, LogisticRegressionCV from sklearn.pipeline import make_pipeline, Pipeline from sklearn.preprocessing import OneHotEncoder, PolynomialFeatures, StandardScaler from sklearn.tree import _tree from sklearn.utils.validation import column_or_1d from ...cate_interpreter import SingleTreeCateInterpreter, SingleTreePolicyInterpreter from ...dml import LinearDML, CausalForestDML from ...inference import NormalInferenceResults from ...sklearn_extensions.linear_model import WeightedLasso from ...sklearn_extensions.model_selection import GridSearchCVList from ...utilities import _RegressionWrapper, inverse_onehot # TODO: this utility is documented but internal; reimplement? from sklearn.utils import _safe_indexing # TODO: this utility is even less public... from sklearn.utils import _get_column_indices def _get_default_shared_insights_output(): """ Dictionary elements shared among all analyses. In case of breaking changes to this dictionary output, the major version of this dictionary should be updated. In case of a change to this dictionary, the minor version should be updated. 
""" return { _CausalInsightsConstants.RawFeatureNameKey: [], _CausalInsightsConstants.EngineeredNameKey: [], _CausalInsightsConstants.CategoricalColumnKey: [], _CausalInsightsConstants.TypeKey: [], _CausalInsightsConstants.Version: '1.0', _CausalInsightsConstants.CausalComputationTypeKey: "simple", _CausalInsightsConstants.ConfoundingIntervalKey: None, _CausalInsightsConstants.InitArgsKey: {} } # simplification of sklearn's ColumnTransformer that encodes categoricals and passes through selected other columns # but also supports get_feature_names with expected signature # Wrapper to make sure that we get a deep copy of the contents instead of clone returning an untrained copy def _freeze(transformer): return _FrozenTransformer(_Wrapper(transformer)) # Convert python objects to (possibly nested) types that can easily be represented as literals # Convert SingleTreeInterpreter to a python dictionary # named tuple type for storing results inside CausalAnalysis class; # must be lifted to module level to enable pickling _result = namedtuple("_result", field_names=[ "feature_index", "feature_name", "feature_baseline", "feature_levels", "hinds", "X_transformer", "W_transformer", "estimator", "global_inference", "treatment_value"]) # Unless we're opting into minimal cross-fitting, this is the minimum number of instances of each category # required to fit a discrete DML model _CAT_LIMIT = 10 def _pandas_summary(self, get_inference, *, props, n, expand_arr=False, keep_all_levels=False): """ Summarizes results into a dataframe. 
Parameters ---------- get_inference : lambda Method to get the relevant inference results from each result object props : list of (string, string or lambda) Set of column names and ways to get the corresponding values from the inference object n : int The number of samples in the dataset expand_arr : boolean, default False Whether to add a synthetic sample dimension to the result arrays when performing internal computations keep_all_levels : boolean, default False Whether to keep all levels, even when they don't take on more than one value; Note that regardless of this argument the "sample" level will only be present if expand_arr is False """ return self._summarize(summary=make_dataframe, get_inference=get_inference, props=props, expand_arr=expand_arr, drop_sample=False) # dropping the sample dimension is handled above instead def _dict_summary(self, get_inference, *, n, props, kind, drop_sample=False, expand_arr=False, row_wise=False): """ Summarizes results into a dictionary. Parameters ---------- get_inference : lambda Method to get the relevant inference results from each result object n : int The number of samples in the dataset props : list of (string, string or lambda) Set of column names and ways to get the corresponding values from the inference object kind : string The kind of inference results to get (e.g. 'global', 'local', or 'cohort') drop_sample : boolean, default False Whether to drop the sample dimension from each array expand_arr : boolean, default False Whether to add an initial sample dimension to the result arrays row_wise : boolean, default False Whether to return a list of dictionaries (one dictionary per row) instead of a dictionary of lists (one list per column) """ return self._summarize(summary=make_dict, get_inference=get_inference, props=props, expand_arr=expand_arr, drop_sample=drop_sample) def global_causal_effect(self, *, alpha=0.05, keep_all_levels=False): """ Get the global causal effect for each feature as a pandas DataFrame. 
Parameters ---------- alpha : float, default 0.05 The confidence level of the confidence interval keep_all_levels : bool, default False Whether to keep all levels of the output dataframe ('outcome', 'feature', and 'feature_level') even if there was only a single value for that level; by default single-valued levels are dropped. Returns ------- global_effects : pandas Dataframe DataFrame with the following structure: :Columns: ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper'] :Index: ['feature', 'feature_value'] :Rows: For each feature that is numerical, we have an entry with index ['{feature_name}', 'num'], where 'num' is literally the string 'num' and feature_name is the input feature name. For each feature that is categorical, we have an entry with index ['{feature_name}', '{cat}v{base}'] where cat is the category value and base is the category used as baseline. If all features are numerical then the feature_value index is dropped in the dataframe, but not in the serialized dict. """ # a global inference indicates the effect of that one feature on the outcome return self._pandas_summary(lambda res: res.global_inference, props=self._point_props(alpha), n=1, expand_arr=True, keep_all_levels=keep_all_levels) def _global_causal_effect_dict(self, *, alpha=0.05, row_wise=False): """ Gets the global causal effect for each feature as dictionary. Dictionary entries for predictions, etc. will be nested lists of shape (d_y, sum(d_t)) Only for serialization purposes to upload to AzureML """ return self._dict_summary(lambda res: res.global_inference, props=self._point_props(alpha), kind='global', n=1, row_wise=row_wise, drop_sample=True, expand_arr=True) def cohort_causal_effect(self, Xtest, *, alpha=0.05, keep_all_levels=False): """ Gets the average causal effects for a particular cohort defined by a population of X's. 
Parameters ---------- Xtest : array-like The cohort samples for which to return the average causal effects within cohort alpha : float, default 0.05 The confidence level of the confidence interval keep_all_levels : bool, default False Whether to keep all levels of the output dataframe ('outcome', 'feature', and 'feature_level') even if there was only a single value for that level; by default single-valued levels are dropped. Returns ------- cohort_effects : pandas Dataframe DataFrame with the following structure: :Columns: ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper'] :Index: ['feature', 'feature_value'] :Rows: For each feature that is numerical, we have an entry with index ['{feature_name}', 'num'], where 'num' is literally the string 'num' and feature_name is the input feature name. For each feature that is categorical, we have an entry with index ['{feature_name}', '{cat}v{base}'] where cat is the category value and base is the category used as baseline. If all features are numerical then the feature_value index is dropped in the dataframe, but not in the serialized dict. """ return self._pandas_summary(self._cohort_effect_inference(Xtest), props=self._summary_props(alpha), n=1, expand_arr=True, keep_all_levels=keep_all_levels) def _cohort_causal_effect_dict(self, Xtest, *, alpha=0.05, row_wise=False): """ Gets the cohort causal effects for each feature as dictionary. Dictionary entries for predictions, etc. will be nested lists of shape (d_y, sum(d_t)) Only for serialization purposes to upload to AzureML """ return self._dict_summary(self._cohort_effect_inference(Xtest), props=self._summary_props(alpha), kind='cohort', n=1, row_wise=row_wise, expand_arr=True, drop_sample=True) def local_causal_effect(self, Xtest, *, alpha=0.05, keep_all_levels=False): """ Gets the local causal effect for each feature as a pandas DataFrame. 
Parameters ---------- Xtest : array-like The samples for which to return the causal effects alpha : float, default 0.05 The confidence level of the confidence interval keep_all_levels : bool, default False Whether to keep all levels of the output dataframe ('sample', 'outcome', 'feature', and 'feature_level') even if there was only a single value for that level; by default single-valued levels are dropped. Returns ------- global_effect : pandas Dataframe DataFrame with the following structure: :Columns: ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper'] :Index: ['sample', 'feature', 'feature_value'] :Rows: For each feature that is numeric, we have an entry with index ['{sampleid}', '{feature_name}', 'num'], where 'num' is literally the string 'num' and feature_name is the input feature name and sampleid is the index of the sample in Xtest. For each feature that is categorical, we have an entry with index ['{sampleid', '{feature_name}', '{cat}v{base}'] where cat is the category value and base is the category used as baseline. If all features are numerical then the feature_value index is dropped in the dataframe, but not in the serialized dict. """ return self._pandas_summary(self._local_effect_inference(Xtest), props=self._point_props(alpha), n=Xtest.shape[0], keep_all_levels=keep_all_levels) def _local_causal_effect_dict(self, Xtest, *, alpha=0.05, row_wise=False): """ Gets the local feature importance as dictionary Dictionary entries for predictions, etc. will be nested lists of shape (n_rows, d_y, sum(d_t)) Only for serialization purposes to upload to AzureML """ return self._dict_summary(self._local_effect_inference(Xtest), props=self._point_props(alpha), kind='local', n=Xtest.shape[0], row_wise=row_wise) def whatif(self, X, Xnew, feature_index, y, *, alpha=0.05): """ Get counterfactual predictions when feature_index is changed to Xnew from its observational counterpart. 
Note that this only applies to regression use cases; for classification what-if analysis is not supported. Parameters ---------- X: array-like Features Xnew: array-like New values of a single column of X feature_index: int or string The index of the feature being varied to Xnew, either as a numeric index or the string name if the input is a dataframe y: array-like Observed labels or outcome of a predictive model for baseline y values alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. A (1-alpha)*100% confidence interval is displayed. Returns ------- y_new: DataFrame The predicted outputs that would have been observed under the counterfactual features """ return self._whatif_inference(X, Xnew, feature_index, y).summary_frame(alpha=alpha) def _whatif_dict(self, X, Xnew, feature_index, y, *, alpha=0.05, row_wise=False): """ Get counterfactual predictions when feature_index is changed to Xnew from its observational counterpart. Note that this only applies to regression use cases; for classification what-if analysis is not supported. Parameters ---------- X: array-like Features Xnew: array-like New values of a single column of X feature_index: int or string The index of the feature being varied to Xnew, either as a numeric index or the string name if the input is a dataframe y: array-like Observed labels or outcome of a predictive model for baseline y values alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. A (1-alpha)*100% confidence interval is displayed. 
row_wise : boolean, default False Whether to return a list of dictionaries (one dictionary per row) instead of a dictionary of lists (one list per column) Returns ------- dict : dict The counterfactual predictions, as a dictionary """ inf = self._whatif_inference(X, Xnew, feature_index, y) props = self._point_props(alpha=alpha) res = _get_default_specific_insights('whatif') if row_wise: row_data = {} # remove entries belonging to row data, since we're including them in the list of nested dictionaries for k in _get_data_causal_insights_keys(): del res[k] row_data.update([(key, self._make_accessor(attr)(inf).flatten()) for key, attr in props]) # get the length of the list corresponding to the first dictionary key # `list(row_data)` gets the keys as a list, since `row_data.keys()` can't be indexed into n_rows = len(row_data[list(row_data)[0]]) res[_CausalInsightsConstants.RowData] = [{key: row_data[key][i] for key in row_data} for i in range(n_rows)] else: res.update([(key, self._make_accessor(attr)(inf).tolist()) for key, attr in props]) return res # TODO: it seems like it would be better to just return the tree itself rather than plot it; # however, the tree can't store the feature and treatment names we compute here... def plot_policy_tree(self, Xtest, feature_index, *, treatment_costs=0, max_depth=3, min_samples_leaf=2, min_value_increase=1e-4, include_model_uncertainty=False, alpha=0.05): """ Plot a recommended policy tree using matplotlib. Parameters ---------- X : array-like Features feature_index Index of the feature to be considered as treament treatment_costs: array-like, default 0 Cost of treatment, as a scalar value or per-sample. 
For continuous features this is the marginal cost per unit of treatment; for discrete features, this is the difference in cost between each of the non-default values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1)) max_depth : int, default 3 maximum depth of the tree min_samples_leaf : int, default 2 minimum number of samples on each leaf min_value_increase : float, default 1e-4 The minimum increase in the policy value that a split needs to create to construct it include_model_uncertainty : bool, default False Whether to include confidence interval information when building a simplified model of the cate model. alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. A (1-alpha)*100% confidence interval is displayed. """ intrp, feature_names, treatment_names, _ = self._tree(True, Xtest, feature_index, treatment_costs=treatment_costs, max_depth=max_depth, min_samples_leaf=min_samples_leaf, min_impurity_decrease=min_value_increase, include_model_uncertainty=include_model_uncertainty, alpha=alpha) return intrp.plot(feature_names=feature_names, treatment_names=treatment_names) def _policy_tree_output(self, Xtest, feature_index, *, treatment_costs=0, max_depth=3, min_samples_leaf=2, min_value_increase=1e-4, alpha=0.05): """ Get a tuple of policy outputs. The first item in the tuple is the recommended policy tree expressed as a dictionary. The second item is the per-unit-average value of applying the learned policy; if the feature is continuous this means the gain from increasing the treatment by 10% of the typical amount for units where the treatment should be increased and decreasing the treatment by 10% of the typical amount when not. The third item is the value of always treating. This is a list, with one entry per non-control-treatment for discrete features, or just a single entry for continuous features, again increasing by 10% of a typical amount. 
Parameters ---------- X : array-like Features feature_index Index of the feature to be considered as treament treatment_costs: array-like, default 0 Cost of treatment, as a scalar value or per-sample. For continuous features this is the marginal cost per unit of treatment; for discrete features, this is the difference in cost between each of the non-default values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1)) max_depth : int, default 3 maximum depth of the tree min_samples_leaf : int, default 2 minimum number of samples on each leaf min_value_increase : float, default 1e-4 The minimum increase in the policy value that a split needs to create to construct it alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. A (1-alpha)*100% confidence interval is displayed. Returns ------- output : _PolicyOutput """ (intrp, feature_names, treatment_names, (policy_val, always_trt)) = self._tree(True, Xtest, feature_index, treatment_costs=treatment_costs, max_depth=max_depth, min_samples_leaf=min_samples_leaf, min_impurity_decrease=min_value_increase, alpha=alpha) return _PolicyOutput(_tree_interpreter_to_dict(intrp, feature_names, policy_data), policy_val, {treatment_names[i + 1]: val for (i, val) in enumerate(always_trt.tolist())}, treatment_names[0]) # TODO: it seems like it would be better to just return the tree itself rather than plot it; # however, the tree can't store the feature and treatment names we compute here... def plot_heterogeneity_tree(self, Xtest, feature_index, *, max_depth=3, min_samples_leaf=2, min_impurity_decrease=1e-4, include_model_uncertainty=False, alpha=0.05): """ Plot an effect hetergoeneity tree using matplotlib. 
Parameters ---------- X : array-like Features feature_index Index of the feature to be considered as treament max_depth : int, default 3 maximum depth of the tree min_samples_leaf : int, default 2 minimum number of samples on each leaf min_impurity_decrease : float, default 1e-4 The minimum decrease in the impurity/uniformity of the causal effect that a split needs to achieve to construct it include_model_uncertainty : bool, default False Whether to include confidence interval information when building a simplified model of the cate model. alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. A (1-alpha)*100% confidence interval is displayed. """ intrp, feature_names, treatment_names, _ = self._tree(False, Xtest, feature_index, max_depth=max_depth, min_samples_leaf=min_samples_leaf, min_impurity_decrease=min_impurity_decrease, include_model_uncertainty=include_model_uncertainty, alpha=alpha) return intrp.plot(feature_names=feature_names, treatment_names=treatment_names) def _heterogeneity_tree_output(self, Xtest, feature_index, *, max_depth=3, min_samples_leaf=2, min_impurity_decrease=1e-4, include_model_uncertainty=False, alpha=0.05): """ Get an effect heterogeneity tree expressed as a dictionary. Parameters ---------- X : array-like Features feature_index Index of the feature to be considered as treament max_depth : int, optional (default=3) maximum depth of the tree min_samples_leaf : int, optional (default=2) minimum number of samples on each leaf min_impurity_decrease : float, optional (default=1e-4) The minimum decrease in the impurity/uniformity of the causal effect that a split needs to achieve to construct it include_model_uncertainty : bool, default False Whether to include confidence interval information when building a simplified model of the cate model. alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. 
A (1-alpha)*100% confidence interval is displayed. """ intrp, feature_names, _, _ = self._tree(False, Xtest, feature_index, max_depth=max_depth, min_samples_leaf=min_samples_leaf, min_impurity_decrease=min_impurity_decrease, include_model_uncertainty=include_model_uncertainty, alpha=alpha) return _tree_interpreter_to_dict(intrp, feature_names, hetero_data) def individualized_policy(self, Xtest, feature_index, *, n_rows=None, treatment_costs=0, alpha=0.05): """ Get individualized treatment policy based on the learned model for a feature, sorted by the predicted effect. Parameters ---------- Xtest: array-like Features feature_index: int or string Index of the feature to be considered as treatment n_rows: int, optional How many rows to return (all rows by default) treatment_costs: array-like, default 0 Cost of treatment, as a scalar value or per-sample. For continuous features this is the marginal cost per unit of treatment; for discrete features, this is the difference in cost between each of the non-default values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1)) alpha: float in [0, 1], default 0.05 Confidence level of the confidence intervals A (1-alpha)*100% confidence interval is returned Returns ------- output: DataFrame Dataframe containing recommended treatment, effect, confidence interval, sorted by effect """ result = self._safe_result_index(Xtest, feature_index) # get dataframe with all but selected column orig_df = pd.DataFrame(Xtest, columns=self.feature_names_).rename( columns={self.feature_names_[result.feature_index]: 'Current treatment'}) Xtest = result.X_transformer.transform(Xtest) if Xtest.shape[1] == 0: x_rows = Xtest.shape[0] Xtest = None if result.feature_baseline is None: # apply 10% of a typical treatment for this feature effect = result.estimator.effect_inference(Xtest, T1=result.treatment_value * 0.1) else: effect = result.estimator.const_marginal_effect_inference(Xtest) if Xtest is None: # we got a scalar 
effect although our original X may have had more rows effect = effect._expand_outputs(x_rows) multi_y = (not self._vec_y) or self.classification if multi_y and result.feature_baseline is not None and np.ndim(treatment_costs) == 2: # we've got treatment costs of shape (n, d_t-1) so we need to add a y dimension to broadcast safely treatment_costs = np.expand_dims(treatment_costs, 1) effect.translate(-treatment_costs) est = effect.point_estimate est_lb = effect.conf_int(alpha)[0] est_ub = effect.conf_int(alpha)[1] if multi_y: # y was an array, not a vector est = np.squeeze(est, 1) est_lb = np.squeeze(est_lb, 1) est_ub = np.squeeze(est_ub, 1) if result.feature_baseline is None: rec = np.empty(est.shape[0], dtype=object) rec[est > 0] = "increase" rec[est <= 0] = "decrease" # set the effect bounds; for positive treatments these agree with # the estimates; for negative treatments, we need to invert the interval eff_lb, eff_ub = est_lb, est_ub eff_lb[est <= 0], eff_ub[est <= 0] = -eff_ub[est <= 0], -eff_lb[est <= 0] # the effect is now always positive since we decrease treatment when negative eff = np.abs(est) else: # for discrete treatment, stack a zero result in front for control zeros = np.zeros((est.shape[0], 1)) all_effs = np.hstack([zeros, est]) eff_ind = np.argmax(all_effs, axis=1) treatment_arr = np.array([result.feature_baseline] + [lvl for lvl in result.feature_levels], dtype=object) rec = treatment_arr[eff_ind] # we need to call effect_inference to get the correct CI between the two treatment options effect = result.estimator.effect_inference(Xtest, T0=orig_df['Current treatment'], T1=rec) # we now need to construct the delta in the cost between the two treatments and translate the effect current_treatment = orig_df['Current treatment'].values if np.ndim(treatment_costs) >= 2: # remove third dimenions potentially added if multi_y: # y was an array, not a vector treatment_costs = np.squeeze(treatment_costs, 1) assert treatment_costs.shape[1] == len(treatment_arr) 
- 1, ("If treatment costs are an array, " " they must be of shape (n, d_t-1)," " where n is the number of samples" " and d_t the number of treatment" " categories.") all_costs = np.hstack([zeros, treatment_costs]) # find cost of current treatment: equality creates a 2d array with True on each row, # only if its the location of the current treatment. Then we take the corresponding cost. current_cost = all_costs[current_treatment.reshape(-1, 1) == treatment_arr.reshape(1, -1)] target_cost = np.take_along_axis(all_costs, eff_ind.reshape(-1, 1), 1).reshape(-1) else: assert isinstance(treatment_costs, (int, float)), ("Treatments costs should either be float or " "a 2d array of size (n, d_t-1).") all_costs = np.array([0] + [treatment_costs] * (len(treatment_arr) - 1)) # construct index of current treatment current_ind = (current_treatment.reshape(-1, 1) == treatment_arr.reshape(1, -1)) @ np.arange(len(treatment_arr)) current_cost = all_costs[current_ind] target_cost = all_costs[eff_ind] delta_cost = current_cost - target_cost # add second dimension if needed for broadcasting during translation of effect if multi_y: delta_cost = np.expand_dims(delta_cost, 1) effect.translate(delta_cost) eff = effect.point_estimate eff_lb, eff_ub = effect.conf_int(alpha) if multi_y: # y was an array, not a vector eff = np.squeeze(eff, 1) eff_lb = np.squeeze(eff_lb, 1) eff_ub = np.squeeze(eff_ub, 1) df = pd.DataFrame({'Treatment': rec, 'Effect of treatment': eff, 'Effect of treatment lower bound': eff_lb, 'Effect of treatment upper bound': eff_ub}, index=orig_df.index) return df.join(orig_df).sort_values('Effect of treatment', ascending=False).head(n_rows) def _individualized_policy_dict(self, Xtest, feature_index, *, n_rows=None, treatment_costs=0, alpha=0.05): """ Get individualized treatment policy based on the learned model for a feature, sorted by the predicted effect. 
Parameters ---------- Xtest: array-like Features feature_index: int or string Index of the feature to be considered as treatment n_rows: int, optional How many rows to return (all rows by default) treatment_costs: array-like, default 0 Cost of treatment, as a scalar value or per-sample alpha: float in [0, 1], default 0.05 Confidence level of the confidence intervals A (1-alpha)*100% confidence interval is returned Returns ------- output: dictionary dictionary containing treatment policy, effects, and other columns """ return self.individualized_policy(Xtest, feature_index, n_rows=n_rows, treatment_costs=treatment_costs, alpha=alpha).to_dict('list') def typical_treatment_value(self, feature_index): """ Get the typical treatment value used for the specified feature Parameters ---------- feature_index: int or string The index of the feature to be considered as treatment Returns ------- treatment_value : float The treatment value considered 'typical' for this feature """ result = [res for res in self._results if res.feature_index == feature_index] if len(result) == 0: if self._has_column_names: result = [res for res in self._results if res.feature_name == feature_index] assert len(result) == 1, f"Could not find feature with index/name {feature_index}" return result[0].treatment_value else: raise ValueError(f"No feature with index {feature_index}") return result[0].treatment_value
[ 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 49962, 739, 262, 17168, 13789, 13, 198, 198, 37811, 26796, 329, 24171, 26558, 3895, 6817, 526, 15931, 198, 198, 11748, 14601, 198, 6738, 17268, 1330, 14230, 1068, 3...
2.329569
14,965
''' problem-- Given five positive integers, find the minimum and maximum values that can be calculated by summing exactly four of the five integers. Then print the respective minimum and maximum values as a single line of two space-separated long integers. For example, arr=[1,3,5,7,9]. Our minimum sum is 1+3+5+7=16 and our maximum sum is 3+5+7+9=24. We would print 16 24 Function Description-- Complete the miniMaxSum function in the editor below. It should print two space-separated integers on one line: the minimum sum and the maximum sum of 4 of 5 elements. miniMaxSum has the following parameter(s): arr: an array of 5 integers Input Format-- A single line of five space-separated integers. Constraints-- 1<arr[i]<=10^9 Output Format-- Print two space-separated long integers denoting the respective minimum and maximum values that can be calculated by summing exactly four of the five integers. (The output can be greater than a 32 bit integer.) Sample Input--- 1 2 3 4 5 Sample Output-- 10 14 ''' #code here #!/bin/python3 import math import os import random import re import sys if __name__ == '__main__': arr = list(map(int, input().rstrip().split())) miniMaxSum(arr)
[ 7061, 6, 198, 45573, 438, 198, 198, 15056, 1936, 3967, 37014, 11, 1064, 262, 5288, 290, 5415, 3815, 326, 460, 307, 10488, 416, 2160, 2229, 3446, 1440, 286, 262, 1936, 37014, 13, 3244, 3601, 262, 11756, 5288, 290, 5415, 3815, 355, 257,...
3.430595
353
from pprint import pprint from enum import Enum
[ 6738, 279, 4798, 1330, 279, 4798, 198, 6738, 33829, 1330, 2039, 388, 198 ]
3.692308
13
import argparse import copy import torch from torchvision.datasets import MNIST, CIFAR10 import torchvision.transforms as TF import torchelie as tch import torchelie.loss.gan.hinge as gan_loss from torchelie.recipes.gan import GANRecipe import torchelie.callbacks as tcb from torchelie.recipes import Recipe parser = argparse.ArgumentParser() parser.add_argument('--cpu', action='store_true') opts = parser.parse_args() device = 'cpu' if opts.cpu else 'cuda' BS = 32 tfms = TF.Compose([ TF.Resize(64), tch.transforms.AdaptPad((64, 64)), TF.RandomHorizontalFlip(), TF.ToTensor()]) ds = CIFAR10('~/.cache/torch/cifar10', download=True, transform=tfms) dl = torch.utils.data.DataLoader(ds, num_workers=4, batch_size=BS, shuffle=True) train_net(tch.models.autogan_64, tch.models.snres_discr_4l)
[ 11748, 1822, 29572, 198, 11748, 4866, 198, 198, 11748, 28034, 198, 198, 6738, 28034, 10178, 13, 19608, 292, 1039, 1330, 29060, 8808, 11, 327, 5064, 1503, 940, 198, 11748, 28034, 10178, 13, 7645, 23914, 355, 24958, 198, 198, 11748, 7332, ...
2.259259
405
print ("Hello Word!")
[ 4798, 5855, 15496, 9678, 2474, 8, 198 ]
3.142857
7
import xml.sax import rdflib from django.db import transaction from hs_core.serialization import GenericResourceMeta
[ 11748, 35555, 13, 82, 897, 198, 11748, 374, 67, 2704, 571, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 8611, 198, 198, 6738, 289, 82, 62, 7295, 13, 46911, 1634, 1330, 42044, 26198, 48526, 628, 198 ]
3.361111
36
from setuptools import setup import versioneer packages = ['pygdf', 'pygdf.tests', ] install_requires = [ 'numba', ] setup(name='pygdf', description="GPU Dataframe", version=versioneer.get_version(), classifiers=[ # "Development Status :: 4 - Beta", "Intended Audience :: Developers", # "Operating System :: OS Independent", "Programming Language :: Python", # "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.5", ], # Include the separately-compiled shared library author="Continuum Analytics, Inc.", packages=packages, package_data={ 'pygdf.tests': ['data/*.pickle'], }, install_requires=install_requires, license="BSD", cmdclass=versioneer.get_cmdclass(), )
[ 6738, 900, 37623, 10141, 1330, 9058, 198, 198, 11748, 2196, 28153, 628, 198, 43789, 796, 37250, 9078, 70, 7568, 3256, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 705, 9078, 70, 7568, 13, 41989, 3256, 198, 220, 220, 2...
2.40056
357
from functools import partial from .multiagentenv import MultiAgentEnv import sys import os REGISTRY = {} #REGISTRY["sc2"] = partial(env_fn, env=StarCraft2Env)
[ 6738, 1257, 310, 10141, 1330, 13027, 198, 6738, 764, 41684, 25781, 24330, 1330, 15237, 36772, 4834, 85, 198, 11748, 25064, 198, 11748, 28686, 198, 198, 31553, 1797, 40405, 796, 23884, 198, 2, 31553, 1797, 40405, 14692, 1416, 17, 8973, 796...
2.927273
55
import os import os.path import sqlite3 import logging from typing import List from gumtree_watchdog.types import Listing, Contract, ListingWithChatId TConn = sqlite3.Connection DB_PATH = os.environ.get('GUMTREE_DB')
[ 11748, 28686, 198, 11748, 28686, 13, 6978, 198, 11748, 44161, 578, 18, 198, 11748, 18931, 198, 6738, 19720, 1330, 7343, 198, 6738, 27142, 21048, 62, 8340, 9703, 13, 19199, 1330, 7343, 278, 11, 17453, 11, 7343, 278, 3152, 30820, 7390, 19...
3.04
75
#section [initial] parameter_list, accessor_list, check_list = \ _parameters_accessors_checks()
[ 2, 5458, 685, 36733, 60, 198, 198, 17143, 2357, 62, 4868, 11, 1895, 273, 62, 4868, 11, 2198, 62, 4868, 796, 3467, 198, 220, 220, 220, 4808, 17143, 7307, 62, 15526, 669, 62, 42116, 3419, 628 ]
2.833333
36
''' Chad Meadowcroft Credit to Sentdex (https://pythonprogramming.net/) ''' import asyncio if __name__ == '__main__': try: loop = asyncio.get_event_loop() loop.set_debug(1) d1, d2, d3 = loop.run_until_complete(main()) print(d1.result()) except Exception as e: pass finally: loop.close()
[ 7061, 6, 198, 1925, 324, 43868, 36714, 198, 23690, 284, 11352, 67, 1069, 357, 5450, 1378, 29412, 23065, 2229, 13, 3262, 34729, 198, 7061, 6, 198, 198, 11748, 30351, 952, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 103...
2.161491
161
from setuptools import setup import os with open("README.md", "r", encoding="utf-8") as f: long_description = f.read() requirements = [] if os.path.isfile("./requirements.txt"): with open("requirements.txt", "r") as f: requirements = f.read() requirements = [x for x in requirements.split("\n") if x != ""] setup( name="ma5_expert", version="0.0.1", description=("MadAnalysis 5 interpreter for Expert mode"), long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/jackaraz/ma5_expert", author="Jack Y. Araz", author_email=("jack.araz@durham.ac.uk"), license="MIT", packages=[ "ma5_expert", "ma5_expert.CutFlow", "ma5_expert.tools", ], install_requires=requirements, python_requires=">=3.6", classifiers=[ "Intended Audience :: Science/Research", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Topic :: Scientific/Engineering :: Physics", ], )
[ 6738, 900, 37623, 10141, 1330, 9058, 198, 11748, 28686, 198, 198, 4480, 1280, 7203, 15675, 11682, 13, 9132, 1600, 366, 81, 1600, 21004, 2625, 40477, 12, 23, 4943, 355, 277, 25, 198, 220, 220, 220, 890, 62, 11213, 796, 277, 13, 961, ...
2.497758
446
from rest_framework.authentication import BaseAuthentication from rest_framework import exceptions from rest_framework.parsers import JSONParser from django.conf import settings import requests from api_test.common import MD5 from api_test.models import ProjectMember from django.contrib.auth.models import User,Group from rest_framework.authtoken.models import Token ssoLogin=settings.SSO_LOGIN ssoClientId=settings.SSO_CLIENTID ssoClientSecret=settings.SSO_CLIENTSECRET ssoRedirectUrl=settings.SSO_REDIRECTURL ssoNotifyUrl=settings.SSO_NOTIFYURL ssoGetTicketUrl=settings.SSO_GETTICKETURL #ssotoken ssoValidateUrl=settings.SSO_VALIDATEURL ssoLoginUrl=settings.SSO_LOGINURL ssoLogoutUrl=settings.SSO_LOGOUTURL def permission_required(*permissions): ''' ''' return wrapper
[ 6738, 1334, 62, 30604, 13, 41299, 3299, 1330, 7308, 47649, 3299, 198, 6738, 1334, 62, 30604, 1330, 13269, 198, 6738, 1334, 62, 30604, 13, 79, 945, 364, 1330, 19449, 46677, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 11748, 7007...
3.054688
256
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: Qot_GetPriceReminder.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() import Common_pb2 as Common__pb2 import Qot_Common_pb2 as Qot__Common__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='Qot_GetPriceReminder.proto', package='Qot_GetPriceReminder', syntax='proto2', serialized_pb=_b('\n\x1aQot_GetPriceReminder.proto\x12\x14Qot_GetPriceReminder\x1a\x0c\x43ommon.proto\x1a\x10Qot_Common.proto\"k\n\x11PriceReminderItem\x12\x0b\n\x03key\x18\x01 \x02(\x03\x12\x0c\n\x04type\x18\x02 \x02(\x05\x12\r\n\x05value\x18\x03 \x02(\x01\x12\x0c\n\x04note\x18\x04 \x02(\t\x12\x0c\n\x04\x66req\x18\x05 \x02(\x05\x12\x10\n\x08isEnable\x18\x06 \x02(\x08\"r\n\rPriceReminder\x12&\n\x08security\x18\x01 \x02(\x0b\x32\x14.Qot_Common.Security\x12\x39\n\x08itemList\x18\x02 \x03(\x0b\x32\'.Qot_GetPriceReminder.PriceReminderItem\"=\n\x03\x43\x32S\x12&\n\x08security\x18\x01 \x01(\x0b\x32\x14.Qot_Common.Security\x12\x0e\n\x06market\x18\x02 \x01(\x05\"E\n\x03S2C\x12>\n\x11priceReminderList\x18\x01 \x03(\x0b\x32#.Qot_GetPriceReminder.PriceReminder\"1\n\x07Request\x12&\n\x03\x63\x32s\x18\x01 \x02(\x0b\x32\x19.Qot_GetPriceReminder.C2S\"j\n\x08Response\x12\x15\n\x07retType\x18\x01 \x02(\x05:\x04-400\x12\x0e\n\x06retMsg\x18\x02 \x01(\t\x12\x0f\n\x07\x65rrCode\x18\x03 \x01(\x05\x12&\n\x03s2c\x18\x04 \x01(\x0b\x32\x19.Qot_GetPriceReminder.S2CBJ\n\x13\x63om.futu.openapi.pbZ3github.com/futuopen/ftapi4go/pb/qotgetpricereminder') , dependencies=[Common__pb2.DESCRIPTOR,Qot__Common__pb2.DESCRIPTOR,]) _PRICEREMINDERITEM = _descriptor.Descriptor( name='PriceReminderItem', 
full_name='Qot_GetPriceReminder.PriceReminderItem', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='Qot_GetPriceReminder.PriceReminderItem.key', index=0, number=1, type=3, cpp_type=2, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='type', full_name='Qot_GetPriceReminder.PriceReminderItem.type', index=1, number=2, type=5, cpp_type=1, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='Qot_GetPriceReminder.PriceReminderItem.value', index=2, number=3, type=1, cpp_type=5, label=2, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='note', full_name='Qot_GetPriceReminder.PriceReminderItem.note', index=3, number=4, type=9, cpp_type=9, label=2, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='freq', full_name='Qot_GetPriceReminder.PriceReminderItem.freq', index=4, number=5, type=5, cpp_type=1, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='isEnable', full_name='Qot_GetPriceReminder.PriceReminderItem.isEnable', index=5, number=6, type=8, cpp_type=7, label=2, has_default_value=False, default_value=False, message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=84, serialized_end=191, ) _PRICEREMINDER = _descriptor.Descriptor( name='PriceReminder', full_name='Qot_GetPriceReminder.PriceReminder', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='security', full_name='Qot_GetPriceReminder.PriceReminder.security', index=0, number=1, type=11, cpp_type=10, label=2, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='itemList', full_name='Qot_GetPriceReminder.PriceReminder.itemList', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=193, serialized_end=307, ) _C2S = _descriptor.Descriptor( name='C2S', full_name='Qot_GetPriceReminder.C2S', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='security', full_name='Qot_GetPriceReminder.C2S.security', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='market', full_name='Qot_GetPriceReminder.C2S.market', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=309, serialized_end=370, ) _S2C = _descriptor.Descriptor( name='S2C', full_name='Qot_GetPriceReminder.S2C', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='priceReminderList', full_name='Qot_GetPriceReminder.S2C.priceReminderList', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=372, serialized_end=441, ) _REQUEST = _descriptor.Descriptor( name='Request', full_name='Qot_GetPriceReminder.Request', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='c2s', full_name='Qot_GetPriceReminder.Request.c2s', index=0, number=1, type=11, cpp_type=10, label=2, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=443, serialized_end=492, ) _RESPONSE = _descriptor.Descriptor( name='Response', full_name='Qot_GetPriceReminder.Response', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='retType', full_name='Qot_GetPriceReminder.Response.retType', index=0, number=1, type=5, cpp_type=1, label=2, has_default_value=True, default_value=-400, message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='retMsg', full_name='Qot_GetPriceReminder.Response.retMsg', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='errCode', full_name='Qot_GetPriceReminder.Response.errCode', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='s2c', full_name='Qot_GetPriceReminder.Response.s2c', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=494, serialized_end=600, ) _PRICEREMINDER.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY _PRICEREMINDER.fields_by_name['itemList'].message_type = _PRICEREMINDERITEM _C2S.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY _S2C.fields_by_name['priceReminderList'].message_type = _PRICEREMINDER _REQUEST.fields_by_name['c2s'].message_type = _C2S _RESPONSE.fields_by_name['s2c'].message_type = _S2C DESCRIPTOR.message_types_by_name['PriceReminderItem'] = _PRICEREMINDERITEM DESCRIPTOR.message_types_by_name['PriceReminder'] = _PRICEREMINDER DESCRIPTOR.message_types_by_name['C2S'] = _C2S DESCRIPTOR.message_types_by_name['S2C'] = _S2C DESCRIPTOR.message_types_by_name['Request'] = _REQUEST DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE 
_sym_db.RegisterFileDescriptor(DESCRIPTOR) PriceReminderItem = _reflection.GeneratedProtocolMessageType('PriceReminderItem', (_message.Message,), dict( DESCRIPTOR = _PRICEREMINDERITEM, __module__ = 'Qot_GetPriceReminder_pb2' # @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.PriceReminderItem) )) _sym_db.RegisterMessage(PriceReminderItem) PriceReminder = _reflection.GeneratedProtocolMessageType('PriceReminder', (_message.Message,), dict( DESCRIPTOR = _PRICEREMINDER, __module__ = 'Qot_GetPriceReminder_pb2' # @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.PriceReminder) )) _sym_db.RegisterMessage(PriceReminder) C2S = _reflection.GeneratedProtocolMessageType('C2S', (_message.Message,), dict( DESCRIPTOR = _C2S, __module__ = 'Qot_GetPriceReminder_pb2' # @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.C2S) )) _sym_db.RegisterMessage(C2S) S2C = _reflection.GeneratedProtocolMessageType('S2C', (_message.Message,), dict( DESCRIPTOR = _S2C, __module__ = 'Qot_GetPriceReminder_pb2' # @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.S2C) )) _sym_db.RegisterMessage(S2C) Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict( DESCRIPTOR = _REQUEST, __module__ = 'Qot_GetPriceReminder_pb2' # @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.Request) )) _sym_db.RegisterMessage(Request) Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict( DESCRIPTOR = _RESPONSE, __module__ = 'Qot_GetPriceReminder_pb2' # @@protoc_insertion_point(class_scope:Qot_GetPriceReminder.Response) )) _sym_db.RegisterMessage(Response) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.futu.openapi.pbZ3github.com/futuopen/ftapi4go/pb/qotgetpricereminder')) # @@protoc_insertion_point(module_scope)
[ 2, 2980, 515, 416, 262, 8435, 11876, 17050, 13, 220, 8410, 5626, 48483, 0, 198, 2, 2723, 25, 1195, 313, 62, 3855, 18124, 8413, 5540, 13, 1676, 1462, 198, 198, 11748, 25064, 198, 62, 65, 28, 17597, 13, 9641, 62, 10951, 58, 15, 60, ...
2.378686
5,358
from django.db import models from django.db.models.deletion import CASCADE from accounts.models import User
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 42625, 14208, 13, 9945, 13, 27530, 13, 2934, 1616, 295, 1330, 35106, 34, 19266, 198, 198, 6738, 5504, 13, 27530, 1330, 11787, 198 ]
3.40625
32
import time import numpy as np import vtk from vtk.util import numpy_support from svtk.lib.toolbox.integer import minmax from svtk.lib.toolbox.idarray import IdArray from svtk.lib.toolbox.numpy_helpers import normalize import math as m
[ 11748, 640, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 410, 30488, 198, 6738, 410, 30488, 13, 22602, 1330, 299, 32152, 62, 11284, 198, 198, 6738, 38487, 30488, 13, 8019, 13, 25981, 3524, 13, 41433, 1330, 949, 9806, 198, 6738, ...
3.025316
79
import time import numpy as np from tqdm import tqdm from utils import RandomCNOT, RandomCNOTs def RandomSearch(cnot_creater, solver, epochs=100, save_path=None): ''' Parameters: cnot_creater: CNOT solver: epochs: save_path: ''' best_score = 0 start_time = time.time() for epoch in range(epochs): cnot_layers = cnot_creater() sc, model = solver(cnot_layers) if sc>best_score: best_score = sc best_model = model if save_path is not None: with open(save_path, 'w') as f: f.write(best_model) print('No_%d: score = %g, best_score = %g, time = %gs'%(epoch, sc, best_score, time.time()-start_time)) # print(best_model) return best_score, best_model
[ 11748, 640, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 256, 80, 36020, 1330, 256, 80, 36020, 198, 198, 6738, 3384, 4487, 1330, 14534, 34, 11929, 11, 14534, 34, 11929, 82, 628, 198, 4299, 14534, 18243, 7, 66, 1662, 62, 20123, 263, ...
2.004878
410
#!/usr/bin/env python # # Copyright 2013 Darragh Bailey # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import yaml from testtools import ExpectedException from yaml.composer import ComposerError from jenkins_jobs.config import JJBConfig from jenkins_jobs.parser import YamlParser from tests import base
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 198, 2, 15069, 2211, 360, 3258, 10471, 20330, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 42...
3.692308
221
from bot_interface import * import math GameState(SeijiBot()).connect()
[ 6738, 10214, 62, 39994, 1330, 1635, 198, 11748, 10688, 198, 198, 8777, 9012, 7, 4653, 20770, 20630, 3419, 737, 8443, 3419, 198 ]
3.318182
22
_base_ = "./resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO_01_02MasterChefCan.py" OUTPUT_DIR = ( "output/gdrn/ycbvPbrSO/resnest50d_AugCosyAAEGray_BG05_visib10_mlBCE_DoubleMask_ycbvPbr100e_SO/09_10PottedMeatCan" ) DATASETS = dict(TRAIN=("ycbv_010_potted_meat_can_train_pbr",))
[ 62, 8692, 62, 796, 366, 19571, 411, 77, 395, 1120, 67, 62, 12512, 36734, 88, 3838, 7156, 2433, 62, 40469, 2713, 62, 4703, 571, 940, 62, 4029, 2749, 36, 62, 25628, 45195, 62, 88, 21101, 47322, 1671, 3064, 68, 62, 15821, 62, 486, 62...
1.948718
156
from datetime import date, datetime, timedelta import time START_DATE = date(2021, 5, 25) duration = timedelta(days=100)
[ 6738, 4818, 8079, 1330, 3128, 11, 4818, 8079, 11, 28805, 12514, 198, 11748, 640, 198, 198, 2257, 7227, 62, 35, 6158, 796, 3128, 7, 1238, 2481, 11, 642, 11, 1679, 8, 198, 32257, 796, 28805, 12514, 7, 12545, 28, 3064, 8, 628 ]
2.928571
42
# https://leetcode.com/problems/palindrome-partitioning/
[ 2, 3740, 1378, 293, 316, 8189, 13, 785, 14, 1676, 22143, 14, 18596, 521, 5998, 12, 3911, 653, 278, 14, 198, 220, 220, 220, 220, 220, 220, 220, 220 ]
2.241379
29
import numpy as np import keras import random from keras.datasets import mnist from keras import backend as K K.set_floatx('float64') if __name__ == "__main__": # m = Mnist() # # res = m.partitioned_by_rows(9) # # print(res["test"][1].shape) # for _ in range(10): # print(m.gen_dummy_non_iid_weights()) fake_data = Mnist().fake_non_iid_data(min_train=10,max_train=10,data_split=(0.6, 0.3, 0.1)) train_data, test_data, valid_data = fake_data x_train, y_train = train_data x_test, y_test = test_data x_valid, y_valid = valid_data print(y_valid)
[ 11748, 299, 32152, 355, 45941, 198, 11748, 41927, 292, 198, 11748, 4738, 198, 6738, 41927, 292, 13, 19608, 292, 1039, 1330, 285, 77, 396, 198, 6738, 41927, 292, 1330, 30203, 355, 509, 198, 42, 13, 2617, 62, 22468, 87, 10786, 22468, 24...
2.270992
262
# pylint: disable=C0121 """http://www.logilab.org/ticket/124337""" import gtk def print_some_constant(arg=gtk.BUTTONS_OK): """crash because gtk.BUTTONS_OK, a gtk enum type, is returned by astroid as a constant """ print(arg)
[ 2, 279, 2645, 600, 25, 15560, 28, 34, 486, 2481, 201, 198, 37811, 4023, 1378, 2503, 13, 6404, 346, 397, 13, 2398, 14, 43350, 14, 17464, 31496, 37811, 201, 198, 201, 198, 11748, 308, 30488, 201, 198, 201, 198, 4299, 3601, 62, 11246, ...
2.236842
114
#!/usr/bin/env python # -*- coding: utf-8 -*- """ __title__ = '' __author__ = 'HaiFeng' __mtime__ = '2016/8/16' """ import time from py_at.EnumDefine import * ########################################################################
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 834, 7839, 834, 796, 10148, 198, 834, 9800, 834, 796, 705, 39, 1872, 37, 1516, 6, 198, 834, 76, 2435, 8...
3.025974
77
from __future__ import print_function, division import numpy as np from numpy import identity, dot, zeros, zeros_like def rf_den_via_rf0(self, rf0, v): """ Whole matrix of the interacting response via non-interacting response and interaction""" rf = zeros_like(rf0) I = identity(rf0.shape[1]) for ir,r in enumerate(rf0): rf[ir] = dot(np.linalg.inv(I-dot(r,v)), r) return rf def rf_den(self, ww): """ Full matrix interacting response from NAO GW class""" rf0 = self.rf0(ww) return rf_den_via_rf0(self, rf0, self.kernel_sq)
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 11, 7297, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 299, 32152, 1330, 5369, 11, 16605, 11, 1976, 27498, 11, 1976, 27498, 62, 2339, 198, 198, 4299, 374, 69, 62, 6559, 62, 8869, 62, ...
2.592417
211
from app.model.model import Model
[ 6738, 598, 13, 19849, 13, 19849, 1330, 9104, 628 ]
3.888889
9
import numpy as np import pytest from numpy.testing import assert_allclose from numpy.testing import assert_array_equal from sklearn.ensemble._hist_gradient_boosting.histogram import ( _build_histogram_naive, _build_histogram, _build_histogram_no_hessian, _build_histogram_root_no_hessian, _build_histogram_root, _subtract_histograms ) from sklearn.ensemble._hist_gradient_boosting.types import HISTOGRAM_DTYPE from sklearn.ensemble._hist_gradient_boosting.types import G_H_DTYPE from sklearn.ensemble._hist_gradient_boosting.types import X_BINNED_DTYPE
[ 11748, 299, 32152, 355, 45941, 198, 11748, 12972, 9288, 198, 198, 6738, 299, 32152, 13, 33407, 1330, 6818, 62, 439, 19836, 198, 6738, 299, 32152, 13, 33407, 1330, 6818, 62, 18747, 62, 40496, 198, 198, 6738, 1341, 35720, 13, 1072, 11306,...
2.794258
209
from django.conf.urls import patterns, include, url from django.contrib import admin from gmappolygons import views urlpatterns = patterns('', url(r'^$', views.index, name='index'), url(r'^search', views.search, name='search'), url(r'^submit/$', views.submit, name='submit'), url(r'^show/(?P<area_id>\d+)/', views.show, name='show'), )
[ 6738, 42625, 14208, 13, 10414, 13, 6371, 82, 1330, 7572, 11, 2291, 11, 19016, 198, 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 308, 76, 1324, 3366, 70, 684, 1330, 5009, 198, 198, 6371, 33279, 82, 796, 7572, 10786, 3256,...
2.684615
130
from django.db import models from django import forms from django.contrib.auth.models import User from PIL import Image from django.utils.timezone import now ## User Update Profile
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 42625, 14208, 1330, 220, 5107, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 11787, 198, 6738, 350, 4146, 1330, 7412, 198, 6738, 42625, 14208, 13, 26791, 13, 243...
3.607843
51
import os import logging import numpy as np from typing import Optional import torch from torch.utils.data import DataLoader from ..eval import Metric from .dataset import CHMMBaseDataset from .dataset import collate_fn as default_collate_fn logger = logging.getLogger(__name__) OUT_RECALL = 0.9 OUT_PRECISION = 0.8 def initialize_matrices(self): """ Initialize <HMM> transition and emission matrices Returns ------- self """ assert self._training_dataset and self._valid_dataset # inject prior knowledge about transition and emission self._init_state_prior = torch.zeros(self._config.d_hidden, device=self._config.device) + 1e-2 self._init_state_prior[0] += 1 - self._init_state_prior.sum() intg_obs = list(map(np.array, self._training_dataset.obs + self._valid_dataset.obs)) # construct/load initial transition matrix dataset_dir = os.path.split(self._config.train_path)[0] transmat_path = os.path.join(dataset_dir, "init_transmat.pt") if getattr(self._config, "load_init_mat", False): if os.path.isfile(transmat_path): logger.info("Loading initial transition matrix from disk") self._init_trans_mat = torch.load(transmat_path) # if the loaded transmat does not have the proper shape, re-calculate it. 
s0_transmat, s1_transmat = self._init_trans_mat.shape if not (s0_transmat == s1_transmat == self.config.d_obs): self._init_trans_mat = None if self._init_trans_mat is None: self._init_trans_mat = torch.tensor(initialise_transmat( observations=intg_obs, label_set=self._config.bio_label_types )[0], dtype=torch.float) if getattr(self._config, "save_init_mat", False): logger.info("Saving initial transition matrix") torch.save(self._init_trans_mat, transmat_path) # construct/load initial emission matrix emissmat_path = os.path.join(dataset_dir, "init_emissmat.pt") if getattr(self._config, "load_init_mat", False): if os.path.isfile(emissmat_path): logger.info("Loading initial emission matrix from disk") self._init_emiss_mat = torch.load(emissmat_path) # if the loaded emissmat does not have the proper shape, re-calculate it. s0_emissmat, s1_emissmat, s2_emissmat = self._init_emiss_mat.shape if not (s0_emissmat == self.config.n_src) and (s1_emissmat == s2_emissmat == self.config.d_obs): self._init_emiss_mat = None if self._init_emiss_mat is None: self._init_emiss_mat = torch.tensor(initialise_emissions( observations=intg_obs, label_set=self._config.bio_label_types, sources=self._config.sources, src_priors=self._config.src_priors )[0], dtype=torch.float) if getattr(self._config, "save_init_mat", False): logger.info("Saving initial emission matrix") torch.save(self._init_emiss_mat, emissmat_path) return self def save(self, output_dir: Optional[str] = None, save_optimizer: Optional[bool] = False, model_name: Optional[str] = 'chmm', optimizer_name: Optional[str] = 'chmm-optimizer', pretrain_optimizer_name: Optional[str] = 'chmm-pretrain-optimizer'): """ Save model parameters as well as trainer parameters Parameters ---------- output_dir: model directory save_optimizer: whether to save optimizer model_name: model name (suffix free) optimizer_name: optimizer name (suffix free) pretrain_optimizer_name: pretrain optimizer name (suffix free) Returns ------- None """ output_dir = 
output_dir if output_dir is not None else self._config.output_dir logger.info(f"Saving model to {output_dir}") model_state_dict = self._model.state_dict() torch.save(model_state_dict, os.path.join(output_dir, f'{model_name}.bin')) self._config.save(output_dir) if save_optimizer: logger.info("Saving optimizer and scheduler") torch.save(self._optimizer.state_dict(), os.path.join(output_dir, f"{optimizer_name}.bin")) torch.save(self._pretrain_optimizer.state_dict(), os.path.join(output_dir, f"{pretrain_optimizer_name}.bin")) return None def load(self, input_dir: Optional[str] = None, load_optimizer: Optional[bool] = False, model_name: Optional[str] = 'chmm', optimizer_name: Optional[str] = 'chmm-optimizer', pretrain_optimizer_name: Optional[str] = 'chmm-pretrain-optimizer'): """ Load model parameters. Parameters ---------- input_dir: model directory load_optimizer: whether load other trainer parameters model_name: model name (suffix free) optimizer_name: optimizer name (suffix free) pretrain_optimizer_name: pretrain optimizer name (suffix free) Returns ------- self """ input_dir = input_dir if input_dir is not None else self._config.output_dir if self._model is not None: logger.warning(f"The original model {type(self._model)} in {type(self)} is not None. 
" f"It will be overwritten by the loaded model!") logger.info(f"Loading model from {input_dir}") self.initialize_model() self._model.load_state_dict(torch.load(os.path.join(input_dir, f'{model_name}.bin'))) self._model.to(self.config.device) if load_optimizer: logger.info("Loading optimizer and scheduler") if self._optimizer is None: self.initialize_optimizers() if os.path.isfile(os.path.join(input_dir, f"{optimizer_name}.bin")): self._optimizer.load_state_dict( torch.load(os.path.join(input_dir, f"{optimizer_name}.bin"), map_location=self.config.device) ) else: logger.warning("Optimizer file does not exist!") if os.path.isfile(os.path.join(input_dir, f"{pretrain_optimizer_name}.bin")): self._pretrain_optimizer.load_state_dict( torch.load(os.path.join(input_dir, f"{pretrain_optimizer_name}.bin")) ) else: logger.warning("Pretrain optimizer file does not exist!") return self def save_results(self, output_dir: str, valid_results: Optional[Metric] = None, file_name: Optional[str] = 'results', disable_final_valid: Optional[bool] = False, disable_test: Optional[bool] = False, disable_inter_results: Optional[bool] = False) -> None: """ Save training (validation) results Parameters ---------- output_dir: output directory, should be a folder valid_results: validation results during the training process file_name: file name disable_final_valid: disable final validation process (getting validation results of the trained model) disable_test: disable test process disable_inter_results: do not save inter-results Returns ------- None """ if not disable_final_valid: logger.info("Getting final validation metrics") valid_metrics = self.valid() else: valid_metrics = None if not disable_test: logger.info("Getting test metrics.") test_metrics = self.test() else: test_metrics = None # write validation and test results result_file = os.path.join(output_dir, f'{file_name}.txt') logger.info(f"Writing results to {result_file}") self.write_result(file_path=result_file, 
valid_results=valid_results, final_valid_metrics=valid_metrics, test_metrics=test_metrics) if not disable_inter_results: # save validation inter results logger.info(f"Saving inter results") inter_result_file = os.path.join(output_dir, f'{file_name}-inter.pt') torch.save(valid_results.__dict__, inter_result_file) return None def initialise_startprob(observations, label_set, src_idx=None): """ calculate initial hidden states (not used in our setup since our sequences all begin from [CLS], which corresponds to hidden state "O". :param src_idx: source index :param label_set: a set of all possible label_set :param observations: n_instances X seq_len X n_src X d_obs :return: probabilities for the initial hidden states """ n_src = observations[0].shape[1] logger.info("Constructing start distribution prior...") init_counts = np.zeros((len(label_set),)) if src_idx is not None: for obs in observations: init_counts[obs[0, src_idx].argmax()] += 1 else: for obs in observations: for z in range(n_src): init_counts[obs[0, z].argmax()] += 1 for i, label in enumerate(label_set): if i == 0 or label.startswith("B-"): init_counts[i] += 1 startprob_prior = init_counts + 1 startprob_ = np.random.dirichlet(init_counts + 1E-10) return startprob_, startprob_prior # TODO: try to use a more reliable source to start the transition and emission def initialise_transmat(observations, label_set, src_idx=None): """ initialize transition matrix :param src_idx: the index of the source of which the transition statistics is computed. 
If None, use all sources :param label_set: a set of all possible label_set :param observations: n_instances X seq_len X n_src X d_obs :return: initial transition matrix and transition counts """ logger.info("Constructing transition matrix prior...") n_src = observations[0].shape[1] trans_counts = np.zeros((len(label_set), len(label_set))) if src_idx is not None: for obs in observations: for k in range(0, len(obs) - 1): trans_counts[obs[k, src_idx].argmax(), obs[k + 1, src_idx].argmax()] += 1 else: for obs in observations: for k in range(0, len(obs) - 1): for z in range(n_src): trans_counts[obs[k, z].argmax(), obs[k + 1, z].argmax()] += 1 # update transition matrix with prior knowledge for i, label in enumerate(label_set): if label.startswith("B-") or label.startswith("I-"): trans_counts[i, label_set.index("I-" + label[2:])] += 1 elif i == 0 or label.startswith("I-"): for j, label2 in enumerate(label_set): if j == 0 or label2.startswith("B-"): trans_counts[i, j] += 1 transmat_prior = trans_counts + 1 # initialize transition matrix with dirichlet distribution transmat_ = np.vstack([np.random.dirichlet(trans_counts2 + 1E-10) for trans_counts2 in trans_counts]) return transmat_, transmat_prior def initialise_emissions(observations, label_set, sources, src_priors, strength=1000): """ initialize emission matrices :param sources: source names :param src_priors: source priors :param label_set: a set of all possible label_set :param observations: n_instances X seq_len X n_src X d_obs :param strength: Don't know what this is for :return: initial emission matrices and emission counts? 
""" logger.info("Constructing emission probabilities...") obs_counts = np.zeros((len(sources), len(label_set)), dtype=np.float64) # extract the total number of observations for each prior for obs in observations: obs_counts += obs.sum(axis=0) for source_index, source in enumerate(sources): # increase p(O) obs_counts[source_index, 0] += 1 # increase the "reasonable" observations for pos_index, pos_label in enumerate(label_set[1:]): if pos_label[2:] in src_priors[source]: obs_counts[source_index, pos_index] += 1 # construct probability distribution from counts obs_probs = obs_counts / (obs_counts.sum(axis=1, keepdims=True) + 1E-3) # initialize emission matrix matrix = np.zeros((len(sources), len(label_set), len(label_set))) for source_index, source in enumerate(sources): for pos_index, pos_label in enumerate(label_set): # Simple case: set P(O=x|Y=x) to be the recall recall = 0 if pos_index == 0: recall = OUT_RECALL elif pos_label[2:] in src_priors[source]: _, recall = src_priors[source][pos_label[2:]] matrix[source_index, pos_index, pos_index] = recall for pos_index2, pos_label2 in enumerate(label_set): if pos_index2 == pos_index: continue elif pos_index2 == 0: precision = OUT_PRECISION elif pos_label2[2:] in src_priors[source]: precision, _ = src_priors[source][pos_label2[2:]] else: precision = 1.0 # Otherwise, we set the probability to be inversely proportional to the precision # and the (unconditional) probability of the observation error_prob = (1 - recall) * (1 - precision) * (0.001 + obs_probs[source_index, pos_index2]) # We increase the probability for boundary errors (i.e. I-ORG -> B-ORG) if pos_index > 0 and pos_index2 > 0 and pos_label[2:] == pos_label2[2:]: error_prob *= 5 # We increase the probability for errors with same boundary (i.e. 
I-ORG -> I-GPE) if pos_index > 0 and pos_index2 > 0 and pos_label[0] == pos_label2[0]: error_prob *= 2 matrix[source_index, pos_index, pos_index2] = error_prob error_indices = [i for i in range(len(label_set)) if i != pos_index] error_sum = matrix[source_index, pos_index, error_indices].sum() matrix[source_index, pos_index, error_indices] /= (error_sum / (1 - recall) + 1E-5) emission_priors = matrix * strength emission_probs = matrix return emission_probs, emission_priors
[ 11748, 28686, 198, 11748, 18931, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 19720, 1330, 32233, 198, 198, 11748, 28034, 198, 6738, 28034, 13, 26791, 13, 7890, 1330, 6060, 17401, 198, 198, 6738, 11485, 18206, 1330, 3395, 1173, 198, 673...
2.192857
6,860
from random import Random from collections_extended import setlist # The version of seeding to use for random SEED_VERSION = 2 # Common alphabets to use ALPHANUM = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' BASE58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' def encode_base_n(num, base, min_length=0): '''Convert an integer into a list of integers storing the number in base base. If a minimum length is specified, the result will be 0-padded. ''' out = [] while num > 0 or len(out) < min_length: num, remainder = divmod(num, base) out.append(remainder) return out def decode_base_n(int_list, base): '''Convert a list of numbers representing a number in base base to an integer.''' out = 0 for index, num in enumerate(int_list): if num >= base or num < 0: raise ValueError out += (base ** index) * num return out def add_check_digits(int_list, base, num_check_chars): '''Calculate a checksum for int_list and translate into a number of base base made up of num_check_chars digits. Args: int_list: A list of integers >= 0 and < base base: The number of characters in the alphabet num_check_chars: The number of check characters to return Returns: A list of integers that represent the checksum in base base. ''' check_digits = calc_check_digits(int_list, base, num_check_chars) return int_list + check_digits def eval_check_digits(decrypted_ints, base, num_check_chars): '''Evaluate the check digits in decrypted_ints. 
Args: decrypted_ints: A list of integers >=0 and < base (the result of add_check_digits) Returns: The decrypted_ints without the check digits Raises: ValueError: if the check digits don't match ''' if num_check_chars == 0: return decrypted_ints int_list = decrypted_ints[:-num_check_chars] check_digits = decrypted_ints[-num_check_chars:] if calc_check_digits(int_list, base, num_check_chars) != check_digits: raise ValueError() return int_list def encode(int_list, alphabet): '''Encode ints using alphabet.''' char_list = [] for i in int_list: if i > len(alphabet) or i < 0: raise ValueError char_list.append(alphabet[i]) return ''.join(char_list) def decode(s, alphabet): '''Decode a string s using alphabet returning a list of ints.''' try: return [alphabet.index(c) for c in s] except (TypeError, IndexError): raise ValueError def obfuscate(num, key, alphabet, min_chars=0, num_check_chars=1): ''' Obfuscate num using key. This does some minor encryption by adding values to a key and a moving value. The moving value is so that one small change makes all of the resulting characters change. Args: num: The integer to obfuscate key: An int, string or bytes to generate key values (anything that can be passed to random.seed) alphabet: A list of characters to use for the alphabet min_chars: A minimum number of chars for the resulting string num_check_chars: The number of chars to use as a check Returns: A string encoding the number in the passed alphabet and encrypted with key. Raises: ValueError: if num is not a number or < 0 ''' try: if num < 0: raise ValueError() except TypeError: raise ValueError() base = len(alphabet) num_as_ints = encode_base_n(num, base, min_chars) unencrypted_digits = add_check_digits(num_as_ints, base, num_check_chars) encrypted_digits = encrypt(unencrypted_digits, key, base) return encode(encrypted_digits, alphabet) def deobfuscate(s, key, alphabet, num_check_chars=1): '''Deobfuscate a string using key and alphabet. 
key, alphabet and num_check_chars must be identical to the values used to obfuscate. Args: s: The string to deobfuscate key: The key used to obfuscate alphabet: The alphabet used to obfuscate num_check_chars: The number of chars to use as a check Returns: The deobfuscated integer. Raises: ValueError: if s isn't a string, s doesn't use alphabet or the checksum doesn't match ''' base = len(alphabet) encrypted_ints = decode(s, alphabet) decrypted_ints = decrypt(encrypted_ints, key, base) num_as_ints = eval_check_digits(decrypted_ints, base, num_check_chars) return decode_base_n(num_as_ints, base)
[ 198, 6738, 4738, 1330, 14534, 198, 198, 6738, 17268, 62, 2302, 1631, 1330, 900, 4868, 198, 198, 2, 383, 2196, 286, 384, 8228, 284, 779, 329, 4738, 198, 5188, 1961, 62, 43717, 796, 362, 198, 198, 2, 8070, 435, 746, 397, 1039, 284, ...
2.880248
1,453
"""Information for the outgoing response code - the HTTP response code (default is "200 Ok") headers - a list of key/value pairs used for the WSGI start_response """ code = None headers = [] def add_header(key, value): """Helper function to append (key, value) to the list of response headers""" headers.append( (key, value) ) # Eventually add cookie support?
[ 37811, 21918, 329, 262, 28181, 2882, 628, 220, 2438, 532, 262, 14626, 2882, 2438, 357, 12286, 318, 366, 2167, 6762, 4943, 198, 220, 24697, 532, 257, 1351, 286, 1994, 14, 8367, 14729, 973, 329, 262, 25290, 18878, 923, 62, 26209, 198, 1...
3.523364
107
jog = {} #pegando dados jog['Nome do jogador'] = str(input('Digite o nome do jogador: ')).strip().title() jog['Total partidas'] = int(input('Quantas partidas jogou: ')) #lista de gol gols = [] #Quantos gols em cada partida for i in range(0, jog['Total partidas']): gols.append(int(input(f'Quantos gols na partida {i}: '))) #total de gol totGols = 0 for g in gols: totGols += g #print(totGols) #adicionando dicionario jog['Total gols'] = totGols jog['Gols em partidas'] = gols #print(jog) #Mostrando resultados print(f'O jogador: {jog["Nome do jogador"]}, jogou {jog["Total partidas"]} partidas e ' f'marcou ao todo no campeonato {jog["Total gols"]} gols') print('Partidas:') for pos, v in enumerate(gols): print(f'Partida {pos}: {v} gols') ''' Esse programa vai analisar informaes de um jogador Primeiro criamos um dicionrio vazio, jog, e pedimos interaes ao usurio como nome e total de partidas criado uma lista vazia chamada gols, e assim entra no loop for para saber quantos gols em cada partida, usando o limite de 0 e o valor de total de partidas Para cada loop a lista gols da append() no valor Assim criado uma variavel de controle totGols zerada, e dentro do loop for, onde g iria rodar sobre gols Onde totGols iria incrimentar g, somando todos os gols Em seguida adicionamos ao dicionrio, com o indice total de gols e gols em partidas, pelo totGols e gols respectivamente No print ser mostrado os resultados, e por fim um loop com pos e v rodando sobre o enumarete() de gols para mostrar cada gols nas partidas '''
[ 73, 519, 796, 23884, 198, 198, 2, 22071, 25440, 9955, 418, 198, 73, 519, 17816, 45, 462, 466, 48342, 7079, 20520, 796, 965, 7, 15414, 10786, 19511, 578, 267, 299, 462, 466, 48342, 7079, 25, 705, 29720, 36311, 22446, 7839, 3419, 198, ...
2.536859
624
###################################################################### # # File: b2sdk/v1/account_info.py # # Copyright 2021 Backblaze Inc. All Rights Reserved. # # License https://www.backblaze.com/using_b2_code.html # ###################################################################### from abc import abstractmethod import inspect import logging import os from typing import Optional from b2sdk import _v2 as v2 from b2sdk.account_info.sqlite_account_info import DEFAULT_ABSOLUTE_MINIMUM_PART_SIZE from b2sdk.utils import limit_trace_arguments logger = logging.getLogger(__name__) # Retain legacy get_minimum_part_size and facilitate for optional s3_api_url # translate legacy "minimum_part_size" to new style "recommended_part_size"
[ 29113, 29113, 4242, 2235, 198, 2, 198, 2, 9220, 25, 275, 17, 21282, 74, 14, 85, 16, 14, 23317, 62, 10951, 13, 9078, 198, 2, 198, 2, 15069, 33448, 5157, 2436, 6201, 3457, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 13789, 3740, 13...
3.481481
216
# -*- coding: utf-8 -*- """ pbkdf2 hashing handler module. """ import hashlib import re import pyrin.configuration.services as config_services import pyrin.security.utils.services as security_utils_services from pyrin.security.hashing.decorators import hashing from pyrin.security.hashing.handlers.base import HashingBase from pyrin.security.hashing.handlers.exceptions import InvalidHashingRoundsCountError, \ InvalidPBKDF2InternalAlgorithmError, InvalidHashingSaltLengthError
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 40842, 74, 7568, 17, 49544, 21360, 8265, 13, 198, 37811, 198, 198, 11748, 12234, 8019, 198, 11748, 302, 198, 198, 11748, 279, 2417, 259, 13, 11250, 3924, 13,...
3.261745
149
'''Defines a pipeline step which prepares training and test data for named entity recognition. ''' import ast import json import pickle from mccore import EntityRecognizer from mccore import ner from mccore import persistence import pandas as pd from sklearn.utils import resample from src.step import Step
[ 7061, 6, 7469, 1127, 257, 11523, 2239, 543, 25978, 3047, 290, 1332, 1366, 329, 198, 13190, 9312, 9465, 13, 198, 198, 7061, 6, 198, 198, 11748, 6468, 198, 11748, 33918, 198, 11748, 2298, 293, 198, 198, 6738, 285, 535, 382, 1330, 20885,...
3.702381
84
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals __all__ = ['requireSklearn'] from jubakit.compat import PYTHON3 try: import embedded_jubatus embedded_available = True except ImportError: embedded_available = False try: import numpy import scipy import sklearn sklearn_available = True except ImportError: sklearn_available = False try: from unittest import skipUnless except ImportError:
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 11, 7297, 11, 3601, 62, 8818, 11, 28000, 1098, 62, 17201, 874, 198, 198, 834, 439, 834, 796, 37250, 46115, 15739, ...
3.078431
153
# -*- coding: utf-8 -*- """ Created on Wed Jun 28 13:03:05 2017 @author: ZHOU Yuncheng """ import cntk as C import _cntk_py import cntk.layers import cntk.initializer import cntk.losses import cntk.metrics import cntk.logging import cntk.io.transforms as xforms import cntk.io import cntk.train import os import numpy as np import yolo2 import CloneModel # default Paths relative to current python file. abs_path = os.path.dirname(os.path.abspath(__file__)) model_path = os.path.join(abs_path, "Models") # model dimensions image_height = 416 image_width = 416 num_channels = 3 # RGB num_truth_boxes = 14 box_dim = 5 # centerX, centerY, Width, Height, class_type num_classes = 3 # object type count. i.e. tomato, flower, stem, et, al. num_anchors = 5 model_name = "Yolo2Net.model" # Create a minibatch source. # Create the network. # Create trainer # Train and test # Train and evaluate the network. # # get train sample size evaluate sample size # if __name__=='__main__': anchor_data = 'anchor.txt' if not os.path.exists(anchor_data): raise RuntimeError("File '%s' does not exist." %anchor_data) anchors = open_anchor_file(anchor_data) if anchors.shape[0] < num_anchors: raise RuntimeError("Anchor dimension is less than %s" %num_anchors) # network = create_yolo2net(anchors) # cntk.logging.graph.plot(network['output'], 'yolo2.png') train_data = 'train.txt' train_rois = 'train.rois.txt' test_data = 'train.txt' test_rois = 'train.rois.txt' sample_size = get_sample_counts(train_data, test_data) net_train_and_eval(train_data, train_rois, test_data, test_rois, priors = anchors, epoch_size=sample_size[0], block_size = None, minibatch_size = 32, max_epochs = 130, log_to_file = 'Yolo2Net.log') # Must call MPI finalize when process exit without exceptions cntk.train.distributed.Communicator.finalize()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 3300, 7653, 2579, 1511, 25, 3070, 25, 2713, 2177, 198, 198, 31, 9800, 25, 1168, 46685, 20757, 2395, 782, 198, 37811, 198, 198, 11748, 269, 429, ...
2.23617
940
# -*- coding: utf-8 -*- # __author__= "Ruda" # Date: 2018/10/16 ''' import os from rongcloud import RongCloud app_key = os.environ['APP_KEY'] app_secret = os.environ['APP_SECRET'] rcloud = RongCloud(app_key, app_secret) r = rcloud.User.getToken(userId='userid1', name='username', portraitUri='http://www.rongcloud.cn/images/logo.png') print(r) {'token': 'P9YNVZ2cMQwwaADiNDVrtRZKF+J2pVPOWSNlYMA1yA1g49pxjZs58n4FEufsH9XMCHTk6nHR6unQTuRgD8ZS/nlbkcv6ll4x', 'userId': 'userid1', 'code': 200} r = rcloud.Message.publishPrivate( fromUserId='userId1', toUserId={"userId2","userid3","userId4"}, objectName='RC:VcMsg', content='{"content":"hello","extra":"helloExtra","duration":20}', pushContent='thisisapush', pushData='{"pushData":"hello"}', count='4', verifyBlacklist='0', isPersisted='0', isCounted='0') print(r) {'code': 200} ''' ''' More: https://github.com/rongcloud/server-sdk-python '''
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 11593, 9800, 834, 28, 366, 49, 15339, 1, 198, 2, 7536, 25, 2864, 14, 940, 14, 1433, 198, 198, 7061, 6, 198, 11748, 28686, 198, 6738, 374, 506, 17721, 1330, 371, ...
2.199536
431
from django.core.mail import EmailMessage from django.conf import settings
[ 6738, 42625, 14208, 13, 7295, 13, 4529, 1330, 9570, 12837, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 628, 198 ]
3.85
20
''' Author: your name Date: 2021-12-25 17:33:51 LastEditTime: 2021-12-29 10:10:14 LastEditors: Please set LastEditors Description: koroFileHeader : https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE FilePath: /yimingqin/code/WTAL-Uncertainty-Modeling/train.py ''' import torch import torch.nn as nn import numpy as np from collections import OrderedDict import utils
[ 7061, 6, 198, 13838, 25, 534, 1438, 198, 10430, 25, 33448, 12, 1065, 12, 1495, 1596, 25, 2091, 25, 4349, 198, 5956, 18378, 7575, 25, 33448, 12, 1065, 12, 1959, 838, 25, 940, 25, 1415, 198, 5956, 18378, 669, 25, 4222, 900, 4586, 18...
2.619048
147
############################################################################### # Copyright (c) 2017-2020 Koren Lev (Cisco Systems), # # Yaron Yogev (Cisco Systems), Ilia Abashin (Cisco Systems) and others # # # # All rights reserved. This program and the accompanying materials # # are made available under the terms of the Apache License, Version 2.0 # # which accompanies this distribution, and is available at # # http://www.apache.org/licenses/LICENSE-2.0 # ############################################################################### from scan.fetchers.cli.cli_fetcher import CliFetcher from scan.fetchers.db.db_access import DbAccess
[ 29113, 29113, 7804, 4242, 21017, 198, 2, 15069, 357, 66, 8, 2177, 12, 42334, 3374, 77, 16042, 357, 34, 4861, 11998, 828, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220,...
2.461078
334
"""`appengine_config` gets loaded when starting a new application instance.""" import vendor # insert `lib` as a site directory so our `main` module can load # third-party libraries, and override built-ins with newer # versions. vendor.add('lib') import os # Called only if the current namespace is not set.
[ 37811, 63, 1324, 18392, 62, 11250, 63, 3011, 9639, 618, 3599, 257, 649, 3586, 4554, 526, 15931, 198, 11748, 18371, 198, 2, 7550, 4600, 8019, 63, 355, 257, 2524, 8619, 523, 674, 4600, 12417, 63, 8265, 460, 3440, 198, 2, 2368, 12, 106...
3.814815
81
import KNN as K K.clearScreen() dataTraining= K.loadData("dataTraining.txt") X=dataTraining[:,0:3] initial_centroids=K.listToArray([[3, 3,3],[6, 2,4],[8,5,7]]) idx=K.KMean_Run(X,initial_centroids,5) K.SaveData(K.concatenateVectors(X,idx)) K.plotKNN2(X,idx)
[ 198, 198, 11748, 509, 6144, 355, 509, 628, 198, 42, 13, 20063, 23901, 3419, 198, 7890, 44357, 28, 509, 13, 2220, 6601, 7203, 7890, 44357, 13, 14116, 4943, 198, 198, 55, 28, 7890, 44357, 58, 45299, 15, 25, 18, 60, 628, 198, 36733, ...
2
135
#!/usr/bin/env python import pyqtgraph as pg from pyqtgraph import ViewBox from hummingbird.graphics.plotter_args import PlotBoxArgs from hummingbird.graphics.state_plot import StatePlot
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 11748, 12972, 80, 25297, 1470, 355, 23241, 198, 6738, 12972, 80, 25297, 1470, 1330, 3582, 14253, 198, 6738, 41465, 16944, 13, 70, 11549, 13, 29487, 353, 62, 22046, 1330, 28114, 14253, 42...
3.357143
56
import json #start print('start') with open('quizoutput.txt') as f: lines = f.readlines() print('loaded quiz data') print('changing to json') json_output = json.loads(lines[0]) print(json_output) with open('quizoutput.txt', 'w') as f: f.write(json_output) # for item in json_output: # print(item['question']) # print('done')
[ 11748, 33918, 198, 198, 2, 9688, 198, 4798, 10786, 9688, 11537, 198, 4480, 1280, 10786, 421, 528, 22915, 13, 14116, 11537, 355, 277, 25, 198, 220, 220, 220, 3951, 796, 277, 13, 961, 6615, 3419, 198, 220, 220, 220, 3601, 10786, 14578, ...
2.577778
135
#!/usr/bin/python import requests import boto3 import time import geopy.distance import xml.etree.ElementTree as ET import itertools import sys import pickle S3_BUCKET = "panku-gdzie-jestes-latest-storage" def lambda_handler(event, context): services = [Traficar, Veturilo, Panek] for service in services: print("==== Service %s" % service) service().getAndSaveLocations() return "OK"
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 11748, 7007, 198, 11748, 275, 2069, 18, 198, 11748, 640, 198, 11748, 4903, 11081, 13, 30246, 198, 11748, 35555, 13, 316, 631, 13, 20180, 27660, 355, 12152, 198, 11748, 340, 861, 10141, 1...
2.877698
139
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2017-12-08 18:42 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion def update_questionnaires(apps, schema_editor): """ Forward migration function to normalize settings into VMSettings and CloudSettings models :param apps: Django apps :param schema_editor: unused :return: None """ VMSettings = apps.get_model("data", "VMSettings") CloudSettings = apps.get_model("data", "CloudSettings") JobQuestionnaire = apps.get_model("data", "JobQuestionnaire") Job = apps.get_model("data", "Job") for q in JobQuestionnaire.objects.all(): # Create a cloud settings object with the VM project from the questionnaire. # Object initially just has the project name as its name cloud_settings, _ = CloudSettings.objects.get_or_create(name=q.vm_project.name, vm_project=q.vm_project) vm_settings, _ = VMSettings.objects.get_or_create(name=q.vm_project.name, cloud_settings=cloud_settings) q.vm_settings = vm_settings q.save()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 940, 13, 16, 319, 2177, 12, 1065, 12, 2919, 1248, 25, 3682, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, ...
2.857506
393
from datetime import datetime from . import db from config import COMMENTS_INITIAL_ENABLED from flask.ext.security import UserMixin, RoleMixin from markdown import markdown import bleach # Define models roles_users = db.Table( 'roles_users', db.Column('user_id', db.Integer(), db.ForeignKey('user.id')), db.Column('role_id', db.Integer(), db.ForeignKey('role.id'))) db.event.listen(User.about, 'set', User.on_changed_body) objects_tags = db.Table( 'object_tags', db.Column('object_id', db.Integer, db.ForeignKey('object.id')), db.Column('tag_id', db.Integer, db.ForeignKey('tag.id'))) db.event.listen(Object.body, 'set', Object.on_changed_body) db.event.listen(Comment.body, 'set', Comment.on_changed_body)
[ 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 764, 1330, 20613, 198, 6738, 4566, 1330, 9440, 28957, 62, 1268, 2043, 12576, 62, 1677, 6242, 30465, 198, 6738, 42903, 13, 2302, 13, 12961, 1330, 11787, 35608, 259, 11, 20934, 35608, 259, 19...
2.715328
274
''' If the child is currently on the nth step, then there are three possibilites as to how it reached there: 1. Reached (n-3)th step and hopped 3 steps in one time 2. Reached (n-2)th step and hopped 2 steps in one time 3. Reached (n-1)th step and hopped 2 steps in one time The total number of possibilities is the sum of these 3 ''' n=int(input()) store=[0 for i in range(n+1)] # Stores the number of possibilites for every i<n store[0]=0 store[1]=1 store[2]=2 store[3]=4 count_possibilities(n, store) print(store[n])
[ 7061, 6, 198, 1532, 262, 1200, 318, 3058, 319, 262, 299, 400, 2239, 11, 198, 8524, 612, 389, 1115, 1184, 571, 346, 2737, 355, 284, 703, 198, 270, 4251, 612, 25, 198, 198, 16, 13, 13618, 357, 77, 12, 18, 8, 400, 2239, 290, 45230,...
2.848649
185
# -*- coding: utf-8 -*- """ /*************************************************************************** PeakExtractor A QGIS plugin This plugin procedurally extracts morphological peaks from a given DEM. Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/ ------------------- begin : 2021-03-10 copyright : (C) 2021 by NASA JPL email : russells@jpl.nasa.gov ***************************************************************************/ """ __author__ = "NASA JPL" __date__ = "2021-03-10" __copyright__ = "(C) 2021 by NASA JPL" # This will get replaced with a git SHA1 when you do a git archive __revision__ = "$Format:%H$" from qgis.PyQt.QtCore import QCoreApplication from qgis.core import (QgsProcessing, QgsProcessingAlgorithm, QgsProcessingParameterRasterLayer, QgsProcessingParameterNumber, QgsProcessingParameterFeatureSink, QgsFields, QgsWkbTypes) import processing # import grass.script as grass import math def round_up_to_odd(x: float) -> int: """round the given float up to the nearest odd integer""" n = math.ceil(x) return n + (1 - n%2)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 198, 14, 17174, 17174, 4557, 8162, 198, 23974, 11627, 40450, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220,...
2.270315
603
# Copyright (c) 2017 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from manila.api.openstack import api_version_request as api_version from manila.api.views import quota_class_sets from manila import test from manila.tests.api import fakes
[ 2, 15069, 357, 66, 8, 2177, 7381, 20836, 11, 3457, 13, 198, 2, 1439, 6923, 33876, 13, 198, 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, 220...
3.360656
244
import os import unittest import json import filecmp from genofunk.sequence_utils import * this_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
[ 11748, 28686, 198, 11748, 555, 715, 395, 198, 11748, 33918, 198, 11748, 2393, 48991, 198, 198, 6738, 2429, 1659, 2954, 13, 43167, 62, 26791, 1330, 1635, 198, 198, 5661, 62, 15908, 796, 28686, 13, 6978, 13, 15908, 3672, 7, 418, 13, 697...
2.810345
58
# -*- coding: utf-8 -*- """ Created on Thu Aug 13 09:52:47 2015 @author: wirkert """ import unittest import os import numpy as np import msi.msimanipulations as msimani from msi.io.nrrdreader import NrrdReader from msi.io.nrrdwriter import NrrdWriter from msi.test import helpers
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 26223, 2447, 1511, 7769, 25, 4309, 25, 2857, 1853, 198, 198, 31, 9800, 25, 266, 14232, 861, 198, 37811, 198, 198, 11748, 555, 715, 395, 198, 11...
2.704762
105
# -*- coding: utf-8 -*- """ ============================ crossplatformshell ============================ """ from __future__ import (print_function, division, absolute_import, unicode_literals) import pathlib import io import os import shutil import distutils.dir_util import platform # Use subprocess32 if available try: import subprocess32 as subprocess except: import subprocess as subprocess def check_output(*args, **kwargs): """Subprocess check_output, but prints commands and output by default. Also allows printing of error message for helpful debugging. Use print_all=False to turn off all printing.""" print_all = kwargs.pop('print_all', None) if print_all is not None: print_in = print_all print_out = print_all else: print_in = kwargs.pop('print_in', True) print_out = kwargs.pop('print_out', True) if print_in: print('') print(' '.join(args[0])) try: out_bytes = subprocess.check_output(*args, **kwargs) out_lines = out_bytes.decode('utf-8').splitlines() except subprocess.CalledProcessError as e: # Wrap in try/except so that check_output can print raise e if print_out: for line in out_lines: print(line) return out_lines windows = platform.system() == 'Windows' git = find_git_cmd(windows) def new_path(path_string): """Return pathlib.Path, expanding '~' to a user's HOME directory""" return pathlib.Path(os.path.expanduser(path_string)) def mkdir(*args): """Make directories for the specified paths.""" for arg in args: os.mkdir(str(arg)) def remove(path): """Remove the specified path.""" os.remove(str(path)) def rmtree(path): """Recursively remove paths.""" shutil.rmtree(str(path)) cp = copy def copy_tree(src_path, dst_path): """Recursively copy all files and folders from src_path to dst_path""" distutils.dir_util.copy_tree(str(src_path), str(dst_path)) cp_r = copy_tree def rm(*args): """Delete files, if they exist. 
Fail silently if a file doesn't exist.""" for path in args: try: os.remove(str(path)) except OSError: pass def rm_rf(*args): """Recursively delete directories, if they exist.""" for path in args: try: shutil.rmtree(str(path)) except OSError: pass # Versioneer versioning from ._version import get_versions __version__ = get_versions()['version'] del get_versions
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 4770, 25609, 198, 19692, 24254, 29149, 198, 4770, 25609, 198, 37811, 198, 6738, 11593, 37443, 834, 1330, 357, 4798, 62, 8818, 11, 7297, 11, 4112, 62, 11748, ...
2.556886
1,002
# Copyright (c) 2019 NVIDIA Corporation # If you want to add your own data layer, you should put its name in # __all__ so that it can be imported with 'from text_data_layers import *' __all__ = ['TextDataLayer', 'BertSentenceClassificationDataLayer', 'BertJointIntentSlotDataLayer', 'BertJointIntentSlotInferDataLayer', 'LanguageModelingDataLayer', 'BertTokenClassificationDataLayer', 'BertTokenClassificationInferDataLayer', 'BertPretrainingDataLayer', 'BertPretrainingPreprocessedDataLayer', 'TranslationDataLayer', 'GlueDataLayerClassification', 'GlueDataLayerRegression'] # from abc import abstractmethod import sys import torch from torch.utils import data as pt_data import os import h5py import nemo from nemo.backends.pytorch.nm import DataLayerNM from nemo.core.neural_types import * import random import numpy as np from .datasets import *
[ 2, 15069, 357, 66, 8, 13130, 15127, 10501, 198, 198, 2, 1002, 345, 765, 284, 751, 534, 898, 1366, 7679, 11, 345, 815, 1234, 663, 1438, 287, 198, 2, 11593, 439, 834, 523, 326, 340, 460, 307, 17392, 351, 705, 6738, 2420, 62, 7890, ...
2.613757
378
import h2o
from h2o.base import Keyed
from h2o.exceptions import H2OValueError
from h2o.job import H2OJob
from h2o.model import ModelBase
from h2o.utils.typechecks import assert_is_type, is_type


def get_best_model(self, algorithm=None, criterion=None):
    """
    Get best model of a given family/algorithm for a given criterion from an AutoML object.

    :param algorithm: One of "basemodel", "deeplearning", "drf", "gbm", "glm",
        "stackedensemble", "xgboost". If None, pick the best model regardless of the algorithm.
    :param criterion: Criterion can be one of the metrics reported in leaderboard. If set to
        None, the same ordering as in the leaderboard will be used. Avaliable criteria:
          - Regression metrics: deviance, rmse, mse, mae, rmsle
          - Binomial metrics: auc, logloss, aucpr, mean_per_class_error, rmse, mse
          - Multinomial metrics: mean_per_class_error, logloss, rmse, mse
        The following additional leaderboard information can be also used as a criterion:
          - 'training_time_ms': column providing the training time of each model in
            milliseconds (doesn't include the training of cross validation models).
          - 'predict_time_per_row_ms': column providing the average prediction time by the
            model for a single row.
    :return: An H2OModel or None if no model of a given family is present

    :examples:
    >>> # Set up an H2OAutoML object
    >>> aml = H2OAutoML(max_runtime_secs=30)
    >>> # Launch an AutoML run
    >>> aml.train(y=y, training_frame=train)
    >>> gbm = aml.get_best_model("gbm")
    """
    # NOTE(review): the original shadowed the module-level H2OValueError with a
    # redundant local `from h2o.exceptions import H2OValueError`; removed.
    higher_is_better = ["auc", "aucpr"]
    assert_is_type(algorithm, None, str)
    assert_is_type(criterion, None, str)
    if criterion is not None:
        criterion = criterion.lower()
    if "deviance" == criterion:
        criterion = "mean_residual_deviance"
    if algorithm is not None:
        if algorithm.lower() not in ("basemodel", "deeplearning", "drf", "gbm",
                                     "glm", "stackedensemble", "xgboost"):
            raise H2OValueError("Algorithm \"{}\" is not supported!".format(algorithm))
        algorithm = algorithm.lower()

    # Ask the backend for the leaderboard, extended with the extra columns the
    # criterion may need.
    extra_cols = ["algo"]
    if criterion in ("training_time_ms", "predict_time_per_row_ms"):
        extra_cols.append(criterion)
    leaderboard = h2o.automl.get_leaderboard(self, extra_columns=extra_cols)
    # Restrict to the requested family; "basemodel" means anything that is not
    # a stacked ensemble.
    leaderboard = leaderboard if algorithm is None else (
        leaderboard[leaderboard["algo"].tolower() == algorithm, :]
        if algorithm != "basemodel"
        else leaderboard[leaderboard["algo"].tolower() != "stackedensemble", :])
    if leaderboard.nrow == 0:
        return None
    if criterion is None:
        return h2o.get_model(leaderboard[0, "model_id"])
    if criterion not in leaderboard.columns:
        raise H2OValueError("Criterion \"{}\" is not present in the leaderboard!".format(criterion))

    # Among the models tied for the best criterion value, keep the one ranked
    # highest in the default leaderboard order.
    models_in_default_order = _get_models(leaderboard)
    sorted_lb = leaderboard.sort(by=criterion, ascending=criterion not in higher_is_better)
    selected_models = _get_models(sorted_lb[sorted_lb[criterion] == sorted_lb[0, criterion]])
    picked_model = [model for model in models_in_default_order if model in selected_models][0]
    return h2o.get_model(picked_model)


def _fetch_leaderboard(aml_id, extensions=None):
    """Fetch a leaderboard for an AutoML run id, optionally with extra columns."""
    assert_is_type(extensions, None, str, [str])
    extensions = ([] if extensions is None
                  else [extensions] if is_type(extensions, str)
                  else extensions)
    resp = h2o.api("GET /99/Leaderboards/%s" % aml_id, data=dict(extensions=extensions))
    dest_key = resp['project_name'].split('@', 1)[0] + "_custom_leaderboard"
    return _fetch_table(resp['table'], key=dest_key, progress_bar=False)


def _fetch_table(table, key=None, progress_bar=True):
    """Convert an H2OTwoDimTable into an H2OFrame stored under ``key``."""
    try:
        # Intentionally mask the progress bar here since showing multiple progress bars
        # is confusing to users.
        # If any failure happens, revert back to user's original setting for progress
        # and display the error message.
        ori_progress_state = H2OJob.__PROGRESS_BAR__
        H2OJob.__PROGRESS_BAR__ = progress_bar
        # Parse leaderboard H2OTwoDimTable & return as an H2OFrame
        fr = h2o.H2OFrame(table.cell_values, destination_frame=key,
                          column_names=table.col_header, column_types=table.col_types)
        # removing index and reassign id to ensure persistence on backend
        return h2o.assign(fr[1:], key)
    finally:
        H2OJob.__PROGRESS_BAR__ = ori_progress_state


def _fetch_state(aml_id, properties=None, verbosity=None):
    """Fetch the AutoML state (leader, leaderboard, event log) for ``aml_id``.

    ``properties`` limits which parts are materialized as frames/models.
    """
    state_json = h2o.api("GET /99/AutoML/%s" % aml_id, data=dict(verbosity=verbosity))
    project_name = state_json["project_name"]
    if project_name is None:
        raise H2OValueError("No AutoML instance with id {}.".format(aml_id))

    leaderboard_list = [key["name"] for key in state_json['leaderboard']['models']]
    leader_id = (leaderboard_list[0]
                 if (leaderboard_list is not None and len(leaderboard_list) > 0)
                 else None)

    should_fetch = lambda prop: properties is None or prop in properties

    leader = None
    if should_fetch('leader'):
        leader = h2o.get_model(leader_id) if leader_id is not None else None

    leaderboard = None
    if should_fetch('leaderboard'):
        leaderboard = _fetch_table(state_json['leaderboard_table'],
                                   key=project_name + "_leaderboard",
                                   progress_bar=False)

    event_log = None
    if should_fetch('event_log'):
        event_log = _fetch_table(state_json['event_log_table'],
                                 key=project_name + "_eventlog",
                                 progress_bar=False)

    return dict(
        project_name=project_name,
        json=state_json,
        leader_id=leader_id,
        leader=leader,
        leaderboard=leaderboard,
        event_log=event_log,
    )
[ 11748, 289, 17, 78, 198, 6738, 289, 17, 78, 13, 8692, 1330, 7383, 276, 198, 6738, 289, 17, 78, 13, 1069, 11755, 1330, 367, 17, 46, 11395, 12331, 198, 6738, 289, 17, 78, 13, 21858, 1330, 367, 17, 46, 33308, 198, 6738, 289, 17, 78...
2.400312
2,568
import math,sys,pygame
[ 11748, 10688, 11, 17597, 11, 9078, 6057, 198 ]
2.875
8
import hassapi as hass
import csv
from collections import namedtuple

# One CSV row: profile name, CIE x/y color coordinates, and brightness.
Profile = namedtuple(
    "Profile", ["profile", "x_color", "y_color", "brightness"])

with open("/config/light_profiles.csv") as csv_file:
    reader = csv.reader(csv_file)
    next(reader)  # skip the header row
    LIGHT_PROFILES = []
    for record in reader:
        LIGHT_PROFILES.append(
            Profile(record[0], float(record[1]), float(record[2]),
                    int(record[3])))
[ 11748, 468, 82, 15042, 355, 468, 82, 198, 11748, 269, 21370, 198, 6738, 17268, 1330, 3706, 83, 29291, 628, 198, 37046, 796, 3706, 83, 29291, 7, 198, 220, 220, 220, 366, 37046, 1600, 14631, 13317, 1600, 366, 87, 62, 8043, 1600, 366, ...
2.666667
153
# coding: utf-8 from pymongo import MongoClient import conf
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 6738, 279, 4948, 25162, 1330, 42591, 11792, 198, 11748, 1013, 628, 198 ]
3
21
# # Copyright (C) 2014 Dell, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import configparser import json import logging import os import urllib.parse import dcm.agent.exceptions as exceptions import dcm.agent.logger as dcm_logger import dcm.agent.plugins.api.base as plugin_base import dcm.agent.plugins.api.exceptions as plugin_exceptions import dcm.agent.plugins.api.utils as plugin_utils import dcm.agent.utils as utils _g_logger = logging.getLogger(__name__)
[ 2, 198, 2, 220, 15069, 357, 34, 8, 1946, 23617, 11, 3457, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262...
3.431579
285
# -*- coding: utf-8 -*-

"""Common layouts."""


#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------

import logging

import numpy as np

from phylib.utils import emit
from phylib.utils.geometry import get_non_overlapping_boxes, get_closest_box

from .base import BaseLayout
from .transform import Scale, Range, Subplot, Clip, NDC
from .utils import _get_texture, _in_polygon
from .visuals import LineVisual, PolygonVisual

logger = logging.getLogger(__name__)


#------------------------------------------------------------------------------
# Grid
#------------------------------------------------------------------------------

#------------------------------------------------------------------------------
# Boxed
#------------------------------------------------------------------------------

# Box scaling
#--------------------------------------------------------------------------

def _increment_box_scaling(self, cw=1., ch=1.):
    # Multiply the current (width, height) box scaling by (cw, ch) and redraw.
    # NOTE(review): takes `self` — presumably attached to Boxed; verify.
    self._box_scaling = (self._box_scaling[0] * cw, self._box_scaling[1] * ch)
    self.update()


# Layout scaling
#--------------------------------------------------------------------------

class Stacked(Boxed):
    """Layout showing a number of subplots stacked vertically.

    Parameters
    ----------

    n_boxes : int
        Number of boxes to stack vertically.
    box_var : str
        Name of the GLSL variable with the box index.
    origin : str
        top or bottom

    Note
    ----

    To be used in a boxed layout, a visual must define `a_box_index`
    (by default) or another GLSL variable specified in `box_var`.

    """

    margin = 0
    _origin = 'bottom'

    def get_box_pos(self, n_boxes):
        """Return the box bounds for a given number of stacked boxes."""
        # Signal bounds: y positions evenly spread over NDC, flipped when the
        # origin is at the top.
        pos = np.zeros((n_boxes, 2))
        pos[:, 1] = np.linspace(-1, 1, n_boxes)
        if self._origin == 'top':
            pos = pos[::-1, :]
        return pos

    def attach(self, canvas):
        """Attach the stacked interact to a canvas."""
        BaseLayout.attach(self, canvas)
        canvas.gpu_transforms += self.gpu_transforms
        # Declare the per-vertex box index and the stacking uniforms.
        canvas.inserter.insert_vert("""
            #include "utils.glsl"
            attribute float {};
            uniform float n_boxes;
            uniform bool u_top_origin;
            uniform vec2 u_box_size;
            """.format(self.box_var), 'header', origin=self)
        # Compute each box's vertical bounds from its index on the GPU.
        canvas.inserter.insert_vert("""
            float margin = .1 / n_boxes;
            float a = 1 - 2. / n_boxes + margin;
            float b = -1 + 2. / n_boxes - margin;
            float u = (u_top_origin ? (n_boxes - 1. - {bv}) : {bv}) / max(1., n_boxes - 1.);
            float y0 = -1 + u * (a + 1);
            float y1 = b + u * (1 - b);
            float ym = .5 * (y0 + y1);
            float yh = u_box_size.y * (y1 - ym);
            y0 = ym - yh;
            y1 = ym + yh;
            vec4 box_bounds = vec4(-1., y0, +1., y1);
            """.format(bv=self.box_var), 'before_transforms', origin=self)

    def update_visual(self, visual):
        """Update a visual."""
        BaseLayout.update_visual(self, visual)
        if 'n_boxes' in visual.program:
            visual.program['n_boxes'] = self.n_boxes
            visual.program['u_box_size'] = self._box_scaling
            visual.program['u_top_origin'] = self._origin == 'top'


#------------------------------------------------------------------------------
# Interactive tools
#------------------------------------------------------------------------------
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 17227, 38489, 526, 15931, 628, 198, 2, 10097, 26171, 198, 2, 1846, 3742, 198, 2, 10097, 26171, 198, 198, 11748, 18931, 198, 11748, 299, 32152, 355, 45941, 19...
2.733383
1,339
import tornado.httpserver import tornado.ioloop import tornado.options import tornado.web import simplejson from QueryHandler import QueryHandler
[ 198, 198, 11748, 33718, 13, 5450, 18497, 198, 11748, 33718, 13, 1669, 11224, 198, 11748, 33718, 13, 25811, 198, 11748, 33718, 13, 12384, 198, 11748, 2829, 17752, 198, 6738, 43301, 25060, 1330, 43301, 25060 ]
4.323529
34
# Generated by Django 2.0.5 on 2019-04-28 20:22 from django.db import migrations, models import django.db.models.deletion
[ 2, 2980, 515, 416, 37770, 362, 13, 15, 13, 20, 319, 13130, 12, 3023, 12, 2078, 1160, 25, 1828, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 11748, 42625, 14208, 13, 9945, 13, 27530, 13, 2934, 1616, 295, ...
2.818182
44
from toolz import get

# Each entry is (plot types it applies to, predicate returning True when the
# plot is INVALID, failure message shown to the user).
PLOT_VALIDATORS = [
    (
        {"line", "scatter", "bar"},
        lambda x: ("x" not in x) or ("y" not in x),
        "XY plot does not have X and Y.",
    ),
    (
        {"histogram"},
        lambda x: ("step" in x) and ("bins" in x),
        "Histogram cannot have STEP and BINS.",
    ),
    (
        {"line", "scatter", "bar"},
        lambda x: ("agg" in x["x"]) and ("agg" in x["y"]),
        "XY plot cannot have an aggregation on X and Y.",
    ),
    (
        {"histogram", "pie"},
        lambda x: ("agg" in get("x", x, {}))
        or ("agg" in get("y", x, {}))
        or ("agg" in get("axis", x, {})),
        "Histograms and pie charts cannot have aggregations.",
    ),
    (
        {"histogram", "pie"},
        lambda x: ("temporal" in get("x", x, {}))
        or ("temporal" in get("y", x, {}))
        or ("temporal" in get("axis", x, {})),
        "Histograms and pie charts cannot have temporal axes.",
    ),
    (
        {"histogram"},
        lambda x: ("x" in x) and ("y" in x),
        "Histograms can have X or Y, not both.",
    ),
    (
        {"histogram"},
        lambda x: ("x" not in x) and ("y" not in x),
        "Histograms must have an X or Y.",
    ),
    ({"pie"}, lambda x: "axis" not in x, "Pie charts must have an axis."),
    (
        {"line", "bar"},
        # SORT is a no-op for scatter.
        lambda x: ("sort" in x["x"]) and ("sort" in x["y"]),
        "Cannot sort by two axes.",
    ),
    (
        {"pie"},
        lambda x: (get("hole", x, 0.0) < 0) or (get("hole", x, 0.0) > 1),
        "HOLE must be between zero and one.",
    ),
    (
        {"histogram"},
        lambda x: get("step", x, 1) <= 0,
        "STEP must be greater than zero.",
    ),
    (
        {"histogram"},
        lambda x: get("bins", x, 1) <= 0,
        "BINS must be greater than zero.",
    ),
    (
        {"histogram", "pie"},
        lambda x: "color_by" in x,
        "Histograms and pie charts cannot have COLOR BY.",
    ),
    ({"pie"}, lambda x: "split_by" in x, "Pie charts cannot have SPLIT BY."),
    (
        {"line", "scatter", "bar"},
        lambda x: ("split_by" in x) and ("color_by" in x),
        "Cannot have COLOR BY and SPLIT BY on same plot.",
    ),
    (
        {"line", "scatter", "bar"},
        lambda x: (
            # If we don't include this it can throw exceptions for other
            # validators.
            ("x" in x)
            and ("y" in x)
        )
        and (("agg" in x["x"]) or ("agg" in x["y"]))
        and (("color_by" in x) and ("agg" not in x["color_by"])),
        "If there's an aggregation on X or Y, COLOR BY must also aggregate.",
    ),
]


def validate_plot(svl_plot):
    """ Validates the SVL plot.

    Parameters
    ----------
    svl_plot : dict
        The SVL plot specifier.

    Returns
    -------
    Tuple[bool, str]
        A boolean indicating whether the plot is valid and a message
        indicating that the plot is either valid or which validations it
        failed.
    """
    # Collect the message of every validator that applies to this plot type
    # and whose invalidity predicate fires.
    failures = [
        message
        for plot_types, is_invalid, message in PLOT_VALIDATORS
        if (svl_plot["type"] in plot_types) and is_invalid(svl_plot)
    ]
    return not failures, "\n".join(failures)
[ 6738, 2891, 89, 1330, 651, 198, 198, 6489, 2394, 62, 23428, 2389, 1404, 20673, 796, 685, 198, 220, 220, 220, 357, 198, 220, 220, 220, 220, 220, 220, 220, 19779, 1370, 1600, 366, 1416, 1436, 1600, 366, 5657, 25719, 198, 220, 220, 220...
2.117311
1,577
'''
Building a Decision Tree using CART (from scratch)

Note: Code was tested only on dataset with numerical features.
Categorical features are not yet fully supported.
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from scikitplot.metrics import plot_confusion_matrix

import common.utils as ut

# get data from:
# https://www.kaggle.com/c/otto-group-product-classification-challenge
TRN_DATA_PATH = 'datasets/otto-group-product-classification/train.csv'
NUM_SAMPLES = 5000
NUM_FEATS = 93


def visualize_data(feats, true_labels, preds):
    '''Display labeled data and clustered data'''
    print("Visualizing data...")
    red_feats = ut.reduce_to_2D_by_tsne(feats)
    # One color per class label (labels are assumed to be 1-based indices
    # into this list — TODO confirm against the dataset).
    label2col_map = ['red', 'orange', 'yellow', 'green', 'blue',
                     'violet', 'brown', 'gray', 'pink']
    label_list = np.unique(true_labels)
    _, ax = plt.subplots(ncols=2, figsize=(10, 5))
    graph_label_pair = zip(ax, [true_labels, preds])
    for graph, labels in graph_label_pair:
        for label in label_list:
            # get samples with label == label
            idxs = np.where(labels == label)
            # get components
            pc1, pc2 = red_feats['pc1'].values[idxs], red_feats['pc2'].values[idxs]
            # scatter plot w/ color based on labels
            graph.scatter(x=pc1, y=pc2, color=label2col_map[label - 1],
                          alpha=0.5, label=label)
        graph.set_xlabel('PC1')
        graph.set_ylabel('PC2')
    ax[0].set_title('Labeled Products')
    ax[1].set_title('Predicted Labels')
    for graph in ax:
        graph.legend()      # show legend
        graph.grid(True)    # show gridlines
    plt.show()


def get_impurity(labels):
    '''Calculate Gini impurity of a label array: sum over classes of p*(1-p).'''
    num_labels = float(len(labels))
    imp = 0.0
    _, cnts = np.unique(labels, return_counts=True)
    for cnt in cnts:
        cnt = float(cnt)
        imp += float((cnt / num_labels) * (1 - (cnt / num_labels)))
    return imp


def get_best_split_along_column(data, labels, feat_idx, categorical=False):
    '''Get best split using features in a single column.

    Returns (normalized min impurity, splitter value, left mask, right mask);
    the masks are boolean index arrays over the rows of ``data``.
    '''
    feat_col = data[:, feat_idx]
    splitter_pool = np.unique(feat_col)  # get splitters
    min_im = np.inf
    left_idxs = []
    right_idxs = []
    splitter = None
    for val in splitter_pool:
        if categorical:
            left_labels = labels[feat_col == val]
            right_labels = labels[feat_col != val]
        else:
            left_labels = labels[feat_col >= val]
            right_labels = labels[feat_col < val]
        # if all data is placed on only one side
        # then it is not a meaningful split so we skip
        if len(left_labels) == len(data) or len(right_labels) == len(data):
            continue
        avg_im = len(left_labels) * get_impurity(left_labels) + \
            len(right_labels) * get_impurity(right_labels)
        if avg_im < min_im:
            min_im = avg_im
            # BUG FIX: the original always built the index masks with the
            # numerical (>=/<) comparison, even in the categorical branch,
            # so the returned masks disagreed with the impurity that was
            # just computed from ==/!= above.
            if categorical:
                left_idxs = (feat_col == val)
                right_idxs = (feat_col != val)
            else:
                left_idxs = (feat_col >= val)
                right_idxs = (feat_col < val)
            splitter = val
    if len(left_idxs) + len(right_idxs) > 0:
        min_im /= (len(left_idxs) + len(right_idxs))
    return min_im, splitter, left_idxs, right_idxs


def main():
    '''Main: load data, train the tree, report accuracy, and plot results.'''
    global TRN_DATA_PATH, NUM_SAMPLES, NUM_FEATS

    # no need to rescale for decision tree
    feats, labels = ut.get_data_from_csv(TRN_DATA_PATH, rescale=False)
    if NUM_SAMPLES < len(feats):
        feats, labels = ut.sample(feats, labels, NUM_SAMPLES)
    feats = feats.values
    if NUM_FEATS < len(feats[0]):
        idxs = np.random.choice(range(len(feats[0])), NUM_FEATS, replace=False)
        feats = feats[:, idxs]
    trn_feats, tst_feats, trn_labels, tst_labels = train_test_split(
        feats, labels, test_size=0.20, stratify=labels)

    # build tree
    print("Building decision tree...")
    # NOTE(review): TreeNode is not defined in this chunk; it is assumed to
    # be provided elsewhere in the original project — verify.
    decision_tree = TreeNode()
    decision_tree.build_tree(trn_feats, trn_labels.values)
    print("Done!")

    print("Checking accuracy on training set...")
    predictions = []
    for sample in trn_feats:
        result = decision_tree.classify(sample)
        predictions.append(result)
    # for checking only. must be 100% accuracy on training set
    print("Training Set Results:\n",
          classification_report(trn_labels, predictions))

    print("Using tree to predict labels...")
    predictions = []
    for sample in tst_feats:
        result = decision_tree.classify(sample)
        predictions.append(result)
    print("Test Set Results:\n",
          classification_report(tst_labels, predictions))

    visualize_data(pd.DataFrame(tst_feats), tst_labels, predictions)

    # display confusion matrix
    print("Plotting confusion matrix...")
    plot_confusion_matrix(tst_labels, predictions, normalize=True)
    plt.show()
    return 0


if __name__ == "__main__":
    main()
[ 7061, 6, 198, 25954, 257, 26423, 12200, 1262, 327, 7227, 357, 6738, 12692, 8, 198, 198, 6425, 25, 6127, 373, 6789, 691, 319, 27039, 351, 29052, 3033, 13, 198, 34, 2397, 12409, 3033, 389, 407, 1865, 3938, 4855, 13, 198, 7061, 6, 198,...
2.226938
2,309
""" FBlas Routine class: it used to represent a routine definition, specified by the user using JSON file. It is used by the Host and Module Codegen (specified by the _codegen variable). Accordingly, some class members could be invalid. """ from codegen import fblas_types from codegen import generator_definitions def are_elements_A_rowstreamed(self): """ :return: True if the elements of A are rowstreamed """ return self._elements_A_order == fblas_types.FblasOrder.FblasRowMajor def add_input_channel(self, routine_channel_name, user_name): ''' Add the channel to the dictionary of input channels If already present, it will be overwritten ''' self._input_channels[routine_channel_name] = user_name def add_output_channel(self, routine_channel_name, user_name): ''' Add the channel to the dictionary of input channels If already present, it will be overwritten ''' self._output_channels[routine_channel_name] = user_name
[ 37811, 198, 220, 220, 220, 376, 3629, 292, 371, 28399, 1398, 25, 340, 973, 284, 2380, 257, 8027, 6770, 11, 7368, 416, 262, 2836, 1262, 19449, 2393, 13, 198, 220, 220, 220, 632, 318, 973, 416, 262, 14504, 290, 19937, 6127, 5235, 357,...
2.764249
386
from hoyolab import main from os import environ from os.path import exists import atoma
[ 6738, 289, 726, 349, 397, 1330, 1388, 198, 6738, 28686, 1330, 551, 2268, 198, 6738, 28686, 13, 6978, 1330, 7160, 198, 11748, 379, 6086, 628, 198 ]
3.461538
26
"""Generated wrapper for NFTCollection Solidity contract.""" # pylint: disable=too-many-arguments import json from typing import ( # pylint: disable=unused-import Any, List, Optional, Tuple, Union, ) from eth_utils import to_checksum_address from mypy_extensions import TypedDict # pylint: disable=unused-import from hexbytes import HexBytes from thirdweb_web3 import Web3 from thirdweb_web3.contract import ContractFunction from thirdweb_web3.datastructures import AttributeDict from thirdweb_web3.providers.base import BaseProvider from zero_ex.contract_wrappers.bases import ContractMethod, Validator from zero_ex.contract_wrappers.tx_params import TxParams # Try to import a custom validator class definition; if there isn't one, # declare one that we can instantiate for the default argument to the # constructor for NFTCollection below. try: # both mypy and pylint complain about what we're doing here, but this # works just fine, so their messages have been disabled here. from . import ( # type: ignore # pylint: disable=import-self NFTCollectionValidator, ) except ImportError: try: from .middleware import MIDDLEWARE # type: ignore except ImportError: pass # pylint: disable=too-many-public-methods,too-many-instance-attributes # pylint: disable=too-many-lines
[ 37811, 8645, 515, 29908, 329, 41288, 4825, 349, 1564, 15831, 414, 2775, 526, 15931, 198, 198, 2, 279, 2645, 600, 25, 15560, 28, 18820, 12, 21834, 12, 853, 2886, 198, 198, 11748, 33918, 198, 6738, 19720, 1330, 357, 220, 1303, 279, 2645...
3.068433
453
from django.urls import path

from . import views

# Route table for the sausage grinder app; the empty pattern is the index.
urlpatterns = [
    path('artist', views.artist),
    path('genre', views.genre),
    path('release', views.release),
    path('track', views.track),
    path('', views.sausage_grinder_index),
]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 198, 6738, 764, 1330, 5009, 355, 264, 70, 198, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 3108, 10786, 49016, 3256, 264, 70, 13, 49016, 828, 198, 220, 220, 220, 3108, 10786...
2.418367
98
from PyQt5.QtCore import pyqtSlot from PyQt5.QtWidgets import QComboBox from PyQt5.QtWidgets import QLineEdit from sf.mmck.parameters import String from .manager import widget_class_for from .widget import ParameterWidget
[ 6738, 9485, 48, 83, 20, 13, 48, 83, 14055, 1330, 12972, 39568, 38963, 198, 6738, 9485, 48, 83, 20, 13, 48, 83, 54, 312, 11407, 1330, 1195, 5377, 2127, 14253, 198, 6738, 9485, 48, 83, 20, 13, 48, 83, 54, 312, 11407, 1330, 1195, 1...
2.896104
77
from FitnessPlot import FitnessPlot

'''
for n in range(1,6):
    plot = FitnessPlot(folder_prefix='data_top{}'.format(n))
    plot.plot_all_workers()
    plot.plot_workers_as_average()
'''

# Load the per-worker fitness history for the first topology.
plot = FitnessPlot(folder_prefix='data_top1', num_workers=16)
worker_dict = plot.create_worker_dict()

# plot.plot_all_workers()
# plot.plot_workers_as_average()
# print(worker_dict)

# Print each worker's key followed by its most recent fitness value.
for key, value in worker_dict.items():
    # if len(value) < 100:
    #     print(key)
    #     print(len(value))
    print(key)
    # IDIOM FIX: value[len(value)-1] -> value[-1]; same behavior, including
    # the IndexError on an empty series. The unused dict_len local was removed.
    print(value[-1])
[ 6738, 34545, 43328, 1330, 34545, 43328, 628, 198, 7061, 6, 198, 1640, 299, 287, 2837, 7, 16, 11, 21, 2599, 198, 220, 220, 220, 7110, 796, 34545, 43328, 7, 43551, 62, 40290, 11639, 7890, 62, 4852, 90, 92, 4458, 18982, 7, 77, 4008, ...
2.477477
222
from space_tracer.main import replace_input, TraceRunner
[ 6738, 2272, 62, 2213, 11736, 13, 12417, 1330, 6330, 62, 15414, 11, 34912, 49493, 628, 628, 628, 628, 628 ]
3.473684
19
import logging
[ 198, 11748, 18931, 628, 628, 198 ]
3.333333
6
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ThreeScale Proxies Rule interface for APIs.""" from .base import ThreeScale import logging import requests import xmltodict import json logger = logging.getLogger(__name__)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 12510, 29990, 1041, 87, 444, 14330, 7071, 329, 23113, 526, 15931, 198, 198, 6738, 764, 8692, 1330, 7683...
2.948052
77
import sys

from __init__ import Bot

# Usage template, rendered with str.format() below.
# BUG FIX: the original template used a printf-style "%s" placeholder but was
# rendered with str.format(), so the script name was never substituted and the
# literal "%s" was printed.
MESSAGE_USAGE = "Usage is python {} [name] [token]"

if __name__ == "__main__":
    # Expect exactly two arguments after the script name: bot name and token.
    if len(sys.argv) == 3:
        Bot(sys.argv[1], sys.argv[2])
    else:
        print(MESSAGE_USAGE.format(sys.argv[0]))
[ 11748, 25064, 198, 6738, 11593, 15003, 834, 1330, 18579, 198, 198, 44, 1546, 4090, 8264, 62, 2937, 11879, 796, 366, 28350, 318, 21015, 4064, 82, 685, 3672, 60, 685, 30001, 30866, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, ...
2.059829
117
# Created with tutorials:
# https://www.digitalocean.com/community/tutorials/how-to-structure-large-flask-applications
# http://flask.pocoo.org/docs/0.12/tutorial
from flask import Flask, g, render_template
from flask_sqlalchemy import SQLAlchemy
import sqlite3

# Define WSGI application object.
app = Flask(__name__)

# Configurations: static defaults first, then optional overrides taken from
# the file named by the CONFIG environment variable (ignored when unset).
app.config.from_object('config')
app.config.from_envvar('CONFIG', silent=True)

# Define database object.
db = SQLAlchemy(app)

# Import a module / component using its blueprint handler variable (mod_auth).
# NOTE(review): these imports presumably must stay below the `db` definition
# because the controller modules import it back from this package — verify.
from app.api.entries.controllers import mod as entries_module
from app.site.controllers import mod as site_module

# Register blueprint(s)
app.register_blueprint(entries_module)
app.register_blueprint(site_module)
# app.register_blueprint(xyz_module)
# ..

# Build the database:
# This will create the database file using SQLAlchemy
db.create_all()
[ 2, 15622, 351, 27992, 25, 198, 2, 3740, 1378, 2503, 13, 34725, 78, 5829, 13, 785, 14, 28158, 14, 83, 44917, 82, 14, 4919, 12, 1462, 12, 301, 5620, 12, 11664, 12, 2704, 2093, 12, 1324, 677, 602, 198, 2, 2638, 1378, 2704, 2093, 13...
3.151408
284
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ logging
import subprocess
import numpy as np
import datetime
import random
import warnings
import ROOT as rt
import math
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import Callback
from array import array
from sklearn.metrics import roc_auc_score, auc, roc_curve


def sampleallnum(self):
    # Total number of entries available on this object.
    return self.Entries


def trainnum(self):
    # Size of the training window [Begin, End).
    return self.End - self.Begin


def totalnum(self):
    # Number of batches needed to cover the gluon and quark windows combined.
    return int(math.ceil(1. * (self.gEnd - self.gBegin + self.qEnd - self.qBegin)
                         / (self.batch_size * 1.00)))


def next(self):
    # Yield (data, label) batches drawing gluon and quark jets ~50/50.
    # NOTE(review): these look like methods of a generator class defined
    # outside this chunk; `self` carries the cursors and data windows.
    while self.endfile == 0:
        self.count += 1
        arnum = self.arnum
        jetset = []
        variables = []
        labels = []
        for i in range(self.batch_size):
            if random.random() < 0.5:
                # Gluon side: wrap around and flag end-of-file when exhausted.
                if self.a - self.gBegin >= len(self.gjetset):
                    self.a = self.gBegin
                    self.endfile = 1
                    break
                labels.append([0, 1])
                jetset.append(self.gjetset[self.a - self.gBegin])
                self.a += 1
            else:
                # Quark side: same wrap-around handling.
                if self.b - self.qBegin >= len(self.qjetset):
                    self.b = self.qBegin
                    self.endfile = 1
                    break
                labels.append([1, 0])
                jetset.append(self.qjetset[self.b - self.qBegin])
                self.b += 1
        data = []
        data.append(np.array(jetset))
        label = np.array(labels)
        # if(self.totalnum()<=self.count):
        #     if(self.istrain==1): print "\nreset\n"
        #     self.reset()
        if self.endfile == 1:
            # print "\nendd\n"
            self.reset()
        # print "\n",self.count,self.istrain,"\n"
        yield data, label
        # else:
        #     if(self.istrain==1):
        #         print "\n",datetime.datetime.now()
        # raise StopIteration
[ 11748, 28686, 198, 418, 13, 268, 2268, 17816, 10234, 62, 8697, 47, 62, 23678, 62, 25294, 62, 2538, 18697, 20520, 796, 705, 18, 6, 198, 11748, 850, 14681, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 4818, 8079, 198, 11748, 4738, 19...
2.021176
850
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo import fields, models
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2142, 286, 10529, 2238, 13, 4091, 38559, 24290, 2393, 329, 1336, 6634, 290, 15665, 3307, 13, 198, 198, 6738, 16298, 2238, 1330, 7032, 11, 4981, 628 ]
3.243902
41
# Copyright 2018 - Nokia # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from testtools import matchers from vitrage.common.constants import DatasourceAction from vitrage.common.constants import DatasourceOpts as DSOpts from vitrage.common.constants import DatasourceProperties as DSProps from vitrage.common.constants import EntityCategory from vitrage.common.constants import GraphAction from vitrage.common.constants import UpdateMethod from vitrage.common.constants import VertexProperties as VProps from vitrage.datasources.kubernetes.properties import KUBERNETES_DATASOURCE from vitrage.datasources.kubernetes.properties import KubernetesProperties \ as kubProp from vitrage.datasources.kubernetes.transformer import KubernetesTransformer from vitrage.datasources.nova.instance import NOVA_INSTANCE_DATASOURCE from vitrage.datasources.nova.instance.transformer import InstanceTransformer from vitrage.datasources import transformer_base as tbase from vitrage.datasources.transformer_base import TransformerBase from vitrage.tests import base from vitrage.tests.mocks import mock_driver as mock_sync from vitrage.tests.mocks import utils LOG = logging.getLogger(__name__) cluster_name = 'kubernetes'
[ 2, 15069, 2864, 532, 26182, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 921, 743, 7330, 198, 2,...
3.534791
503
# Two children, Lily and Ron, want to share a chocolate bar. Each of the squares has an integer on it.

# Lily decides to share a contiguous segment of the bar selected such that:
#     The length of the segment matches Ron's birth month, and,
#     The sum of the integers on the squares is equal to his birth day.

# Determine how many ways she can divide the chocolate.

#     int s[n]: the numbers on each of the squares of chocolate
#     int d: Ron's birth day
#     int m: Ron's birth month


def birthday(s, d, m):
    """Count contiguous segments of s with length m whose sum equals d.

    BUG FIX: the script called birthday() but never defined it (NameError
    at runtime); this supplies the missing implementation.
    """
    if m <= 0 or m > len(s):
        # No window of the requested length fits in the bar.
        return 0
    return sum(1 for i in range(len(s) - m + 1) if sum(s[i:i + m]) == d)


# Two children
s = '2 5 1 3 4 4 3 5 1 1 2 1 4 1 3 3 4 2 1'
caracteres = '18 7'

# `array` shadows the stdlib module name; kept for compatibility with the
# original script.
array = list(map(int, s.split()))
caracteresList = list(map(int, caracteres.split()))

print(birthday(array, caracteresList[0], caracteresList[1]))
[ 2, 4930, 1751, 11, 20037, 290, 6575, 11, 765, 284, 2648, 257, 11311, 2318, 13, 5501, 286, 262, 24438, 468, 281, 18253, 319, 340, 13, 198, 198, 2, 20037, 13267, 284, 2648, 257, 48627, 10618, 286, 262, 2318, 6163, 884, 326, 25, 198, ...
3.127753
227
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities

__all__ = [
    'GetUsersResult',
    'AwaitableGetUsersResult',
    'get_users',
    'get_users_output',
]


def get_users(login_name: Optional[str] = None,
              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetUsersResult:
    """
    Use this data source to access information about an existing resource.

    :param login_name: value sent to the provider as 'loginName'
        (presumably the login of the user to look up — verify against the
        provider schema).
    :param opts: invoke options; a default instance is created when omitted.
    """
    __args__ = dict()
    __args__['loginName'] = login_name
    # Fill in default invoke options and the SDK version when missing.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('sonarqube:index/getUsers:getUsers',
                                    __args__, opts=opts, typ=GetUsersResult).value

    # Re-wrap the raw invoke result into the awaitable result type.
    return AwaitableGetUsersResult(
        email=__ret__.email,
        id=__ret__.id,
        is_local=__ret__.is_local,
        login_name=__ret__.login_name,
        name=__ret__.name)
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 17202, 39410, 25, 428, 2393, 373, 7560, 416, 262, 21624, 12994, 24118, 687, 10290, 357, 27110, 5235, 8, 16984, 13, 17202, 198, 2, 17202, 2141, 407, 4370, 416, 1021, 4556, 345, 821, 1728, 345, 760...
2.609756
451
from abc import abstractmethod from typing import List from rl.action import Action from rl.state import State
[ 6738, 450, 66, 1330, 12531, 24396, 198, 6738, 19720, 1330, 7343, 198, 198, 6738, 374, 75, 13, 2673, 1330, 7561, 198, 6738, 374, 75, 13, 5219, 1330, 1812, 628, 198 ]
3.8
30
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ json """ import json # jsonpythonor # json-pythonor # pythonjsonjson
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 198, 17752, 198, 37811, 198, 198, 11748, 33918, 628, 198, 2, 33918, 29412, 273, 628, 198, 2, 33918, 1...
2.37037
54
from ast import literal_eval from typing import Dict, List import click from pygitguardian.models import Detail from ggshield.text_utils import STYLE, display_error, format_text, pluralize
[ 6738, 6468, 1330, 18875, 62, 18206, 198, 6738, 19720, 1330, 360, 713, 11, 7343, 198, 198, 11748, 3904, 198, 6738, 12972, 18300, 14864, 666, 13, 27530, 1330, 42585, 198, 198, 6738, 308, 70, 26662, 13, 5239, 62, 26791, 1330, 3563, 56, 2...
3.555556
54
# pylint: disable=no-member from CommonServerUserPython import * from CommonServerPython import * from sklearn.feature_extraction.text import TfidfVectorizer import pickle import uuid import spacy import string from html.parser import HTMLParser from html import unescape from re import compile as _Re import pandas as pd # define global parsers DBOT_TEXT_FIELD = 'dbot_text' DBOT_PROCESSED_TEXT_FIELD = 'dbot_processed_text' CONTEXT_KEY = 'DBotPreProcessTextData' HTML_PATTERNS = [ re.compile(r"(?is)<(script|style).*?>.*?(</\1>)"), re.compile(r"(?s)<!--(.*?)-->[\n]?"), re.compile(r"(?s)<.*?>"), re.compile(r"&nbsp;"), re.compile(r" +") ] html_parser = HTMLParser() tokenizer = None PRE_PROCESS_TYPES = { 'none': pre_process_none, 'nlp': pre_process_tokenizer, } if __name__ in ['builtins', '__main__']: entry = main() demisto.results(entry)
[ 2, 279, 2645, 600, 25, 15560, 28, 3919, 12, 19522, 198, 6738, 8070, 10697, 12982, 37906, 1330, 1635, 198, 6738, 8070, 10697, 37906, 1330, 1635, 198, 6738, 1341, 35720, 13, 30053, 62, 2302, 7861, 13, 5239, 1330, 309, 69, 312, 69, 38469...
2.487603
363
import logging import os import tensorflow as tf from punc_recover.models.punc_transformer import PuncTransformer from punc_recover.tester.base_tester import BaseTester from utils.text_featurizers import TextFeaturizer
[ 11748, 18931, 198, 11748, 28686, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 6738, 4000, 66, 62, 260, 9631, 13, 27530, 13, 79, 19524, 62, 7645, 16354, 1330, 14944, 66, 8291, 16354, 198, 6738, 4000, 66, 62, 260, 9631, 13, 4879, 353...
3.283582
67
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models
[ 2, 21004, 25, 3384, 69, 12, 23, 198, 11748, 4818, 8079, 198, 6738, 5366, 13, 9945, 1330, 20613, 198, 6738, 5366, 13, 85, 17, 1330, 10011, 2611, 44, 4254, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 628 ]
3.289474
38