query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Softmax loss function, vectorized version. Inputs and outputs are the same as softmax_loss_naive.
def softmax_loss_vectorized(W, X, y, reg): # Initialize the loss and gradient to zero. loss = 0.0 dW = np.zeros_like(W) ############################################################################# # TODO: Compute the softmax loss and its gradient using no explicit loops. # # Store the loss in loss and the gradient in dW. If you are not careful # # here, it is easy to run into numeric instability. Don't forget the # # regularization! # ############################################################################# N = X.shape[0] f = np.dot(X, W) f -= np.amax(f, axis = 1, keepdims = True) # for numerical stability exp_f = np.exp(f) exp_fyi = exp_f[range(N), y].reshape((N, 1)) # correct class probabilities sum_exp_f = np.sum(exp_f, axis = 1, keepdims = True) losses = -np.log(exp_fyi / sum_exp_f) loss = 1 / N * np.sum(losses) + reg * np.sum(W * W) P = exp_f / sum_exp_f y_one_hot = np.zeros_like(P) y_one_hot[range(len(y)), y] = 1 df = 1 / N * (P - y_one_hot) dW = np.dot(X.T, df) ############################################################################# # END OF YOUR CODE # ############################################################################# return loss, dW
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def softmax_loss_naive(W, X, y, reg):\n # Initialize the loss and gradient to zero.\n loss = 0.0\n dW = np.zeros_like(W)\n num_train = X.shape[1]\n num_classes = W.shape[0]\n #############################################################################\n # Compute the softmax loss and its gradient usi...
[ "0.76837397", "0.75555336", "0.755513", "0.75321347", "0.7522562", "0.7514906", "0.7509924", "0.74732995", "0.747066", "0.74670047", "0.7465988", "0.7429649", "0.7429498", "0.74292433", "0.74264807", "0.74254495", "0.7410896", "0.7408939", "0.7406985", "0.738524", "0.7385114"...
0.0
-1
Initialize the axis ranges from proviuded Plot or renderer.
def initialize_axis_ranges(self, plot, transform=None): if transform is None: def transform(x): return x elif isinstance(transform, int): ndigits = transform def transform(x): return round(x, ndigits) # Avoid UI polluting with non-sensical digits self.x_axis_range_low = transform(plot.x_axis.mapper.range.low) self.auto_x_axis_range_low = self.x_axis_range_low self.x_axis_range_high = transform(plot.x_axis.mapper.range.high) self.auto_x_axis_range_high = self.x_axis_range_high self.y_axis_range_low = transform(plot.y_axis.mapper.range.low) self.auto_y_axis_range_low = self.y_axis_range_low self.y_axis_range_high = transform(plot.y_axis.mapper.range.high) self.auto_y_axis_range_high = self.y_axis_range_high
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_axes(self):\n mini, maxi = self._get_extremes()\n self.y_axis.min = mini\n self.y_axis.max = maxi\n self.y_axis._max_min()\n\n if not None in [s.xvalues for s in self]:\n mini, maxi = self._get_extremes('xvalues')\n self.x_axis.min = mini\n ...
[ "0.71792495", "0.7129534", "0.68548447", "0.67877525", "0.6537042", "0.64943486", "0.641954", "0.6235867", "0.6223747", "0.6188791", "0.61873025", "0.61726326", "0.6160985", "0.6133905", "0.61263645", "0.61139727", "0.61138016", "0.61124223", "0.60551167", "0.60020185", "0.60...
0.7375276
0
Setup a host for proper deployment. Assuming Debian Linux.
def setup(): debs = ("python-setuptools", "apache2", "libapache2-mod-wsgi") require("hosts", provided_by=[production, staging]) sudo("apt-get install %s" % " ".join(debs)) sudo("easy_install virtualenv pip") sudo("mkdir -p %(path)s" % env) with cd("%(path)s" % env): sudo("mkdir -p releases; mkdir -p packages") sudo("virtualenv --no-site-packages .") sudo("mkdir -p /var/log/twit-demo; chown www-data:www-data /var/log/twit-demo")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_machine():\n # Initial setup and package install.\n sudo(\"aptitude update\")\n sudo(\"aptitude -y install git-core python-dev python-setuptools \"\n \"postgresql-dev postgresql-client build-essential \"\n \"libpq-dev subversion mercurial...
[ "0.7073709", "0.69501", "0.65425813", "0.63506156", "0.6257246", "0.6216987", "0.61947876", "0.60763687", "0.6047417", "0.601704", "0.59991187", "0.5992791", "0.5950253", "0.58975947", "0.5893902", "0.5886802", "0.5858583", "0.5855254", "0.5841016", "0.5830183", "0.581706", ...
0.6556086
2
Deploy a new relase.
def deploy(): require("hosts", provided_by=[production, staging]) env.release = time.strftime("%Y-%m-%d_%H:%M:%S") upload_tar_from_git() install_requirements() setup_webserver() symlink_current_release() restart_webserver()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deploy():", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n build()\n copy()\n install()", "def deploy():\n update_treesheets()\n restart_treesheets()", "def deploy():\n...
[ "0.7744899", "0.706143", "0.706143", "0.706143", "0.6948104", "0.69311583", "0.6749892", "0.6712348", "0.67008215", "0.66962844", "0.6653298", "0.66502756", "0.6646828", "0.6614402", "0.65999466", "0.65739655", "0.65360653", "0.647553", "0.6442106", "0.64005727", "0.6380691",...
0.68142927
6
Create an archive from the given tree, upload, and untar it.
def upload_tar_from_git(): require("release", provided_by=[deploy]) tree = prompt("Please enter a branch or SHA1 to deploy", default="master") local("git archive --format=tar %s | gzip > %s.tar.gz" % (tree, env['release'])) sudo("mkdir %(path)s/releases/%(release)s" % env) put("%(release)s.tar.gz" % env, "%(path)s/packages/" % env, use_sudo=True) sudo("cd %(path)s/releases/%(release)s && tar zxf ../../packages/%(release)s.tar.gz" % env) local("rm %(release)s.tar.gz" % env)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_tar(self):\n with tarfile.open(self.tgzfile, \"w:gz\") as tar_handle:\n for root, _, files in os.walk(self.dirname):\n for file in files:\n tar_handle.add(os.path.join(root, file))", "def untar(conn, tarball, path):\n conn.run(f\"tar xf {tarball} -C...
[ "0.6552709", "0.6145302", "0.59912825", "0.590548", "0.5857982", "0.5824896", "0.5788129", "0.57454973", "0.5727448", "0.5701735", "0.5637337", "0.5634134", "0.5598457", "0.55868477", "0.5579905", "0.5539658", "0.5536656", "0.5533471", "0.5514017", "0.5374604", "0.53565294", ...
0.6242641
1
Install the required Python packages inside the virtualenv.
def install_requirements(): require("release", provided_by=[deploy]) with cd("%(path)s" % env): sudo("./bin/pip install -r ./releases/%(release)s/requirements.txt" % env)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sub_install_python_requirements():\n # Activate the virtualenv\n activate = 'source {0}/{1}/bin/activate'.format(\n env.virtualenv['dir'], env.virtualenv['name'])\n run(activate)\n\n # Install Python requirements\n install = 'pip install -r /vagrant/Flask_app/requirements.txt'\n\n # Jo...
[ "0.79861814", "0.7786814", "0.7778329", "0.77555937", "0.76573926", "0.7628999", "0.7594286", "0.75698286", "0.744025", "0.74060243", "0.73893565", "0.7277837", "0.72283304", "0.72203624", "0.72196406", "0.7206115", "0.71840227", "0.7166442", "0.7125818", "0.706724", "0.70155...
0.6828054
38
Symlink to the new current release.
def symlink_current_release(): require("release", provided_by=[deploy]) with cd("%(path)s/releases" % env): sudo("ln -s %(release)s current_tmp && mv -Tf current_tmp current" % env)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def symlink():\n releases()\n env.current_path = '/root/your_project/current'\n run('rm %(current_path)s' % env)\n run('ln -s %(current_release)s %(current_path)s' % env)", "def symlink(timestamp):\n if exists(env.current_dir):\n run('rm -r %(current_dir)s' % env)\n run('ln -s %s %s' % (...
[ "0.85483825", "0.7888362", "0.6896718", "0.6580014", "0.640067", "0.6382023", "0.609916", "0.59879214", "0.598062", "0.58782184", "0.5851017", "0.5815926", "0.5792104", "0.57686156", "0.57686156", "0.57261956", "0.57167125", "0.5602009", "0.55953795", "0.55733645", "0.5570685...
0.8182266
1
Remove older releases, keeping the last `keep_num` intact.
def cleanup(keep_num=5): keep_num = int(keep_num) assert keep_num > 0, "[ERROR] keep_num must be > 0; refusing to proceed." with cd("%(path)s/packages" % env): package_files = sorted(run("ls -1").split()) package_files = [_.replace(".tar.gz", "") for _ in package_files] with cd("%(path)s/releases" % env): release_files = sorted(run("ls -1").split()) release_files.remove('current') diff = set(package_files).symmetric_difference(set(release_files)) if diff: raise Exception("[ERROR]: Package and release directories are out of sync;" " refusing to proceed. Please fix this difference manually: %s" % diff) package_files = package_files[:-keep_num] release_files = release_files[:-keep_num] with cd("%(path)s/packages" % env): [sudo("rm %s.tar.gz" % _) for _ in package_files] with cd("%(path)s/releases" % env): [sudo("rm -r %s" % _) for _ in release_files]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _deleteOldVersionsByAge(self, model, max_age, number_to_keep=None):\r\n adapter = getVersionManagementAdapter(model)\r\n\r\n version_ids = self._getOldVersionIds(adapter)\r\n if number_to_keep is not None:\r\n if len(version_ids) < number_to_keep:\r\n return\r\n ...
[ "0.64156675", "0.6380838", "0.63680077", "0.56757736", "0.544219", "0.5342159", "0.5324215", "0.52694285", "0.5213804", "0.51900595", "0.51862675", "0.5181022", "0.51753306", "0.51589394", "0.51021165", "0.50865173", "0.50815326", "0.5056275", "0.5055268", "0.505497", "0.5032...
0.69273704
0
Class handles loading data for many separate nodes.
def __init__(self, dataset: str, train: bool, subset: bool): PERCENT = .3 if dataset == 'MNIST': data = torchvision.datasets.MNIST('./data', train=train, download=True, transform=torchvision.transforms.Compose([ torchvision.transforms.ToTensor(), torchvision.transforms.Normalize( (0.1307,), (0.3081,)) ])) else: raise ValueError data_size = len(data) self.data = data if subset: indx = torch.randperm(data_size)[:int(data_size * PERCENT)] self.samples = self.data.data[indx, :, :] self.labels = self.data.targets[indx] else: self.samples = self.data.data self.labels = self.data.targets self.random_seed = 42
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data():\n\n server_node = load_nodes(SERVER_NODE_INFILE)\n road_node = load_nodes(ROAD_NODE_INFILE)\n road_segment_point = load_nodes(ROAD_SEGMENT_POINT_INFILE)\n\n return server_node, road_node, road_segment_point", "def load_data(self):", "def dispatch_load(self):\n\n for node in ...
[ "0.7005817", "0.6853901", "0.68161523", "0.67189497", "0.65886134", "0.6483465", "0.64096814", "0.6365568", "0.63560086", "0.62556756", "0.62167966", "0.6192285", "0.618386", "0.6101325", "0.6019031", "0.5991078", "0.59765136", "0.59683806", "0.5959553", "0.59291226", "0.5928...
0.0
-1
Separate data into number of agents
def partition(self, to_partition, indices, nr_agents): return [to_partition[indices[i]:indices[i + 1]] for i in range(nr_agents)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_number_of_agents(model):\n\n n_agents = len(model.schedule.agents_by_type['Customer'])\n return n_agents", "def get_agent_number_of_players(players):\n return sum([count_players(player) for player in players\n if player.startswith('agent')])", "def parse_data(self, data):\n p...
[ "0.6075499", "0.60177237", "0.5733644", "0.56019175", "0.551722", "0.5478866", "0.5476945", "0.5474742", "0.5418446", "0.53685576", "0.5309423", "0.530098", "0.530098", "0.5294951", "0.5268969", "0.525845", "0.52577895", "0.52215326", "0.5189219", "0.51605994", "0.5125644", ...
0.0
-1
Different ways to split data between nodes.
def split(self, how, nr_agents, **kwargs): if how == 'random': self.random_split(nr_agents) elif how == 'uniform': self.uniform_split(nr_agents) elif how == 'non_iid_uniform': self.non_iid_split(nr_agents, kwargs['class_per_node'], random=False) elif how == 'non_iid_random': self.non_iid_split(nr_agents, kwargs['class_per_node'], random=True) return self.get_data()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __split_node(self, cur_node):\n temp = self.Node(cur_node.data_list[len(cur_node.data_list) / 2:], cur_node.next_node)\n cur_node.data_list = cur_node.data_list[:len(cur_node.data_list) / 2]\n cur_node.next_node = temp\n\n if cur_node == self.tail:\n self.tail = cur_node....
[ "0.6493564", "0.6465813", "0.63918084", "0.62302977", "0.6212308", "0.6188923", "0.6174351", "0.61242145", "0.61228055", "0.60877246", "0.6079287", "0.6019898", "0.60130966", "0.598436", "0.59603816", "0.5949242", "0.59408516", "0.59373665", "0.59328663", "0.59160835", "0.591...
0.0
-1
Give each Node random splits of data. Nodes will have different amounts of data.
def random_split(self, nr_agents): np.random.seed(self.random_seed) # Get random indices indices = sorted(np.random.randint(0, high=self.samples.shape[0], size=nr_agents - 1).tolist()) indices = [0] + indices indices += [self.samples.shape[0]] self.samples = self.partition(self.samples, indices, nr_agents) self.labels = self.partition(self.labels, indices, nr_agents)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_test_split(self):\n random.seed(self.args.seed)\n nodes = [node for node in range(self.ncount)]\n random.shuffle(nodes)\n self.train_nodes = torch.LongTensor(nodes[0:self.args.training_size])\n self.validation_nodes = torch.LongTensor(nodes[self.args.training_size:self....
[ "0.68305767", "0.6814471", "0.6748061", "0.6691443", "0.66873974", "0.65587986", "0.6541015", "0.6525626", "0.64753836", "0.64283437", "0.63601255", "0.6316696", "0.63140213", "0.6251376", "0.6247065", "0.62162656", "0.6213909", "0.6207955", "0.61947703", "0.6160189", "0.6145...
0.6804831
2
Give each Node uniform splits of data. Nodes will have same amounts of data.
def uniform_split(self, nr_agents): indices = np.linspace(start=0, stop=self.samples.shape[0], num=nr_agents + 1, dtype=int).tolist() self.samples = self.partition(self.samples, indices, nr_agents) self.labels = self.partition(self.labels, indices, nr_agents)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_data(self):\r\n print('split data')\r\n np.random.shuffle(self.dataList)\r\n l = len(self.dataList)/self.fold\r\n self.dataList = [self.dataList[i*l: (i+1)*l] for i in range(self.fold-1)] + [self.dataList[(self.fold-1)*l:]] # each element in the list is splitted data list\r",...
[ "0.6593751", "0.64535356", "0.6275442", "0.624764", "0.6210491", "0.61586165", "0.61282647", "0.6122043", "0.6105798", "0.6105246", "0.6102763", "0.60595816", "0.6045628", "0.6025434", "0.60080785", "0.5965141", "0.5959221", "0.59428424", "0.5918808", "0.59182876", "0.5914056...
0.6529111
1
Give nodes only certain number of class labels as data.
def non_iid_split(self, nr_agents, class_per_node, random): unique = list(set(self.labels.tolist())) len_unique = len(unique) # Create array that assigns a class to specific nodes # Use 'np.arange' to ensure every class is represented before repeating # A row represents nr_agents, a column represents classes per node agent_class_master = np.arange(start=0, stop=nr_agents * class_per_node) % len_unique np.random.shuffle(agent_class_master) agent_class_master = agent_class_master.reshape(nr_agents, class_per_node) # Split data by labels sample_list = [[] for _ in range(len_unique)] for i in range(len(self.labels)): sample_list[self.labels[i]].append(self.samples[i]) # By class creates uniform or random indices splits to partition data to agents evenly class_count = np.bincount(agent_class_master.ravel()) class_indices = {} for i in range(len(class_count)): if random: indices = sorted(np.random.randint(0, high=len(sample_list[i]), size=class_count[i] - 1).tolist()) indices = [0] + indices indices += [len(sample_list[i])] class_indices[i] = indices else: class_indices[i] = np.linspace(start=0, stop=len(sample_list[i]), num=class_count[i] + 1, dtype=int).tolist() # Main loop that partitions data by the assigned class and proper amount all_agents = [] all_class = [] for agent in agent_class_master: agent_data = [] agent_class = [] for cls in agent: # Proportioned indices for data and grab correctly indexed data temp_indices = class_indices[cls] data_for_agent = sample_list[cls][temp_indices[0]:temp_indices[1] - 1] # Add data and class to this agents list agent_data = agent_data + data_for_agent agent_class = agent_class + [cls for _ in range(len(data_for_agent))] # Drop first index since we used that data, forces next person to use next index class_indices[cls] = temp_indices[1:] # Append agents data and class labels in order all_agents.append(torch.stack(agent_data)) all_class.append(torch.tensor(agent_class)) self.samples = all_agents self.labels = all_class
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multi_class5_classification_dataset_sparse_labels() -> tf.data.Dataset:\n\n # Create features\n X = tf.random.normal(shape=(100, 3))\n\n # Create one multi-class (one hot) labels\n y = tf.random.uniform(minval=0, maxval=5, dtype=tf.int32, shape=(100,))\n\n return tf.data.Dataset.from_tensor_slic...
[ "0.63320434", "0.6277881", "0.6271124", "0.6242542", "0.6160365", "0.6119636", "0.60193723", "0.59684384", "0.59684384", "0.5925846", "0.5914046", "0.58883923", "0.58800185", "0.5870202", "0.58377177", "0.58357537", "0.5825657", "0.5825657", "0.5825657", "0.5825657", "0.58256...
0.56607765
32
Uses LoadData class to partition prepare data. Puts data into dataloader objects to make use of batching and shuffling.
def load_mnist_data(nr_nodes, nr_classes, allocation, subset, batch_size): train_loader_list = [] test_loader_list = [] train = LoadData('MNIST', True, subset) test = LoadData('MNIST', False, False) train_data, train_targets = train.split(allocation, nr_nodes, class_per_node=nr_classes) for data, targets in zip(train_data, train_targets): train_dataset = CustomDataset(data, targets) train_loader_list.append(DataLoader(train_dataset, batch_size=batch_size, shuffle=True)) test_data, test_targets = test.split('uniform', nr_nodes) for data, targets in zip(test_data, test_targets): test_dataset = CustomDataset(data, targets) test_loader_list.append(DataLoader(test_dataset, batch_size=batch_size, shuffle=False)) return train_loader_list, test_loader_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n ...
[ "0.7089528", "0.69590586", "0.6943236", "0.6820497", "0.67818403", "0.665066", "0.662082", "0.65793914", "0.65624624", "0.65569514", "0.6556639", "0.6548388", "0.6527811", "0.65153986", "0.6513853", "0.6507452", "0.65053266", "0.6497079", "0.64861447", "0.64734644", "0.646940...
0.0
-1
This function computes the distribution internal parameters from its two first moments.
def _compute_internals(self, moments): [mean, stdv] = moments internals = {} internals['a'] = mean - np.sqrt(3) * stdv internals['b'] = mean + np.sqrt(3) * stdv return internals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['mu'] = mean\n internals['sigma'] = stdv\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['k...
[ "0.6455378", "0.6220392", "0.6185545", "0.6109156", "0.6106636", "0.60708535", "0.60512894", "0.60178155", "0.5966822", "0.59502286", "0.58735156", "0.5850575", "0.58171284", "0.5816514", "0.57661724", "0.5720821", "0.57173246", "0.57122564", "0.5709464", "0.57005703", "0.565...
0.6434524
1
This function computes the distribution internal parameters from its two first moments.
def _compute_internals(self, moments): [mean, stdv] = moments internals = {} internals['a'] = mean - np.sqrt(3) * stdv internals['b'] = mean + np.sqrt(3) * stdv return internals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['mu'] = mean\n internals['sigma'] = stdv\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['k...
[ "0.6455378", "0.6220392", "0.6185545", "0.6109156", "0.6106636", "0.60708535", "0.60512894", "0.60178155", "0.5966822", "0.59502286", "0.58735156", "0.5850575", "0.58171284", "0.5816514", "0.57661724", "0.5720821", "0.57173246", "0.57122564", "0.5709464", "0.57005703", "0.565...
0.6434524
2
This function computes the distribution internal parameters from its two first moments.
def _compute_internals(self, moments): [mean, stdv] = moments internals = {} internals['mu'] = mean internals['sigma'] = stdv return internals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['a'] = mean - np.sqrt(3) * stdv\n internals['b'] = mean + np.sqrt(3) * stdv\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n i...
[ "0.6434524", "0.6434524", "0.6220392", "0.6185545", "0.6109156", "0.6106636", "0.60708535", "0.60512894", "0.60178155", "0.5966822", "0.59502286", "0.58735156", "0.5850575", "0.58171284", "0.5816514", "0.57661724", "0.5720821", "0.57173246", "0.57122564", "0.5709464", "0.5700...
0.6455378
0
This function computes the distribution internal parameters from its two first moments.
def _compute_internals(self, moments): [mean, stdv] = moments cov = stdv / mean zeta = np.sqrt(np.log(1. + cov ** 2.)) LAMBDA = np.log(mean) - 0.5 * zeta ** 2. internals = {} internals['LAMBDA'] = LAMBDA internals['zeta'] = zeta return internals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['mu'] = mean\n internals['sigma'] = stdv\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['a...
[ "0.6455378", "0.6434524", "0.6434524", "0.6220392", "0.6185545", "0.6109156", "0.60708535", "0.60512894", "0.60178155", "0.5966822", "0.59502286", "0.58735156", "0.5850575", "0.58171284", "0.5816514", "0.57661724", "0.5720821", "0.57173246", "0.57122564", "0.5709464", "0.5700...
0.6106636
6
This function computes the distribution internal parameters from its two first moments.
def _compute_internals(self, moments): [mean, stdv] = moments internals = {} internals['k'] = mean ** 2. / stdv ** 2. internals['LAMBDA'] = mean / stdv ** 2. return internals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['mu'] = mean\n internals['sigma'] = stdv\n\n return internals", "def _compute_internals(self, moments):\n\n [mean, stdv] = moments\n internals = {}\n internals['a...
[ "0.6455378", "0.6434524", "0.6434524", "0.6185545", "0.6109156", "0.6106636", "0.60708535", "0.60512894", "0.60178155", "0.5966822", "0.59502286", "0.58735156", "0.5850575", "0.58171284", "0.5816514", "0.57661724", "0.5720821", "0.57173246", "0.57122564", "0.5709464", "0.5700...
0.6220392
3
Date the activity was created.
def creation_date(self) -> str: return pulumi.get(self, "creation_date")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def date_created(self) -> datetime:\n return self._date_created", "def created_date(self):\n return self._created_date", "def created_date(self):\n return self._created_date", "def date_created(self):\n return self._date_created", "def date_created(self):\n return self._d...
[ "0.8107826", "0.7986173", "0.7986173", "0.797429", "0.797429", "0.797429", "0.7832342", "0.78072244", "0.7796537", "0.7792625", "0.76402485", "0.75940555", "0.7574318", "0.75237525", "0.75237525", "0.75183827", "0.7475857", "0.74230283", "0.73979384", "0.7372514", "0.7362803"...
0.8029383
3
The providerassigned unique ID for this managed resource.
def id(self) -> str: return pulumi.get(self, "id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def provider_id(self):\n return self.get('_id')", "def provider_id(self):\n raise NotImplementedError", "def id(self):\n return self.raw_resource.uuid", "def healthcare_provider_id(self):\n return self._healthcare_provider_id", "def unique_identifier(self) -> str:\n retur...
[ "0.8193402", "0.7851373", "0.77124894", "0.7604287", "0.7477648", "0.7476093", "0.7476093", "0.7476093", "0.7425807", "0.7380237", "0.7371964", "0.7371964", "0.7371964", "0.7371964", "0.7371964", "0.7371964", "0.7371964", "0.7371964", "0.735787", "0.735787", "0.73477197", "...
0.0
-1
Provides a Step Functions Activity data source Example Usage ```python import pulumi import pulumi_aws as aws sfn_activity = aws.sfn.get_activity(name="myactivity") ```
def get_activity(arn: Optional[str] = None, name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetActivityResult: __args__ = dict() __args__['arn'] = arn __args__['name'] = name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('aws:sfn/getActivity:getActivity', __args__, opts=opts, typ=GetActivityResult).value return AwaitableGetActivityResult( arn=pulumi.get(__ret__, 'arn'), creation_date=pulumi.get(__ret__, 'creation_date'), id=pulumi.get(__ret__, 'id'), name=pulumi.get(__ret__, 'name'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_activities():\n pass", "def get_activity_output(arn: Optional[pulumi.Input[Optional[str]]] = None,\n name: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetActivityResult]:\n ...", "def c...
[ "0.6528125", "0.64868456", "0.60511726", "0.6050907", "0.59809947", "0.5972801", "0.5858322", "0.5832111", "0.57496256", "0.57023674", "0.56348556", "0.56348556", "0.5538348", "0.5492945", "0.54754126", "0.5414958", "0.53552705", "0.53485847", "0.53307176", "0.5305301", "0.52...
0.6439963
2
Provides a Step Functions Activity data source Example Usage ```python import pulumi import pulumi_aws as aws sfn_activity = aws.sfn.get_activity(name="myactivity") ```
def get_activity_output(arn: Optional[pulumi.Input[Optional[str]]] = None, name: Optional[pulumi.Input[Optional[str]]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetActivityResult]: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_activities():\n pass", "def get_activity(arn: Optional[str] = None,\n name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetActivityResult:\n __args__ = dict()\n __args__['arn'] = arn\n __args__['name'] = name\n opts = pul...
[ "0.652813", "0.64412045", "0.6052704", "0.60521966", "0.59815174", "0.5974512", "0.58580536", "0.58321756", "0.57499844", "0.57043284", "0.5635826", "0.5635826", "0.55382925", "0.5494571", "0.5475397", "0.5416918", "0.53558916", "0.5349023", "0.53315115", "0.5306385", "0.5292...
0.64874995
1
get constraints of the current layer
def get_constraints(self, prev_layer): constraints = [] if self.activation is not None: constraints += self.activation.get_constraints(self, prev_layer) else: # for linear activations current_constraints = [] for channel_indx in range(self.n_in_channels): upper_bound, _ = prev_layer.get_bounds(channel_indx) critical_prob = prev_layer.get_critical_neurons(channel_indx) if critical_prob is None: keep_upper_bound = 0 else: keep_upper_bound = cp.multiply(1 - critical_prob, upper_bound) current_constraints += [ self.layer_input[channel_indx] == prev_layer.get_computation_layer(channel_indx) - keep_upper_bound ] constraints += self.create_constraint( f"{self.name}_linear", current_constraints ) if prev_layer.compute_critical_neurons: constraints += self.create_constraint( f"neuron_importance_bounds_{prev_layer.name}", [prev_layer.neuron_importance >= 0, prev_layer.neuron_importance <= 1], ) return constraints
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_constraints(self):\n return self.constraints", "def constraints(self):\n return self._constraints", "def constraints(self):\n return self._constraints", "def get_constraints(self):\n\n return vertcat(*self.g), self.g_min, self.g_max", "def getConstraint(self):\n r...
[ "0.80761766", "0.79321265", "0.79321265", "0.7608304", "0.75793535", "0.7431221", "0.72901845", "0.71523994", "0.7082262", "0.7023915", "0.6928313", "0.6927668", "0.69181097", "0.68804044", "0.6866544", "0.6732997", "0.6618143", "0.66156757", "0.66137815", "0.6589468", "0.658...
0.79009855
3
get the cvxpy variable associated with this layer
def get_cvxpy_variable(self, channel_indx=None): if channel_indx is None: output_channels = cp.hstack( [ self.layer_input[cur_channel_indx] for cur_channel_indx in range(self.n_in_channels) ] ) else: output_channels = self.layer_input[channel_indx] return output_channels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def xvar ( self ) :\n return self.__xvar", "def x ( self ) :\n return self.xvar", "def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))", "def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))", "def var(self, name):\n ret...
[ "0.6952394", "0.6349231", "0.6311073", "0.6311073", "0.6311073", "0.6210869", "0.614176", "0.61036706", "0.6093137", "0.5973595", "0.5904504", "0.58920914", "0.5774172", "0.5744649", "0.5726218", "0.5713547", "0.56973785", "0.56402665", "0.5625536", "0.5621677", "0.5595852", ...
0.7194828
0
compute the output of this layer based on the weights biases and decision variable
def get_computation_layer(self, channel_indx=0):
    """Batch-norm output for one channel (or all channels flattened).

    Normalizes the channel's input with the stored running statistics and,
    when ``affine`` is set, applies the learned scale and shift.

    Args:
        channel_indx: channel to compute; ``None`` returns the flat
            multi-channel output.
    """
    if channel_indx is None:
        return self._get_multi_channel_output_flat()
    mean = self.running_mean[channel_indx]
    std = np.sqrt(self.running_var[channel_indx] + self.epsilon)
    normalized = (self.layer_input[channel_indx] - mean) / std
    if not self.affine:
        return normalized
    return normalized * self.weights[channel_indx] + self.bias[channel_indx]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_outputs(self, *args, **kwargs):\n pass\n # self.outputs = self.model(input_ids=self.input_ids, masked_lm_labels=self.input_ids)\n # self.logits = self.outputs[0][0]\n # self.probs = torch.softmax(self.logits, 1)", "def get_output(weight, data, regression= \"logistic\"):\n...
[ "0.67609805", "0.6741717", "0.6619007", "0.65938884", "0.650701", "0.65016365", "0.6475759", "0.6448658", "0.63928056", "0.63902205", "0.63838184", "0.63790214", "0.63790214", "0.6343709", "0.6316035", "0.6315198", "0.6305553", "0.62861043", "0.628232", "0.62760216", "0.62695...
0.0
-1
returns the bounds asssociated with input to this layer
def get_bounds(self, channel_indx=None):
    """Bounds on this layer's input, flattened to (batch_size, -1).

    Args:
        channel_indx: channel to slice out of axis 1; ``None`` flattens
            all channels together.

    Returns:
        tuple: (upper_bound, lower_bound) arrays of shape (batch_size, k).
    """
    if channel_indx is None:
        upper = self.upper_bound
        lower = self.lower_bound
    else:
        upper = self.upper_bound[:, channel_indx, :]
        lower = self.lower_bound[:, channel_indx, :]
    batch = self.batch_size
    return upper.reshape(batch, -1), lower.reshape(batch, -1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bounds(self):\n return self.kernel.bounds", "def input_bounds(self):\n return self.__input_bounds", "def bounds(self):\n return self.GetBounds()", "def bounds(self):\n return self._bounds", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def Ge...
[ "0.8187093", "0.8182944", "0.7970464", "0.7969971", "0.7886397", "0.7886397", "0.7886397", "0.7886397", "0.7886397", "0.7886397", "0.7886397", "0.7886397", "0.78816897", "0.78816897", "0.78687114", "0.7821428", "0.77718407", "0.77343", "0.7731461", "0.7682162", "0.7611129", ...
0.6802181
53
routine used to test the current pooling implementation to make sure no discrepancy between cvxpy and original pytorch layer
def _test(self):
    """Sanity-check the numpy batch-norm reimplementation against PyTorch.

    Runs a random image through the original PyTorch layer (in eval mode, on
    CPU) and recomputes each channel with the stored running statistics,
    asserting the two agree to within 1e-6.
    """
    # eval() so PyTorch uses running statistics rather than batch statistics.
    self.pytorch_layer.eval()
    pytorch_layer = copy.deepcopy(self.pytorch_layer).cpu()
    # Input is assumed square: input_size = H * W with H == W.
    image_w_h = int(self.input_size ** 0.5)
    input_image = torch.rand(1, self.n_in_channels, image_w_h, image_w_h)
    output_tensor = pytorch_layer(input_image)[0]
    for channel in range(self.n_in_channels):
        current_channel = input_image[0, channel].squeeze().flatten().cpu().numpy()
        # Same normalization the cvxpy layer encodes: (x - mean) / sqrt(var + eps).
        normalized_data = (current_channel - self.running_mean[channel]) / np.sqrt(
            self.running_var[channel] + self.epsilon
        )
        if self.affine:
            output_numpy = (self.weights[channel] * normalized_data) + self.bias[
                channel
            ]
        else:
            output_numpy = normalized_data
        assert np.isclose(
            output_numpy,
            output_tensor[channel].detach().flatten().cpu().numpy(),
            atol=1e-6,
        ).all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_pool_consistency(self) -> None:\n x = Constant(\n 'const1',\n Float32(),\n np.zeros([1, 3, 3, 3])\n )\n input_ops = {'X': cast(Operator, x)}\n\n MaxPool(\n 'max_pool1',\n [1, 2, 2, 3],\n Float32(),\n i...
[ "0.6518167", "0.6481036", "0.64059615", "0.6386542", "0.6330426", "0.6294219", "0.62897485", "0.6237556", "0.61123407", "0.603048", "0.6028792", "0.5981502", "0.58038086", "0.57659817", "0.57648206", "0.5753006", "0.573803", "0.57336324", "0.57228494", "0.57161427", "0.571519...
0.5887395
12
returns number of output channels
def get_n_channels(self):
    """Return the number of output channels of this layer."""
    channel_count = self.n_out_channels
    return channel_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_channels(self):\n return len(self.channels)", "def num_channels_per_output(cls) -> list[tuple[int, ...]]:\n return [\n (16, 24, 40, 112, 320),\n (16, 24, 40, 112, 320),\n (16, 24, 48, 120, 352),\n (24, 32, 48, 136, 384),\n (24, 32, 56, 16...
[ "0.7895569", "0.78375614", "0.7822447", "0.77712107", "0.763508", "0.7623057", "0.76186246", "0.75209033", "0.7185467", "0.717589", "0.71026057", "0.70969474", "0.7064426", "0.7045207", "0.6935932", "0.6927934", "0.69142646", "0.68657845", "0.6865311", "0.6837313", "0.6834410...
0.86962193
0
Adds operations that perform JPEG decoding and resizing to the graph..
def _image_preprocess_fn(image_buffer, input_height, input_width, input_mean,
                         input_std, return_full_size_image=False):
    """Add JPEG decode, crop, resize and normalization ops to the graph.

    Args:
        image_buffer: 1-D string Tensor holding the raw JPEG bytes.
        input_height: target height after resize.
        input_width: target width after resize.
        input_mean: value subtracted from pixels before scaling.
        input_std: divisor applied after mean subtraction.
        return_full_size_image: if True, also return the cropped (pre-resize)
            image tensor.

    Returns:
        The normalized resized image tensor, optionally paired with the
        full-size cropped image.
    """
    # image_buffer 1-D string Tensor representing the raw JPEG image buffer.
    # Extract image shape from raw JPEG image buffer.
    image_shape = tf.image.extract_jpeg_shape(image_buffer)
    # Decode and crop image.
    offset_x = 0
    # Crop off the top third of the image (offset_y = H // 3, height = 2H // 3).
    offset_y = image_shape[0] // 3
    crop_width = image_shape[1]
    crop_height = 2 * image_shape[0] // 3
    crop_window = tf.stack([offset_y, offset_x, crop_height, crop_width])
    cropped_image = tf.image.decode_and_crop_jpeg(image_buffer, crop_window, channels=3)
    # Resize image.
    # decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
    decoded_image_4d = tf.expand_dims(cropped_image, 0)
    resize_shape = tf.stack([input_height, input_width])
    resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
    resized_image = tf.image.resize_bilinear(decoded_image_4d, resize_shape_as_int)
    # Normalize image: (pixel - mean) / std.
    offset_image = tf.subtract(resized_image, input_mean)
    mul_image = tf.multiply(offset_image, 1.0 / input_std)
    if return_full_size_image:
        return tf.squeeze(mul_image, axis=0), cropped_image
    return tf.squeeze(mul_image, axis=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, image):", "def process_image(self):\n pass", "def adjust(self, image):\n ...", "def augment(self, image):\n pass", "def add_jpeg_decoding(input_width, input_height, input_depth, input_mean,\n input_std):\n\n jpeg_data = tf.placeholder(tf.strin...
[ "0.6741079", "0.6175979", "0.6148263", "0.59459597", "0.59365785", "0.58881974", "0.58312756", "0.58165944", "0.56556207", "0.56259316", "0.5575787", "0.55362195", "0.55134434", "0.54918855", "0.5491029", "0.54815876", "0.5451435", "0.5449056", "0.5448799", "0.5446489", "0.54...
0.0
-1
Creates input_fn according to parameters
def get_input_fn(input_file_names, batch_size=1, num_epochs=None, shuffle=False,
                 shard_size=3000, return_full_size_image=False):
    """Create an ``input_fn`` that streams TFRecord shards for an Estimator.

    Args:
        input_file_names: list of TFRecord file paths.
        batch_size: examples per batch.
        num_epochs: number of passes over the data; ``None`` repeats forever.
        shuffle: shuffle both shard order and examples when True.
        shard_size: approximate examples per shard; sizes the shuffle buffer.
        return_full_size_image: also yield the cropped full-size image.

    Returns:
        A zero-argument ``input_fn`` returning the dataset iterator's next op.
    """
    def parse_fn(example):
        """Parse TFExample records and perform simple data augmentation."""
        example_fmt = {
            "image": tf.FixedLenFeature((), tf.string),
            # -1 is the default target when the feature is missing.
            "target": tf.FixedLenFeature((), tf.float32, -1)
        }
        parsed = tf.parse_single_example(example, example_fmt)
        if return_full_size_image:
            preprocessed_image, full_size_image = _image_preprocess_fn(
                image_buffer=parsed["image"], input_height=299, input_width=299,
                input_mean=128, input_std=128, return_full_size_image=True)
            return preprocessed_image, parsed["target"], full_size_image
        preprocessed_image = _image_preprocess_fn(image_buffer=parsed["image"],
                                                  input_height=299, input_width=299,
                                                  input_mean=128, input_std=128)
        return preprocessed_image, parsed["target"]

    def input_fn():
        file_names = tf.constant(input_file_names, dtype=tf.string, name='input_file_names')

        if shuffle:
            # Shuffle shard order, interleave reads, then shuffle examples.
            num_shards = len(input_file_names)
            files = tf.data.Dataset.from_tensor_slices(file_names).shuffle(num_shards)
            dataset = files.interleave(tf.data.TFRecordDataset, cycle_length=3)
            dataset = dataset.shuffle(buffer_size=shard_size*2)
        else:
            dataset = tf.data.TFRecordDataset(file_names)

        dataset = dataset.map(map_func=parse_fn, num_parallel_calls=FLAGS.num_parallel_img_parsers)
        dataset = dataset.batch(batch_size=batch_size)
        dataset = dataset.repeat(num_epochs)  # the input is repeated indefinitely if num_epochs is None
        dataset = dataset.prefetch(buffer_size=64)

        # print("Dataset ouput types: {}".format(dataset.output_types))
        # print("Dataset ouput shapes: {}".format(dataset.output_shapes))

        iterator = dataset.make_one_shot_iterator()
        return iterator.get_next()

    return input_fn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def input_fn(sources, train, params):\n \n raise NotImplementedError", "def _input_fn(input_pipeline_context=None):\n return _create_dataset(options, is_training, input_pipeline_context)", "def generate_input_fn(mode='TRAIN'):\n mode = mode.upper()\n if mode == 'TRAIN' or mode == 'EVAL':\n ...
[ "0.77863973", "0.70128167", "0.6983106", "0.6836533", "0.6774335", "0.6678941", "0.66704524", "0.6666463", "0.66138333", "0.66047716", "0.6547102", "0.6434819", "0.6386017", "0.63624465", "0.63570637", "0.63555074", "0.634394", "0.6342538", "0.6278813", "0.62751496", "0.62735...
0.0
-1
Parse TFExample records and perform simple data augmentation.
def parse_fn(example):
    """Parse TFExample records and perform simple data augmentation.

    Decodes one serialized TFExample into (image, target), routing the raw
    JPEG bytes through ``_image_preprocess_fn`` (299x299, mean/std 128).

    NOTE(review): ``return_full_size_image`` is a free variable captured from
    the enclosing scope — confirm it is defined where this closure is built.
    """
    example_fmt = {
        "image": tf.FixedLenFeature((), tf.string),
        # -1 is the default target when the feature is missing.
        "target": tf.FixedLenFeature((), tf.float32, -1)
    }
    parsed = tf.parse_single_example(example, example_fmt)
    if return_full_size_image:
        preprocessed_image, full_size_image = _image_preprocess_fn(
            image_buffer=parsed["image"], input_height=299, input_width=299,
            input_mean=128, input_std=128, return_full_size_image=True)
        return preprocessed_image, parsed["target"], full_size_image
    preprocessed_image = _image_preprocess_fn(image_buffer=parsed["image"],
                                              input_height=299, input_width=299,
                                              input_mean=128, input_std=128)
    return preprocessed_image, parsed["target"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parser(record):\n record_spec = {\n \"input\": tf.FixedLenFeature([seq_len], tf.int64),\n \"labels\": tf.FixedLenFeature([tgt_len], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_len],tf.float32),\n \"target_mask\": tf.FixedLenFeature([tgt_len],tf.float32)\n }\n\n # r...
[ "0.684948", "0.6609394", "0.6605501", "0.6414771", "0.6374184", "0.63535416", "0.6350793", "0.63333046", "0.63030624", "0.6258336", "0.62369555", "0.6210575", "0.6175964", "0.61759615", "0.6129125", "0.61166155", "0.6116493", "0.6086684", "0.60770196", "0.6074543", "0.6071042...
0.5721495
55
3x3 convolution with padding
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 convolution with 1-pixel padding and no bias term."""
    return nn.Conv2d(
        in_channels=in_planes,
        out_channels=out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conv3x3(in_planes, out_planes, stride=1, dilation=1, padding=1):\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=padding,\n dilation=dilation,\n bia...
[ "0.75487494", "0.7377383", "0.7326011", "0.7300764", "0.72812986", "0.7276984", "0.725113", "0.72361106", "0.723129", "0.72272843", "0.71892864", "0.7178802", "0.71645725", "0.71645725", "0.71645725", "0.7158782", "0.7156602", "0.7156602", "0.7156602", "0.7156602", "0.7156602...
0.7081058
74
Constructs a BiRealNet18 model.
def birealnet18(pretrained=False, **kwargs):
    """Construct a BiRealNet-18 model.

    NOTE(review): ``pretrained`` is accepted but currently unused — no
    pretrained weights are loaded.
    """
    stage_depths = [4, 4, 4, 4]
    return BiRealNet(BasicBlock, stage_depths, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resnet18(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def birealnet34(pretrained=False, **kwargs):\n ...
[ "0.6268504", "0.620634", "0.6204012", "0.61781776", "0.616232", "0.61510354", "0.61510354", "0.61510354", "0.61510354", "0.61510354", "0.6100227", "0.6087697", "0.6079006", "0.6012798", "0.5975343", "0.59738714", "0.59638786", "0.5960628", "0.5948427", "0.5936224", "0.5920398...
0.70269394
1
Constructs a BiRealNet34 model.
def birealnet34(pretrained=False, **kwargs):
    """Construct a BiRealNet-34 model.

    NOTE(review): ``pretrained`` is accepted but currently unused — no
    pretrained weights are loaded.
    """
    stage_depths = [6, 8, 12, 6]
    return BiRealNet(BasicBlock, stage_depths, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def birealnet18(pretrained=False, **kwargs):\n model = BiRealNet(BasicBlock, [4, 4, 4, 4], **kwargs)\n return model", "def birealnet18(pretrained=False, **kwargs):\n model = BiRealNet(BasicBlock, [4, 4, 4, 4], **kwargs)\n return model", "def resnet34(bitW, bitA, pretrained=False, **kwargs):\n mo...
[ "0.6753302", "0.6753302", "0.63931054", "0.63634723", "0.6265441", "0.6239585", "0.6229357", "0.6220196", "0.6198277", "0.6196694", "0.6196694", "0.6196694", "0.61152387", "0.6085224", "0.60785514", "0.6060138", "0.60452986", "0.6025176", "0.6025176", "0.6025176", "0.60139596...
0.73481995
0
Kernel Density Estimation with Scipy
def kde_scipy(x, x_grid, bandwidth=0.2, **kwargs):
    """Kernel Density Estimation with scipy's gaussian_kde.

    Evaluates a Gaussian KDE fitted on ``x`` at the points ``x_grid``.

    NOTE(review): ``bandwidth`` and ``**kwargs`` are currently ignored —
    scipy's default (covariance-scaled) bandwidth is used. The intended
    custom-bandwidth call is left commented out in the original.
    """
    # kde = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)
    density = gaussian_kde(x)
    return density.evaluate(x_grid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kde_sklearn(x, x_grid, bandwidth=0.8, **kwargs):\n \n kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)\n #kde_skl = KernelDensity()\n kde_skl.fit(x[:, np.newaxis])\n # score_samples() returns the log-likelihood of the samples\n log_pdf = kde_skl.score_samples(x_grid[:, np.newaxis])\n \...
[ "0.6899102", "0.68290377", "0.6813299", "0.68107426", "0.66186744", "0.66110486", "0.6379504", "0.6353802", "0.6331665", "0.62971747", "0.62869173", "0.6239997", "0.623953", "0.6189176", "0.6178441", "0.6158369", "0.6149099", "0.61140794", "0.6109274", "0.6099322", "0.6058081...
0.6502326
6
Adds a record that a certain peer has a block.
def peer_has_block(
    self, header_hash: bytes32, peer_id: bytes32, weight: uint128, height: uint32, new_peak: bool
) -> None:
    """Record that ``peer_id`` has the block ``header_hash``.

    Maintains the header_hash -> {peer_ids} map (bounded to 256 entries,
    evicting oldest-first) and, when this is the peer's new peak, the
    peer_id -> Peak map. Seeing the current sync target also wakes waiters
    via ``peers_changed``.
    """
    if self.target_peak is not None and header_hash == self.target_peak.header_hash:
        # A peer advertised our sync target: notify anyone waiting on peers.
        self.peers_changed.set()
    if header_hash in self.peak_to_peer:
        self.peak_to_peer[header_hash].add(peer_id)
    else:
        self.peak_to_peer[header_hash] = {peer_id}
    if len(self.peak_to_peer) > 256:  # nice power of two
        item = self.peak_to_peer.popitem(last=False)  # Remove the oldest entry
        # sync target hash is used throughout the sync process and should not be deleted.
        if self.target_peak is not None and item[0] == self.target_peak.header_hash:
            self.peak_to_peer[item[0]] = item[1]  # Put it back in if it was the sync target
            self.peak_to_peer.popitem(last=False)  # Remove the oldest entry again
    if new_peak:
        self.peer_to_peak[peer_id] = Peak(header_hash, height, weight)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def announce_new_block(block):\n for peer in peers:\n url = \"{}add_block\".format(peer)\n requests.post(url, data=json.dumps(block.__dict__, sort_keys=True))", "def add_block(self, block, proof):\n previous_hash = self.last_block.hash\n\n if previous_hash != block.previous_hash:\n...
[ "0.6661918", "0.663433", "0.66281337", "0.6614235", "0.65480334", "0.65008473", "0.64870596", "0.6486342", "0.6483645", "0.63460624", "0.62318516", "0.61465347", "0.6141715", "0.61389035", "0.61175793", "0.61068624", "0.6093761", "0.6086257", "0.60828084", "0.606044", "0.6018...
0.66007864
4
Clears the peak_to_peer info which can get quite large.
async def clear_sync_info(self) -> None:
    """Clear the peak_to_peer map, which can grow quite large during sync."""
    # NOTE(review): `orderedDict` is presumably an alias for
    # collections.OrderedDict imported elsewhere in this file — confirm;
    # peer_has_block relies on popitem(last=False) ordering.
    self.peak_to_peer = orderedDict()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear(self):\n self._fingerprint = 0", "def clear(self):\n self.mismatch_error = None\n self.pt_outs = None\n self._onnx_graph = None\n self.upper_graph_info = None\n self.lower_graph_info = None", "def clear(self):\n self.molo_tcp_pack.clear()\n self...
[ "0.6206844", "0.58822215", "0.5839706", "0.5757953", "0.57487977", "0.5747801", "0.5705526", "0.5700115", "0.5695211", "0.56707", "0.5589478", "0.5570707", "0.5559506", "0.55220956", "0.55071974", "0.55066085", "0.5479404", "0.5468562", "0.5449649", "0.5447672", "0.5441297", ...
0.7599027
0
Peak wavelength when the curve is expressed as power density.
def lambda_max(self):
    """Peak wavelength of the power-density curve (Wien's displacement law).

    Computed as b_wien / T, where b_wien is Wien's displacement constant
    (presumably from an astropy-style ``const`` module — confirm).
    """
    temperature = self.temperature
    return const.b_wien / temperature
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wavelength(energy):\r\n return 2 * np.pi * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def wavelength(self):\n return wavelength(energy)", "def peak(self):\n pass", "def wavelength(energy):\n return 2 * PI * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def peak_PSF(self):\n ...
[ "0.65199757", "0.65100896", "0.6436953", "0.6426267", "0.6408306", "0.62890404", "0.6252019", "0.624753", "0.6180097", "0.61793584", "0.6162006", "0.6152941", "0.61026114", "0.6093139", "0.6071749", "0.6033385", "0.59775233", "0.59704584", "0.5951522", "0.5938655", "0.5900657...
0.5508018
82
Make a hex string from the venue names to use as a unique id. Only the last 8 characters are used for the unique id.
def make_unique_id(venue_list):
    """Derive an 8-character hex id from a list of venue names.

    Feeds each name into an MD5 hash and returns the last 8 hex digits of
    the digest. Names given as ``str`` are UTF-8 encoded first — hashlib
    requires bytes on Python 3 (the original passed str directly, which
    raises TypeError there).

    Args:
        venue_list: iterable of venue names (str or bytes).

    Returns:
        str: last 8 characters of the MD5 hex digest.
    """
    md5_hash = md5()
    for name in venue_list:
        if isinstance(name, str):
            name = name.encode("utf-8")
        md5_hash.update(name)
    hash_hex = md5_hash.hexdigest()
    return hash_hex[-8:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_uuid():\n parts = [Record._hex_string(k) for k in Record.UUID_PARTS]\n return \"-\".join(parts)", "def _unique_id():\n id = \"\"\n for i in xrange(0,8):\n id += choice(ascii_letters)\n return id", "def format_unique_id(address: str) -> str:\n return address.replace(\"...
[ "0.7108777", "0.67829424", "0.67574894", "0.66712064", "0.66090417", "0.6605987", "0.6547099", "0.643587", "0.6423709", "0.64172685", "0.6412221", "0.6411684", "0.63644415", "0.63502777", "0.6323095", "0.62953424", "0.6283156", "0.62323576", "0.61994445", "0.61936736", "0.610...
0.8124629
0
This is the algorithm. Get the score between these two venues.
def score(cur_ven, ven):
    """Score venue ``ven`` relative to the current venue ``cur_ven``.

    Numerator blends the venue's rating with a saturating bonus for the
    number of rating signals; denominator is the geodesic distance in
    meters plus a smoothing constant, so nearer venues score higher.
    Returns -inf when any field is missing or the distance lookup fails.

    NOTE: Python 2 code (print statement); `eulers` and `vincenty` are
    presumably module-level (math e and geopy.distance.vincenty) — confirm.
    """
    try:
        # alpha dampens the distance term so very close venues don't dominate.
        alpha = 750
        # 1 - e^(-signals/144) saturates toward 1 as signal count grows.
        numerator = (ven["rating"] * 0.75) + (2.5 * (1- eulers**(-ven["ratingSignals"]/144)))
        cur_coord = (cur_ven["location"]["lat"], cur_ven["location"]["lng"])
        ven_coord = (ven["location"]["lat"], ven["location"]["lng"])
        denominator = vincenty(cur_coord, ven_coord).meters + alpha
    except Exception as e:
        print "{}, \n has produced an error from {}".format(ven["name"], e)
        return float("-inf")
    return numerator / denominator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_score(self, a, b):\n ### FILL IN ###", "def __get_score(self):\n for pair in zip(self.nu[self.nu_idx:], self.sw[self.sw_idx:]):\n if pair[0] == pair[1]:\n self.score += 1\n else:\n break", "def reviewer_similarity_score(self, other: _Ver...
[ "0.70562255", "0.66023374", "0.64212406", "0.6366032", "0.6331436", "0.6192957", "0.6185072", "0.61593205", "0.61031914", "0.60388577", "0.6024461", "0.59839195", "0.59788215", "0.59720534", "0.59422135", "0.59361476", "0.5904944", "0.5902023", "0.5900325", "0.5871707", "0.58...
0.6433049
2
Raises a ValueError if matrix `value` is not square.
def assert_square(name: str, value: np.ndarray) -> None:
    """Raise ValueError unless ``value`` is a 2-D square matrix."""
    shape = value.shape
    if len(shape) == 2 and shape[0] == shape[1]:
        return
    raise ValueError(f"{name} must be a square")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_square(self):\n if self.rows != self.cols:\n raise IndexError(\"Matrix is not square\")", "def test_change_basis_raises_not_square(self, fun):\n A = np.random.rand(4, 6)\n with pytest.raises(ValueError, match=\"The input matrix is not square\"):\n fun(A)", "...
[ "0.70691025", "0.6647211", "0.62668157", "0.62668157", "0.6163664", "0.6089783", "0.6032893", "0.5889549", "0.58770996", "0.58336306", "0.57889503", "0.57666147", "0.5684565", "0.5575981", "0.5559106", "0.5558494", "0.553429", "0.55041176", "0.5487154", "0.5484843", "0.547051...
0.7312172
0
Calculates the Shannon entropy for probabilities `ps` with `base`.
def shannon_entropy(ps: np.ndarray, base: int = 2) -> float:
    """Shannon entropy of the probabilities ``ps`` in the given ``base``.

    Uses the change-of-base identity log_b(p) = ln(p) / ln(b).
    Note: zero probabilities produce NaN (0 * log 0), as in the original.
    """
    log_base_ps = np.log(ps) / np.log(base)
    return -np.sum(ps * log_base_ps)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def entropy(self, base: int = None):\n\n # shannon entropy in nats\n fdist_ = self.fdist\n fdist_[\"prob\"] = fdist_[\"freq\"] / fdist_[\"freq\"].sum()\n fdist_[\"logp\"] = np.log(fdist_[\"prob\"])\n fdist_[\"nats\"] = -fdist_[\"prob\"] * fdist_[\"logp\"]\n entropy_ = fdis...
[ "0.76036006", "0.67516744", "0.6684365", "0.6516514", "0.64760756", "0.62832654", "0.6267193", "0.62411416", "0.6221311", "0.62205845", "0.6211119", "0.6175704", "0.61547273", "0.59715253", "0.5954994", "0.59400564", "0.58892614", "0.5873213", "0.58565325", "0.5835365", "0.58...
0.8918445
0
Simply tests if `img` has 3 channels.
def is_rgb(img: np.ndarray) -> bool:
    """Return True when the array's trailing axis has length 3 (RGB)."""
    if img.ndim < 1:
        return False
    return img.shape[-1] == 3
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_rgb(im):\n if(im.ndim == 3):\n return True\n else:\n return False", "def rgb(self) -> bool:\n return self.image_shape[2] == 3", "def is_RGB(self,img_path):\n image=Image.open(img_path)\n image=np.asarray(image)\n if(len(image.shape)<3):\n return...
[ "0.72373766", "0.7074375", "0.67956823", "0.6699473", "0.66691566", "0.6518619", "0.65036654", "0.62500054", "0.62126744", "0.6188705", "0.61673236", "0.61294484", "0.61257964", "0.6089409", "0.594763", "0.59257436", "0.5916708", "0.5915381", "0.58001804", "0.5760735", "0.574...
0.74561703
0
Converts an array [..., channels] of RGB values to HSI color values (H in rad). RGB values are assumed to be normalized to (0, 1).
def rgb_to_hsi(image: np.ndarray) -> np.ndarray:
    """Convert an array [..., 3] of RGB values to HSI (H in radians).

    RGB values are assumed normalized to (0, 1). Degenerate pixels
    (r == g == b, or all-zero) yield 0/0 internally; the resulting NaNs in
    H and S are zeroed in place at the end.
    """
    if not is_rgb(image):
        raise ValueError("Input needs to be an array of RGB values")
    r = image[..., 0]
    g = image[..., 1]
    b = image[..., 2]
    out = np.zeros_like(image)
    # allequal = (img == img[:, :, 0, np.newaxis]).all(axis=-1)
    with np.errstate(invalid="ignore"):
        # Geometric hue formula; denominator vanishes when r == g == b (0/0).
        tmp = (2.0 * r - g - b) / 2.0 / np.sqrt((r - g) ** 2 + (r - b) * (g - b))  # if r==g==b then 0/0
        # Clip guards against tiny float excursions outside [-1, 1].
        theta = np.arccos(np.clip(tmp, -1.0, +1.0))
    # Hue wraps past pi when blue exceeds green.
    out[..., 0] = np.where(b <= g, theta, 2 * np.pi - theta)  # H
    out[..., 2] = np.sum(image, axis=-1) / 3.0  # I
    out[..., 1] = 1 - np.amin(image, axis=-1) / out[..., 2]  # S if r==g==b==0 then 0/0
    # Replace NaNs from the degenerate cases above, in place, H and S only.
    np.nan_to_num(out[..., 0:2], copy=False)
    return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rgb2hsl_img(rgb):\r\n \r\n def core(_rgb, _hsl):\r\n\r\n irgb = _rgb.astype(np.uint16)\r\n ir, ig, ib = irgb[:, :, 0], irgb[:, :, 1], irgb[:, :, 2]\r\n h, s, l = _hsl[:, :, 0], _hsl[:, :, 1], _hsl[:, :, 2]\r\n\r\n imin, imax = irgb.min(2), irgb.max(2)\r\n iadd, isub = i...
[ "0.6703506", "0.6296489", "0.6287106", "0.60505944", "0.59786993", "0.5978399", "0.59774935", "0.59652996", "0.5940715", "0.59046626", "0.58576584", "0.5831712", "0.58161163", "0.58112276", "0.5776856", "0.57461786", "0.57437605", "0.5730638", "0.5720245", "0.56746477", "0.56...
0.7039188
0
Converts an array [..., channels] of RGB values to Digital Y'CbCr (0-255). RGB values are assumed to be normalized to (0, 1). Don't forget to cast to uint8 for pillow.
def rgb_to_ycbcr(image: np.ndarray) -> np.ndarray:
    """Convert an array [..., 3] of normalized RGB (0-1) to digital Y'CbCr (0-255).

    Applies the ITU-R BT.601 digital transform: Y in [16, 235], Cb/Cr
    centered at 128. Cast to uint8 before handing to pillow.
    """
    if not is_rgb(image):
        raise ValueError("Input needs to be an array of RGB values")
    transform = np.array(
        [
            [65.481, 128.553, 24.966],
            [-37.797, -74.203, 112.000],
            [112.000, -93.786, -18.214],
        ]
    )
    offset = np.array([16, 128, 128])
    return np.dot(image, transform.T) + offset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rgb_to_ycbcr(image: torch.Tensor) -> torch.Tensor:\n r: torch.Tensor = image[..., 0, :, :]\n g: torch.Tensor = image[..., 1, :, :]\n b: torch.Tensor = image[..., 2, :, :]\n\n delta: float = 0.5\n y: torch.Tensor = 0.299 * r + 0.587 * g + 0.114 * b\n cb: torch.Tensor = (b - y) * 0.564 + delta\...
[ "0.65595174", "0.65373284", "0.65213215", "0.6495782", "0.64475703", "0.64128786", "0.6219854", "0.6115602", "0.5978177", "0.5957845", "0.59014165", "0.5828215", "0.5691829", "0.5660269", "0.5658129", "0.56305027", "0.5622264", "0.5549955", "0.55226725", "0.5519686", "0.55096...
0.6613111
0
Returns a triangular matrix with random value between 0 and 1 uniformly.
def random_triangular_matrix(size: int, lower: bool = True) -> np.ndarray:
    """Return a ``size`` x ``size`` triangular matrix with Uniform(0, 1) entries.

    The diagonal is kept in both cases; only the strict opposite triangle is
    zeroed.

    Bug fixes vs. original: the index helpers were called with a hard-coded
    5 instead of ``size``, and the upper-triangular branch used
    ``tril_indices(..., 1)``, which also zeroed the diagonal and the first
    superdiagonal.

    Args:
        size: matrix dimension.
        lower: if True return lower-triangular, else upper-triangular.
    """
    a = np.random.uniform(0, 1, (size, size))
    if lower:
        # Zero the strictly-upper triangle (k=1 excludes the diagonal).
        ind = np.triu_indices(size, 1)
    else:
        # Zero the strictly-lower triangle (k=-1 excludes the diagonal).
        ind = np.tril_indices(size, -1)
    a[ind] = 0
    return a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_rand_mat(dim=3):\n tmp = npr.uniform(-1, 1, (dim,dim))\n\n # make matrix symmetric\n for i in range(dim):\n for j in range(i+1, dim):\n tmp[i,j] = tmp[j,i]\n\n return tmp", "def random_matrix(rows, cols):\n return np.random.randn(rows, cols)", "d...
[ "0.6796665", "0.6433331", "0.6395273", "0.63390714", "0.63257", "0.6278706", "0.61659557", "0.61443967", "0.60865873", "0.5927011", "0.5907153", "0.5863584", "0.5841046", "0.58366776", "0.57918566", "0.5788798", "0.5776318", "0.57745236", "0.57488704", "0.5734677", "0.5692929...
0.74994147
0
Performs batched calculation of `v^T A v` transform. Special case of bilinear form `x^T A y`
def batch_vTAv(A: np.ndarray, v: np.ndarray) -> np.ndarray:
    """Batched quadratic form v^T A v (special case of bilinear x^T A y).

    A has shape [..., X, X] and v shape [..., X]; the result drops the two
    matrix axes. Implemented as two einsum contractions (A v, then v . Av),
    avoiding the explicit matmul-and-squeeze formulation.
    """
    Av = np.einsum("...kl,...l->...k", A, v)
    return np.einsum("...k,...k->...", v, Av)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def f(t, x, n, v):\n total = 0\n for i in range(n+1):\n for j in range(n+1):\n for k in range(v):\n total = t[i][j] * x[i][j][k]", "def __call__(self, x, y):\n #- TODO: compare speed to solution at\n #- http://stackoverflow.com/questions/12729228/simple-effici...
[ "0.59027725", "0.58240545", "0.57738847", "0.5771491", "0.56887174", "0.5658218", "0.56540334", "0.56478906", "0.5588295", "0.55453885", "0.5531856", "0.55062973", "0.548991", "0.5472191", "0.54176724", "0.541392", "0.5401743", "0.5387512", "0.53842276", "0.53838414", "0.5374...
0.6895596
0
Performs a batched inner product over the last dimension. Replacement for deprecated `from numpy.core.umath_tests import inner1d`.
def batch_inner(a: np.ndarray, b: np.ndarray, verify: bool = True) -> np.ndarray:
    """Batched inner product over the last dimension.

    Replacement for the deprecated ``numpy.core.umath_tests.inner1d``.

    Bug fix vs. original: for a zero-length last axis the original returned
    ``np.empty_like(a)``, which (a) kept the trailing 0-axis — inconsistent
    with einsum's ``a.shape[:-1]`` result shape — and (b) held uninitialized
    memory. We now return correctly-shaped zeros.

    Args:
        a, b: arrays of identical shape [..., X].
        verify: when True, raise ValueError on shape mismatch.

    Returns:
        Array of shape ``a.shape[:-1]`` with the per-batch inner products.
    """
    if verify and a.shape != b.shape:
        raise ValueError("All dimensions have to be equal")
    if a.shape[-1] == 0:
        # Empty contraction axis: the inner product is an empty sum, i.e. 0.
        return np.zeros(a.shape[:-1], dtype=a.dtype)
    return np.einsum("...i,...i->...", a, b)  # faster than np.sum(a * b, axis=-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_outer_product(a, b):\n a, b = normalize_and_check_ndim([a, b], 2)\n # This is a batchwise version of the matrix multiplication approach\n # used for outer_product(), see explanation there.\n return a[:, :, np.newaxis] * b[:, np.newaxis, :]", "def outer_product(input_sets, axis=0):\n out ...
[ "0.7076053", "0.6733254", "0.6566906", "0.6538848", "0.6200467", "0.5971671", "0.5898091", "0.5882279", "0.586811", "0.58539116", "0.5828389", "0.58158463", "0.57993835", "0.5773256", "0.57691", "0.570448", "0.5675385", "0.5650381", "0.5588937", "0.55867285", "0.5513858", "...
0.6739568
1
Performs a batched outer product over the last dimension.
def batch_outer(a: np.ndarray, b: np.ndarray, verify: bool = True) -> np.ndarray:
    """Batched outer product over the last dimension.

    For a of shape [..., X] and b of shape [..., Y] (matching batch dims),
    returns an array of shape [..., X, Y]. Implemented via broadcasting
    rather than einsum.

    Args:
        a, b: input arrays whose leading (batch) dimensions must agree.
        verify: when True, raise ValueError on batch-shape mismatch.
    """
    if verify and a.shape[:-1] != b.shape[:-1]:
        raise ValueError("All except the last dimension have to be equal")
    return a[..., :, None] * b[..., None, :]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_outer_product(a, b):\n a, b = normalize_and_check_ndim([a, b], 2)\n # This is a batchwise version of the matrix multiplication approach\n # used for outer_product(), see explanation there.\n return a[:, :, np.newaxis] * b[:, np.newaxis, :]", "def outer_product(x):\n return keras.backend....
[ "0.755109", "0.70560527", "0.6960709", "0.6768571", "0.65054774", "0.63410676", "0.61378115", "0.61229986", "0.60099125", "0.5946456", "0.59233963", "0.590922", "0.5882272", "0.587886", "0.5864047", "0.58254915", "0.5783885", "0.5765654", "0.5729165", "0.57191443", "0.5591057...
0.66920644
4
`probs` values ndarray `k` take the smallest `k` elements, if `reverse` is False and the largest `k` if `reverse` is True `axis` sorting and selection axis.
def batchtopk(
    probs: np.ndarray, k: Optional[int] = None, axis: int = -1, reverse: bool = False
) -> Tuple[np.ndarray, np.ndarray]:
    """Select the k smallest (or largest, if ``reverse``) values per batch row.

    Args:
        probs: values, at least 2-dimensional.
        k: number of elements to keep; ``None`` keeps all.
        axis: sorting axis; only the last axis is supported.
        reverse: False selects the smallest k, True the largest k.

    Returns:
        tuple: (full sort order along the last axis, the k selected values).
    """
    if k is not None and k <= 0:
        raise ValueError("k must be larger than 0. Use None to chose all elements.")
    if axis != -1:
        raise ValueError("Only last axis supported atm")
    if len(probs.shape) <= 1:
        raise ValueError("probs must be at least 2-dimensional")
    sign = -1 if reverse else 1
    order = np.argsort(sign * probs, axis=-1)  # use argpartition?
    selected = np.take_along_axis(probs, order[..., :k], axis=-1)
    return order, selected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tflite_top_k_probs(probs, k):\n\n if k > 0:\n return np.flip(probs[0].argsort()[-k:])\n else:\n return np.flip(probs[0].argsort())", "def tf_top_k_probs(probs, k):\n\n if k > 0:\n return probs.argsort()[-k:][::-1]\n else:\n return probs.argsort()[:][::-1]", "def indi...
[ "0.7283214", "0.6759031", "0.58316225", "0.58071005", "0.5797564", "0.57924837", "0.5782279", "0.5650926", "0.55970734", "0.5593298", "0.558886", "0.5553999", "0.5523884", "0.548393", "0.5477123", "0.5454491", "0.5419435", "0.5367509", "0.5322573", "0.5320744", "0.5307545", ...
0.7433971
0
Calcuates the sum of the logs of the diagonal elements (batchwise if necessary)
def logtrace(m: np.ndarray) -> np.ndarray:
    """Sum of the logs of the diagonal elements (batchwise if necessary).

    note: performance cannot easily be improved by numba; `np.diagonal`
    is not supported by numba 0.52.0
    """
    diag = np.diagonal(m, axis1=-2, axis2=-1)
    return np.log(diag).sum(axis=-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trace(X):\r\n return extract_diag(X).sum()", "def trace(X):\n return extract_diag(X).sum()", "def ln_sum_i_neq_j(x):\n\tiw_size = x.size(0)\n\tbatch_size = x.size(1)\n\n\t# TODO: Would torch.expand instead of torch.repeat make this faster?\n\tinv_mask = torch.eye(iw_size).unsqueeze(dim=2).repeat(1, 1...
[ "0.6600348", "0.6512393", "0.63457274", "0.62601304", "0.62387985", "0.6219907", "0.6185488", "0.6124841", "0.6087076", "0.606045", "0.6032963", "0.6027041", "0.6024959", "0.60206836", "0.60127777", "0.6009793", "0.598619", "0.5973478", "0.5966066", "0.58722997", "0.58696294"...
0.6544133
1
Shifts `pvals` by the largest value in the last dimension before the exp is calculated to prevent overflow (batchwise if necessary). Can be used if probabilities are normalized again later.
def shiftedexp(pvals: np.ndarray) -> np.ndarray:
    """Exponentiate after subtracting the row-wise maximum (overflow guard).

    Shifting by the largest value in the last dimension prevents overflow in
    ``exp``; valid whenever the result is re-normalized later. Empty last
    axis returns an empty array of the same shape.
    """
    if pvals.shape[-1] == 0:
        return np.empty_like(pvals)
    row_max = np.amax(pvals, axis=-1, keepdims=True)
    return np.exp(pvals - row_max)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def benjamini_hochberg_step_down(pvals):\r\n tmp = fdr_correction(pvals)\r\n corrected_vals = empty(len(pvals))\r\n max_pval = 1.\r\n for i in argsort(pvals)[::-1]:\r\n if tmp[i] < max_pval:\r\n corrected_vals[i] = tmp[i]\r\n max_pval = tmp[i]\r\n else:\r\n ...
[ "0.62695336", "0.6066202", "0.58225965", "0.57363963", "0.5489803", "0.5446892", "0.5247545", "0.52059096", "0.5191071", "0.51882726", "0.5157705", "0.5066351", "0.5033633", "0.4979771", "0.4901542", "0.4892328", "0.48860258", "0.48807552", "0.48794442", "0.48614326", "0.4850...
0.7399692
0
Sample from list of probabilities `pvals` with replacement. The probabilities don't need to be normalized.
def sample_probabilities(pvals: np.ndarray) -> Callable[[], int]: return Sampler(np.cumsum(pvals))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _correct_p_values(self, p_vals):\r\n num_tests = len([p_val for p_val in p_vals if p_val is not None])\r\n corrected_p_vals = []\r\n for p_val in p_vals:\r\n if p_val is not None:\r\n corrected_p_vals.append(min(p_val * num_tests, 1))\r\n else:\r\n ...
[ "0.62992084", "0.5945291", "0.57297397", "0.55846256", "0.5577901", "0.5562105", "0.54901296", "0.539993", "0.5361088", "0.5338176", "0.5308725", "0.53033537", "0.5288212", "0.52389336", "0.52308893", "0.52133656", "0.520034", "0.5190075", "0.5185366", "0.5180876", "0.5169325...
0.6719965
0
Sample from the categorical distribution using `pvals`.
def categorical(pvals: np.ndarray) -> int: return sample_probabilities(pvals)() # faster than: np.argmax(np.random.multinomial(1, normalize(pvals)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample_probabilities(pvals: np.ndarray) -> Callable[[], int]:\n\n return Sampler(np.cumsum(pvals))", "def sample_categorical(distribution):\n sample = random.random()\n for event, prob in distribution.items():\n if sample < prob:\n return event\n sample -= prob\n raise Va...
[ "0.6776044", "0.650533", "0.64588654", "0.6282057", "0.6240135", "0.6240135", "0.62277555", "0.6160603", "0.6149961", "0.60994315", "0.60755867", "0.60755867", "0.60755867", "0.6050657", "0.6038272", "0.6014659", "0.5976859", "0.5965328", "0.59274614", "0.5909927", "0.5828886...
0.7592371
0
Convert a population (list of observations) to a CDF.
def population2cdf(population: np.ndarray) -> np.ndarray: population = np.sort(population) return np.searchsorted(population, population, side="right") / len(population)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cdf(self,x):\n if hasattr(x,'__len__'):\n returnCdf = np.array([self.cdf(i) for i in x])\n else:\n returnCdf = self._distribution.cdf(x)\n return returnCdf", "def cdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n ...
[ "0.6000258", "0.56762284", "0.56762284", "0.55965334", "0.54399467", "0.54305595", "0.5391821", "0.5380843", "0.53685206", "0.5361804", "0.53427804", "0.5326272", "0.52933216", "0.5286579", "0.5269808", "0.5198982", "0.5191929", "0.5165977", "0.5149848", "0.51354104", "0.5111...
0.62257254
0
Convert a discrete PDF into a discrete CDF.
def pmf2cdf(pdf: np.ndarray) -> np.ndarray: cdf = np.cumsum(pdf) return cdf / cdf[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cdf_to_pdf(cdf):\n pdf = deepcopy(cdf)\n pdf[1:] -= pdf[:-1].copy()\n return pdf", "def pdf(self,x):\n return self.categoricalDist.pdf(x)", "def pdf(self,x):\n if x in self.values:\n pdfValue = self.mapping[x]\n else:\n if self.isFloat:\n vals = sorted(list(self.values))\...
[ "0.69857043", "0.61699057", "0.59049094", "0.5864199", "0.58318394", "0.57629466", "0.573494", "0.5734838", "0.56632376", "0.5655967", "0.56498164", "0.56412953", "0.558819", "0.55852294", "0.5578906", "0.5552537", "0.5533782", "0.5533782", "0.551519", "0.5459205", "0.5448148...
0.6718552
1
Tests the null hypothesis that both samples belong to the same distribution.
def _two_sample_kolmogorov_smirnov_pmf( pmf1: np.ndarray, pmf2: np.ndarray, alpha: float = 0.05 ) -> Tuple[float, float, bool]: # note: yields different results as `scipy.stats.ks_2samp` cdf1 = np.cumsum(pmf1) cdf2 = np.cumsum(pmf2) n1 = cdf1[-1] n2 = cdf2[-1] # cannot be inplace because of type conversion cdf1 = cdf1 / n1 cdf2 = cdf2 / n2 statistic, pvalue = _two_sample_kolmogorov_smirnov_same_length(cdf1, cdf2, n1, n2) reject = pvalue < alpha return statistic, pvalue, reject
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_mc_t_two_sample_no_mc(self):\r\n x = array([1, 1, 1])\r\n y = array([0, 0, 0])\r\n self.assertEqual(mc_t_two_sample(x, x), (nan, nan, [], nan))", "def test_t_two_sample_no_variance(self):\r\n # By default should return (None, None) to mimic R's t.test.\r\n x = array([1...
[ "0.6507358", "0.6455645", "0.6431785", "0.6414398", "0.6348298", "0.6341265", "0.633865", "0.6277656", "0.6269617", "0.6268249", "0.6254944", "0.6232321", "0.6192658", "0.6153703", "0.6132823", "0.6105993", "0.6078335", "0.6060871", "0.6044297", "0.60279286", "0.60275316", ...
0.0
-1
Calculate stochastic matrix `pm` to the power of infinity, by finding the eigenvector which corresponds to the eigenvalue 1.
def inf_matrix_power(pm: np.ndarray, dtype=np.float64) -> np.ndarray: w, v = np.linalg.eig( pm ) # scipy.linalg.eig would probably by faster as it can return the left and right eigen vectors if not np.isclose(w[0], 1.0): raise ValueError("The first eigenvalue is not none. Is this a right stochastic matrix?") vi = np.linalg.inv(v) d = np.zeros(pm.shape[0], dtype=dtype) d[0] = 1.0 return np.matmul(v, np.matmul(np.diag(d), vi))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_E0(self) -> float:\n noisy = self.kernel_eigenvectors_[-1].copy()\n np.random.shuffle(noisy)\n\n kernel_eigenvectors = self.kernel_eigenvectors_[:-1]\n kernel_eigenvectors.append(noisy)\n\n eigenvectors_matrix = scipy.sparse.csr_matrix(\n np.column_stack(...
[ "0.62551945", "0.60196066", "0.59203595", "0.5908042", "0.589194", "0.5879424", "0.5878952", "0.5874368", "0.5866997", "0.5835588", "0.58008105", "0.5785766", "0.5769242", "0.576087", "0.5740334", "0.5668045", "0.55842084", "0.5565213", "0.5563019", "0.55508184", "0.55072254"...
0.763889
0
Inverse of np.block. Set axis to (2, 1) to modify the order of the result.
def unblock(arr: np.ndarray, n1: int, n2: int, axis1: int = -1, axis2: int = -2, blocksize: bool = False) -> np.ndarray: """ test (stackoverflow): Ok, so considering I have N block matrices with bm x bn dimension and want to stack them in a m x n matrix, provided N = m x n, I would then have x.reshape(m,n,bm,bn).swapaxes(1,2).reshape(bm*m,-1) """ s = np.array(arr.shape) if s[axis1] % n1 != 0 or s[axis2] % n2 != 0: raise ValueError(f"{s[axis1]}x{s[axis2]} does not divide by {n1}x{n2}") if blocksize: n1 = s[axis1] // n1 n2 = s[axis2] // n2 # this first .split adds a new dimensions on the outside, so if a absolute index # is given for the second axis it must be moved one to the right if axis2 >= 0: _axis2 = axis2 + 1 else: _axis2 = axis2 arr = np.array(np.split(arr, n1, axis1)) arr = np.array(np.split(arr, n2, _axis2)) inv_blocksize = n1 * n2 total = s[axis1] * s[axis2] s[axis2] = inv_blocksize s[axis1] = total // inv_blocksize return np.reshape(arr, s)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse(self):\n data = np.linalg.inv(self._data)\n return self.create(self.rows, self.cols, data)", "def inverse(self, x, y):", "def inverse(self: Float[LinearOperator, \"*batch N N\"]) -> Float[LinearOperator, \"*batch N N\"]:\n return self.__class__(self._diag.reciprocal())", "def...
[ "0.67732894", "0.665631", "0.65951234", "0.65613794", "0.6526042", "0.6483693", "0.6445868", "0.6425579", "0.6381038", "0.6379403", "0.6377041", "0.6358489", "0.6346017", "0.6340541", "0.6327942", "0.63209075", "0.62847763", "0.6274256", "0.62589926", "0.6158384", "0.61480635...
0.6248697
19
Replace colored pixels with a `neutral_color`. The `ratio` defines the 'colorfulness' above which level the pixel should be replace. I.e. if the `ratio` is 1 nothing will be replaced, if `ratio` is 0 only strict greys are kept unmodified.
def remove_color(img: np.ndarray, ratio: float, neutral_color: Tuple[int, int, int] = RGB_WHITE) -> None: channels = img.shape[-1] assert channels == 3, "Not a 3 channel color image" norm = np.std(np.array(RGB_YELLOW)) # this is the same for all pure colors sd = np.std(img, axis=-1) img[sd > ratio * norm] = neutral_color
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ratio_to_rgb(ratio):\n b = 0\n if round(ratio, 1) == 0.5:\n r = 255\n g = 255\n elif ratio < 0.5:\n r = int(ratio * 2 * 255.0)\n g = 255\n else:\n r = 255\n g = int((1.0 - ratio) * 2 * 255.0)\n rgb = (r, g, b)\n\n return rgb", "def set_neutral(self)...
[ "0.4983211", "0.48789826", "0.48116347", "0.4737011", "0.47265878", "0.46818957", "0.4571823", "0.45462266", "0.44600105", "0.4397929", "0.42731524", "0.427132", "0.4269558", "0.42693788", "0.42575642", "0.42499575", "0.42406985", "0.42318156", "0.4226033", "0.4211065", "0.41...
0.7399279
0
It normalizes the last dimension of an ndarray to sum to 1. It can be used to convert (batches of) vectors to stochastic vectors or (batches of) matrices to right stochastic matrices. Right stochastic matrices are also called transitions matrices.
def stochastic(x: np.ndarray) -> np.ndarray: n = np.linalg.norm(x, 1, axis=-1, keepdims=True) # n = np.sum(x, axis=-1, keepdims=True) # todo: same result (except dtype), which is faster? with np.errstate(invalid="raise"): # see: `normalized` return x / n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(self,matrix):\n for i in range(self.N):\n matrix[self.N-1][i] = 0\n for i in range(self.n):\n matrix[self.N - 1][self.index(i,i)] = 1\n return matrix", "def normalize(a, axis=None):\n a_sum = a.sum(axis)\n if axis and a.ndim > 1:\n a_sum[a_sum...
[ "0.68129104", "0.67557216", "0.6609631", "0.6585958", "0.645604", "0.6451531", "0.6302512", "0.62421376", "0.6205158", "0.61722827", "0.6167662", "0.61229944", "0.61031413", "0.6083917", "0.6082263", "0.60746324", "0.6066852", "0.60581243", "0.605747", "0.6040707", "0.6040707...
0.59728706
31
Viterbi algorithm for finding the optimal path. One square transition matrix can be specified.
def viterbi_dense( p_emit: np.ndarray, p_trans: np.ndarray, p_trans0: Optional[np.ndarray] = None, mask: Optional[np.ndarray] = None ) -> np.ndarray: batch_size, T, N = p_emit.shape if mask is None: mask = np.ones((batch_size, T), dtype=p_trans.dtype) if p_trans0 is None: p_trans0 = np.zeros(N, dtype=p_emit.dtype) return _viterbi_dense_masked(p_emit, p_trans, p_trans0, mask)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def original_solution():\n matrix = get_data()\n # Construct Graph\n G = nx.DiGraph()\n rows, cols = len(matrix), len(matrix[0])\n for r in xrange(rows):\n for c in xrange(cols):\n if 0 < c:\n G.add_edge(r*cols + c, r*cols + c - 1, weight=matrix[r][c-1])\n ...
[ "0.6767159", "0.67312855", "0.6485659", "0.6303362", "0.62523675", "0.621314", "0.617608", "0.61201537", "0.60680777", "0.60667634", "0.60551065", "0.6045231", "0.60280347", "0.60061914", "0.59879357", "0.5987022", "0.5967305", "0.59623784", "0.59502643", "0.5880169", "0.5855...
0.0
-1
Viterbi algorithm for finding the optimal path. The number of emission probabilities per index can vary and a separate matrix can be specified for each transition.
def viterbi_sparse(p_emit: Sequence[np.ndarray], p_trans: Sequence[np.ndarray]) -> np.ndarray: T = len(p_emit) assert T - 1 == len(p_trans) trellis = [p_emit[0]] states = [None] for t in range(1, T): weighted_scores = trellis[-1][:, None] + p_trans[t - 1] # [x, y] # scores and p_trans broadcasted max_scores = np.amax(weighted_scores, axis=0) # [y] trellis.append(np.add(max_scores, p_emit[t])) # [y] remember highest score of each path states.append(np.argmax(weighted_scores, axis=0)) # [y] remember index of best path assert len(trellis) == T and len(states) == T tokens = [None] * T # [T] tokens[-1] = np.argmax(trellis[-1], axis=0) # [] for t in range(T - 1, 0, -1): tokens[t - 1] = states[t][tokens[t]] # [] return tokens
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def viterbi_path(prior, transmat, observ_likelihood):\n T = observ_likelihood.shape[-1]\n N = observ_likelihood.shape[0]\n\n path = numpy.zeros(T, dtype=numpy.int32)\n global_score = numpy.zeros(shape=(N,T))\n predecessor_state_index = numpy.zeros(shape=(N,T), dtype=numpy.int32)\n\n t = 1\n gl...
[ "0.705838", "0.6940395", "0.67826474", "0.67684615", "0.67196417", "0.65749055", "0.65712386", "0.6491848", "0.64757305", "0.6461549", "0.64065045", "0.6359924", "0.62643", "0.6219971", "0.6213666", "0.6170746", "0.6034146", "0.602727", "0.58137655", "0.57686436", "0.57664907...
0.5941896
18
np.broadcast_shapes requires `numpy==1.20.0`, which is not available for `python < 3.7`.
def broadcast_shapes(*shapes: Tuple[int, ...]) -> Tuple[int, ...]: arrays = [np.empty(shape) for shape in shapes] return np.broadcast(*arrays).shape
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_broadcast_dims():\r\n test((1, 2, 3))\r\n test((2, 1, 3))\r\n test((2, 3, 1))\r\n test2((1, 2, 3))\r\n test2((2, 1, 3))\r\n test2((2, 3, 1))", "def broadcast_shape(*shapes, **kwargs):\n strict = kwargs.pop(\"strict\", False)\n reversed_shape = []\n for shape in shapes:\n ...
[ "0.63662064", "0.631577", "0.6181781", "0.60380113", "0.59342825", "0.5925404", "0.58053595", "0.57856745", "0.57498085", "0.56896067", "0.55596524", "0.55482703", "0.548472", "0.548077", "0.540709", "0.5397413", "0.53940207", "0.5373903", "0.53049004", "0.526517", "0.5237194...
0.6529921
0
Batched center of mass calculation of 2d arrays
def center_of_mass_2d(arr: np.ndarray, dtype=np.float32) -> np.ndarray: total = np.sum(arr, axis=(-1, -2)) grids = np.ogrid[[slice(0, i) for i in arr.shape[-2:]]] with np.errstate(invalid="ignore"): results = np.array([np.sum(arr * grid.astype(dtype), axis=(-1, -2)) / total for grid in grids], dtype=dtype) results = np.moveaxis(results, 0, -1) return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def centerOfMass(data):\r\n dd = []\r\n for d in data:\r\n dd.append(d.coordinate)\r\n\r\n data = dd\r\n data = np.array(data)\r\n n = len(data)\r\n x = sum(data[:,0])\r\n y = sum(data[:,1])\r\n z = sum(data[:,2])\r\n x/=n\r\n y/=n\r\n z/=n\r\n return x,y,z,n", "def _ce...
[ "0.7113222", "0.6913504", "0.68147796", "0.66269344", "0.6623906", "0.6605114", "0.6573181", "0.6555954", "0.65233356", "0.65076435", "0.6476598", "0.64425707", "0.6414334", "0.6406729", "0.63674235", "0.63496435", "0.6226081", "0.6160094", "0.61026037", "0.6084239", "0.60666...
0.74505234
0
Creates a picture grid (left to right, top to bottom).
def image_grid(images: np.ndarray, nrow: int = 8, fill_value=(0, 0, 0)) -> np.ndarray: if not images.ndim == 4: raise ValueError("Input shape must be [n, height, width, channel]") h, w = images.shape[1:3] nbatch = images.shape[0] channels = images.shape[3] rows = ceildiv(nbatch, nrow) cols = min(nbatch, nrow) if channels != len(fill_value): raise ValueError("fill_value must match number of channels") out = np.full((h * rows, w * cols, channels), fill_value, dtype=images.dtype) for i in range(images.shape[0]): row = i // cols col = i % cols out[h * row : h * (row + 1), w * col : w * (col + 1), :] = images[i] return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_grid(images, n_rows=4, n_cols=4):\n k = min(n_rows * n_cols, len(images))\n indices = [i for i in range(k)]\n return _create_grid(images, indices, n_rows, n_cols)", "def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0,0,0,0])", "def generate_image_grid(sess, op):\n ...
[ "0.7382325", "0.7234491", "0.71866596", "0.708115", "0.70502263", "0.7038171", "0.6941421", "0.69232905", "0.68283314", "0.6826088", "0.68064976", "0.67884314", "0.6752977", "0.6732644", "0.67231864", "0.6684483", "0.66807157", "0.6656162", "0.66169745", "0.65663916", "0.6561...
0.0
-1
validate_target verifies that target is a valid MAC address, IP address or hostname
def validate_target(target, arp_table): try: mac = mac_address(target) return mac except TypeError: pass try: ip = ip_address(target) if ip in arp_table.keys(): return arp_table[ip].mac except TypeError: pass if target in arp_table: return arp_table[target].mac else: raise TypeError('{} is not a valid target'.format(target))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_target(target: str) -> bool:\n try:\n gethostbyname(target)\n except (gaierror, UnicodeError):\n return False\n return True", "def validateIP():\n try:\n s = socket.inet_aton(args.target)\n except socket.error:\n print(\"\")\n pri...
[ "0.7149191", "0.64991635", "0.64354825", "0.63836634", "0.62301147", "0.62007254", "0.6052976", "0.6006137", "0.59964573", "0.5994269", "0.59239537", "0.58881646", "0.58818513", "0.5870957", "0.5835328", "0.57881117", "0.575523", "0.5737461", "0.57259214", "0.5655272", "0.563...
0.76750195
0
mac_address checks that a given string is in MAC address format
def mac_address(addr): mac = addr.upper() if not _mac_address_pattern.fullmatch(mac): raise TypeError('{} does not match a MAC address pattern'.format(addr)) return mac
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_valid_mac(address):\n m = \"[0-9a-f]{2}([-:])[0-9a-f]{2}(\\\\1[0-9a-f]{2}){4}$\"\n if isinstance(address, six.string_types) and re.match(m, address.lower()):\n return True\n return False", "def isMAC(s):\n\n s = s.replace(':', '')\n if len(s) != 12: return 0\n for char...
[ "0.83441114", "0.7788352", "0.74661726", "0.7465587", "0.74038935", "0.73613924", "0.7340942", "0.73230916", "0.7296056", "0.7203996", "0.7073263", "0.6936628", "0.67489415", "0.6534532", "0.64768", "0.64768", "0.64768", "0.64768", "0.6468425", "0.6463173", "0.6461584", "0....
0.7487672
2
ip_address checks that a given string is in IP address format
def ip_address(addr): parts = addr.split('.') if len(parts) != 4: raise TypeError('{} does not match an IP address pattern'.format(addr)) for part in parts: try: num = int(part) if num < 0 or num > 255: raise TypeError('{} does not match an IP address pattern'.format(addr)) except ValueError: raise TypeError('{} does not match an IP address pattern'.format(addr)) return addr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_ip(string):\n return ipv4.is_ip(string) or ipv6.is_ip(string)", "def validate_ip_address(ip_addr):\n try:\n ip_object = ipaddress.ip_address(ip_addr)\n return True\n except ValueError:\n return False", "def is_valid_ipaddress(str_ip: str) -> bool:\n try:\n ipaddre...
[ "0.8181276", "0.8099573", "0.80747545", "0.80292475", "0.8022243", "0.7864434", "0.78313065", "0.78308326", "0.78138804", "0.7800783", "0.77667433", "0.7727927", "0.7700073", "0.76853204", "0.76780635", "0.76094407", "0.7527797", "0.74806803", "0.74570745", "0.7426013", "0.74...
0.7447207
19
Do not return anything, modify root inplace instead.
def flatten(self, root) -> None: if not root: return None node = root while node: if node.left: rightmost = node.left while rightmost.right: rightmost = rightmost.right rightmost.right = node.right node.right = node.left node.left = None node = node.right return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uproot(self):\n self.__root__ = self\n return self", "def root_replace(self,node):\r\n self.feature_index = node.feature_index\r\n self.threshold = node.threshold\r\n self.label = node.label\r\n self.left = node.left\r\n self.right = node.right\r\n self.substit...
[ "0.71672195", "0.70915145", "0.7055208", "0.69615245", "0.67885584", "0.66141015", "0.6551368", "0.6512747", "0.6512747", "0.6512747", "0.6512747", "0.6431704", "0.6411715", "0.6395968", "0.6395968", "0.6368781", "0.63604164", "0.6329711", "0.6324841", "0.6305654", "0.6276602...
0.0
-1
Do not return anything, modify root inplace instead.
def flatten(self, root) -> None: node = root stack = [] while node: if node.left: if node.right: stack.append(node.right) node.right = node.left node.left = None if not node.left and not node.right and stack: node.right = stack.pop() node = node.right
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uproot(self):\n self.__root__ = self\n return self", "def root_replace(self,node):\r\n self.feature_index = node.feature_index\r\n self.threshold = node.threshold\r\n self.label = node.label\r\n self.left = node.left\r\n self.right = node.right\r\n self.substit...
[ "0.71672195", "0.70915145", "0.7055208", "0.69615245", "0.67885584", "0.66141015", "0.6551368", "0.6512747", "0.6512747", "0.6512747", "0.6512747", "0.6431704", "0.6411715", "0.6395968", "0.6395968", "0.6368781", "0.63604164", "0.6329711", "0.6324841", "0.6305654", "0.6276602...
0.0
-1
Do not return anything, modify root inplace instead.
def flatten(self, root) -> None: # 递归出口一定要到叶子节点 if not root: return None if not root.left and not root.right: return root lefttail = self.flatten(root.left) righttail = self.flatten(root.right) if lefttail: lefttail.right = root.right root.right = root.left root.left = None return righttail if righttail else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uproot(self):\n self.__root__ = self\n return self", "def root_replace(self,node):\r\n self.feature_index = node.feature_index\r\n self.threshold = node.threshold\r\n self.label = node.label\r\n self.left = node.left\r\n self.right = node.right\r\n self.substit...
[ "0.71672195", "0.70915145", "0.7055208", "0.69615245", "0.67885584", "0.66141015", "0.6551368", "0.6512747", "0.6512747", "0.6512747", "0.6512747", "0.6431704", "0.6411715", "0.6395968", "0.6395968", "0.6368781", "0.63604164", "0.6329711", "0.6324841", "0.6305654", "0.6276602...
0.0
-1
[initalize spotify class to be used to manage playlists]
def __init__(self): self.sp, self.user = self.init_auth_client() self.logger = logging.getLogger(__name__)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, API, playlist_uri):\n\n self.API = API\n self.playlist_uri = playlist_uri\n self.metadata = None", "def __init__(self, username):\n self.spotify = spotipy.Spotify(simple_auth_token(username))", "def __init__(self, **kwargs):\n self.songs = SongList()\n ...
[ "0.72976667", "0.7259575", "0.6638557", "0.6578827", "0.6547392", "0.6536785", "0.6459842", "0.6452237", "0.6372635", "0.6340433", "0.6314363", "0.6273033", "0.62022495", "0.61825216", "0.61784863", "0.61702627", "0.6157572", "0.6012808", "0.59975034", "0.59931904", "0.595047...
0.0
-1
[authorize and initialize spotify client]
def init_auth_client(self): with open("config.yml", 'r') as ymlfile: cfg = yaml.load(ymlfile) token = util.prompt_for_user_token( cfg['username'], scope=cfg['scope'], client_id=cfg['spotipy_client_id'], client_secret=cfg['spotipy_client_secret'], redirect_uri=cfg['spotipy_redirect_uri']) sp = spotipy.Spotify(auth=token) return sp, cfg['username']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_user(self) -> Any:\n return \\\n spotipy.Spotify(auth_manager=spotipy.oauth2.SpotifyOAuth(scope=\"playlist-modify-public\",\n client_id=self._public_id, client_secret=self._secret_id,\n redirect_uri=self._redirect_uri))", "def authe...
[ "0.7275522", "0.7169035", "0.7160154", "0.7054514", "0.70088327", "0.6976066", "0.6880722", "0.6798923", "0.6781801", "0.67027813", "0.66602165", "0.6602743", "0.64574254", "0.6410066", "0.6331362", "0.63175875", "0.62989295", "0.6185303", "0.6183346", "0.6170306", "0.6090339...
0.78885454
0
[creates a new playlist with given name, desc with given limts]
def create_new_playlist(self, name, desc=''): pl_names, _, _ = self.list_playlists() if name in pl_names: self.logger.debug( 'Playlist Name Already Exists, please use another name') else: pl = self.sp.user_playlist_create( self.user, name, public=False, description=desc) self.sp.user_playlist_change_details( self.user, pl['id'], collaborative=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_playlist(self, playlist_name):\n print(\"create_playlist needs implementation\")", "def create_playlist():\n sp = credentials()\n sp.user_playlist_create('truetiming', name='Billboard Hot 100')", "def create_playlist(self, data):\n pass", "def create_playlist(self, playlist_nam...
[ "0.7272547", "0.68430287", "0.673538", "0.6641419", "0.661609", "0.65667766", "0.6543431", "0.6541754", "0.6530266", "0.6512793", "0.6474164", "0.64047396", "0.63702667", "0.6338417", "0.6316106", "0.6193181", "0.617585", "0.61417544", "0.60676473", "0.6046706", "0.5912048", ...
0.7137164
1
[Will generate a list of 10 songs with given song name]
def search_song(self, name): self.logger.debug('Searched for Song: {}'.format(name)) results = self.sp.search(q='track:' + name, type='track') songs = [song for song in results['tracks']['items']] i = 1 songs_ls = [] table_ls = [] for song in songs: table_ls.append([i, song['name'][0:20].strip(), song['album']['name'][0:20].strip(), "%0.2f" % (song['duration_ms'] / 60000), song['popularity']]) songs_ls.append(song['uri']) i = i + 1 return songs_ls, table_ls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def simple_songs_list(name_of_album):\r\n songs = []\r\n data1 = dbase()\r\n data1 = data1[name_of_album][0]\r\n for song in data1.keys():\r\n songs += [song]\r\n return songs", "def songs_list(name_of_album):\r\n songs = \"\"\r\n data = dbase()\r\n data = data[name_of_album][0]\r\...
[ "0.7061848", "0.6690603", "0.65612096", "0.6520877", "0.6450587", "0.64253664", "0.62649524", "0.6210138", "0.61440945", "0.6127966", "0.612063", "0.61182624", "0.6108939", "0.60201514", "0.60189235", "0.60095435", "0.597353", "0.59719634", "0.59651035", "0.59640324", "0.5957...
0.6639254
2
[list all spotify playlists on users account]
def list_playlists(self, user=None): if user: playlists = self.sp.user_playlists(user)['items'] else: playlists = self.sp.user_playlists(self.user)['items'] pl_names = [pl['name'] for pl in playlists] pl_id = [pl['id'] for pl in playlists] pl_own = [pl['owner']['id'] for pl in playlists] return pl_names, pl_id, pl_own
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_playlists_for_user(self, request): \n user = Account.find_by_id(request.userid)\n playlists = Playlist.find_by_owner(user.key).fetch(20)\n return self.build_playlist_response(playlists)", "def current_user_playlists(self, limit: int = 20, offset: int = 0):\n return self._get('...
[ "0.7721551", "0.76566833", "0.7571428", "0.74551755", "0.74398583", "0.7157419", "0.70816904", "0.7019386", "0.7003413", "0.6947375", "0.69466627", "0.6937739", "0.6878575", "0.67284", "0.6687034", "0.6654773", "0.66512775", "0.6564396", "0.63236004", "0.6259983", "0.6246766"...
0.72365195
5
[list all the songs for a given playlist id]
def list_pl_songs(self, pl_id, user=None): if user: res = self.sp.user_playlist_tracks(user, pl_id) else: res = self.sp.user_playlist_tracks(self.user, pl_id) song_uri_ls = [song['track']['uri'] for song in res['items']] song_ls = [] for i, song in enumerate(res['items']): song_ls.append([i, song['track']['name'][0:20].strip(), song['track']['album']['name'][0:20].strip(), "%0.2f" % (song['track']['duration_ms'] / 60000), song['track']['popularity']]) return song_uri_ls, song_ls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_playlist_songs(self, playlist_id):\n url = get_playlist_url(playlist_id)\n result = self.get_request(url)\n return result['result']['tracks'], result['result']['name']", "def get_playlist_songs(self, playlist_id):\n values = {'action' : 'playlist_songs',\n 'filter...
[ "0.81859416", "0.8030631", "0.79889697", "0.75973374", "0.75427765", "0.73269945", "0.73071337", "0.7261857", "0.713018", "0.7062886", "0.70463115", "0.6993529", "0.6951676", "0.69032377", "0.6889015", "0.6864356", "0.68641925", "0.68147", "0.68029433", "0.6800521", "0.679698...
0.6986906
12
[adds a song to a playlist]
def add_song_to_playlist(self, song_uri, playlist_id, user=None): if song_uri[0] in self.list_pl_songs(playlist_id, user=None): logging.debug('Song already in playlist') else: if user: self.sp.user_playlist_add_tracks(user, playlist_id, song_uri) else: self.sp.user_playlist_add_tracks( self.user, playlist_id, song_uri)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_song(self, song: Song):\n self.playlist.append(song)", "def add_the_song_to_playlist(self):\n com_util.tap_on(self.driver, element['AddToPlaylist'])\n # com_util.send_to(self.driver, element['EnterThePlaylist'], 'My Songs')\n com_util.tap_on(self.driver, element['ClickMySongs'...
[ "0.8281379", "0.8122598", "0.81120324", "0.77928084", "0.7633073", "0.76006603", "0.73052067", "0.7280356", "0.7275825", "0.72581446", "0.72479594", "0.7200695", "0.7198082", "0.71816415", "0.71812034", "0.70947707", "0.7091116", "0.70539707", "0.7045048", "0.70189893", "0.69...
0.7014243
20
[would list out friends, not working]
def list_users_friends(self): user = self.sp.user(self.user) return user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_friend_list(self):\n self.friends = self.df[['user_id','friends']]", "def find_friends(request):\n find_list = []\n sent_requests = set()\n rec_requests = set()\n sent_f_requests = FriendRequest.objects.filter(\n from_user=request.user\n )\n rec_f_requests = FriendRequest....
[ "0.71894276", "0.7112803", "0.70884013", "0.7054558", "0.7027464", "0.698504", "0.6922425", "0.68556184", "0.68189013", "0.67964447", "0.6792341", "0.6780698", "0.6738868", "0.67151695", "0.66937727", "0.6683127", "0.66392064", "0.6594076", "0.65758586", "0.6565678", "0.65209...
0.67554355
12
onehot encode categorical, normalize scalar/player_id inputs
def preprocess_minimap(minimap): layers = [] for i in range(len(features.MINIMAP_FEATURES)): ## scalar or to large to do one-hot if i == _MINIMAP_SELECTED: layers.append(minimap[i:i+1] / features.MINIMAP_FEATURES[i].scale) ## categorical elif i == _MINIMAP_PLAYER_RELATIVE: layer = np.zeros([features.MINIMAP_FEATURES[i].scale, minimap.shape[1], minimap.shape[2]], dtype=np.float32) for j in range(features.MINIMAP_FEATURES[i].scale): indy, indx = (minimap[i] == j).nonzero() layer[j, indy, indx] = 1 layers.append(layer) return np.concatenate(layers, axis=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __encode_one_hot_util(self):\n for col in self.cat_cols:\n if (\n col in self.train_df\n and col + str(\"Encoded\") not in self.ord_cols\n ):\n if self.test_df is not None:\n self.test_df = pd.concat(\n ...
[ "0.75467926", "0.71732116", "0.7166452", "0.7154489", "0.71511304", "0.71498376", "0.7130969", "0.71149933", "0.7075103", "0.70738035", "0.7065001", "0.70538414", "0.70359933", "0.70098567", "0.7004153", "0.6951065", "0.69328225", "0.69303125", "0.6927022", "0.692566", "0.691...
0.0
-1
onehot encode categorical, normalize scalar/player_id inputs
def preprocess_screen(screen): layers = [] for i in range(len(features.SCREEN_FEATURES)): if i == _SCREEN_UNIT_TYPE: layers.append(screen[i:i + 1] / features.SCREEN_FEATURES[i].scale) elif i == _SCREEN_SELECTED: layers.append(screen[i:i + 1] / features.SCREEN_FEATURES[i].scale) elif i == _SCREEN_PLAYER_RELATIVE: layer = np.zeros([features.SCREEN_FEATURES[i].scale, screen.shape[1], screen.shape[2]], dtype=np.float32) for j in range(features.SCREEN_FEATURES[i].scale): indy, indx = (screen[i] == j).nonzero() layer[j, indy, indx] = 1 layers.append(layer) return np.concatenate(layers, axis=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __encode_one_hot_util(self):\n for col in self.cat_cols:\n if (\n col in self.train_df\n and col + str(\"Encoded\") not in self.ord_cols\n ):\n if self.test_df is not None:\n self.test_df = pd.concat(\n ...
[ "0.75467926", "0.71732116", "0.7166452", "0.7154489", "0.71511304", "0.71498376", "0.7130969", "0.71149933", "0.7075103", "0.70738035", "0.7065001", "0.70538414", "0.70359933", "0.70098567", "0.7004153", "0.6951065", "0.69328225", "0.69303125", "0.6927022", "0.692566", "0.691...
0.0
-1
Plot a line from slope and intercept
def abline(slope, intercept): axes = plt.gca() x_vals = np.array(axes.get_xlim()) y_vals = intercept + slope * x_vals plt.plot(x_vals, y_vals, '--')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_regression_line():\r\n axes = plt.gca()\r\n x_vals = np.array(axes.get_xlim())\r\n y_vals = y_intercept() + slope() * x_vals\r\n plt.plot(x_vals, y_vals)", "def abline(slope, intercept):\n axes = plt.gca()\n x_vals = np.array(axes.get_xlim())\n y_vals = intercept + s...
[ "0.8633385", "0.837906", "0.8353659", "0.8208162", "0.80252755", "0.7704604", "0.76632464", "0.6855364", "0.6737687", "0.6706104", "0.6655756", "0.66362876", "0.6601379", "0.65816593", "0.6529675", "0.64858264", "0.6483495", "0.64728135", "0.6463536", "0.64627045", "0.6454128...
0.83326244
3
Only for human genome. If this is applied to other species, futher modification is needed.
def mapChrForVersion(c): if c.startswith('chrM'): return 998 elif c == 'chrX': return 999 elif c == 'chrY': return 1000 else: return int(c[3:])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_species(self):\n warn(f\"Default Update Species Called for Mechanism = {self.name}.\")\n return []", "def update_species_frames(self):\n pass", "def speciate(self):\n\n\n # Clear out the previous generation\n for spec in self.species.values():\n spec.cha...
[ "0.63167685", "0.63009715", "0.62039506", "0.60391605", "0.5989629", "0.580947", "0.5797044", "0.5579908", "0.5506126", "0.5501652", "0.5439127", "0.54250365", "0.5421335", "0.5421209", "0.5415419", "0.5400045", "0.5384445", "0.53612787", "0.5360659", "0.5338628", "0.5326598"...
0.0
-1
return 0 if they equal, 1 if snp1 less, else 1
def compare(chr1, pos1, chr2, pos2): pos1 = int(pos1) pos2 = int(pos2) if chrsort == 'version': chr1 = mapChrForVersion(chr1) chr2 = mapChrForVersion(chr2) elif chrsort == 'natural': pass # use original chr1, chr2 else: chr1 = chrsort.get(chr1, chr1) chr2 = chrsort.get(chr2, chr2) return -1 if (chr1, pos1) < (chr2, pos2) else 1 if (chr1, pos1) > (chr2, pos2) else 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def judge(n1: List[int], n2: List[int]) -> int:\n\tmin_length = min(len(n1),len(n2))\n\tpass_count = 0\n\tfor i in range(min_length):\n\t\tpass_count += get_lowest_16(n1[i]) == get_lowest_16(n2[i])\n\n\treturn pass_count", "def compareones_c(w1,w2,tn):\n nw1 = np.int_(np.copy(w1))\n nw2 = np.int_(np.copy(w...
[ "0.6195848", "0.61714214", "0.5978105", "0.59495175", "0.58783364", "0.58137816", "0.5750367", "0.57387525", "0.56986475", "0.56565875", "0.5649709", "0.55772007", "0.55763227", "0.557493", "0.55728525", "0.5571845", "0.557098", "0.5570138", "0.5562759", "0.55625623", "0.5560...
0.0
-1
Method which calculus game score through pss (player standard stats)
def set_game_score(self): bx = self.get_standard_stats() tcInt = bx["t2p_int"] + bx["t3p_int"] tcConv = bx["t2p_conv"] + bx["t3p_conv"] ft = bx["tl_int"] - bx["tl_conv"] ptos = bx["t2p_conv"]*2 + bx["t3p_conv"]*3 + bx["tl_conv"] #Con "%.2f" % round(x, 2) además de redondear a dos decimales, nos quedamos con los ceros finales result = "%.2f" % round(float(ptos) + (float(0.4)*float(tcConv)) - (float(0.7)*float(tcInt)) - (float(0.4)*float(ft)) + (float(0.7)*float(bx["reb_of"])) + (float(0.3)*float(bx["reb_def"])) + float(bx["steals"]) + (float(0.7)*float(bx["assists"])) + (float(0.7)*float(bx["block_shots"])) - (float(0.4)*float(bx["fouls_cm"])) - float(bx["turnovers"]), 2) self.game_score = "%.2f" % round(Decimal(result)/bx["games"], 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scoreEvaluationFunction(gameState):\n return gameState.getScore()", "def scoreEvaluationFunction(currentGameState):\r\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n ret...
[ "0.68148273", "0.68119067", "0.6762667", "0.6762667", "0.6762667", "0.6762667", "0.6762667", "0.6762667", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0.67188406", "0...
0.6767508
2
Method which calculate DRE metric
def set_dre(self): bx = self.get_standard_stats() ptos = float(bx["t2p_conv"]*2 + bx["t3p_conv"]*3 + bx["tl_conv"]) fga = float(bx["t2p_int"] + bx["t3p_int"]) trb = float(bx["reb_def"] + bx["reb_of"]) d1 = ptos + (0.2*trb) + (1.7*float(bx["steals"])) + (0.535*float(bx["block_shots"])) + (0.5*float(bx["assists"])) d2 = (0.9*fga) + (0.35*float(bx["tl_int"])) + (1.4*float(bx["turnovers"])) + (0.136*float(bx["minutes"])) result = "%.2f" % round(d1-d2, 2) self.dre = "%.2f" % round(Decimal(result)/bx["games"], 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_metrics(self):\n pass", "def calculate(self) -> float:", "def calculate(self):", "def calculate_dataset_metrics(self):\n pass", "def calculateDataRate(self):\n pass", "def getMeasures():", "def denominator(self, ???):", "def calculate(self):\r\n\r\n pass", "d...
[ "0.6594812", "0.65652555", "0.647981", "0.647346", "0.63362116", "0.62262213", "0.6224595", "0.618598", "0.61559206", "0.6152622", "0.61477774", "0.6140981", "0.61260253", "0.61076653", "0.6089313", "0.60319406", "0.60208917", "0.59696853", "0.5940918", "0.59328157", "0.59270...
0.56650126
49
Method which calculates TS Percentage metric for a player
def set_ts_percentage(self): bx = self.get_standard_stats() ptos = float(bx["t2p_conv"]*2 + bx["t3p_conv"]*3 + bx["tl_conv"]) tcInt = float(bx["t2p_int"] + bx["t3p_int"]) tsAttempts = float(tcInt + (0.44*float(bx["tl_int"]))) result = 0.00 if tsAttempts > 0.00: result = (ptos/(2*tsAttempts))*100 self.ts_percentage = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pct(self):\n\t\treturn self.bottle.pct()", "def percentCheck(currentTimeLabel, totalTimeLabel):\n # Updated 11/19/16\n try:\n progPercent = float(currentTimeLabel) / float(totalTimeLabel) * 100\n except (ValueError , ZeroDivisionError):\n progPercent = 0\n \n return progPerce...
[ "0.67912775", "0.67874295", "0.66820705", "0.6615735", "0.6549017", "0.65139776", "0.65139776", "0.6465816", "0.64634633", "0.6446837", "0.64173675", "0.64117974", "0.6390257", "0.63858217", "0.6356354", "0.6316925", "0.6298419", "0.62818795", "0.6281119", "0.6268235", "0.622...
0.745896
0
Method which calculate USG% for each player from each team
def set_usg_percentage(self): bx = self.get_standard_stats() team = self.get_team_stats() tcInt = bx["t2p_int"] + bx["t3p_int"] a = tcInt + (Decimal('0.44')*bx["tl_int"]) + bx["turnovers"] b = team["minutes"]/5 c = (team["t2p_int"] + team["t3p_int"]) + (Decimal('0.44')*team["tl_int"]) + team["turnovers"] result = 0.00 if bx["minutes"] > 0: result = ((Decimal(a)*Decimal(b))/(bx["minutes"]*c))*100 self.usg_percentage = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n ...
[ "0.67434204", "0.6500815", "0.6359718", "0.63065845", "0.63019216", "0.6301762", "0.6299715", "0.6148283", "0.6126874", "0.6103746", "0.59502584", "0.59436804", "0.59398586", "0.5932481", "0.58808863", "0.58741057", "0.58336693", "0.58299714", "0.57860583", "0.5785277", "0.57...
0.6911841
0
Method which calculate Total Rebound Percentage
def set_total_reb_percentage(self): bx = self.get_standard_stats() team = self.get_team_stats() opp_team = self.get_opp_team_stats() player_rebounds = bx["reb_def"] + bx["reb_of"] team_rebounds = team["reb_def"] + team["reb_of"] opp_team_rebounds = opp_team["reb_def"] + opp_team["reb_of"] result = 0.00 try: if bx["minutes"] > 0 and bx["minutes"] > 0: result = ((player_rebounds * (team["minutes"]/5)) / (bx["minutes"] * (team_rebounds + opp_team_rebounds)))*100 except ZeroDivisionError: print(BCOLORS.FAIL + "Error: División por cero" + BCOLORS.ENDC) except InvalidOperation: print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC) self.total_reb_percentage = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_total_reb_of_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n result = 0.00\n try:\n if bx[\"reb_of\"] > 0 and bx[\"minutes\"] > 0:\n result = ((bx[\"reb_of\"] * (team[\"mi...
[ "0.6523484", "0.6491968", "0.63697445", "0.6303792", "0.6300233", "0.62549525", "0.6121597", "0.6087575", "0.60844654", "0.6044818", "0.6027688", "0.6009361", "0.5989413", "0.5969596", "0.59609574", "0.59609574", "0.59606254", "0.5904455", "0.5899307", "0.58770585", "0.584258...
0.6903449
0
Method which calculate Total Rebound Defensive Percentage
def set_total_reb_def_percentage(self): bx = self.get_standard_stats() team = self.get_team_stats() opp_team = self.get_opp_team_stats() result = 0.00 try: if bx["minutes"] > 0 and bx["minutes"] > 0: result = ((bx["reb_def"] * (team["minutes"]/5)) / (bx["minutes"] * (team["reb_def"] + opp_team["reb_of"])))*100 except ZeroDivisionError: print(BCOLORS.FAIL + "Error: División por cero" + BCOLORS.ENDC) except InvalidOperation: print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC) self.total_reb_def_percentage = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def set_total_reb_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_te...
[ "0.6720814", "0.65615296", "0.64223516", "0.63799196", "0.63289535", "0.63207704", "0.63154423", "0.62732965", "0.6261528", "0.6167289", "0.6124796", "0.61081874", "0.6068133", "0.60473025", "0.5999613", "0.5988745", "0.59840715", "0.5983306", "0.5973883", "0.59696484", "0.59...
0.6653622
1
Method which calculate Total Rebound Ofensive Percentage
def set_total_reb_of_percentage(self): bx = self.get_standard_stats() team = self.get_team_stats() opp_team = self.get_opp_team_stats() result = 0.00 try: if bx["reb_of"] > 0 and bx["minutes"] > 0: result = ((bx["reb_of"] * (team["minutes"]/5)) / (bx["minutes"] * (team["reb_of"] + opp_team["reb_def"])))*100 except ZeroDivisionError: print(BCOLORS.FAIL + "Error: División por cero" + BCOLORS.ENDC) self.total_reb_of_percentage = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_total_reb_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n player_rebounds = bx[\"reb_def\"] + bx[\"reb_of\"]\n team_rebounds = team[\"reb_def\"] + team[\"reb_of\"]\n opp_team_rebounds = opp...
[ "0.68673694", "0.66328514", "0.65427613", "0.64778984", "0.64688313", "0.64383364", "0.63684523", "0.6308236", "0.6169081", "0.6124151", "0.60928077", "0.60605025", "0.6027442", "0.60271585", "0.60221356", "0.60200536", "0.6008577", "0.6006814", "0.5980527", "0.5972647", "0.5...
0.65185946
3
Method which calculate Steals Percentage of a player
def set_steals_percentage(self): bx = self.get_standard_stats() team = self.get_team_stats() opp_team = self.get_opp_team_stats() poss = self.get_team_possessions() result = 0.00 if bx["minutes"] > 0: result = ((bx["steals"] * (team["minutes"]/Decimal('5'))) / Decimal(float(bx["minutes"]) * poss)) * 100 self.steals_percentage = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)", "def winning_percentage(self):\n return fl...
[ "0.6973858", "0.68804467", "0.68694776", "0.6837882", "0.6696774", "0.65600723", "0.6460961", "0.6452967", "0.64353186", "0.6403429", "0.6403429", "0.6369753", "0.6368016", "0.63537014", "0.63537014", "0.63495165", "0.6336215", "0.632231", "0.62704694", "0.6268536", "0.626853...
0.77550447
0
Method which calculate Effective Field Goal (eTC) of a player
def set_effective_field_goal_percentage(self): bx = self.get_standard_stats() tcInt = float(bx["t2p_int"] + bx["t3p_int"]) tcConv = float(bx["t2p_conv"] + bx["t3p_conv"]) result = 0.00 if tcInt > 0: result = ((tcConv + (0.5 * float(bx["t3p_conv"]))) / tcInt) * 100 self.effective_field_goal_percentage = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getETA():", "def getETA():", "def value(self):\n #import pdb; pdb.set_trace()\n return ((self.team1.get_cur_hp() / self.team1.get_total_hp()) - \n (self.team2.get_cur_hp() / self.team2.get_total_hp()))", "def calculate_utility(state, player):\n thisPlayer = player\n \n ...
[ "0.5827492", "0.5827492", "0.5789208", "0.5724517", "0.5719123", "0.5719123", "0.5719123", "0.56517696", "0.5605782", "0.55988175", "0.5591225", "0.55509675", "0.55410445", "0.5514421", "0.5474888", "0.54631376", "0.54623526", "0.5455384", "0.54417086", "0.5441079", "0.543202...
0.5157421
68
Method which calculate Assists Percentage of a player
def set_assists_percentage(self): bx = self.get_standard_stats() team = self.get_team_stats() team_tc_conv = team["t2p_conv"] + team["t3p_conv"] player_tc_conv = bx["t2p_conv"] + bx["t3p_conv"] result = 0.00 try: if bx["minutes"] > 0: result = (bx["assists"] / (((bx["minutes"] / (team["minutes"] / 5)) * team_tc_conv) - player_tc_conv))*100 result = result if result <= 100 and result >= 0 else 0 except ZeroDivisionError: print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC) except InvalidOperation: print(BCOLORS.WARNING + "Error: Invalid Operation" + BCOLORS.ENDC) self.assists_percentage = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n...
[ "0.7001746", "0.68421483", "0.6813", "0.67631215", "0.67387015", "0.6656908", "0.6627645", "0.6627645", "0.6618226", "0.6604493", "0.66013306", "0.6542234", "0.6510403", "0.64416814", "0.64405453", "0.64405453", "0.64318055", "0.64273596", "0.63814425", "0.63652843", "0.63540...
0.767859
0
Method which calculate Ratio Assists Per Turnover of a player
def set_assists_per_turnover(self): bx = self.get_standard_stats() ratio = bx["assists"] if bx["turnovers"] > 0: ratio = bx["assists"] / bx["turnovers"] self.assists_per_turnover = "%.2f" % round(ratio, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&loc...
[ "0.69899476", "0.66916925", "0.66868806", "0.661133", "0.64811707", "0.63243896", "0.62888366", "0.62563837", "0.623092", "0.6226826", "0.6220208", "0.62145406", "0.6178597", "0.6162016", "0.6135676", "0.6127374", "0.6116254", "0.60962725", "0.60838556", "0.6064083", "0.60435...
0.7008181
0
Method which calculate Assists Ratio of a player
def set_assists_ratio(self): bx = self.get_standard_stats() tcInt = float(bx["t2p_int"] + bx["t3p_int"]) denominador = tcInt + (0.44 * float(bx["tl_int"])) + float(bx["assists"]) +float(bx["turnovers"]) numerador = float(bx["assists"]) result = 0.00 if denominador > 0: result = (numerador / denominador) * 100 self.assists_ratio = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&loc...
[ "0.72646964", "0.64316165", "0.6352074", "0.6281999", "0.6247194", "0.6209247", "0.6207199", "0.61734754", "0.61600745", "0.61037296", "0.609562", "0.607315", "0.60665053", "0.60465765", "0.6046293", "0.60187536", "0.6009164", "0.59791255", "0.5958189", "0.59420407", "0.58815...
0.7257836
1
Method which calculate Defensive Ratio of a player. The total points received in 100 possessions
def set_defensive_ratio(self): bx = self.get_standard_stats() team = self.get_team_stats() opp_team = self.get_opp_team_stats() if bx["minutes"] > 0: opp_fga = opp_team["t2p_int"] + opp_team["t3p_int"] opp_fgm = opp_team["t2p_conv"] + opp_team["t3p_conv"] try: dor = Decimal(opp_team["reb_of"] / (opp_team["reb_of"] + team["reb_def"])) except ZeroDivisionError: print(BCOLORS.FAIL + "Error: División por cero" + BCOLORS.ENDC) dor = 0 except InvalidOperation: print(BCOLORS.FAIL + "Error: Invalid Operation" + BCOLORS.ENDC) dor = 0 try: dfg = Decimal(opp_fgm / opp_fga) except ZeroDivisionError: print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC) dfg = 0 try: fmwt = Decimal((dfg * (1 - dor)) / (dfg * (1 - dor) + (1 - dfg) * dor)) except: fmwt = 0 stops1 = bx["steals"] + bx["block_shots"] * fmwt * (1 - Decimal('1.07') * dor) + bx["reb_def"] * (1 - fmwt) try: stops2 = (Decimal((opp_fga - opp_fgm - team["block_shots"]) / team["minutes"]) * fmwt * (1 - Decimal('1.07') * dor) + Decimal((opp_team["turnovers"] - team["steals"]) / team["minutes"])) * bx["minutes"] + Decimal(bx["fouls_cm"] / team["fouls_cm"]) * Decimal('0.4') * opp_team["tl_int"] * (1 - Decimal(opp_team["tl_conv"] / opp_team["tl_int"]))**2 except ZeroDivisionError: print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC) stops2 = 0 except InvalidOperation: print(BCOLORS.WARNING + "Error: Invalid Operation" + BCOLORS.ENDC) stops2 = 0 stops = stops1 + stops2 poss = self.get_team_possessions() if bx["minutes"] > 0: stop_percentage = (float(stops) * float(opp_team["minutes"])) / (float(poss) * float(bx["minutes"])) else: stop_percentage = 0.00 opp_points = opp_team["t2p_conv"] * 2 + opp_team["t3p_conv"] * 3 + opp_team["tl_conv"] team_defensive_rating = 100 * (float(opp_points) / poss) try: d_pts_per_scposs = float(opp_points) / (float(opp_fgm) + (1 - (1 - (float(opp_team["tl_conv"]) / float(opp_team["tl_int"])))**2) * float(opp_team["tl_int"])*0.4) result = Decimal(team_defensive_rating) + 
Decimal('0.2') * (100 * Decimal(d_pts_per_scposs) * (1 - Decimal(stop_percentage)) - Decimal(team_defensive_rating)) except ZeroDivisionError: print(BCOLORS.WARNING + "Error: División por cero" + BCOLORS.ENDC) d_pts_per_scposs = 0 result = 0.00 # print("dor: " + str(dor)) # print("dfg: " + str(dfg)) # print("fmwt: " + str(fmwt)) # print("stops1: " + str(stops1)) # print("stops2: " + str(stops2)) # print("stops: " + str(stops)) # print("poss: " + str(poss)) # print("stop_percentage: " + str(stop_percentage)) # print("opp_points: " + str(opp_points)) # print("team_defensive_rating: " + str(team_defensive_rating)) # print("d_pts_per_scposs: " + str(d_pts_per_scposs)) # print("drtg: " + str(result) + "\n") else: result = 0.00 self.drtg = "%.2f" % round(result, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def player_ratio(self, ctx):\r\n player = ctx.message.content.split(' ')[1]\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/character/zul\\'jin/' + player + '?fields=pvp&loc...
[ "0.6531963", "0.6401618", "0.61817557", "0.6132738", "0.6081823", "0.6071461", "0.60199296", "0.5966795", "0.5962311", "0.59621847", "0.5957963", "0.59393156", "0.59319943", "0.5922137", "0.5918801", "0.5913682", "0.5907549", "0.58991516", "0.5896585", "0.5889761", "0.5885497...
0.65123755
1