| code (string, lengths 31–1.05M) | apis (list) | extract_api (string, lengths 97–1.91M) |
|---|---|---|
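Each row pairs a Python source file (`code`) with the list of library APIs it calls (`apis`) and a per-call breakdown (`extract_api`); the rows below are shown truncated by the viewer. The sketch that follows parses one complete `extract_api` entry (copied from the nimfa/numpy row further down). The field names are inferred from the visible data rather than documented anywhere in this dump, so treat them as assumptions.

```python
# A minimal sketch, assuming each extract_api cell is the repr of a list of
# tuples with the field layout guessed below.
import ast

# One complete entry copied from the nimfa/numpy row further down.
cell = ("[((38, 61), 'numpy.random.rand', 'np.random.rand', (['(40)', '(100)'], {}), "
        "'(40, 100)\\n', (52, 61), True, 'import numpy as np\\n')]")

for entry in ast.literal_eval(cell):
    (call_span,            # character offsets of the call inside the `code` string
     api_name,             # fully qualified API name, e.g. 'numpy.random.rand'
     local_name,           # name as written in the source, e.g. 'np.random.rand'
     (pos_args, kw_args),  # positional / keyword arguments as source strings
     call_text,            # the argument list as it appears in the source
     arg_span,             # character offsets of the argument list (assumed)
     aliased,              # boolean flag, possibly "import uses an alias" (assumed)
     import_stmt) = entry  # the import statement that binds local_name
    print(api_name, call_span, import_stmt.strip())
```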
from __future__ import print_function
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
try:
import cPickle as pickle
except ImportError:
import pickle
import copy
import numpy as np
from src.SpectralAnalysis import utils
from src.SpectralAnalysis import powerspectrum
from src.Spect... | [
"matplotlib.pyplot.title",
"src.SpectralAnalysis.posterior.StackPerPosterior",
"pickle.dump",
"src.SpectralAnalysis.utils.TwoPrint",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.close",
"src.SpectralAnalysis.mle.PerMaxLike",
"src.SpectralAnalysis.posterior.PerPosterior",... | [((6349, 6376), 'src.SpectralAnalysis.utils.TwoPrint', 'utils.TwoPrint', (['resfilename'], {}), '(resfilename)\n', (6363, 6376), False, 'from src.SpectralAnalysis import utils\n'), ((6589, 6643), 'src.SpectralAnalysis.mle.PerMaxLike', 'mle.PerMaxLike', (['self.ps'], {'fitmethod': 'fitmethod', 'obs': '(True)'}), '(self.... |
from math import pi
import numpy as np
from aleph.consts import *
from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent
from reamber.osu.OsuMap import OsuMap
# notes: 01:37:742 (97742|2,125moves993|2) -
SHAKES = np.array(
[100560, 100790, 101018, ... | [
"numpy.sin",
"numpy.array",
"numpy.linspace",
"reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD.svOsuMeasureLineMD"
] | [((277, 896), 'numpy.array', 'np.array', (['[100560, 100790, 101018, 101245, 104124, 104340, 104556, 104770, 107487, \n 107692, 107896, 108099, 110674, 110867, 111059, 111156, 111252, 111348,\n 113698, 113882, 114065, 114248, 116577, 116753, 116928, 117103, 119326,\n 119494, 119661, 119827, 121953, 122114, 122... |
from __future__ import division, print_function, unicode_literals
import streamlit as st
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
st.title('Mô hình dự đoán giá nhà đất tại hồ gươm... | [
"streamlit.set_option",
"streamlit.sidebar.write",
"sklearn.model_selection.train_test_split",
"numpy.ones",
"streamlit.title",
"streamlit.sidebar.title",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.dot",
"streamlit.sidebar.text_input",
"streamlit.sidebar.button",
"numpy.linalg.pinv",
... | [((271, 323), 'streamlit.title', 'st.title', (['"""Mô hình dự đoán giá nhà đất tại hồ gươm """'], {}), "('Mô hình dự đoán giá nhà đất tại hồ gươm ')\n", (279, 323), True, 'import streamlit as st\n'), ((449, 2354), 'numpy.array', 'np.array', (['[[40, 8, 2, 1800], [36, 3.5, 6, 450], [35, 4.5, 6, 450], [39, 9, 2, 1800],\n... |
# -*- coding: utf-8 -*-
#/usr/bin/python2
'''
June 2017 by <NAME>.
<EMAIL>.
https://www.github.com/kyubyong/transformer
'''
from __future__ import print_function
import codecs
import os
import tensorflow as tf
import numpy as np
from hyperparams import Hyperparams as hp
from data_load import load_test_data, load_de... | [
"train.Graph",
"data_load.load_en_vocab",
"numpy.zeros",
"tensorflow.train.Supervisor",
"tensorflow.ConfigProto",
"tensorflow.train.latest_checkpoint",
"numpy.array"
] | [((456, 480), 'train.Graph', 'Graph', ([], {'is_training': '(False)'}), '(is_training=False)\n', (461, 480), False, 'from train import Graph\n'), ((1395, 1410), 'data_load.load_en_vocab', 'load_en_vocab', ([], {}), '()\n', (1408, 1410), False, 'from data_load import load_test_data, load_de_vocab, load_en_vocab\n'), ((1... |
# -*- coding: utf-8 -*-
from __future__ import division
import unittest
import odelab
from odelab.scheme.stochastic import *
from odelab.system import *
from odelab.solver import *
import numpy as np
class Test_OU(unittest.TestCase):
def test_run(self):
sys = OrnsteinUhlenbeck()
scheme = EulerMaruyama()
sc... | [
"numpy.array"
] | [((397, 412), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (405, 412), True, 'import numpy as np\n'), ((745, 772), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0.0]'], {}), '([0, 0, 0, 0, 0.0])\n', (753, 772), True, 'import numpy as np\n')] |
import numpy as np
from collections import namedtuple
import skimage.measure
#import matplotlib.pyplot as plt
#import ipdb
# could maybe turn this into a generic mutable namedtuple
class Point2D(object):
__slots__ = "x", "y"
def __init__(self, x, y):
self.x = x
self.y = y
def __iter__(self):
'''itera... | [
"numpy.ones_like",
"numpy.empty",
"numpy.zeros",
"numpy.flipud",
"numpy.fliplr",
"collections.namedtuple",
"numpy.atleast_2d"
] | [((4577, 4629), 'collections.namedtuple', 'namedtuple', (['"""BoundingBox"""', '"""min_x max_x min_y max_y"""'], {}), "('BoundingBox', 'min_x max_x min_y max_y')\n", (4587, 4629), False, 'from collections import namedtuple\n'), ((9981, 10033), 'collections.namedtuple', 'namedtuple', (['"""BoundingBox"""', '"""min_x max... |
import mxnet as mx
from mxnet import nd, autograd
import numpy as np
##################################3
# X, y - training data
# n - number of data points in dataset
# Py - desired label distribution
###################################
def tweak_dist(X, y, num_labels, n, Py):
shape = (n, *X.shape[1:])
Xsh... | [
"numpy.full",
"numpy.random.multinomial",
"numpy.zeros",
"numpy.random.choice"
] | [((326, 341), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (334, 341), True, 'import numpy as np\n'), ((355, 381), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.int8'}), '(n, dtype=np.int8)\n', (363, 381), True, 'import numpy as np\n'), ((899, 948), 'numpy.full', 'np.full', (['num_labels', '((1.0 - p) / ... |
"""
Test functions for GEE
External comparisons are to R. The statmodels GEE implementation
should generally agree with the R GEE implementation for the
independence and exchangeable correlation structures. For other
correlation structures, the details of the correlation estimation
differ among implementations and t... | [
"numpy.random.seed",
"numpy.abs",
"pandas.read_csv",
"numpy.ones",
"statsmodels.genmod.generalized_estimating_equations.Multinomial",
"numpy.random.randint",
"numpy.arange",
"numpy.exp",
"numpy.random.normal",
"statsmodels.genmod.families.Gaussian",
"numpy.diag",
"os.path.join",
"pandas.Data... | [((21322, 21409), 'nose.runmodule', 'nose.runmodule', ([], {'argv': "[__file__, '-vvs', '-x', '--pdb', '--pdb-failure']", 'exit': '(False)'}), "(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n", (21336, 21409), False, 'import nose\n'), ((1230, 1255), 'os.path.abspath', 'os.path.abspath', (['... |
############################################## README #################################################
# This calculates threshold for an image depending upon its spiking activity.
########################################################################################################
import numpy as np
from snn.n... | [
"numpy.shape"
] | [((630, 648), 'numpy.shape', 'np.shape', (['train[0]'], {}), '(train[0])\n', (638, 648), True, 'import numpy as np\n')] |
# all the data from train data set, k-fold validation
import numpy as np
import onnxruntime
import torch
from pandas import read_csv
from tensorflow.python.keras.utils.np_utils import to_categorical
from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score
# load a single file as a numpy ar... | [
"numpy.dstack",
"torch.utils.data.DataLoader",
"pandas.read_csv",
"numpy.std",
"numpy.transpose",
"tensorflow.python.keras.utils.np_utils.to_categorical",
"onnxruntime.InferenceSession",
"numpy.mean"
] | [((365, 419), 'pandas.read_csv', 'read_csv', (['filepath'], {'header': 'None', 'delim_whitespace': '(True)'}), '(filepath, header=None, delim_whitespace=True)\n', (373, 419), False, 'from pandas import read_csv\n'), ((746, 763), 'numpy.dstack', 'np.dstack', (['loaded'], {}), '(loaded)\n', (755, 763), True, 'import nump... |
#! /usr/bin/enc python
# -*- coding: utf-8 -*-
# author: <NAME>
# email: <EMAIL>
"""
Swin Transformer
1. 类似CNN的层次化构建方法(Hierarchical Feature Maps),特征图尺寸中有对图像下采样4倍、8倍、以及16倍;
这样的Backbone有助于再此基础上构建目标检测、实例分割等任务。
2. 使用Windows Multi-Head Self-Attention (W-MSA)概念。减少计算量。计算复杂度从指数级降到线性级,Multi-head
Self-Attention只在每个Window... | [
"torch.nn.Dropout",
"BasicModule.Mlp",
"torch.jit.is_scripting",
"torch.roll",
"torch.nn.init.constant_",
"torch.nn.Softmax",
"torch.arange",
"torch.nn.functional.pad",
"torch.flatten",
"torch.nn.Linear",
"BasicModule.window_reverse",
"torch.zeros",
"BasicModule.DropPath",
"numpy.ceil",
... | [((6893, 7018), 'BasicModule.PatchEmbed', 'PatchEmbed', ([], {'patch_size': 'patch_size', 'in_c': 'in_chans', 'embed_dim': 'embed_dim', 'norm_layer': '(norm_layer if self.patch_norm else None)'}), '(patch_size=patch_size, in_c=in_chans, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n', (... |
# -*- coding:utf-8 -*-
import six
import numpy as np
from pyproj import Proj
import operator
from .exceptions import *
class NullProj(object):
"""
Similar to pyproj.Proj, but NullProj does not do actual conversion.
"""
@property
def srs(self):
return ''
def __call__(self, x, y, **kw... | [
"numpy.arctan2",
"numpy.floor",
"numpy.argmin",
"numpy.shape",
"numpy.sin",
"numpy.arange",
"numpy.round",
"numpy.meshgrid",
"numpy.ndim",
"numpy.arcsin",
"numpy.max",
"numpy.linspace",
"numpy.asarray",
"numpy.hypot",
"numpy.min",
"numpy.cos",
"numpy.alltrue",
"numpy.deg2rad",
"n... | [((1494, 1517), 'numpy.alltrue', 'np.alltrue', (['bad'], {'axis': '(0)'}), '(bad, axis=0)\n', (1504, 1517), True, 'import numpy as np\n'), ((1534, 1557), 'numpy.alltrue', 'np.alltrue', (['bad'], {'axis': '(1)'}), '(bad, axis=1)\n', (1544, 1557), True, 'import numpy as np\n'), ((2117, 2131), 'numpy.isscalar', 'np.isscal... |
# coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed ... | [
"optax.adam",
"tensorflow_datasets.load",
"jax.nn.log_softmax",
"absl.logging.info",
"flax.serialization.register_serialization_state",
"flax.serialization.from_state_dict",
"jax.nn.one_hot",
"os.path.dirname",
"optax.apply_updates",
"haiku.data_structures.to_immutable_dict",
"haiku.Conv2D",
"... | [((1307, 1416), 'flax.serialization.register_serialization_state', 'serialization.register_serialization_state', (['HKTree', '_ty_to_state_dict', '_ty_from_state_dict'], {'override': '(True)'}), '(HKTree, _ty_to_state_dict,\n _ty_from_state_dict, override=True)\n', (1349, 1416), False, 'from flax import serializatio... |
import os
import math
import time
import functools
import random
from tqdm import tqdm
import cv2
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
from pylab import rcParams
rcParams['figure.figsize'] = 20, 20 # noqa
from consts import FONT_SIZE
from utils import (
make_contour... | [
"PIL.Image.new",
"grpc_utils.KuzuClassify",
"utils.make_contours",
"matplotlib.pyplot.imshow",
"utils.filter_polygons_points_intersection",
"utils.vis_pred_center",
"utils.vis_pred_bbox_polygon",
"PIL.ImageDraw.Draw",
"cv2.resize",
"functools.partial",
"math.ceil",
"numpy.asarray",
"os.listd... | [((720, 733), 'grpc_utils.KuzuSegment', 'KuzuSegment', ([], {}), '()\n', (731, 733), False, 'from grpc_utils import KuzuSegment, KuzuClassify\n'), ((749, 763), 'grpc_utils.KuzuClassify', 'KuzuClassify', ([], {}), '()\n', (761, 763), False, 'from grpc_utils import KuzuSegment, KuzuClassify\n'), ((947, 971), 'utils.make_... |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose, assert_equal
import astropy.units as u
from astropy.table import Table
from gammapy.astro.population import (
add_observed_parameters,
add_pulsar_parameters,
add_pwn_parameters,
add_snr_parameters,... | [
"gammapy.astro.population.make_catalog_random_positions_cube",
"astropy.table.Table",
"astropy.units.Quantity",
"gammapy.astro.population.add_observed_parameters",
"gammapy.astro.population.make_base_catalog_galactic",
"gammapy.astro.population.add_pulsar_parameters",
"gammapy.astro.population.add_pwn_p... | [((498, 548), 'gammapy.astro.population.make_catalog_random_positions_cube', 'make_catalog_random_positions_cube', ([], {'random_state': '(0)'}), '(random_state=0)\n', (532, 548), False, 'from gammapy.astro.population import add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_ba... |
import numpy as np
import matplotlib.pyplot as plt
PI = np.pi
# =========================define sinc
# ---------------normalized
def sinc1(x):
PI = np.pi
x = np.array(x)
y = np.where(np.abs(PI * x) < 1e-38, 1.0, np.sin(PI * x) / (PI * x))
return y
def sinc_interpolation(x, t, T):
ns = np.arange... | [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.plot",
"numpy.fft.fft",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matp... | [((556, 580), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', 'Ns'], {}), '(-10, 10, Ns)\n', (567, 580), True, 'import numpy as np\n'), ((586, 614), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(Ns * 2)'], {}), '(-10, 10, Ns * 2)\n', (597, 614), True, 'import numpy as np\n'), ((640, 663), 'numpy.sin', 'np.s... |
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 2*np.pi, 10)
y = np.sin(x) #Función original
xvals = np.linspace(0, 2*np.pi, 50)
yinterp = np.interp(xvals, x, y)
plt.plot(x, y, 'o')
plt.plot(xvals, yinterp, '-x')
plt.show() | [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.sin",
"numpy.linspace",
"numpy.interp"
] | [((56, 85), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(10)'], {}), '(0, 2 * np.pi, 10)\n', (67, 85), True, 'import numpy as np\n'), ((88, 97), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (94, 97), True, 'import numpy as np\n'), ((124, 153), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(50)... |
# Copyright 2019 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required ... | [
"numpy.load",
"numpy.save",
"argparse.ArgumentParser",
"numpy.zeros",
"numpy.array",
"itertools.product"
] | [((862, 881), 'numpy.load', 'np.load', (['args.input'], {}), '(args.input)\n', (869, 881), True, 'import numpy as np\n'), ((1066, 1090), 'numpy.zeros', 'np.zeros', (['cell_map.shape'], {}), '(cell_map.shape)\n', (1074, 1090), True, 'import numpy as np\n'), ((1804, 1872), 'argparse.ArgumentParser', 'argparse.ArgumentPar... |
__copyright__ = """This code is licensed under the 3-clause BSD license.
Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.
See LICENSE.txt for details.
"""
import pytest
import scine_utilities as scine
import numpy as np
import os
class SigmaVectorEvaluatorPython(scine.SigmaVectorEvaluator):
d... | [
"scine_utilities.NonOrthogonalDavidson",
"scine_utilities.OrthogonalDavidson",
"numpy.diag_indices_from",
"scine_utilities.core.Log.silent",
"numpy.transpose",
"numpy.ones",
"numpy.linalg.eig",
"scine_utilities.SigmaVectorEvaluator.__init__",
"numpy.identity",
"scine_utilities.IndirectSigmaVectorE... | [((687, 711), 'numpy.random.rand', 'np.random.rand', (['(100)', '(100)'], {}), '(100, 100)\n', (701, 711), True, 'import numpy as np\n'), ((927, 969), 'scine_utilities.IndirectSigmaVectorEvaluator', 'scine.IndirectSigmaVectorEvaluator', (['matrix'], {}), '(matrix)\n', (961, 969), True, 'import scine_utilities as scine\... |
# coding: utf-8
# # 使用预训练的VGG模型Fine-tune CNN
# In[1]:
# Import packs
import numpy as np
import os
import scipy.io
from scipy.misc import imread, imresize
import matplotlib.pyplot as plt
import skimage.io
import skimage.transform
import tensorflow as tf
get_ipython().magic(u'matplotlib inline')
cwd = os.getcwd()
pri... | [
"tensorflow.matmul",
"os.path.isfile",
"numpy.random.randint",
"numpy.mean",
"os.path.join",
"numpy.ndarray",
"tensorflow.nn.relu",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"numpy.transpose",
"tensorflow.placeholder",
"tensorflow.cast",
"numpy.reshape",
"tensorflow.initialize_all_v... | [((305, 316), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (314, 316), False, 'import os\n'), ((1812, 1840), 'numpy.ndarray', 'np.ndarray', (['(imgcnt, nclass)'], {}), '((imgcnt, nclass))\n', (1822, 1840), True, 'import numpy as np\n'), ((2630, 2668), 'numpy.random.randint', 'np.random.randint', (['imgcnt'], {'size': 'i... |
"""A population model that creates samples with more and more variants. Suitable for the aligner paper experiments
^ = intersection
E = subset
vx ^ v0 = v0
vx ^ v1 = v0
...
vx ^ vn = v0
v0 E v1
v1 E v2
v2 E v3
...
v(n-1) E vn
This plugin does not honor the site frequency spectrum model and ignores the original 'p' v... | [
"numpy.random.RandomState"
] | [((2112, 2143), 'numpy.random.RandomState', 'np.random.RandomState', (['rng_seed'], {}), '(rng_seed)\n', (2133, 2143), True, 'import numpy as np\n')] |
"""
Functions to rotate a point by a known euler pole.
"""
import numpy as np
from . import fault_vector_functions
def point_rotation_by_Euler_Pole(Point, Euler_Pole):
"""
Compute the velocity of rotation of a point about an Euler pole on a spherical earth.
This function is useful for computing the veloc... | [
"numpy.deg2rad",
"numpy.cross",
"numpy.sin",
"numpy.cos",
"numpy.dot",
"numpy.sqrt"
] | [((1109, 1133), 'numpy.cross', 'np.cross', (['omega', 'R_point'], {}), '(omega, R_point)\n', (1117, 1133), True, 'import numpy as np\n'), ((2334, 2349), 'numpy.deg2rad', 'np.deg2rad', (['lon'], {}), '(lon)\n', (2344, 2349), True, 'import numpy as np\n'), ((2469, 2511), 'numpy.sqrt', 'np.sqrt', (['(R_fixed * R_fixed - X... |
import numpy as np
import random
import os, sys
from scipy import ndimage
import healpy as hp
from astropy.io import fits
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
from astropy.io import fits
from importlib import reload
from pycs.misc.cosmostat_init import *
from pycs.misc.mr_prog import... | [
"healpy.write_map",
"healpy.alm2map",
"healpy.mollview",
"numpy.copy",
"healpy.map2alm",
"numpy.deg2rad",
"healpy.ud_grade",
"healpy.nside2npix",
"numpy.where",
"healpy.npix2nside",
"healpy.map2alm_spin",
"healpy.alm2map_spin",
"healpy.smoothing",
"healpy.read_map",
"numpy.sqrt"
] | [((634, 699), 'numpy.where', 'np.where', (['(bincount > 0.5)', '(bincount_weighted / bincount)', 'hp.UNSEEN'], {}), '(bincount > 0.5, bincount_weighted / bincount, hp.UNSEEN)\n', (642, 699), True, 'import numpy as np\n'), ((938, 953), 'healpy.read_map', 'hp.read_map', (['FN'], {}), '(FN)\n', (949, 953), True, 'import h... |
import mobula.layers as L
import numpy as np
def test_sigmoid():
X = ((np.arange(10000) - 5000) / 1000.0).reshape((-1, 1, 1, 1))
data = L.Data(X, "data")
data.reshape()
l = L.Sigmoid(data)
l.reshape()
assert l.Y.shape == X.shape
l.forward()
l.dY = np.random.random(l.Y.shape) * 10
l.... | [
"mobula.layers.PReLU",
"mobula.layers.Tanh",
"numpy.allclose",
"mobula.layers.ReLU",
"numpy.zeros",
"numpy.square",
"numpy.random.random",
"numpy.arange",
"numpy.exp",
"mobula.layers.Data",
"mobula.layers.SELU",
"mobula.layers.Sigmoid"
] | [((145, 162), 'mobula.layers.Data', 'L.Data', (['X', '"""data"""'], {}), "(X, 'data')\n", (151, 162), True, 'import mobula.layers as L\n'), ((190, 205), 'mobula.layers.Sigmoid', 'L.Sigmoid', (['data'], {}), '(data)\n', (199, 205), True, 'import mobula.layers as L\n'), ((342, 352), 'numpy.exp', 'np.exp', (['(-X)'], {}),... |
import model3 as M
import numpy as np
import tensorflow as tf
params = np.load('lstmpm_d1.npy').item()
params2 = np.load('lstmpm_d2.npy').item()
def get_conv(name):
res = []
# print(params[name])
res.append(params[name]['weights'])
res.append(params[name]['bias'])
# print(res[0].shape)
return res
def get_c... | [
"model3.Saver",
"numpy.load",
"model3.AvgPool",
"model3.MaxPool",
"tensorflow.pad",
"numpy.transpose",
"tensorflow.concat",
"numpy.ones",
"tensorflow.Variable",
"tensorflow.tanh",
"tensorflow.sigmoid"
] | [((7984, 8013), 'numpy.transpose', 'np.transpose', (['x', '[0, 3, 1, 2]'], {}), '(x, [0, 3, 1, 2])\n', (7996, 8013), True, 'import numpy as np\n'), ((8365, 8394), 'numpy.transpose', 'np.transpose', (['x', '[0, 3, 1, 2]'], {}), '(x, [0, 3, 1, 2])\n', (8377, 8394), True, 'import numpy as np\n'), ((8500, 8513), 'model3.Sa... |
import gpflow
import matplotlib.pyplot as plt
import numpy as np
from robustgp import ConditionalVariance
X = np.random.rand(150, 1)
Y = 0.8 * np.cos(10 * X) + 1.2 * np.sin(8 * X + 0.3) + np.cos(17 * X) * 1.2 + np.random.randn(*X.shape) * 0.1
gpr = gpflow.models.GPR((X, Y), gpflow.kernels.SquaredExponential())
opt = ... | [
"robustgp.ConditionalVariance",
"gpflow.kernels.SquaredExponential",
"matplotlib.pyplot.show",
"numpy.random.randn",
"numpy.argmax",
"gpflow.optimizers.Scipy",
"gpflow.utilities.read_values",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"numpy.random.rand",
"matplotlib.pyplot.subplots"
] | [((111, 133), 'numpy.random.rand', 'np.random.rand', (['(150)', '(1)'], {}), '(150, 1)\n', (125, 133), True, 'import numpy as np\n'), ((320, 345), 'gpflow.optimizers.Scipy', 'gpflow.optimizers.Scipy', ([], {}), '()\n', (343, 345), False, 'import gpflow\n'), ((446, 481), 'gpflow.kernels.SquaredExponential', 'gpflow.kern... |
import pdb
import time
import lib.tf_silent
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import Normalize
from matplotlib.gridspec import GridSpec
import os
import pickle
import argparse
from lib.pinn import PINN
from lib.network import Ne... | [
"matplotlib.pyplot.title",
"pickle.dump",
"numpy.abs",
"argparse.ArgumentParser",
"numpy.floor",
"os.path.isfile",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.contour",
"pickle.load",
"numpy.round",
"matplotlib.pyplot.tight_layout",
"numpy.meshgrid",
"matpl... | [((395, 420), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (418, 420), False, 'import argparse\n'), ((1189, 1204), 'tensorflow.constant', 'tf.constant', (['xy'], {}), '(xy)\n', (1200, 1204), True, 'import tensorflow as tf\n'), ((2285, 2302), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid... |
import gzip
import pandas as pd
import numpy as np
import io
import os
import re
import torch
import torch.utils.data as data_utils
import subprocess
import zipfile
import zlib
from Bio import AlignIO
from Bio.SeqIO.FastaIO import FastaIterator, as_fasta
from Bio.Align.Applications import MuscleCommandline
class Ind... | [
"torch.from_numpy",
"io.StringIO",
"zipfile.ZipFile",
"numpy.random.seed",
"torch.utils.data.DataLoader",
"io.BytesIO",
"pandas.read_csv",
"Bio.SeqIO.FastaIO.FastaIterator",
"torch.load",
"numpy.floor",
"gzip.open",
"pandas.isnull",
"Bio.Align.Applications.MuscleCommandline",
"torch.save",... | [((2335, 2379), 'pandas.read_csv', 'pd.read_csv', (['input[1]'], {'index_col': '(0)', 'header': '(0)'}), '(input[1], index_col=0, header=0)\n', (2346, 2379), True, 'import pandas as pd\n'), ((2393, 2414), 'pandas.read_csv', 'pd.read_csv', (['input[0]'], {}), '(input[0])\n', (2404, 2414), True, 'import pandas as pd\n'),... |
#!/usr/bin/python
# This file is licensed under MIT license.
# See the LICENSE file in the project root for more information.
import unittest
import rostest
import rosunit
import numpy as np
from numpy.testing import assert_almost_equal
from std_msgs.msg import Header
from geometry_msgs.msg import PoseStamped, Pose,... | [
"geometry_msgs.msg.PoseStamped",
"car_core.common.msgs_helpers.path_poses_to_array",
"car_core.common.msgs_helpers.array_to_point",
"rosunit.unitrun",
"car_core.common.msgs_helpers.array_to_path_poses",
"car_core.common.geom_helpers.get_closest_path_point",
"car_core.common.msgs_helpers.point_to_array",... | [((3932, 3997), 'rosunit.unitrun', 'rosunit.unitrun', (['"""car_core"""', '"""test_msgs_helpers"""', 'TestMsgsHelpers'], {}), "('car_core', 'test_msgs_helpers', TestMsgsHelpers)\n", (3947, 3997), False, 'import rosunit\n'), ((4002, 4067), 'rosunit.unitrun', 'rosunit.unitrun', (['"""car_core"""', '"""test_geom_helpers""... |
import cv2
import numpy as np
imagen = cv2.imread('imagen.jpg')
imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2RGB)
print(imagen.shape)
print(imagen[0][0][0])
imagen = cv2.resize(imagen,(256, 256))
imagen = cv2.imread('imagen.jpg')
imagen = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY)
print(imagen.shape)
print(imagen[0][0])
... | [
"cv2.cvtColor",
"cv2.imwrite",
"numpy.zeros",
"cv2.imread",
"cv2.resize"
] | [((40, 64), 'cv2.imread', 'cv2.imread', (['"""imagen.jpg"""'], {}), "('imagen.jpg')\n", (50, 64), False, 'import cv2\n'), ((74, 113), 'cv2.cvtColor', 'cv2.cvtColor', (['imagen', 'cv2.COLOR_BGR2RGB'], {}), '(imagen, cv2.COLOR_BGR2RGB)\n', (86, 113), False, 'import cv2\n'), ((165, 195), 'cv2.resize', 'cv2.resize', (['ima... |
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
# --------------------------------------------------------
# Reorg... | [
"torch.FloatTensor",
"torch.cat",
"torch.nonzero",
"torch.max",
"torch.arange",
"numpy.random.permutation",
"numpy.random.rand",
"numpy.round",
"torch.from_numpy"
] | [((1065, 1114), 'torch.FloatTensor', 'torch.FloatTensor', (['cfg.TRAIN.BBOX_NORMALIZE_MEANS'], {}), '(cfg.TRAIN.BBOX_NORMALIZE_MEANS)\n', (1082, 1114), False, 'import torch\n'), ((1150, 1198), 'torch.FloatTensor', 'torch.FloatTensor', (['cfg.TRAIN.BBOX_NORMALIZE_STDS'], {}), '(cfg.TRAIN.BBOX_NORMALIZE_STDS)\n', (1167, ... |
# -*- coding: utf-8 -*-
"""Model.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1QPnK5YOh8kRYPOOue6txwrgUqwKOMS0I
"""
# # Use seaborn for pairplot
# !pip install -q seaborn
# !pip install tensorflow==2.0.0
# # Use some functions from tensorflow_... | [
"pandas.DataFrame",
"matplotlib.rc",
"warnings.filterwarnings",
"pandas.read_csv",
"datetime.strptime",
"tensorflow.keras.layers.Dense",
"pandas.isnull",
"sklearn.preprocessing.LabelEncoder",
"matplotlib.pyplot.style.use",
"numpy.int64",
"tensorflow.keras.optimizers.RMSprop"
] | [((1373, 1405), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (1386, 1405), True, 'import matplotlib.pyplot as plt\n'), ((1406, 1455), 'matplotlib.rc', 'mpl.rc', (['"""patch"""'], {'edgecolor': '"""dimgray"""', 'linewidth': '(1)'}), "('patch', edgecolor='dimgr... |
from __future__ import absolute_import
# --------------------------------------------------------
# Spatial Attention Network withFeature Mimicking
# Copyright (c) 2018 University of Illinois
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# --------... | [
"numpy.amax",
"numpy.argsort",
"numpy.zeros"
] | [((1534, 1559), 'numpy.amax', 'np.amax', (['cls_prob'], {'axis': '(1)'}), '(cls_prob, axis=1)\n', (1541, 1559), True, 'import numpy as np\n'), ((1775, 1826), 'numpy.zeros', 'np.zeros', (['(proposals.shape[0], 1)'], {'dtype': 'np.float32'}), '((proposals.shape[0], 1), dtype=np.float32)\n', (1783, 1826), True, 'import nu... |
"""SentencePiece Tokenization for Wiki Dataset
Example:
* python scripts/wiki_sp_tokenize_json.py --word --unigram
"""
import gzip
import json
import subprocess
from pathlib import Path
import sentencepiece as spm
import joblib
import numpy as np
import click
from tqdm import tqdm
from opencc import OpenCC
from wi... | [
"subprocess.run",
"gzip.open",
"sentencepiece.SentencePieceProcessor",
"json.loads",
"wiki_tokenize_json.clean_text",
"click.option",
"click.command",
"opencc.OpenCC",
"pathlib.Path",
"numpy.array",
"wiki_tokenize_json.filter_texts"
] | [((564, 577), 'opencc.OpenCC', 'OpenCC', (['"""t2s"""'], {}), "('t2s')\n", (570, 577), False, 'from opencc import OpenCC\n'), ((2890, 2905), 'click.command', 'click.command', ([], {}), '()\n', (2903, 2905), False, 'import click\n'), ((2907, 2943), 'click.option', 'click.option', (['"""--word"""'], {'is_flag': '(True)'}... |
"""
Tests with the Izhikevich neuron model.
"""
import numpy as np
import matplotlib.pyplot as plt
import pyNN.nest as sim
from pyNN.utility.plotting import Figure, Panel
# === Configure the simulator ================================================
duration = 100
dt = 0.01
sim.setup(timestep=dt, min_delay=0.1)
... | [
"pyNN.nest.run",
"numpy.count_nonzero",
"pyNN.nest.setup",
"pyNN.nest.end",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.logspace",
"numpy.array",
"pyNN.utility.plotting.Panel",
"matplotlib.pyplot.savefig",
"pyNN.nest.Izhikevich"
] | [((281, 318), 'pyNN.nest.setup', 'sim.setup', ([], {'timestep': 'dt', 'min_delay': '(0.1)'}), '(timestep=dt, min_delay=0.1)\n', (290, 318), True, 'import pyNN.nest as sim\n'), ((857, 874), 'pyNN.nest.run', 'sim.run', (['duration'], {}), '(duration)\n', (864, 874), True, 'import pyNN.nest as sim\n'), ((1345, 1402), 'mat... |
'''
utility functions
'''
__author__ = '<NAME>'
import os
from os.path import join
from os.path import abspath
import json
import pandas as pd
import numpy as np
from configs import config as cf
def is_available(filename):
'''
[filename] : str
'''
return os.path.isfile(filename)
def chunks(lst,... | [
"json.load",
"pandas.read_csv",
"os.path.isfile",
"os.path.join",
"pandas.concat",
"numpy.vstack"
] | [((278, 302), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (292, 302), False, 'import os\n'), ((965, 980), 'pandas.concat', 'pd.concat', (['data'], {}), '(data)\n', (974, 980), True, 'import pandas as pd\n'), ((1598, 1669), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'delimiter': '""",... |
"""
Test princomp extraction from CLI
"""
import argparse
import os
import numpy as np
from demo_utils import get_random_data
from hebbnets.networks import MultilayerHahNetwork
np.set_printoptions(suppress=True)
def _argparse():
parser = argparse.ArgumentParser(
prog="Testing HebbNet principal c... | [
"numpy.set_printoptions",
"demo_utils.get_random_data",
"argparse.ArgumentParser",
"hebbnets.networks.MultilayerHahNetwork",
"numpy.array_str",
"numpy.argsort",
"numpy.linalg.norm",
"numpy.array",
"numpy.squeeze"
] | [((186, 220), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (205, 220), True, 'import numpy as np\n'), ((254, 410), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""Testing HebbNet principal components"""', 'description': '"""Testing HebbNet p... |
# -*- coding: utf-8 -*-
"""
## Author: <NAME>
## Copyright: Copyright 2018-2019, Packt Publishing Limited
## Version: 0.0.1
## Maintainer: <NAME>
## Email: <EMAIL>
## Linkedin: https://www.linkedin.com/in/linus1/
## Contributor : {if you debug, append your name here}
## Contributor Email : {if you debug, appen... | [
"matplotlib.pyplot.xlim",
"sklearn.pipeline.Pipeline",
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"numpy.random.randn",
"sklearn.model_selection.cross_val_score",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.setp",
"sklearn.linear_mode... | [((618, 635), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (632, 635), True, 'import numpy as np\n'), ((804, 827), 'numpy.cos', 'np.cos', (['(1.5 * np.pi * X)'], {}), '(1.5 * np.pi * X)\n', (810, 827), True, 'import numpy as np\n'), ((1018, 1045), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsi... |
import numpy as np
import argparse
from utils import Audio
def sample_wav_audio(path):
audio = Audio()
mel = audio.audio_to_mel(path)
samples = audio.mel_sample(mel, width=128, k=5)
return samples
def save_embeddings(name, samples):
audio = Audio()
avg_embed = np.zeros(256, dtype=np.float32)
... | [
"utils.Audio",
"numpy.save",
"numpy.zeros",
"argparse.ArgumentParser"
] | [((100, 107), 'utils.Audio', 'Audio', ([], {}), '()\n', (105, 107), False, 'from utils import Audio\n'), ((263, 270), 'utils.Audio', 'Audio', ([], {}), '()\n', (268, 270), False, 'from utils import Audio\n'), ((287, 318), 'numpy.zeros', 'np.zeros', (['(256)'], {'dtype': 'np.float32'}), '(256, dtype=np.float32)\n', (295... |
import argparse
import logging
import os
import pickle
import random
import ujson
import sys
import math
from ctypes import c_ulong
from multiprocessing import Array, Queue
from multiprocessing.sharedctypes import RawArray
from queue import Empty
from time import time
import numpy as np
import resource
from scipy.spa... | [
"argparse.ArgumentParser",
"experiments.duplicate_bug_detection_deep_learning.getDataHandlerLexiconEmb",
"sklearn.metrics.accuracy_score",
"numpy.around",
"multiprocessing.Queue",
"data.bug_report_database.BugReportDatabase.fromJson",
"experiments.duplicate_bug_detection_deep_learning.getModel",
"skle... | [((2982, 3001), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2999, 3001), False, 'import logging\n'), ((3010, 3016), 'time.time', 'time', ([], {}), '()\n', (3014, 3016), False, 'from time import time\n'), ((6412, 6437), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6435, 6437)... |
import glob
import numpy as np
from matplotlib import pyplot as plt
for filename in glob.glob("*.dat"):
print(filename)
name = filename.split(".")[0]
data = np.loadtxt(filename, delimiter=",")
size = int(np.sqrt(len(data)))
data = data.reshape((size, size))
fig, ax = plt.subplots(figsize=(5.12,... | [
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"numpy.loadtxt",
"glob.glob",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig"
] | [((85, 103), 'glob.glob', 'glob.glob', (['"""*.dat"""'], {}), "('*.dat')\n", (94, 103), False, 'import glob\n'), ((170, 205), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'delimiter': '""","""'}), "(filename, delimiter=',')\n", (180, 205), True, 'import numpy as np\n'), ((293, 327), 'matplotlib.pyplot.subplots', 'plt... |
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 6 00:25:27 2017
@author: Wayne
"""
import pandas as pd
import xgboost as xgb
import numpy as np
from sklearn.model_selection import train_test_split
import pickle
#%%
mydf1= mydf[outliers.outliers==False]
z = np.log(data.trip_duration+1)
X = mydf1
Xtest ... | [
"pandas.DataFrame",
"sklearn.cluster.MiniBatchKMeans",
"numpy.log",
"xgboost.train",
"sklearn.model_selection.train_test_split",
"pandas.get_dummies",
"pickle.load",
"numpy.mean",
"numpy.exp",
"numpy.intersect1d",
"pandas.concat",
"xgboost.DMatrix",
"numpy.vstack"
] | [((273, 303), 'numpy.log', 'np.log', (['(data.trip_duration + 1)'], {}), '(data.trip_duration + 1)\n', (279, 303), True, 'import numpy as np\n'), ((342, 360), 'xgboost.DMatrix', 'xgb.DMatrix', (['Xtest'], {}), '(Xtest)\n', (353, 360), True, 'import xgboost as xgb\n'), ((899, 952), 'sklearn.model_selection.train_test_sp... |
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import cPickle
import ipdb
class Detector():
def __init__(self,weight_file_path,n_labels):
self.image_mean=[103.939,116.779,123.68]
self.n_labels=n_labels
with open(weight_file_path)as f:
self.pretrained_weights=cPickle.load(f)
def get_weight... | [
"tensorflow.nn.relu",
"tensorflow.constant_initializer",
"tensorflow.reshape",
"tensorflow.reduce_mean",
"tensorflow.concat",
"cPickle.load",
"tensorflow.variable_scope",
"tensorflow.nn.bias_add",
"tensorflow.nn.max_pool",
"tensorflow.matmul",
"tensorflow.get_variable",
"tensorflow.random_norm... | [((1521, 1539), 'numpy.prod', 'np.prod', (['shape[1:]'], {}), '(shape[1:])\n', (1528, 1539), True, 'import numpy as np\n'), ((1544, 1573), 'tensorflow.reshape', 'tf.reshape', (['bottom', '[-1, dim]'], {}), '(bottom, [-1, dim])\n', (1554, 1573), True, 'import tensorflow as tf\n'), ((2136, 2154), 'numpy.prod', 'np.prod',... |
import numpy as np
import os
import torch
import torch.nn as nn
import pytorch_lightning as pl
from data.VOCdevkit.vocdata import VOCDataset
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, Resize, ToTensor, Normalize, GaussianBlur
from torchvision.transforms.functional import Interp... | [
"numpy.delete",
"torch.gather",
"torch.utils.data.DataLoader",
"os.path.join",
"numpy.sum",
"torch.argsort",
"torchvision.transforms.ToTensor",
"torch.cumsum",
"numpy.max",
"torch.nn.functional.interpolate",
"torchvision.transforms.GaussianBlur",
"torchvision.transforms.Normalize",
"torch.no... | [((1354, 1483), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset'], {'batch_size': 'attn_batch_size', 'shuffle': '(False)', 'num_workers': 'num_workers', 'drop_last': '(True)', 'pin_memory': '(True)'}), '(self.dataset, batch_size=attn_batch_size, shuffle=False,\n num_workers=num_workers, drop_last=True, ... |
# Autoencoder using convolutional layers
# Dataset : MNIST
# Requires : PIL, matplotlib
# Inspired by https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
# To compress data : net.encode(data)
# To decompress data : net.decode(data)
# To mutate data : net(data)
import os
import numpy as np
import matplot... | [
"torch.nn.MSELoss",
"matplotlib.pyplot.show",
"torch.nn.ConvTranspose2d",
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.Conv2d",
"os.path.exists",
"numpy.transpose",
"torchvision.transforms.ToPILImage",
"torchvision.utils.make_grid",
"torch.cuda.is_available",
"torch.nn.Linear",
"to... | [((2274, 2295), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2293, 2295), False, 'from torchvision import transforms\n'), ((2306, 2373), 'torchvision.datasets.MNIST', 'MNIST', ([], {'root': 'dataset_dir', 'train': '(True)', 'download': '(True)', 'transform': 'trans'}), '(root=dataset_dir... |
# Copyright (c) 2018 <NAME>
# Copyright (c) 2018 <NAME>
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from phylanx import Phylanx
import numpy as np
@Phylanx
def foo():
local_a = np.array((2, 1))
lo... | [
"numpy.array"
] | [((297, 313), 'numpy.array', 'np.array', (['(2, 1)'], {}), '((2, 1))\n', (305, 313), True, 'import numpy as np\n'), ((364, 381), 'numpy.array', 'np.array', (['(57, 1)'], {}), '((57, 1))\n', (372, 381), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import datetime
from dateutil.tz import tzutc
def plot_water_levels(station, dates, levels):
"""Task 2E: Plots water level against time"""
#Assign variables
range_high = [station.typical_range[1]]*len(dates)
range_low = [station.ty... | [
"matplotlib.pyplot.title",
"numpy.poly1d",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.polyfit",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xticks",
"numpy.linspace",
"matplotlib.dates.date2num",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tigh... | [((363, 407), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'levels'], {'label': '"""Water Level"""'}), "(dates, levels, label='Water Level')\n", (371, 407), True, 'import matplotlib.pyplot as plt\n'), ((412, 461), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'range_high'], {'label': '"""Typical High"""'}), "(da... |
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt
def Redshift(n0, n1, n2, z1=3.6, z=np.linspace(0,10,num=1001)):
Rlow = np.power((1.0 + z), n1)
Rhigh = np.power((1.0 + z), n2)
rbrk = np.power((1.0 + z1), n1 - n2)
R = Rlow * (z <= z1) + rbrk * Rhigh * (z > z1)
R *= n0 / R[0]
ret... | [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.integrate.quad",
"numpy.power",
"numpy.savetxt",
"numpy.zeros",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"numpy.concatenate"
] | [((365, 385), 'matplotlib.pyplot.plot', 'plt.plot', (['z', 'R', '"""-k"""'], {}), "(z, R, '-k')\n", (373, 385), True, 'import matplotlib.pyplot as plt\n'), ((384, 401), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$z$"""'], {}), "('$z$')\n", (394, 401), True, 'import matplotlib.pyplot as plt\n'), ((403, 434), 'matpl... |
import csv
import os
import logging
import argparse
import random
import collections
import operator
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from p... | [
"numpy.sum",
"argparse.ArgumentParser",
"numpy.random.seed",
"numpy.argmax",
"csv.reader",
"torch.utils.data.RandomSampler",
"pytorch_pretrained_bert.optimization.BertAdam",
"pytorch_pretrained_bert.tokenization.BertTokenizer.from_pretrained",
"seaborn.heatmap",
"torch.cat",
"torch.cuda.device_c... | [((535, 570), 'seaborn.set_context', 'seaborn.set_context', ([], {'context': '"""talk"""'}), "(context='talk')\n", (554, 570), False, 'import seaborn\n'), ((572, 715), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H... |
import numpy as np
import os
import re
import cPickle
class read_cifar10(object):
def __init__(self, data_path=None, is_training=True):
self.data_path = data_path
self.is_training = is_training
def load_data(self):
files = os.listdir(self.data_path)
if self.is_training is True:
pattern = ... | [
"cPickle.load",
"numpy.hstack",
"os.listdir",
"numpy.vstack",
"re.compile"
] | [((243, 269), 'os.listdir', 'os.listdir', (['self.data_path'], {}), '(self.data_path)\n', (253, 269), False, 'import os\n'), ((320, 348), 're.compile', 're.compile', (['"""(data_batch_)."""'], {}), "('(data_batch_).')\n", (330, 348), False, 'import re\n'), ((655, 670), 'numpy.vstack', 'np.vstack', (['data'], {}), '(dat... |
import ast
from collections import OrderedDict
from .codegen import to_source
from .function_compiler_ast import timeshift, StandardizeDatesSimple
from dolo.compiler.recipes import recipes
from numba import njit
class NumericModel:
calibration = None
calibration_dict = None
covariances = None
markov_c... | [
"dolo.compiler.function_compiler_ast.compile_function_ast",
"dolo.algos.dtmscc.steady_state.residuals",
"dolo.misc.termcolor.colored",
"dolo.compiler.eval_formula.eval_formula",
"dolo.compiler.misc.calibration_to_vector",
"dolo.compiler.triangular_solver.solve_triangular_system",
"numpy.array",
"dolo.... | [((11083, 11113), 're.compile', 're.compile', (['"""(.*)<=(.*)<=(.*)"""'], {}), "('(.*)<=(.*)<=(.*)')\n", (11093, 11113), False, 'import re\n'), ((1256, 1287), 'dolo.compiler.triangular_solver.solve_triangular_system', 'solve_triangular_system', (['system'], {}), '(system)\n', (1279, 1287), False, 'from dolo.compiler.t... |
# load in data
import helper
import numpy as np
import torch
import torch.nn as nn
from string import punctuation
from collections import Counter
from torch.utils.data import TensorDataset, DataLoader
data_dir = './data/Seinfeld_Scripts.txt'
text = helper.load_data(data_dir)
# Check for a GPU
train_on_gpu = torch.cu... | [
"torch.nn.Dropout",
"torch.nn.Embedding",
"numpy.full",
"torch.utils.data.DataLoader",
"helper.save_model",
"helper.load_preprocess",
"torch.nn.Linear",
"collections.Counter",
"torch.nn.LSTM",
"numpy.average",
"numpy.roll",
"torch.cuda.is_available",
"torch.from_numpy",
"helper.load_data",... | [((251, 277), 'helper.load_data', 'helper.load_data', (['data_dir'], {}), '(data_dir)\n', (267, 277), False, 'import helper\n'), ((312, 337), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (335, 337), False, 'import torch\n'), ((1432, 1509), 'helper.preprocess_and_save_data', 'helper.preprocess... |
# -*- coding: utf-8 -*-
"""Next-Word Prediction using Universal Sentence Encoder.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1r2ma5P7w2LE30L1o5mAyNPLE7Qi3JxoL
# **Google drive for local storage**
_NB: All comments are written to facilitate s... | [
"tensorflow_hub.load",
"numpy.save",
"numpy.argmax",
"gdown.download",
"sklearn.model_selection.train_test_split",
"keras.callbacks.LambdaCallback",
"keras.layers.Dense",
"numpy.array",
"google.colab.drive.mount",
"keras.models.Sequential"
] | [((690, 712), 'google.colab.drive.mount', 'drive.mount', (['"""/gdrive"""'], {}), "('/gdrive')\n", (701, 712), False, 'from google.colab import drive\n'), ((1704, 1744), 'gdown.download', 'gdown.download', (['url', 'output'], {'quiet': '(False)'}), '(url, output, quiet=False)\n', (1718, 1744), False, 'import gdown\n'),... |
import argparse
import os
from scipy.special import erf
from scipy.stats import truncnorm
import numpy as np
import data
def build_vector_cache(glove_filename, vec_cache_filename, vocab):
print("Building vector cache...")
with open(glove_filename) as f, open(vec_cache_filename, "w") as f2:
for line i... | [
"numpy.sum",
"data.Configs.base_config",
"numpy.ceil",
"scipy.stats.truncnorm",
"numpy.floor",
"numpy.zeros",
"data.Configs.sick_config",
"os.path.dirname",
"numpy.arange",
"numpy.exp",
"numpy.sign",
"os.path.join",
"numpy.sqrt"
] | [((952, 978), 'numpy.sign', 'np.sign', (['(tgt_loc - (b - a))'], {}), '(tgt_loc - (b - a))\n', (959, 978), True, 'import numpy as np\n'), ((1065, 1128), 'scipy.stats.truncnorm', 'truncnorm', (['((a - x) / sigma)', '((b - x) / sigma)'], {'loc': 'x', 'scale': 'sigma'}), '((a - x) / sigma, (b - x) / sigma, loc=x, scale=si... |
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
pipelines = pd.read_csv('OntoGasGrid/pipeline_owl_generator/pipeline_split.csv').to_numpy()
offtakes = pd.read_csv('OntoGasGrid/grid_component_owl_generator/grid_component_data.csv').to_numpy()
n_offt = len(offtakes[:,0... | [
"pandas.read_csv",
"numpy.zeros",
"pandas.DataFrame",
"numpy.sqrt"
] | [((373, 408), 'numpy.zeros', 'np.zeros', (['(n_offt, 2)'], {'dtype': 'object'}), '((n_offt, 2), dtype=object)\n', (381, 408), True, 'import numpy as np\n'), ((110, 178), 'pandas.read_csv', 'pd.read_csv', (['"""OntoGasGrid/pipeline_owl_generator/pipeline_split.csv"""'], {}), "('OntoGasGrid/pipeline_owl_generator/pipelin... |
from random import shuffle
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import load_iris
import numpy as np
iris = load_iris()
print(type(iris), len(iris.data))
def test1():
XY = np.array(zip(iris.data, iris.target))
np.random.shuffl... | [
"sklearn.datasets.load_iris",
"numpy.mean",
"numpy.random.shuffle",
"sklearn.ensemble.RandomForestRegressor"
] | [((192, 203), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (201, 203), False, 'from sklearn.datasets import load_iris\n'), ((304, 325), 'numpy.random.shuffle', 'np.random.shuffle', (['XY'], {}), '(XY)\n', (321, 325), True, 'import numpy as np\n'), ((628, 651), 'sklearn.ensemble.RandomForestRegressor', '... |
import os
import argparse
import datetime
import numpy as np
from glob import glob
from typing import List, Set, Tuple
"""
Author: <NAME> (<EMAIL>)
Computes character-level Cohen's kappa and percentage
agreement for a set of brat annotated files from two
annotators for a sequence labeling task (e.g. NER).
"""
clas... | [
"numpy.sum",
"os.path.basename",
"numpy.std",
"numpy.zeros",
"datetime.datetime.now",
"numpy.isclose",
"numpy.mean",
"numpy.max",
"numpy.min",
"numpy.arange",
"glob.glob",
"os.path.join",
"doctest.testmod"
] | [((8150, 8166), 'numpy.zeros', 'np.zeros', (['n_docs'], {}), '(n_docs)\n', (8158, 8166), True, 'import numpy as np\n'), ((8180, 8196), 'numpy.zeros', 'np.zeros', (['n_docs'], {}), '(n_docs)\n', (8188, 8196), True, 'import numpy as np\n'), ((11195, 11235), 'numpy.isclose', 'np.isclose', (['kappas[0]', '(0.629)'], {'atol... |
import datetime
import os
import keras
import numpy as np
import pandas as pd
from base_model import BaseModel
from multivariate_container import MultivariateContainer
from typing import Union
class MultivariateLSTM(BaseModel):
def __init__(
self,
container: MultivariateContainer,
... | [
"numpy.stack",
"pandas.DataFrame",
"keras.Model",
"keras.Sequential",
"keras.layers.LSTM",
"numpy.transpose",
"os.system",
"keras.utils.plot_model",
"keras.models.model_from_json",
"keras.layers.Dense",
"numpy.array",
"keras.layers.Input",
"keras.utils.print_summary",
"datetime.datetime.no... | [((1417, 1518), 'keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(self.time_steps, self.num_fea)', 'dtype': '"""float32"""', 'name': '"""input_sequence"""'}), "(shape=(self.time_steps, self.num_fea), dtype='float32',\n name='input_sequence')\n", (1435, 1518), False, 'import keras\n'), ((2002, 2057), 'kera... |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv('medals_data.csv')
df[['Gold','Silver','Bronze']].plot(kind='bar',stacked=True)
plt.title('India Olympics Medal')
plt.xlabel('Years')
plt.ylabel('Medals')
n = len(df['Games'])
labels = df.Games.str.slice(0,4)
plt.xticks(np.arange... | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"pandas.read_csv",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((78, 108), 'pandas.read_csv', 'pd.read_csv', (['"""medals_data.csv"""'], {}), "('medals_data.csv')\n", (89, 108), True, 'import pandas as pd\n'), ((171, 204), 'matplotlib.pyplot.title', 'plt.title', (['"""India Olympics Medal"""'], {}), "('India Olympics Medal')\n", (180, 204), True, 'import matplotlib.pyplot as plt\... |
'''
This code was written by following the following tutorial:
Link: https://medium.com/@martinpella/how-to-use-pre-trained-word-embeddings-in-pytorch-71ca59249f76
This script processes and generates GloVe embeddings
'''
# coding: utf-8
import pickle
from preprocess import Vocabulary
import numpy as np
import json
fr... | [
"numpy.zeros",
"pickle.load",
"numpy.array",
"numpy.random.normal",
"bcolz.open"
] | [((1372, 1399), 'numpy.zeros', 'np.zeros', (['(matrix_len, 300)'], {}), '((matrix_len, 300))\n', (1380, 1399), True, 'import numpy as np\n'), ((411, 422), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (419, 422), True, 'import numpy as np\n'), ((1039, 1053), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (105... |
#!/usr/bin/env python3.5
import os
import dlib
import numpy as np
import cv2
import time
import darknet
from ctypes import *
import math
import random
class YOLO_NN:
def __init__(self, yoloDataFolder):
self.configPath = yoloDataFolder + "/cfg/yolov3-tiny.cfg"
self.weightPath = yoloDataFolder + "/... | [
"numpy.linalg.norm",
"cv2.rectangle",
"darknet.network_height",
"dlib.shape_predictor",
"cv2.imshow",
"os.path.abspath",
"cv2.cvtColor",
"os.path.exists",
"cv2.destroyAllWindows",
"re.search",
"cv2.waitKey",
"cv2.addWeighted",
"dlib.face_recognition_model_v1",
"dlib.get_frontal_face_detect... | [((7846, 7865), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(1)'], {}), '(1)\n', (7862, 7865), False, 'import cv2\n'), ((8035, 8120), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (["(rn.data_dir + '/dlib/haarcascade_frontalface_default.xml')"], {}), "(rn.data_dir + '/dlib/haarcascade_frontalface_default.xml'\n )... |
import numpy as np
image_dimensions = (25, 6)
def load(image_dims, path: str = "input/08.txt"):
with open(path) as file:
return np.array([c for c in file.read()]).reshape((-1, image_dims[0] * image_dims[1]))
def number_of_values_in_layer(layer, value):
return np.count_nonzero(layer == value)
def ... | [
"numpy.array",
"numpy.count_nonzero"
] | [((281, 313), 'numpy.count_nonzero', 'np.count_nonzero', (['(layer == value)'], {}), '(layer == value)\n', (297, 313), True, 'import numpy as np\n'), ((590, 611), 'numpy.array', 'np.array', (['final_layer'], {}), '(final_layer)\n', (598, 611), True, 'import numpy as np\n')] |
import argparse
import numpy as np
from benchmark_statistics import Statistics
from benchmark_containers import BenchmarkResultsContainer
##############################################################################
def createBenchmarkResults(benchmark_samples, operation):
benchmark_results = BenchmarkResults... | [
"benchmark_containers.BenchmarkResultsContainer",
"benchmark_statistics.Statistics.getTukeyFences",
"benchmark_statistics.Statistics.getStdErr",
"argparse.ArgumentParser",
"benchmark_statistics.Statistics.getKurtosis",
"benchmark_statistics.Statistics.getIQR",
"numpy.fromfile",
"benchmark_statistics.S... | [((304, 331), 'benchmark_containers.BenchmarkResultsContainer', 'BenchmarkResultsContainer', ([], {}), '()\n', (329, 331), False, 'from benchmark_containers import BenchmarkResultsContainer\n'), ((439, 483), 'benchmark_statistics.Statistics.getTukeyFences', 'Statistics.getTukeyFences', (['benchmark_samples'], {}), '(be... |
import torch
import numpy as np
from torch.utils.data import DataLoader
from torchvision import transforms
from data_loader.datasets_custom import TextImageDataset, COCOTextImageDataset
from base import BaseDataLoader
def text_image_collate_fn(data):
collate_data = {}
# Sort a data list by right caption lengt... | [
"numpy.stack",
"data_loader.datasets_custom.COCOTextImageDataset",
"torch.stack",
"torchvision.transforms.RandomHorizontalFlip",
"torch.LongTensor",
"data_loader.datasets_custom.TextImageDataset",
"torchvision.transforms.Normalize",
"torch.tensor",
"torchvision.transforms.ToTensor"
] | [((2042, 2081), 'torch.LongTensor', 'torch.LongTensor', (['right_caption_lengths'], {}), '(right_caption_lengths)\n', (2058, 2081), False, 'import torch\n'), ((2567, 2606), 'torch.LongTensor', 'torch.LongTensor', (['wrong_caption_lengths'], {}), '(wrong_caption_lengths)\n', (2583, 2606), False, 'import torch\n'), ((289... |
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import tensorflow as tf
from keras.models import model_from_json
import json
from sklearn.metrics import roc_curve, auc, confusion_matrix
import numpy as np
import pandas as pd
from copy import deepcop... | [
"copy.deepcopy",
"json.load",
"matplotlib.pyplot.show",
"utils.load_data",
"sklearn.metrics.roc_curve",
"tensorflow.keras.backend.clear_session",
"sklearn.metrics.auc",
"matplotlib.pyplot.figure",
"keras.models.model_from_json",
"numpy.arange",
"numpy.interp",
"sklearn.metrics.confusion_matrix... | [((669, 703), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (684, 703), False, 'from keras.models import model_from_json\n'), ((948, 980), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (978, 980), True, 'import te... |
import numpy as np
import nimfa
V = np.random.rand(40, 100)
nmf = nimfa.Nmf(V, seed="nndsvd", rank=10, max_iter=12, update='euclidean',
objective='fro')
nmf_fit = nmf()
| [
"numpy.random.rand",
"nimfa.Nmf"
] | [((38, 61), 'numpy.random.rand', 'np.random.rand', (['(40)', '(100)'], {}), '(40, 100)\n', (52, 61), True, 'import numpy as np\n'), ((68, 158), 'nimfa.Nmf', 'nimfa.Nmf', (['V'], {'seed': '"""nndsvd"""', 'rank': '(10)', 'max_iter': '(12)', 'update': '"""euclidean"""', 'objective': '"""fro"""'}), "(V, seed='nndsvd', rank... |
import numpy as np
import pandas as pd
from sklearn import model_selection
import tensorflow as tf
from pathlib import Path
"""
<NAME>, WAK2116, ELEN-E6889, Spring 2019
Final Project
This python file trains a neural network that predicts an activity level
based on a jpg image from a traffic camera
... | [
"tensorflow.image.crop_to_bounding_box",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.empty",
"tensorflow.Session",
"numpy.expand_d... | [((568, 608), 'pandas.read_csv', 'pd.read_csv', (['"""./labeled_data/labels.txt"""'], {}), "('./labeled_data/labels.txt')\n", (579, 608), True, 'import pandas as pd\n'), ((642, 693), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['df'], {'test_size': '(0.1)'}), '(df, test_size=0.1)\n'... |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 6 18:22:04 2011
@author: -
"""
import os
import numpy
from matplotlib import pyplot
from neuronpy.graphics import spikeplot
from bulbspikes import *
from neuronpy.util import spiketrain
from params import sim_var
homedir = os.path.join(os.path.relpath('..'))
analysis_pa... | [
"numpy.multiply",
"numpy.abs",
"numpy.ma.masked_where",
"synweightsnapshot.SynWeightSnapshot",
"neuronpy.graphics.spikeplot.SpikePlot",
"numpy.zeros",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.arange",
"os.path.relpath",
"numpy.linspace",
"numpy.add",
"os.path.join",
... | [((286, 307), 'os.path.relpath', 'os.path.relpath', (['""".."""'], {}), "('..')\n", (301, 307), False, 'import os\n'), ((1748, 1850), 'synweightsnapshot.SynWeightSnapshot', 'synweightsnapshot.SynWeightSnapshot', ([], {'nummit': "sim_var['num_mitral']", 'numgran': "sim_var['num_granule']"}), "(nummit=sim_var['num_mitral... |
import os
from data_loader.data_generator import DataGenerator
from models.invariant_basic import invariant_basic
from trainers.trainer import Trainer
from Utils.config import process_config
from Utils.dirs import create_dirs
from Utils import doc_utils
from Utils.utils import get_args
from data_loader import data_help... | [
"Utils.dirs.create_dirs",
"Utils.config.process_config",
"pandas.read_csv",
"tensorflow.compat.v1.set_random_seed",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.reset_default_graph",
"numpy.mean",
"data_loader.data_generator.DataGenerator",
"numpy.array",
"tensorflow.compat.v1.ConfigProto... | [((434, 547), 'Utils.config.process_config', 'process_config', (['"""/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json"""'], {}), "(\n '/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json'\n )\n", (448, 547), False, 'from Utils.config import pr... |
"""Methods used to build ROC."""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import roc_curve, auc
# seaborn settings
sns.set_style("white")
sns.set_context("paper")
color_palette = sns.color_palette("colorblind")
sns.set_palette(color_palette)
d... | [
"matplotlib.pyplot.title",
"seaborn.set_style",
"matplotlib.pyplot.xlim",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.ylim",
"seaborn.tsplot",
"numpy.zeros",
"sklearn.metrics.auc",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.linspace",
"seaborn.color_palette",
"numpy.interp",
"mat... | [((190, 212), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (203, 212), True, 'import seaborn as sns\n'), ((213, 237), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {}), "('paper')\n", (228, 237), True, 'import seaborn as sns\n'), ((254, 285), 'seaborn.color_palette', 'sns.co... |
import numpy as np
import scipy.sparse
import akg
from akg import tvm
from akg import topi
from tests.common.base import get_rtol_atol
from tests.common.gen_random import random_gaussian
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from akg.utils.result_analysis import ta... | [
"tests.common.tensorio.compare_tensor",
"akg.tvm.context",
"tests.common.gen_random.random_gaussian",
"tests.common.base.get_rtol_atol",
"akg.tvm.ir_builder.create",
"akg.utils.dsl_create.get_broadcast_shape",
"numpy.zeros",
"akg.utils.format_transform.get_shape",
"akg.tvm.decl_buffer",
"akg.utils... | [((753, 775), 'akg.utils.format_transform.get_shape', 'get_shape', (['dense.shape'], {}), '(dense.shape)\n', (762, 775), False, 'from akg.utils.format_transform import to_tvm_nd_array, get_shape\n'), ((795, 811), 'akg.utils.format_transform.get_shape', 'get_shape', (['shape'], {}), '(shape)\n', (804, 811), False, 'from... |
# encoding: utf-8
"""
Input/output package.
"""
from __future__ import absolute_import, division, print_function
import io as _io
import contextlib
import numpy as np
from .audio import load_audio_file
from .midi import load_midi, write_midi
from ..utils import suppress_warnings, string_types
ENCODING = 'utf8'
#... | [
"numpy.sum",
"numpy.ones_like",
"numpy.any",
"numpy.append",
"numpy.array",
"numpy.loadtxt",
"io.open",
"warnings.warn",
"numpy.vstack"
] | [((1773, 1802), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'ndmin': '(2)'}), '(filename, ndmin=2)\n', (1783, 1802), True, 'import numpy as np\n'), ((2590, 2606), 'numpy.array', 'np.array', (['events'], {}), '(events)\n', (2598, 2606), True, 'import numpy as np\n'), ((3721, 3750), 'numpy.loadtxt', 'np.loadtxt', (['f... |
import os
from vibration_compensation import read_gcode, Data
import pytest
from numpy.testing import *
import numpy as np
import scipy as sp
import vibration_compensation.bokeh_imports as plt
@pytest.fixture(scope="module")
def figures():
path, filename = os.path.split(os.path.realpath(__file__))
... | [
"numpy.sum",
"os.makedirs",
"vibration_compensation.read_gcode",
"os.path.realpath",
"pytest.fixture",
"vibration_compensation.bokeh_imports.Figure",
"numpy.linalg.norm",
"os.path.splitext",
"vibration_compensation.bokeh_imports.save",
"pytest.approx",
"os.path.join"
] | [((205, 235), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (219, 235), False, 'import pytest\n'), ((641, 673), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (655, 673), False, 'import pytest\n'), ((328, 356), 'os.path.join', ... |
#!/usr/bin/env python
import numpy as np
import copy
import rospy
import rospkg
import rosparam
import threading
import argparse
from geometry_msgs.msg import Vector3
from std_msgs.msg import Header, Float64
from sub8_msgs.msg import Thrust, ThrusterStatus
from mil_ros_tools import wait_for_param, thread_lock, numpy_to... | [
"geometry_msgs.msg.Vector3",
"rosparam.load_file",
"rospy.Subscriber",
"argparse.ArgumentParser",
"mil_ros_tools.thread_lock",
"std_msgs.msg.Float64",
"numpy.mean",
"numpy.isclose",
"rospy.get_name",
"mil_ros_tools.numpy_to_point",
"ros_alarms.AlarmBroadcaster",
"rospy.Duration",
"rospy.logw... | [((533, 549), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (547, 549), False, 'import threading\n'), ((3696, 3712), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (3710, 3712), False, 'import rospy\n'), ((5932, 5949), 'mil_ros_tools.thread_lock', 'thread_lock', (['lock'], {}), '(lock)\n', (5943, 5949), ... |
import torch
import numpy as np
from torch.autograd import Variable
import torch.optim as optim
import argparse
import random
import os
import models
import torchvision.utils as vutils
import utils
import dataLoader
from torch.utils.data import DataLoader
parser = argparse.ArgumentParser()
# The locationi of training ... | [
"random.randint",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.manual_seed",
"torch.FloatTensor",
"os.system",
"numpy.ones",
"torch.cat",
"models.globalIllumination",
"utils.writeErrToFile",
"utils.turnErrorIntoNumpy",
"numpy.mean",
"random.seed",
"torch.cuda.is_availab... | [((266, 291), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (289, 291), False, 'import argparse\n'), ((1842, 1882), 'os.system', 'os.system', (["('cp *.py %s' % opt.experiment)"], {}), "('cp *.py %s' % opt.experiment)\n", (1851, 1882), False, 'import os\n'), ((1970, 1994), 'random.randint', 'r... |
import numpy
""" Utility variables and functions
"""
aa2au = 1.8897261249935897 # bohr / AA
# converts nuclear charge to atom label
Z2LABEL = {
1: 'H', 2: 'He',
3: 'Li', 4: 'Be', 5: 'B', 6: 'C', 7: 'N', 8: 'O', 9: 'F', 10: 'Ne',
11: 'NA', 12: 'Mg... | [
"numpy.abs"
] | [((2360, 2372), 'numpy.abs', 'numpy.abs', (['a'], {}), '(a)\n', (2369, 2372), False, 'import numpy\n'), ((2851, 2863), 'numpy.abs', 'numpy.abs', (['a'], {}), '(a)\n', (2860, 2863), False, 'import numpy\n')] |
from __future__ import print_function, division
import os
from os.path import exists
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from collections import OrderedDict
from lib.model import ImMatchNet
from lib.pf_willow_dataset import PFDataset
from lib.normaliza... | [
"lib.torch_util.BatchTensorToVars",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"os.path.join",
"lib.normalization.NormalizeImageDict",
"numpy.isnan",
"lib.point_tnf.corr_to_matches",
"numpy.mean",
"torch.cuda.is_available",
"lib.model.ImMatchNet",
"lib.eval_util.pck_metric"
] | [((656, 681), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (679, 681), False, 'import torch\n'), ((711, 775), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute PF Willow matches"""'}), "(description='Compute PF Willow matches')\n", (734, 775), False, 'impo... |
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.axislines import SubplotZero
import numpy as np
import cyllene.f_functionclass as f_funct
import sympy as sp
'''
A lot of problems need to be resolved:
1)Can we keep a record of the graphs graphed? this can be done by just keeping the numpy arrays ?
2)w... | [
"mpl_toolkits.axes_grid.axislines.SubplotZero",
"sympy.Interval",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.arange"
] | [((486, 499), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (496, 499), True, 'import matplotlib.pyplot as plt\n'), ((512, 538), 'mpl_toolkits.axes_grid.axislines.SubplotZero', 'SubplotZero', (['self.fig', '(111)'], {}), '(self.fig, 111)\n', (523, 538), False, 'from mpl_toolkits.axes_grid.axislines ... |
import sc_utils
import model_factory
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
INPUT_LENGTH = 100
# Prepare data
X_train, Y_train, X_test, Y_test = sc_utils.load_data()
X_train, Y_train, X_val, Y_val, X_test, Y_test, tokenizer = sc_utils.p... | [
"keras.preprocessing.sequence.pad_sequences",
"sc_utils.load_data",
"sc_utils.create_embedding_matrix",
"numpy.array",
"model_factory.create_rnn_model",
"sc_utils.preprocess_data"
] | [((229, 249), 'sc_utils.load_data', 'sc_utils.load_data', ([], {}), '()\n', (247, 249), False, 'import sc_utils\n'), ((310, 382), 'sc_utils.preprocess_data', 'sc_utils.preprocess_data', (['X_train', 'Y_train', 'X_test', 'Y_test', 'INPUT_LENGTH'], {}), '(X_train, Y_train, X_test, Y_test, INPUT_LENGTH)\n', (334, 382), Fa... |
#%%
import os
import pickle
import cloudpickle
import itertools
import glob
import numpy as np
import scipy as sp
import pandas as pd
import git
# Import matplotlib stuff for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
# Seaborn, useful for graphics
import seaborn as s... | [
"pandas.DataFrame",
"matplotlib.pyplot.hist",
"numpy.argmax",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"git.Repo",
"ccutils.model.log_p_m_unreg",
"ccutils.viz.set_plotting_style",
"numpy.exp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"pi... | [((394, 426), 'ccutils.viz.set_plotting_style', 'ccutils.viz.set_plotting_style', ([], {}), '()\n', (424, 426), False, 'import ccutils\n'), ((486, 532), 'git.Repo', 'git.Repo', (['"""./"""'], {'search_parent_directories': '(True)'}), "('./', search_parent_directories=True)\n", (494, 532), False, 'import git\n'), ((733,... |
import unittest
import numpy as np
from frozendict import frozendict
from msdm.core.distributions import DictDistribution
from msdm.algorithms import ValueIteration, PolicyIteration, LRTDP
from msdm.tests.domains import Counter, GNTFig6_6, Geometric, VaryingActionNumber, make_russell_norvig_grid
from msdm.domains impor... | [
"unittest.main",
"msdm.algorithms.PolicyIteration",
"msdm.tests.domains.Counter",
"msdm.tests.domains.Geometric",
"msdm.algorithms.ValueIteration",
"msdm.tests.domains.make_russell_norvig_grid",
"msdm.algorithms.LRTDP",
"numpy.isclose",
"msdm.domains.GridWorld",
"msdm.domains.heavenorhell.HeavenOr... | [((5289, 5304), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5302, 5304), False, 'import unittest\n'), ((422, 432), 'msdm.tests.domains.Counter', 'Counter', (['(3)'], {}), '(3)\n', (429, 432), False, 'from msdm.tests.domains import Counter, GNTFig6_6, Geometric, VaryingActionNumber, make_russell_norvig_grid\n')... |
"""
Minimal character-level Vanilla RNN model. Written b_y <NAME> (@karpathy)
BSD License
"""
import numpy as np
import unicodedata
import string
import codecs
# data I/O
data = codecs.open('data/potter.txt', 'r', encoding='utf8', errors='ignore').read()
fake = codecs.open('data/output.txt', 'w', encoding='utf8')
char... | [
"unicodedata.normalize",
"numpy.zeros_like",
"codecs.open",
"numpy.copy",
"numpy.random.randn",
"numpy.log",
"unicodedata.category",
"numpy.zeros",
"numpy.clip",
"numpy.exp",
"numpy.dot",
"numpy.sqrt"
] | [((263, 315), 'codecs.open', 'codecs.open', (['"""data/output.txt"""', '"""w"""'], {'encoding': '"""utf8"""'}), "('data/output.txt', 'w', encoding='utf8')\n", (274, 315), False, 'import codecs\n'), ((1301, 1327), 'numpy.zeros', 'np.zeros', (['(hidden_size, 1)'], {}), '((hidden_size, 1))\n', (1309, 1327), True, 'import ... |
"""Optimization result."""
import warnings
from collections import Counter
from copy import deepcopy
from typing import Sequence, Union
import numpy as np
import pandas as pd
from ..objective import History
from ..problem import Problem
from ..util import assign_clusters, delete_nan_inf
OptimizationResult = Union['... | [
"pandas.DataFrame",
"copy.deepcopy",
"numpy.argmax",
"collections.Counter",
"numpy.isnan",
"numpy.argmin",
"numpy.max",
"numpy.mean",
"numpy.array",
"numpy.min",
"warnings.warn"
] | [((5219, 5238), 'copy.deepcopy', 'deepcopy', (['self.list'], {}), '(self.list)\n', (5227, 5238), False, 'from copy import deepcopy\n'), ((9657, 9674), 'pandas.DataFrame', 'pd.DataFrame', (['lst'], {}), '(lst)\n', (9669, 9674), True, 'import pandas as pd\n'), ((10229, 10359), 'warnings.warn', 'warnings.warn', (['"""get_... |
import numpy as np
import matplotlib.pyplot as plt
from typing import Tuple, Union, TypeVar, Iterable, Dict
from goa import problems
T = TypeVar("T")
def plot_population(
problem: problems.BaseProblem,
X: Union[T, Iterable[T]],
ax: plt.Axes = None,
c: str = "darkblue",
linestyle: str = ":",
... | [
"numpy.subtract",
"numpy.asarray",
"matplotlib.pyplot.figure",
"numpy.cos",
"typing.TypeVar"
] | [((141, 153), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (148, 153), False, 'from typing import Tuple, Union, TypeVar, Iterable, Dict\n'), ((608, 635), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (618, 635), True, 'import matplotlib.pyplot as plt\n'), (... |
import numpy as np
from casim.calculations import word_entropy
def test_word_entropy():
test_arr = np.array([1, 0, 0, 1, 1, 0, 1, 0])
assert np.round(word_entropy(test_arr, 3), decimals=1) == 2.5
| [
"casim.calculations.word_entropy",
"numpy.array"
] | [((105, 139), 'numpy.array', 'np.array', (['[1, 0, 0, 1, 1, 0, 1, 0]'], {}), '([1, 0, 0, 1, 1, 0, 1, 0])\n', (113, 139), True, 'import numpy as np\n'), ((161, 186), 'casim.calculations.word_entropy', 'word_entropy', (['test_arr', '(3)'], {}), '(test_arr, 3)\n', (173, 186), False, 'from casim.calculations import word_en... |
import numpy as np
import matplotlib.pyplot as plt
import glob
from sys import argv
from os.path import exists as file_exists
methods = ['drude', 'c36']
mol1, mol2 = str(argv[1]), str(argv[2])
sysname = mol1+'_'+mol2
def blockavg(x,nblocks=30):
lblock = int(len(x)/nblocks)
m = []
for i in range(nblocks):
... | [
"matplotlib.pyplot.savefig",
"numpy.concatenate",
"numpy.std",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"os.path.exists",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"numpy.loadtxt",
"glob.glob",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib... | [((420, 431), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (428, 431), True, 'import numpy as np\n'), ((1330, 1344), 'numpy.array', 'np.array', (['osmp'], {}), '(osmp)\n', (1338, 1344), True, 'import numpy as np\n'), ((1374, 1386), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1384, 1386), True, 'imp... |
import matplotlib.pyplot as plt
import numpy as np
#returns the binding energy predicted by nuclear liquid drop model
def BE_liquidDrop(N,Z): #N=num of neutrons, Z=num of protons
#num of nucleons
A = N+Z
#physical constants (from Alex's notes, in MeV)
a1 = 15.49
a2 = 17.23
a3 = 0.697
a4 = 22.6
#nuclear li... | [
"numpy.zeros",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show"
] | [((1336, 1392), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mat'], {'interpolation': '"""nearest"""', 'origin': '"""lower"""'}), "(mat, interpolation='nearest', origin='lower')\n", (1346, 1392), True, 'import matplotlib.pyplot as plt\n'), ((1412, 1422), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1420, 14... |
import pytest
from anndata import AnnData
from pandas.testing import assert_frame_equal
import numpy as np
from squidpy.gr import moran, ripley_k, co_occurrence
MORAN_K = "moranI"
def test_ripley_k(adata: AnnData):
"""Check ripley score and shape."""
ripley_k(adata, cluster_key="leiden")
# assert rip... | [
"pandas.testing.assert_frame_equal",
"squidpy.gr.co_occurrence",
"numpy.testing.assert_allclose",
"pytest.raises",
"squidpy.gr.ripley_k",
"numpy.random.choice",
"numpy.array_equal",
"pytest.mark.parametrize",
"squidpy.gr.moran"
] | [((1825, 1866), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_jobs"""', '[1, 2]'], {}), "('n_jobs', [1, 2])\n", (1848, 1866), False, 'import pytest\n'), ((3614, 3679), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('n_jobs', 'n_splits')", '[(1, 2), (2, 2)]'], {}), "(('n_jobs', 'n_splits'), [(... |
import gym
import numpy as np
import torch
import torch.optim as optim
from utils_main import make_env, save_files
from neural_network import ActorCritic
from ppo_method import ppo
from common.multiprocessing_env import SubprocVecEnv
from itertools import count
use_cuda = torch.cuda.is_available()
device = torch.devic... | [
"numpy.stack",
"ppo_method.ppo",
"utils_main.save_files",
"gym.make",
"common.multiprocessing_env.SubprocVecEnv",
"numpy.save",
"neural_network.ActorCritic",
"torch.FloatTensor",
"torch.cat",
"itertools.count",
"numpy.hstack",
"torch.cuda.is_available",
"torch.device",
"utils_main.make_env... | [((274, 299), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (297, 299), False, 'import torch\n'), ((309, 352), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (321, 352), False, 'import torch\n'), ((456, 475), 'common.multiprocess... |
import cotk
from cotk._utils.file_utils import get_resource_file_path
from cotk.dataloader.dataloader import *
from collections import Counter
import numpy as np
from itertools import chain
class Score(DataField):
def get_next(self, dataset):
r"""read text and returns the next label(integer). Note that it... | [
"itertools.chain.from_iterable",
"collections.Counter",
"numpy.max",
"numpy.array"
] | [((15676, 15731), 'numpy.array', 'np.array', (["[self.data[key]['score'][i] for i in indexes]"], {}), "([self.data[key]['score'][i] for i in indexes])\n", (15684, 15731), True, 'import numpy as np\n'), ((15048, 15074), 'numpy.max', 'np.max', (["res['post_length']"], {}), "(res['post_length'])\n", (15054, 15074), True, ... |
import string
import numpy as np
import pandas as pd
import pytest
from plotnine import (ggplot, aes, geom_point, geom_jitter, geom_bar,
geom_col, geom_boxplot, geom_text, geom_rect,
after_stat, position_dodge, position_dodge2,
position_jitter, positio... | [
"plotnine.geom_boxplot",
"plotnine.position_dodge2",
"numpy.ones",
"numpy.arange",
"plotnine.position_dodge",
"plotnine.aes",
"plotnine.position_stack",
"pandas.DataFrame",
"plotnine.position_jitter",
"plotnine.after_stat",
"numpy.random.RandomState",
"plotnine.position_nudge",
"pytest.raise... | [((520, 553), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234567890)'], {}), '(1234567890)\n', (541, 553), True, 'import numpy as np\n'), ((560, 612), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]}"], {}), "({'x': [1, 2, 1, 2], 'y': [1, 1, 2, 2]})\n", (572, 612), True, 'impo... |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import warnings
from math import exp
import numpy as np
def fit_factory(discard=1):
def fit(x, y):
p = np.polyfit(x, y, 1)
v = np.polyval(p, x)
e = np.abs(y - v)
drop_idxs = np.argsort(e)[-... | [
"numpy.abs",
"numpy.log",
"numpy.polyfit",
"numpy.polyval",
"numpy.argsort",
"numpy.diff",
"numpy.array",
"warnings.warn",
"numpy.delete",
"numpy.all"
] | [((210, 229), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (220, 229), True, 'import numpy as np\n'), ((242, 258), 'numpy.polyval', 'np.polyval', (['p', 'x'], {}), '(p, x)\n', (252, 258), True, 'import numpy as np\n'), ((271, 284), 'numpy.abs', 'np.abs', (['(y - v)'], {}), '(y - v)\n', (277, 2... |
#!/usr/bin/env python3
import os, numpy as np, argparse
def relFit(nu, eps): return 7.33972668 * np.power(eps, 1/6.0) / np.sqrt(nu)
def etaFit(nu, eps): return np.power(eps, -0.25) * np.power(nu, 0.75)
def lambdaFit(nu, eps): return 5.35507603 * np.power(eps,-1/6.0) * np.sqrt(nu);
def runspec(nu, eps, run, cs... | [
"argparse.ArgumentParser",
"numpy.power",
"os.system",
"numpy.linspace",
"numpy.log10",
"os.getenv",
"numpy.sqrt"
] | [((762, 779), 'numpy.sqrt', 'np.sqrt', (['(nu / eps)'], {}), '(nu / eps)\n', (769, 779), True, 'import os, numpy as np, argparse\n'), ((1307, 1324), 'numpy.sqrt', 'np.sqrt', (['(nu / eps)'], {}), '(nu / eps)\n', (1314, 1324), True, 'import os, numpy as np, argparse\n'), ((1329, 1496), 'os.system', 'os.system', (['("""e... |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import numpy as np
def find_smallest_positive(alist):
# find first positive value
minpos = -1
for x in alist:
if x > 0:
minpos = x
... | [
"collections.defaultdict",
"numpy.mean"
] | [((3311, 3328), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3322, 3328), False, 'from collections import defaultdict\n'), ((4362, 4382), 'numpy.mean', 'np.mean', (['importances'], {}), '(importances)\n', (4369, 4382), True, 'import numpy as np\n')] |
from __future__ import print_function
from create_tree import *
import numpy as np
import random
DATA_DIR = "../data/"
def curriculum_depth(i, num_examples, max_depth):
curriculum_max_depth= int((max_depth*i)/num_examples)
#print(i, curriculum_max_depth,)
if curriculum_max_depth > 0:
random_depth ... | [
"numpy.random.randint"
] | [((326, 365), 'numpy.random.randint', 'np.random.randint', (['curriculum_max_depth'], {}), '(curriculum_max_depth)\n', (343, 365), True, 'import numpy as np\n'), ((857, 877), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (874, 877), True, 'import numpy as np\n')] |
# Network
import numpy as np
import pandas as pd
import simulator
import random
from igraph import *
import matplotlib.pyplot as plt
class Network():
"""docstring for Network"""
def __init__(self, simulator):
# Genero un grafo random
self.g = Graph.Erdos_Renyi(simulator.num_nodi,simulator.p_link)
# Ini... | [
"pandas.DataFrame",
"numpy.count_nonzero",
"numpy.concatenate",
"numpy.copy",
"matplotlib.pyplot.clf",
"numpy.zeros",
"numpy.nonzero",
"numpy.random.random",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((408, 441), 'numpy.zeros', 'np.zeros', (['(simulator.num_nodi, 1)'], {}), '((simulator.num_nodi, 1))\n', (416, 441), True, 'import numpy as np\n'), ((458, 506), 'numpy.zeros', 'np.zeros', (['(simulator.num_nodi, 1)'], {'dtype': 'np.int8'}), '((simulator.num_nodi, 1), dtype=np.int8)\n', (466, 506), True, 'import numpy... |
# -*- coding: utf-8 -*-
# @Author: wqshen
# @Email: <EMAIL>
# @Date: 2020/6/10 14:43
# @Last Modified by: wqshen
import numpy as np
from logzero import logger
from .point_stat_base import PointStatBase
class ContinuousVariableVerification(PointStatBase):
def __init__(self, forecast=None, obs=None, fcsterr=None,... | [
"numpy.quantile",
"numpy.average",
"numpy.abs",
"numpy.std",
"numpy.corrcoef",
"scipy.stats.spearmanr",
"numpy.isnan",
"numpy.percentile",
"numpy.array",
"logzero.logger.warning",
"scipy.stats.kendalltau"
] | [((1751, 1771), 'numpy.average', 'np.average', (['forecast'], {}), '(forecast)\n', (1761, 1771), True, 'import numpy as np\n'), ((2252, 2267), 'numpy.average', 'np.average', (['obs'], {}), '(obs)\n', (2262, 2267), True, 'import numpy as np\n'), ((2936, 2952), 'numpy.std', 'np.std', (['forecast'], {}), '(forecast)\n', (... |
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
from numpy.random import randint, rand
from sir import *
def SIR_continuous_reinfected(b,k,time,ii,r):
"""
Simulates continuous SIR model
ii = initial percentage of infected
time = Days of simulation
b = proba... | [
"numpy.zeros",
"scipy.integrate.solve_ivp",
"numpy.random.randint",
"numpy.array",
"numpy.int",
"numpy.linspace",
"numpy.random.rand"
] | [((717, 743), 'numpy.linspace', 'np.linspace', (['(0)', 'time', 'time'], {}), '(0, time, time)\n', (728, 743), True, 'import numpy as np\n'), ((755, 827), 'scipy.integrate.solve_ivp', 'solve_ivp', (['SIR', '[0, time]', '[1 - ii, 0, ii]'], {'method': '"""RK45"""', 't_eval': 't_eval'}), "(SIR, [0, time], [1 - ii, 0, ii],... |
"""
==================================================================
Compare LogisticRegression solver with sklearn's liblinear backend
==================================================================
"""
import time
import warnings
import numpy as np
from numpy.linalg import norm
import matplotlib.pyplot as plt
f... | [
"celer.LogisticRegression",
"matplotlib.pyplot.show",
"libsvmdata.fetch_libsvm",
"warnings.filterwarnings",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"time.time",
"matplotlib.pyplot.figure",
"sklearn.linear_model.LogisticRegression",
"numpy.arange",
"numpy.array",
"numpy.linalg.no... | [((427, 498), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""Objective did not converge"""'}), "('ignore', message='Objective did not converge')\n", (450, 498), False, 'import warnings\n'), ((499, 572), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Minibatching utilities."""
import itertools
import operator
import os
import pickle
import numpy as np
import torch
from sklearn.utils import shuffle
from torch.autograd impor... | [
"itertools.chain.from_iterable",
"torch.LongTensor",
"os.path.exists",
"numpy.argsort",
"numpy.mean",
"sklearn.utils.shuffle",
"torch.no_grad",
"os.path.join",
"operator.itemgetter"
] | [((22510, 22525), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (22517, 22525), True, 'import numpy as np\n'), ((9204, 9281), 'sklearn.utils.shuffle', 'shuffle', (["self.src[idx]['data']", "self.trg[idx]['data']"], {'random_state': 'self.seed'}), "(self.src[idx]['data'], self.trg[idx]['data'], random_state=s... |