| code (string, lengths 31–1.05M) | apis (list) | extract_api (string, lengths 97–1.91M) |
|---|---|---|
import numpy as np
def calculate_iou(bboxes1, bboxes2):
"""
This calculates the intersection over union of N bounding boxes
in the form N x [left, top, right, bottom], e.g. for N=2:
>> bb = [[21,34,45,67], [67,120, 89, 190]]
:param bboxes1: np array: N x 4 ground truth bounding boxes
:param bb... | [
"numpy.clip",
"numpy.maximum",
"numpy.minimum"
] | [((770, 810), 'numpy.maximum', 'np.maximum', (['bboxes1[:, 0]', 'bboxes2[:, 0]'], {}), '(bboxes1[:, 0], bboxes2[:, 0])\n', (780, 810), True, 'import numpy as np\n'), ((834, 874), 'numpy.maximum', 'np.maximum', (['bboxes1[:, 1]', 'bboxes2[:, 1]'], {}), '(bboxes1[:, 1], bboxes2[:, 1])\n', (844, 874), True, 'import numpy ... |
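The first row truncates calculate_iou mid-docstring, but its extract_api column pins down the intersection setup (np.maximum over the left and top coordinates). A minimal sketch of how such an IoU routine typically completes, consistent with the listed numpy.maximum/numpy.minimum/numpy.clip calls; everything past the docstring is an assumption, not the row's exact code:

```python
import numpy as np

def calculate_iou(bboxes1, bboxes2):
    # Intersection rectangle: max of lefts/tops, min of rights/bottoms
    left = np.maximum(bboxes1[:, 0], bboxes2[:, 0])
    top = np.maximum(bboxes1[:, 1], bboxes2[:, 1])
    right = np.minimum(bboxes1[:, 2], bboxes2[:, 2])
    bottom = np.minimum(bboxes1[:, 3], bboxes2[:, 3])
    # Clip negative widths/heights to zero (disjoint boxes)
    inter = np.clip(right - left, 0, None) * np.clip(bottom - top, 0, None)
    area1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
    area2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
    return inter / (area1 + area2 - inter)
```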
import h5py
import numpy as np
from code.model import UNetClassifier
def load_dataset(covid_file_path, normal_file_path):
covid = h5py.File(covid_file_path, 'r')['covid']
normal = h5py.File(normal_file_path, 'r')['normal']
all_images = np.expand_dims(np.concatenate([covid, normal]), axis=3)
all_labe... | [
"numpy.arange",
"numpy.concatenate",
"h5py.File"
] | [((325, 386), 'numpy.concatenate', 'np.concatenate', (['[[1] * covid.shape[0], [0] * normal.shape[0]]'], {}), '([[1] * covid.shape[0], [0] * normal.shape[0]])\n', (339, 386), True, 'import numpy as np\n'), ((137, 168), 'h5py.File', 'h5py.File', (['covid_file_path', '"""r"""'], {}), "(covid_file_path, 'r')\n", (146, 168... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import string
from collections import Counter
import numpy as np
import theano
import theano.tensor as T
punctuation = set(string.punctuation)
punctuation.add('\n')
punctuation.add('\t')
punctuation.add(u'’')
punctuation.add(u'‘')
punctuation.add(u'“')
punctuation.add(u'”... | [
"numpy.clip",
"numpy.unique",
"numpy.asarray",
"numpy.max",
"numpy.argsort",
"numpy.percentile"
] | [((1789, 1802), 'numpy.asarray', 'np.asarray', (['Y'], {}), '(Y)\n', (1799, 1802), True, 'import numpy as np\n'), ((1582, 1595), 'numpy.argsort', 'np.argsort', (['v'], {}), '(v)\n', (1592, 1595), True, 'import numpy as np\n'), ((417, 430), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (427, 430), True, 'import n... |
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
import pytest
from oddt.scoring.models import classifiers, regressors
@pytest.mark.filterwarnings('ignore:Stochastic Optimizer')
@pytest.mark.parametrize('cls',
[classifiers.svm(probabil... | [
"oddt.scoring.models.regressors.randomforest",
"numpy.testing.assert_array_almost_equal",
"pytest.mark.filterwarnings",
"numpy.ones",
"oddt.scoring.models.regressors.neuralnetwork",
"pickle.dumps",
"numpy.log",
"oddt.scoring.models.classifiers.neuralnetwork",
"oddt.scoring.models.regressors.svm",
... | [((180, 237), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:Stochastic Optimizer"""'], {}), "('ignore:Stochastic Optimizer')\n", (206, 237), False, 'import pytest\n'), ((559, 577), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (573, 577), True, 'import numpy as np\n'), ((71... |
"""
Test the fits-module by loading a dumped rtfits result and performing
all actions again
"""
import unittest
import numpy as np
import cloudpickle
import matplotlib.pyplot as plt
import copy
import os
class TestDUMPS(unittest.TestCase):
def setUp(self):
self.sig0_dB_path = os.path.dirname(__file__) + ... | [
"cloudpickle.load",
"numpy.allclose",
"numpy.subtract",
"matplotlib.pyplot.close",
"os.path.dirname",
"unittest.main",
"numpy.rad2deg"
] | [((6174, 6189), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6187, 6189), False, 'import unittest\n'), ((521, 543), 'cloudpickle.load', 'cloudpickle.load', (['file'], {}), '(file)\n', (537, 543), False, 'import cloudpickle\n'), ((292, 317), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n... |
#!/usr/bin/python
# coding: utf-8
import numpy as np
import pandas as pd
import mysql.connector
class mysqlTool():
"""
    This is the API to connect with a MySQL database.
"""
def __init__(self,databaseNameString:str,hostAddress:str,userName:str,passWord:str):
self.targetDB = mysql.connector.connect(
host = hostA... | [
"py2neo.Node",
"sqlalchemy.create_engine",
"numpy.isnan",
"pandas.DataFrame",
"py2neo.Relationship",
"pandas.read_sql"
] | [((946, 1006), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {'columns': 'self.targetCursor.column_names'}), '(result, columns=self.targetCursor.column_names)\n', (958, 1006), True, 'import pandas as pd\n'), ((1689, 1749), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {'columns': 'self.targetCursor.column_names'}... |
import numpy as np
import random
N = 10
def null(a, rtol=1e-5):
u, s, v = np.linalg.svd(a)
rank = (s > rtol*s[0]).sum()
return rank, v[rank:].T.copy()
def gen_data(N, noisy=False):
lower = -1
upper = 1
dim = 2
X = np.random.rand(dim, N)*(upper-lower)+lower
while True:
Xsa... | [
"numpy.linalg.svd",
"numpy.all",
"numpy.ones",
"numpy.random.rand"
] | [((82, 98), 'numpy.linalg.svd', 'np.linalg.svd', (['a'], {}), '(a)\n', (95, 98), True, 'import numpy as np\n'), ((535, 544), 'numpy.all', 'np.all', (['y'], {}), '(y)\n', (541, 544), True, 'import numpy as np\n'), ((249, 271), 'numpy.random.rand', 'np.random.rand', (['dim', 'N'], {}), '(dim, N)\n', (263, 271), True, 'im... |
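The null helper in the row above returns the numerical rank and an orthonormal basis of the null space (the rows of v past the rank). A short worked example on a matrix of my own choosing:

```python
import numpy as np

def null(a, rtol=1e-5):
    u, s, v = np.linalg.svd(a)
    rank = (s > rtol * s[0]).sum()
    return rank, v[rank:].T.copy()

A = np.array([[1.0, 2.0], [2.0, 4.0]])  # rank-1 matrix
rank, ns = null(A)
print(rank)    # 1
print(A @ ns)   # ~[[0.], [0.]]: the column of ns spans the null space
```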
#!/usr/bin/env python
import numpy
import itertools
from pymatgen.core.lattice import Lattice
from pymatgen.core.operations import SymmOp
from pymatgen.core.structure import Structure
from crystal import fillcell, tikz_atoms
def dfh(single = True, defect = False):
if defect:
single = False
a = 5.43
... | [
"pymatgen.core.structure.Structure",
"itertools.combinations",
"crystal.fillcell",
"numpy.array",
"pymatgen.core.lattice.Lattice",
"numpy.linalg.norm",
"pymatgen.core.operations.SymmOp.from_rotation_and_translation",
"crystal.tikz_atoms"
] | [((1909, 1924), 'crystal.fillcell', 'fillcell', (['atoms'], {}), '(atoms)\n', (1917, 1924), False, 'from crystal import fillcell, tikz_atoms\n'), ((1949, 1983), 'numpy.array', 'numpy.array', (['[0.625, 0.625, 0.625]'], {}), '([0.625, 0.625, 0.625])\n', (1960, 1983), False, 'import numpy\n'), ((2001, 2038), 'itertools.c... |
# coding: utf-8
"""
@brief test log(time=1s)
"""
import unittest
import pandas
import numpy
from scipy.sparse.linalg import lsqr as sparse_lsqr
from pyquickhelper.pycode import ExtTestCase, ignore_warnings
from pandas_streaming.df import pandas_groupby_nan, numpy_types
class TestPandasHelper(ExtTestCase):
d... | [
"pandas.DataFrame",
"numpy.isnan",
"pandas_streaming.df.numpy_types",
"unittest.main",
"pyquickhelper.pycode.ignore_warnings",
"pandas_streaming.df.pandas_groupby_nan"
] | [((4454, 4482), 'pyquickhelper.pycode.ignore_warnings', 'ignore_warnings', (['UserWarning'], {}), '(UserWarning)\n', (4469, 4482), False, 'from pyquickhelper.pycode import ExtTestCase, ignore_warnings\n'), ((5306, 5321), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5319, 5321), False, 'import unittest\n'), ((27... |
import shutil
import numpy as np
ALL_SYNTHS_LIST = 'synth_imgs.txt'
TRAIN_IMAGES_LIST = 'train_imgs.txt'
VAL_IMAGES_LIST = 'val_imgs.txt'
TEST_IMAGES_LIST = 'test_imgs.txt'
TRAIN_STOP = 342000
VAL_STOP = TRAIN_STOP + 38000
'''
390000 examples: 342000 train and 38000 val (90/10 split on 380000), 10000 test
'''
wi... | [
"shutil.copy",
"numpy.random.permutation"
] | [((428, 465), 'numpy.random.permutation', 'np.random.permutation', (['files.shape[0]'], {}), '(files.shape[0])\n', (449, 465), True, 'import numpy as np\n'), ((556, 594), 'shutil.copy', 'shutil.copy', (['files[i]', '"""./train_imgs/"""'], {}), "(files[i], './train_imgs/')\n", (567, 594), False, 'import shutil\n'), ((60... |
import importlib
import json
import os
import pdb
import sys
import fnet
import pandas as pd
import tifffile
import numpy as np
from fnet.transforms import normalize
import torch  # pearson_loss below uses torch.mean/torch.sum but torch was not imported
def pearson_loss(x, y):
#x = output
#y = target
vx = x - torch.mean(x)
vy = y - torch.mean(y)
cost = torch.sum(vx * vy) / (torch.sq... | [
"fnet.fnet_model.Model",
"importlib.import_module",
"pandas.read_csv",
"numpy.corrcoef",
"tifffile.imread",
"os.path.join",
"numpy.max",
"numpy.array",
"os.path.isdir",
"numpy.min",
"pandas.concat",
"fnet.transforms.normalize"
] | [((1322, 1339), 'numpy.corrcoef', 'np.corrcoef', (['x', 'y'], {}), '(x, y)\n', (1333, 1339), True, 'import numpy as np\n'), ((1503, 1544), 'importlib.import_module', 'importlib.import_module', (["('fnet.' + module)"], {}), "('fnet.' + module)\n", (1526, 1544), False, 'import importlib\n'), ((1552, 1577), 'os.path.isdir... |
import numpy as np
import numpy.random as rand
from functools import reduce
class Network:
def __init__(self, layer_sizes):
# layer_sizes: list of numbers representing number of neurons per layer
# Create a numpy array of biases for each layer except the (first) input layer
self.biases =... | [
"numpy.exp",
"numpy.dot",
"numpy.random.randn"
] | [((322, 338), 'numpy.random.randn', 'rand.randn', (['l', '(1)'], {}), '(l, 1)\n', (332, 338), True, 'import numpy.random as rand\n'), ((539, 555), 'numpy.random.randn', 'rand.randn', (['y', 'x'], {}), '(y, x)\n', (549, 555), True, 'import numpy.random as rand\n'), ((857, 867), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-... |
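The Network row lists numpy.exp, numpy.dot, and numpy.random.randn, the usual ingredients of a sigmoid feedforward pass. Here is a minimal standalone sketch of that pattern; the layer sizes and the forward loop are my assumptions, not the row's code:

```python
import numpy as np
import numpy.random as rand

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

layer_sizes = [3, 4, 2]
# Biases for every layer except the input; weights between adjacent layers
biases = [rand.randn(l, 1) for l in layer_sizes[1:]]
weights = [rand.randn(y, x) for x, y in zip(layer_sizes[:-1], layer_sizes[1:])]

a = rand.randn(3, 1)  # example input column vector
for w, b in zip(weights, biases):
    a = sigmoid(np.dot(w, a) + b)  # one layer of the forward pass
print(a.shape)  # (2, 1)
```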
import collections
import torch
import einops
import cached_property
import padertorch as pt
# loss: torch.Tensor = None,
# losses: dict = None,
# scalars: dict = None,
# histograms: dict = None,
# audios: dict = None,
# images: dict = None,
class ReviewSummary(collections.abc.Mapping):
"""
>>> review_summary... | [
"matplotlib.pyplot.imshow",
"padertorch.summary.spectrogram_to_image",
"matplotlib.pyplot.grid",
"padertorch.data.batch.example_to_numpy",
"padertorch.summary.mask_to_image",
"torch.isfinite",
"einops.rearrange",
"paderbox.visualization.axes_context",
"paderbox.io.play.play",
"numpy.einsum",
"pa... | [((834, 855), 'torch.isfinite', 'torch.isfinite', (['value'], {}), '(value)\n', (848, 855), False, 'import torch\n'), ((1106, 1156), 'padertorch.data.batch.example_to_numpy', 'pt.data.batch.example_to_numpy', (['value'], {'detach': '(True)'}), '(value, detach=True)\n', (1136, 1156), True, 'import padertorch as pt\n'), ... |
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import HuberRegressor
import numpy as np
import pickle
from dataloader import HeadlineDataset
from csv import writer
import os, subprocess
im... | [
"subprocess.run",
"dataloader.HeadlineDataset",
"csv.writer",
"pickle.load",
"sklearn.metrics.mean_squared_error",
"collections.Counter",
"os.chdir",
"numpy.array",
"sklearn.ensemble.GradientBoostingRegressor"
] | [((1300, 1309), 'collections.Counter', 'Counter', ([], {}), '()\n', (1307, 1309), False, 'from collections import Counter\n'), ((1366, 1388), 'dataloader.HeadlineDataset', 'HeadlineDataset', (['"""dev"""'], {}), "('dev')\n", (1381, 1388), False, 'from dataloader import HeadlineDataset\n'), ((1405, 1432), 'dataloader.He... |
# <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
"""
This module contains useful fudge math routines that do not fit into any other module.
"""
from pqu import PQU
from fudg... | [
"brownies.legacy.endl.endl2dmathClasses.endl2dmath",
"numpy.float64",
"fudge.core.utilities.brb.getType",
"brownies.legacy.endl.endl3dmathClasses.endl3dmath"
] | [((390, 408), 'numpy.float64', 'numpy.float64', (['(1.0)'], {}), '(1.0)\n', (403, 408), False, 'import numpy\n'), ((902, 1001), 'brownies.legacy.endl.endl3dmathClasses.endl3dmath', 'endl3dmathClasses.endl3dmath', (['d3'], {'xLabel': 'xLabel', 'yLabel': 'yLabel', 'zLabel': 'zLabel', 'checkDataType': '(0)'}), '(d3, xLabe... |
import keras
import random
import numpy as np
from glob import glob
from keras.models import Model
from keras.utils import np_utils
from keras.models import load_model
import matplotlib.pyplot as plt
import os
import keras.backend as K
import tensorflow as tf
from keras.utils import to_categorical
from tqdm import tqdm... | [
"numpy.ptp",
"keras.backend.gradients",
"keras.utils.to_categorical",
"sys.path.append",
"keras.layers.core.K.set_learning_phase",
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.empty",
"matplotlib.pyplot.yticks",
"glob.glob",
"numpy.abs",
"matplotlib.pyplot.xticks",
"numpy.argmax",
"mat... | [((332, 353), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (347, 353), False, 'import sys\n'), ((8210, 8464), 'keras.models.load_model', 'load_model', (['"""/home/parth/Interpretable_ML/saved_models/SimUnet/model_lrsch.hdf5"""'], {'custom_objects': "{'gen_dice_loss': gen_dice_loss, 'dice_whole_... |
import pytest
grblas = pytest.importorskip("grblas")
from metagraph.tests.util import default_plugin_resolver
from . import RoundTripper
from metagraph.plugins.numpy.types import NumpyMatrixType
from metagraph.plugins.graphblas.types import GrblasMatrixType
import numpy as np
def test_matrix_roundtrip_dense_square(... | [
"numpy.array",
"pytest.importorskip"
] | [((24, 53), 'pytest.importorskip', 'pytest.importorskip', (['"""grblas"""'], {}), "('grblas')\n", (43, 53), False, 'import pytest\n'), ((403, 465), 'numpy.array', 'np.array', (['[[1.1, 2.2, 3.3], [3.3, 3.3, 9.9], [3.3, 0.0, -3.3]]'], {}), '([[1.1, 2.2, 3.3], [3.3, 3.3, 9.9], [3.3, 0.0, -3.3]])\n', (411, 465), True, 'im... |
#!/usr/bin/env python
""" This file is a Python translation of the MATLAB file acm.m
Python version by RDL 29 Mar 2012
Copyright notice from acm.m:
copyright 1996, by <NAME>. For use with the book
"Statistical Digital Signal Processing and Modeling"
(John Wiley & Sons, 1996).
"""
from __future__ import print_fu... | [
"numpy.insert",
"numpy.abs",
"numpy.dot",
"numpy.linalg.lstsq",
"convm.convm"
] | [((995, 1010), 'convm.convm', 'convm', (['x', '(p + 1)'], {}), '(x, p + 1)\n', (1000, 1010), False, 'from convm import convm\n'), ((1117, 1135), 'numpy.insert', 'np.insert', (['a', '(0)', '(1)'], {}), '(a, 0, 1)\n', (1126, 1135), True, 'import numpy as np\n'), ((1195, 1209), 'numpy.dot', 'np.dot', (['err', 'a'], {}), '... |
import numpy as np
class RegularizeOrthogonal(object):
"""
Orthogonal
"""
def __init__(self, coeff_lambda=0.0):
self.coeff_lambda = coeff_lambda
def cost(self, layers):
c = 0.0
for layer in layers:
wt = layer.w.transpose()
for j in range(layer.outp... | [
"numpy.outer",
"numpy.zeros_like"
] | [((795, 812), 'numpy.zeros_like', 'np.zeros_like', (['wt'], {}), '(wt)\n', (808, 812), True, 'import numpy as np\n'), ((1022, 1040), 'numpy.outer', 'np.outer', (['wtj', 'wtj'], {}), '(wtj, wtj)\n', (1030, 1040), True, 'import numpy as np\n')] |
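RegularizeOrthogonal's cost loop is cut off above. As a point of reference, one common way to express an orthogonality penalty on a weight matrix is through the off-diagonal entries of its Gram matrix; this is my own sketch of that idea, not the class's exact computation:

```python
import numpy as np

def orthogonal_penalty(w, coeff_lambda=1e-4):
    # Penalize correlations between weight rows: off-diagonal Gram entries
    gram = w @ w.T
    off_diag = gram - np.diag(np.diag(gram))
    return coeff_lambda * np.sum(off_diag ** 2)
```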
import numpy as np
import pandas as pd
from pandas.util import testing as pdt
import pytest
from spandex import TableFrame
from spandex.io import db_to_df, df_to_db
def test_tableframe(loader):
table = loader.tables.sample.hf_bg
for cache in [False, True]:
tf = TableFrame(table, index_col='gid', cach... | [
"spandex.io.db_to_df",
"numpy.issubdtype",
"spandex.TableFrame",
"pytest.importorskip",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal"
] | [((1216, 1262), 'pytest.importorskip', 'pytest.importorskip', (['"""urbansim.sim.simulation"""'], {}), "('urbansim.sim.simulation')\n", (1235, 1262), False, 'import pytest\n'), ((1366, 1402), 'spandex.TableFrame', 'TableFrame', (['parcels'], {'index_col': '"""gid"""'}), "(parcels, index_col='gid')\n", (1376, 1402), Fal... |
# encoding: UTF-8
from distutils.core import setup
from Cython.Build import cythonize
import numpy
setup(
name = 'crrCython',
ext_modules = cythonize("crrCython.pyx"),
include_dirs = [numpy.get_include()]
)
| [
"Cython.Build.cythonize",
"numpy.get_include"
] | [((146, 172), 'Cython.Build.cythonize', 'cythonize', (['"""crrCython.pyx"""'], {}), "('crrCython.pyx')\n", (155, 172), False, 'from Cython.Build import cythonize\n'), ((192, 211), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (209, 211), False, 'import numpy\n')] |
import pandas as pd
import pandas
import numpy as np
#provide local path
testfile='../input/test.csv'
data = open(testfile).readlines()
sequences={} #(key, value) = (id , sequence)
for i in range(1,len(data)):
line=data[i]
line =line.replace('"','')
line = line[:-1].split(',')
id = int(... | [
"numpy.array",
"numpy.linalg.inv",
"numpy.linalg.det"
] | [((1019, 1030), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (1027, 1030), True, 'import numpy as np\n'), ((1032, 1043), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (1040, 1043), True, 'import numpy as np\n'), ((1067, 1083), 'numpy.linalg.det', 'np.linalg.det', (['A'], {}), '(A)\n', (1080, 1083), True, 'import... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Sample 10^6 particles from anisotropic Hernquist DF.
Created: February 2021
Author: <NAME>
"""
import sys
from emcee import EnsembleSampler as Sampler
import numpy as np
sys.path.append("../src")
from constants import G, M_sun, kpc
from hernquist import calc_DF_aniso... | [
"numpy.savez",
"numpy.sqrt",
"numpy.random.rand",
"numpy.random.choice",
"numpy.where",
"numpy.log",
"emcee.EnsembleSampler",
"numpy.array",
"numpy.linalg.norm",
"hernquist.calc_DF_aniso",
"sys.path.append"
] | [((223, 248), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (238, 248), False, 'import sys\n'), ((1068, 1093), 'hernquist.calc_DF_aniso', 'calc_DF_aniso', (['q', 'p', 'M', 'a'], {}), '(q, p, M, a)\n', (1081, 1093), False, 'from hernquist import calc_DF_aniso\n'), ((2235, 2284), 'emcee.En... |
# coding: utf-8
import os, pickle, csv, json
import subprocess
from typing import NamedTuple, List, TextIO, Tuple, Dict, Optional, Union, Iterable, Hashable
import numpy as np
import pandas as pd
from scipy import stats
from itertools import product, groupby, takewhile
from collections import namedtuple, Counter
impor... | [
"csv.DictReader",
"logging.debug",
"multiprocessing.cpu_count",
"numpy.array",
"scipy.stats.ttest_ind",
"numpy.linalg.norm",
"pandas.api.extensions.register_series_accessor",
"numpy.arange",
"os.path.exists",
"numpy.mean",
"subprocess.Popen",
"subprocess.run",
"itertools.product",
"numpy.d... | [((386, 407), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (400, 407), False, 'import matplotlib\n'), ((5594, 5646), 'pandas.api.extensions.register_series_accessor', 'pd.api.extensions.register_series_accessor', (['"""foldit"""'], {}), "('foldit')\n", (5636, 5646), True, 'import pandas as pd\n... |
import os
import argparse
import numpy as np
import scipy
import imageio
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import graphkke.generate_graphs.graph_generation as graph_generation
import graphkke.generate_graphs.generate_SDE as generate_SDE
parser = argparse.ArgumentParser()
parser.add_... | [
"sklearn.cluster.KMeans",
"graphkke.generate_graphs.generate_SDE.LemonSlice2D",
"argparse.ArgumentParser",
"imageio.imwrite",
"numpy.asarray",
"os.path.join",
"numpy.zeros",
"graphkke.generate_graphs.graph_generation.LemonGraph",
"matplotlib.pyplot.scatter",
"imageio.mimsave",
"scipy.rand",
"m... | [((282, 307), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (305, 307), False, 'import argparse\n'), ((1114, 1130), 'numpy.zeros', 'np.zeros', (['[d, n]'], {}), '([d, n])\n', (1122, 1130), True, 'import numpy as np\n'), ((1246, 1315), 'graphkke.generate_graphs.generate_SDE.LemonSlice2D', 'gene... |
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_finance import candlestick_ohlc
# import matplotlib as mpl then mpl.use('TkAgg')
import pandas as pd
import numpy as np
from datetime import datetime
df = pd.read_csv('BitMEX-OHLCV-1d.csv')
df.columns = ['date', 'open', 'high', 'low', 'clo... | [
"mpl_finance.candlestick_ohlc",
"numpy.reshape",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((236, 270), 'pandas.read_csv', 'pd.read_csv', (['"""BitMEX-OHLCV-1d.csv"""'], {}), "('BitMEX-OHLCV-1d.csv')\n", (247, 270), True, 'import pandas as pd\n'), ((351, 378), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (361, 378), True, 'import matplotlib.pyplot as plt\n')... |
#!/usr/bin/env python
from __future__ import division, print_function
import numpy as np
import rospy
from rospkg.rospack import RosPack
from copy import deepcopy
from tf2_ros import TransformListener, Buffer
from bopt_grasp_quality.srv import bopt, boptResponse
from bayesian_optimization import Random_Explorer
from b... | [
"rospy.logerr",
"tf2_ros.TransformListener",
"rospy.init_node",
"rospy.get_param",
"numpy.array",
"rospy.Time",
"geometry_msgs.msg.PoseStamped",
"rospy.get_name",
"rospy.Duration",
"rospy.sleep",
"bayesian_optimization.opt_nodes.RS_Node"
] | [((486, 499), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (497, 499), False, 'from geometry_msgs.msg import PoseStamped, Pose, Transform\n'), ((1025, 1050), 'rospy.init_node', 'rospy.init_node', (['"""ros_bo"""'], {}), "('ros_bo')\n", (1040, 1050), False, 'import rospy\n'), ((1297, 1337), 'rospy.g... |
#!/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import numpy.linalg as nl
import scipy.linalg as sl
import scipy.stats as ss
import time
aca = np.ascontiguousarray
def nul(n):
return np.zeros((n, n))
def iuc(x, y):
"""
Checks if pair of generalized EVs x,y is inside the unit circle. Here for ... | [
"numpy.abs",
"numpy.eye",
"numpy.linalg.solve",
"numpy.linalg.qr",
"numpy.sqrt",
"numpy.isclose",
"time.sleep",
"numpy.linalg.det",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.empty_like",
"scipy.linalg.ordqz",
"time.time",
"numpy.round",
"numpy.random.shuffle"
] | [((200, 216), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (208, 216), True, 'import numpy as np\n'), ((354, 382), 'numpy.empty_like', 'np.empty_like', (['x'], {'dtype': 'bool'}), '(x, dtype=bool)\n', (367, 382), True, 'import numpy as np\n'), ((709, 737), 'numpy.empty_like', 'np.empty_like', (['x'], {'dt... |
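iuc above is truncated; its extract_api column shows np.empty_like(x, dtype=bool), which matches the sort-callback convention of scipy.linalg.ordqz (select generalized eigenvalues with |x/y| < 1). A plausible completion, offered as an assumption since the row is cut off:

```python
import numpy as np

def iuc(x, y):
    """True where the generalized eigenvalue x/y lies inside the unit circle;
    pairs with y ~ 0 (infinite eigenvalues) are excluded."""
    out = np.empty_like(x, dtype=bool)
    nonzero = ~np.isclose(y, 0)
    out[~nonzero] = False
    out[nonzero] = np.abs(x[nonzero] / y[nonzero]) < 1.0
    return out
```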
import collections
import logging
from event_model import DocumentRouter, RunRouter
import numpy
from matplotlib.backends.backend_qt5agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
import matplotlib
from qtpy.QtWidgets import ( # noqa
QLabel,
QWidget,
QVB... | [
"logging.getLogger",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"traitlets.traitlets.Set",
"qtpy.QtWidgets.QVBoxLayout",
"matplotlib.use",
"qtpy.QtWidgets.QLabel",
"qtpy.QtWidgets.QWidget",
"numpy.asarray",
"traitlets.traitlets.Bool",
"traitlets.traitlets.List",
"collections.defa... | [((562, 586), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (576, 586), False, 'import matplotlib\n'), ((682, 718), 'logging.getLogger', 'logging.getLogger', (['"""bluesky_browser"""'], {}), "('bluesky_browser')\n", (699, 718), False, 'import logging\n'), ((852, 875), 'traitlets.traitlets.... |
#!/usr/bin/env python
# Break up idstr file into separate measid/objectid lists per exposure on /data0
import os
import sys
import numpy as np
import time
from dlnpyutils import utils as dln, db
from astropy.io import fits
import sqlite3
import socket
from argparse import ArgumentParser
def breakup_idstr(dbfile):
... | [
"os.path.exists",
"dlnpyutils.utils.match",
"sys.exit",
"sqlite3.connect",
"argparse.ArgumentParser",
"os.makedirs",
"dlnpyutils.utils.strlen",
"dlnpyutils.utils.create_index",
"numpy.array",
"astropy.io.fits.getdata",
"dlnpyutils.utils.readlines",
"os.path.basename",
"time.time",
"numpy.d... | [((421, 432), 'time.time', 'time.time', ([], {}), '()\n', (430, 432), False, 'import time\n'), ((532, 624), 'astropy.io.fits.getdata', 'fits.getdata', (['"""/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposure_table.fits.gz"""', '(1)'], {}), "(\n '/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposure_table.fits.g... |
import autograd.numpy as anp
import numpy as np
from autograd import value_and_grad
from pymoo.factory import normalize
from pymoo.util.ref_dirs.energy import squared_dist
from pymoo.util.ref_dirs.optimizer import Adam
from pymoo.util.reference_direction import ReferenceDirectionFactory, scale_reference_directions
c... | [
"numpy.abs",
"numpy.eye",
"autograd.numpy.row_stack",
"pymoo.util.ref_dirs.optimizer.Adam",
"numpy.any",
"numpy.array",
"pymoo.util.reference_direction.scale_reference_directions",
"numpy.linspace",
"pymoo.util.ref_dirs.energy.squared_dist",
"autograd.value_and_grad",
"numpy.full"
] | [((3256, 3275), 'autograd.numpy.row_stack', 'anp.row_stack', (['vals'], {}), '(vals)\n', (3269, 3275), True, 'import autograd.numpy as anp\n'), ((3444, 3461), 'numpy.any', 'np.any', (['(D < 1e-12)'], {}), '(D < 1e-12)\n', (3450, 3461), True, 'import numpy as np\n'), ((1252, 1258), 'pymoo.util.ref_dirs.optimizer.Adam', ... |
import numpy as np
import math
import fatpack
import matplotlib.pyplot as plt
import pandas as pd
# Create a function that returns the Goodman correction:
def Goodman_method_correction(M_a,M_m,M_max):
M_u = 1.5*M_max
M_ar = M_a/(1-M_m/M_u)
return M_ar
def Equivalent_bending_moment(M_ar,Neq,m):
P = M_ar... | [
"math.pow",
"numpy.array",
"numpy.max"
] | [((433, 461), 'math.pow', 'math.pow', (['(M_sum / Neq)', '(1 / m)'], {}), '(M_sum / Neq, 1 / m)\n', (441, 461), False, 'import math\n'), ((596, 613), 'numpy.array', 'np.array', (['[Sm, S]'], {}), '([Sm, S])\n', (604, 613), True, 'import numpy as np\n'), ((683, 692), 'numpy.max', 'np.max', (['S'], {}), '(S)\n', (689, 69... |
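The Goodman row computes the corrected amplitude M_ar = M_a / (1 - M_m / M_u) with M_u = 1.5 * M_max; the truncated Equivalent_bending_moment, judging from the math.pow(M_sum / Neq, 1 / m) call in extract_api, is the usual damage-equivalent load. A small worked example with made-up numbers, assuming the standard form M_eq = (sum(M_ar**m) / Neq) ** (1/m):

```python
import numpy as np

def Goodman_method_correction(M_a, M_m, M_max):
    M_u = 1.5 * M_max
    return M_a / (1 - M_m / M_u)

# Illustrative values: amplitude 10, mean 5, maximum 50 (units arbitrary)
M_ar = np.array([Goodman_method_correction(10.0, 5.0, 50.0)])

# Damage-equivalent bending moment for Neq cycles and S-N slope m
Neq, m = 1e7, 10
M_eq = (np.sum(M_ar ** m) / Neq) ** (1 / m)
print(M_ar[0], M_eq)
```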
# %% [Algorithm 1c Loop]
# # MUSHROOMS
# %% [markdown]
# ## Binary Classification
# %% [markdown]
# ### Imports
# %%
import os
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
# %% [markdown]
# ### Load Data
dataset = pd.read_csv(r"C:\User... | [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"math.log",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Dense",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"sklearn.compose.ColumnTransformer",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"tensorflow.keras.layers.InputLay... | [((299, 388), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\yxie367\\\\Documents\\\\GitHub\\\\Mushrooms\\\\DATA\\\\mushrooms.csv"""'], {}), "(\n 'C:\\\\Users\\\\yxie367\\\\Documents\\\\GitHub\\\\Mushrooms\\\\DATA\\\\mushrooms.csv')\n", (310, 388), True, 'import pandas as pd\n'), ((1241, 1260), 'numpy.arange... |
#!/usr/bin/env python
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
from conversion import read_imgs_masks
from os.path import isfile, basename
XERR=0.1
ELINEWIDTH=3
CAPSIZE=5
CAPTHICK=3
FMT='cD'
def harm_plot(ydata, labels, outPrefix, bshell_b):
'... | [
"conversion.read_imgs_masks",
"numpy.mean",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"numpy.ones",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"pandas.read_csv",
"matplotlib.pyplot.plot",
"os.path.isfile",
"matplotlib.pyplot.figure",
"os.path.basen... | [((60, 81), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (74, 81), False, 'import matplotlib\n'), ((553, 568), 'numpy.shape', 'np.shape', (['ydata'], {}), '(ydata)\n', (561, 568), True, 'import numpy as np\n'), ((640, 653), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (650,... |
"""
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish... | [
"rpy2.robjects.pandas2ri.activate",
"rpy2.robjects.pandas2ri.py2ri",
"rpy2.robjects.r.assign",
"rpy2.robjects.numpy2ri.py2ri",
"pandas.merge",
"matplotlib_venn.venn3",
"rpy2.robjects.Formula",
"rpy2.robjects.packages.importr",
"numpy.array",
"pandas.DataFrame",
"warnings.warn",
"pandas.isna",
... | [((1288, 1308), 'rpy2.robjects.pandas2ri.activate', 'pandas2ri.activate', ([], {}), '()\n', (1306, 1308), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((1310, 1329), 'rpy2.robjects.numpy2ri.activate', 'numpy2ri.activate', ([], {}), '()\n', (1327, 1329), False, 'from rpy2.robjects import numpy2ri,... |
"""
Takes the MNIST dataset as input (images and labels separated)
and creates a new dataset only with 0's and 1's
"""
import numpy as np
DATA_PATH = "data/raw/"
OUTPUT_PATH = "data/processed/mnist/"
X = np.loadtxt(DATA_PATH + "mnist2500_X.txt")
labels = np.loadtxt(DATA_PATH + "mnist2500_labels.txt")
X_new = []
labe... | [
"numpy.loadtxt",
"numpy.savetxt"
] | [((206, 247), 'numpy.loadtxt', 'np.loadtxt', (["(DATA_PATH + 'mnist2500_X.txt')"], {}), "(DATA_PATH + 'mnist2500_X.txt')\n", (216, 247), True, 'import numpy as np\n'), ((257, 303), 'numpy.loadtxt', 'np.loadtxt', (["(DATA_PATH + 'mnist2500_labels.txt')"], {}), "(DATA_PATH + 'mnist2500_labels.txt')\n", (267, 303), True, ... |
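The MNIST-subset row is cut off right where the filtering starts. A vectorized, mask-based completion using the same inputs; the output file names are my assumption:

```python
import numpy as np

DATA_PATH = "data/raw/"
OUTPUT_PATH = "data/processed/mnist/"
X = np.loadtxt(DATA_PATH + "mnist2500_X.txt")
labels = np.loadtxt(DATA_PATH + "mnist2500_labels.txt")

# Keep only the digits 0 and 1 (the row's own loop is truncated)
mask = (labels == 0) | (labels == 1)
np.savetxt(OUTPUT_PATH + "mnist_X.txt", X[mask])            # assumed file name
np.savetxt(OUTPUT_PATH + "mnist_labels.txt", labels[mask])  # assumed file name
```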
from pykalman import KalmanFilter
import numpy as np
kf = KalmanFilter(transition_matrices=np.array([[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 1.0],
... | [
"numpy.array",
"numpy.eye"
] | [((899, 939), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (907, 939), True, 'import numpy as np\n'), ((954, 976), 'numpy.eye', 'np.eye', (['(6)'], {'dtype': 'float'}), '(6, dtype=float)\n', (960, 976), True, 'import numpy as np\n'), ((92, 304), 'numpy.array... |
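The pykalman row's 6x6 transition matrix is a constant-velocity model over a state [x, y, z, vx, vy, vz]: each position picks up its velocity component per step. A one-step check of that reading; the bottom identity rows are the conventional completion of the truncated matrix, and the state values are made up:

```python
import numpy as np

F = np.array([[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
              [0.0, 0.0, 1.0, 0.0, 0.0, 1.0],
              [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
              [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
              [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])
state = np.array([0.0, 0.0, 0.0, 1.0, 2.0, 3.0])  # at origin, velocity (1, 2, 3)
print(F @ state)  # [1. 2. 3. 1. 2. 3.]: positions advanced by one velocity step
```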
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.13.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import gzip
import numpy as np
import... | [
"numpy.mean",
"numpy.median",
"gzip.open",
"numpy.percentile",
"matplotlib.pyplot.subplots"
] | [((2511, 2583), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(16, 9)', 'dpi': '(300)', 'squeeze': '(False)', 'sharey': '(True)'}), '(1, 3, figsize=(16, 9), dpi=300, squeeze=False, sharey=True)\n', (2523, 2583), True, 'import matplotlib.pyplot as plt\n'), ((865, 912), 'gzip.open', 'gzip.op... |
import argparse
import os
import os.path as osp
import cv2
import numpy as np
from scipy.stats import multivariate_normal
from scipy.stats import norm
import matplotlib
# matplotlib.use('agg')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import subprocess
import shutil
import chainer
from ch... | [
"net_200x200.Conv_Siam_VAE",
"os.listdir",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"os.path.join",
"numpy.swapaxes",
"os.remove",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.close",
"data_generator.DataGenerator",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.cm.get_cmap",
"... | [((1128, 1166), 'numpy.swapaxes', 'np.swapaxes', (['train_b0[train_ind]', '(1)', '(3)'], {}), '(train_b0[train_ind], 1, 3)\n', (1139, 1166), True, 'import numpy as np\n'), ((1176, 1214), 'numpy.swapaxes', 'np.swapaxes', (['train_b1[train_ind]', '(1)', '(3)'], {}), '(train_b1[train_ind], 1, 3)\n', (1187, 1214), True, 'i... |
# utils/test_kronecker.py
"""Tests for rom_operator_inference.utils._kronecker."""
import pytest
import numpy as np
import rom_operator_inference as opinf
# Index generation for fast self-product kronecker evaluation =================
def test_kron2c_indices(n_tests=100):
"""Test utils._kronecker.kron2c_indices... | [
"rom_operator_inference.utils.kron3c",
"numpy.prod",
"numpy.allclose",
"rom_operator_inference.utils.kron2c",
"numpy.random.random",
"numpy.kron",
"numpy.array",
"numpy.random.randint",
"rom_operator_inference.utils.expand_cubic",
"rom_operator_inference.utils.kron3c_indices",
"numpy.empty_like"... | [((338, 367), 'rom_operator_inference.utils.kron2c_indices', 'opinf.utils.kron2c_indices', (['(4)'], {}), '(4)\n', (364, 367), True, 'import rom_operator_inference as opinf\n'), ((654, 683), 'rom_operator_inference.utils.kron2c_indices', 'opinf.utils.kron2c_indices', (['(3)'], {}), '(3)\n', (680, 683), True, 'import ro... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 1 12:48:08 2020
@author: smith
"""
import spacy
from gensim.test.utils import common_texts, get_tmpfile
from gensim.models import Word2Vec
from gensim.models.phrases import Phrases, Phraser
import os
import multiprocessing
import csv
import re
impo... | [
"logging.basicConfig",
"numpy.mean",
"seaborn.regplot",
"sklearn.decomposition.PCA",
"numpy.set_printoptions",
"numpy.std",
"os.path.join",
"sklearn.manifold.TSNE",
"seaborn.set_style",
"numpy.append",
"numpy.empty",
"pandas.read_excel",
"pandas.DataFrame",
"pandas.concat",
"matplotlib.p... | [((569, 594), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (582, 594), True, 'import seaborn as sns\n'), ((624, 738), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s - %(asctime)s: %(message)s"""', 'datefmt': '"""%H:%M:%S"""', 'level': 'logging.INFO'}... |
#!/usr/bin/env python
# encoding: utf-8
from flask import Flask, request, jsonify
import base64
import numpy as np
from util.args_help import fill_from_args
import os
import logging
from dpr.simple_mmap_dataset import Corpus
from dpr.faiss_index import ANNIndex
logger = logging.getLogger(__name__)
class Options():
... | [
"logging.getLogger",
"logging.basicConfig",
"util.args_help.fill_from_args",
"flask.Flask",
"base64.b64encode",
"os.path.join",
"flask.request.get_json",
"numpy.zeros",
"logging.root.removeHandler",
"flask.jsonify"
] | [((272, 299), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (289, 299), False, 'import logging\n'), ((865, 1029), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(filename)s:%(lineno)d - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': '(logging.INFO if ... |
# Noysim -- Noise simulation tools for Aimsun.
# Copyright (c) 2010-2011 by <NAME>, Ghent University & Griffith University.
#
# Basic geometry functions and classes
import numpy
import pylab
EPSILON = 10e-12 # smallest difference for points/directions
#--------------------------------------------------... | [
"numpy.radians",
"numpy.sqrt",
"pylab.plot",
"numpy.arcsin",
"pylab.figure",
"numpy.arctan2",
"pylab.show"
] | [((6690, 6704), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (6702, 6704), False, 'import pylab\n'), ((3341, 3402), 'numpy.sqrt', 'numpy.sqrt', (['((self.x - other.x) ** 2 + (self.y - other.y) ** 2)'], {}), '((self.x - other.x) ** 2 + (self.y - other.y) ** 2)\n', (3351, 3402), False, 'import numpy\n'), ((4364, 445... |
import itertools
import logging
import netCDF4
import numpy
from .. import core
from ..constants import masked as cfdm_masked
from ..decorators import (
_inplace_enabled,
_inplace_enabled_define_and_cleanup,
_manage_log_level_via_verbosity,
)
from ..functions import abspath
from ..mixin.container import C... | [
"logging.getLogger",
"numpy.prod",
"numpy.ma.getmaskarray",
"numpy.asanyarray",
"numpy.array",
"numpy.ma.is_masked",
"numpy.where",
"numpy.ma.masked_all",
"itertools.product",
"numpy.ndim",
"numpy.ma.masked_where",
"numpy.empty",
"numpy.ma.where",
"numpy.amin",
"numpy.ma.array",
"numpy... | [((412, 439), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (429, 439), False, 'import logging\n'), ((24164, 24184), 'numpy.ma.isMA', 'numpy.ma.isMA', (['array'], {}), '(array)\n', (24177, 24184), False, 'import numpy\n'), ((40415, 40435), 'numpy.ma.isMA', 'numpy.ma.isMA', (['array'], {}... |
#!/usr/bin/env python
# coding:utf8
# -*- coding: utf-8 -*-
"""
Main Program: Run MODIS AGGREGATION IN MPI WITH FLEXIBLE STATISTICS
Created on 2020
@author: <NAME> (Email: <EMAIL>)
"""
import os
import sys
import h5py
import timeit
import random
import calendar
import numpy as np
import pandas as pd
from mpi4py impo... | [
"numpy.where",
"timeit.default_timer",
"random.seed",
"h5py.File",
"numpy.split",
"numpy.arange"
] | [((1212, 1234), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1232, 1234), False, 'import timeit\n'), ((1372, 1389), 'random.seed', 'random.seed', (['rank'], {}), '(rank)\n', (1383, 1389), False, 'import random\n'), ((1484, 1518), 'numpy.arange', 'np.arange', (['(total_file_num + remain)'], {}), '(... |
import torch
import torch.nn as nn
import os
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from .modules import WavePool, WaveUnpool, ImagePool, NLayerDiscriminator
from utils.metrics import compute_dice_metric
from utils.losses import DiceLoss
import numpy as np
class WaveEncoder(nn.Mod... | [
"torch.nn.ReLU",
"os.makedirs",
"numpy.random.random",
"torch.nn.ReflectionPad2d",
"os.path.join",
"torch.nn.Conv2d",
"torch.tensor",
"torch.nn.BCELoss",
"os.path.isdir",
"torch.round",
"utils.losses.DiceLoss"
] | [((479, 500), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1)'], {}), '(1)\n', (497, 500), True, 'import torch.nn as nn\n'), ((521, 542), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (528, 542), True, 'import torch.nn as nn\n'), ((565, 589), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3... |
import kfserving
from typing import List, Union
import numpy as np
class Predictor(): # pylint:disable=too-few-public-methods
def __init__(self, clf: kfserving.KFModel):
self.clf = clf
def predict_fn(self, arr: Union[np.ndarray, List]) -> np.ndarray:
instances = []
for req_data in arr... | [
"numpy.array"
] | [((557, 586), 'numpy.array', 'np.array', (["resp['predictions']"], {}), "(resp['predictions'])\n", (565, 586), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, The Emperor Project"
__credits__ = ["<NAME>"]
__license__ = "BSD"
__version__ = "0.9.3-dev"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from unittest import TestCase, main
from ... | [
"os.close",
"numpy.testing.assert_almost_equal",
"numpy.array",
"unittest.main",
"emperor.parse.parse_coords",
"tempfile.mkstemp"
] | [((2964, 2970), 'unittest.main', 'main', ([], {}), '()\n', (2968, 2970), False, 'from unittest import TestCase, main\n'), ((677, 697), 'emperor.parse.parse_coords', 'parse_coords', (['coords'], {}), '(coords)\n', (689, 697), False, 'from emperor.parse import parse_coords\n'), ((1010, 1049), 'numpy.testing.assert_almost... |
import cv2
import numpy as np
import imutils
from collections import defaultdict
# mouse callback function
def define_points(target_img):
corners = []
refPt = []
def draw_circle(event,x,y,flags,param):
global refPt
if event == cv2.EVENT_LBUTTONDBLCLK:
cv2.circle(param,(x,y),5,(... | [
"numpy.sqrt",
"cv2.imshow",
"numpy.array",
"cv2.warpPerspective",
"cv2.HoughLines",
"cv2.destroyAllWindows",
"numpy.sin",
"cv2.setMouseCallback",
"cv2.line",
"numpy.diff",
"numpy.argmin",
"cv2.waitKey",
"numpy.round",
"numpy.ones",
"cv2.getPerspectiveTransform",
"cv2.kmeans",
"numpy.... | [((429, 453), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (444, 453), False, 'import cv2\n'), ((458, 512), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""image"""', 'draw_circle', 'target_img'], {}), "('image', draw_circle, target_img)\n", (478, 512), False, 'import cv2\n'), ((674... |
import logging as log
import numpy as np
import h5py
import humblerl as hrl
from humblerl import Callback, Interpreter
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Normal
from torch.utils.data import Dataset
from common_utils import get_model_path_if_exists
from third... | [
"numpy.random.rand",
"torch.from_numpy",
"torch.exp",
"torch.softmax",
"numpy.array",
"torch.cuda.is_available",
"torch.sum",
"numpy.take_along_axis",
"logging.info",
"torch.nn.LSTM",
"torch.mean",
"torch.eye",
"torch.distributions.Normal",
"logging.warning",
"h5py.File",
"numpy.squeez... | [((11699, 11724), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11722, 11724), False, 'import torch\n'), ((12676, 12782), 'common_utils.get_model_path_if_exists', 'get_model_path_if_exists', ([], {'path': 'model_path', 'default_path': "rnn_params['ckpt_path']", 'model_name': '"""MDN-RNN"""'})... |
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import rcParams
params = {
# 'text.latex.preamble': ['\\usepackage{gensymb}'],
# 'text.usetex': True,
'font.family': 'Helvetica',
'lines.solid_capstyle'... | [
"seaborn.set_palette",
"matplotlib.rcParams.update",
"pandas.read_csv",
"matplotlib.pyplot.gcf",
"seaborn.set_context",
"numpy.argmax",
"numpy.max",
"seaborn.set_style",
"os.path.realpath",
"seaborn.boxplot",
"seaborn.scatterplot",
"numpy.min",
"pandas.DataFrame",
"matplotlib.pyplot.subplo... | [((365, 388), 'matplotlib.rcParams.update', 'rcParams.update', (['params'], {}), '(params)\n', (380, 388), False, 'from matplotlib import rcParams\n'), ((390, 457), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {'font_scale': '(1.6)', 'rc': "{'lines.linewidth': 2}"}), "('paper', font_scale=1.6, rc={'lines... |
import numpy as np
def split_ids(args, ids, folds=10):
if args.dataset == 'COLORS-3':
assert folds == 1, 'this dataset has train, val and test splits'
train_ids = [np.arange(500)]
val_ids = [np.arange(500, 3000)]
test_ids = [np.arange(3000, 10500)]
elif args.dataset == 'TRIANGL... | [
"numpy.concatenate",
"numpy.array",
"numpy.arange"
] | [((186, 200), 'numpy.arange', 'np.arange', (['(500)'], {}), '(500)\n', (195, 200), True, 'import numpy as np\n'), ((221, 241), 'numpy.arange', 'np.arange', (['(500)', '(3000)'], {}), '(500, 3000)\n', (230, 241), True, 'import numpy as np\n'), ((263, 285), 'numpy.arange', 'np.arange', (['(3000)', '(10500)'], {}), '(3000... |
# coding: utf-8
# - We are creating a very simple machine learning model.
# - Using dataset: tic-tac-toe.data.txt with user-defined columns.
# - We are treating this problem as a supervised learning problem.
# In[74]:
# This the rough sketch of the processing that happened in my brain while creating the... | [
"sklearn.metrics.confusion_matrix",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.where",
"sklearn.metrics.classification_report",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.preprocessin... | [((662, 709), 'pandas.read_csv', 'pd.read_csv', (['"""../tic-tac-toe.data.txt"""'], {'sep': '""","""'}), "('../tic-tac-toe.data.txt', sep=',')\n", (673, 709), True, 'import pandas as pd\n'), ((724, 771), 'pandas.read_csv', 'pd.read_csv', (['"""../tic-tac-toe.data.txt"""'], {'sep': '""","""'}), "('../tic-tac-toe.data.tx... |
"""
Functions to manipulate data from a PostgreSQL database. Includes a
parallelise_dataframe function that runs a function on a pandas data
frame in parallel, as well as a loop_chunks function, which reads a
chunk from the database, performs an operation, and uploads the
result to a new table in the database.
"""
import numpy as np
import pa... | [
"pandas.read_sql_query",
"utils.db_connect.alchemy_input_output_close",
"utils.db_connect.alchemy_input_output_open",
"multiprocessing.cpu_count",
"numpy.array_split",
"multiprocessing.Pool",
"time.time"
] | [((901, 930), 'numpy.array_split', 'np.array_split', (['df', 'num_cores'], {}), '(df, num_cores)\n', (915, 930), True, 'import numpy as np\n'), ((942, 957), 'multiprocessing.Pool', 'Pool', (['num_cores'], {}), '(num_cores)\n', (946, 957), False, 'from multiprocessing import Pool, cpu_count\n'), ((1764, 1802), 'utils.db... |
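The docstring above describes the canonical split/Pool/concat pattern, and the extract_api column confirms np.array_split(df, num_cores) and Pool(num_cores). A self-contained sketch of that pattern; the module's exact signature may differ:

```python
import numpy as np
import pandas as pd
from multiprocessing import Pool, cpu_count

def parallelise_dataframe(df, func, num_cores=None):
    # Split into one chunk per core, map func over the chunks in a
    # process pool, then stitch the results back together.
    num_cores = num_cores or cpu_count()
    df_split = np.array_split(df, num_cores)
    with Pool(num_cores) as pool:
        result = pd.concat(pool.map(func, df_split))  # func must be picklable
    return result
```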
import os
import numpy as np
import urllib
from absl import flags
import tensorflow as tf
import tensorflow_probability as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
flags.DEFINE_float(
"learning_rate", default=0.001, help="Initial learning rate.")
flags.DEFINE_integer(
"epochs", default=100, help="Numb... | [
"tensorflow.shape",
"tensorflow.get_variable",
"tensorflow.contrib.learn.datasets.load_dataset",
"tensorflow.estimator.inputs.numpy_input_fn",
"tensorflow.metrics.mean",
"tensorflow.nn.softplus",
"tensorflow.gfile.MakeDirs",
"tensorflow.reduce_mean",
"absl.flags.DEFINE_float",
"tensorflow.app.run"... | [((174, 260), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""learning_rate"""'], {'default': '(0.001)', 'help': '"""Initial learning rate."""'}), "('learning_rate', default=0.001, help=\n 'Initial learning rate.')\n", (192, 260), False, 'from absl import flags\n'), ((261, 350), 'absl.flags.DEFINE_integer', '... |
import os
import shutil
import argparse
import torch
from torch import nn
from torchvision.utils import save_image, make_grid
import matplotlib.pyplot as plt
import numpy as np
import cv2 as cv
import utils.utils as utils
from utils.constants import *
class GenerationMode(enum.Enum):
SINGLE_IMAGE = 0,
INT... | [
"numpy.sqrt",
"utils.utils.save_and_maybe_display_image",
"numpy.array",
"torch.cuda.is_available",
"numpy.linalg.norm",
"numpy.sin",
"numpy.moveaxis",
"utils.utils.get_available_file_name",
"matplotlib.pyplot.imshow",
"os.path.exists",
"numpy.repeat",
"argparse.ArgumentParser",
"numpy.max",... | [((1093, 1114), 'numpy.min', 'np.min', (['generated_img'], {}), '(generated_img)\n', (1099, 1114), True, 'import numpy as np\n'), ((1136, 1157), 'numpy.max', 'np.max', (['generated_img'], {}), '(generated_img)\n', (1142, 1157), True, 'import numpy as np\n'), ((3254, 3267), 'numpy.sin', 'np.sin', (['omega'], {}), '(omeg... |
"""a module solely for finding how add_a_list and add_tuple_list compare.
it's effectively the empirical proof for how LongIntTable.add() chooses
the fastest method with its get_fastest_method() function."""
from __future__ import print_function
from math import log10
import time
import random
from os import getcwd
... | [
"itertools.cycle",
"time.clock",
"matplotlib.pyplot.ylabel",
"numpy.polyfit",
"random.randrange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.getcwd",
"numpy.array",
"matplotlib.pyplot.figure",
"dicetables.additiveevents.AdditiveEvents",
"numpy.save",
"matplotlib.pyplot.ion",
... | [((5825, 5851), 'math.log10', 'log10', (['events_tuples[0][1]'], {}), '(events_tuples[0][1])\n', (5830, 5851), False, 'from math import log10\n'), ((6730, 6756), 'dicetables.additiveevents.AdditiveEvents', 'AdditiveEvents', (['input_dict'], {}), '(input_dict)\n', (6744, 6756), False, 'from dicetables.additiveevents imp... |
# coding: utf-8
# In[20]:
import numpy as np
import pydensecrf.densecrf as dcrf
import os
import cv2
import random
from tqdm import tqdm
# In[21]:
from skimage.color import gray2rgb
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score, accuracy_score
from pyden... | [
"numpy.ascontiguousarray",
"numpy.array",
"pydensecrf.utils.unary_from_softmax",
"matplotlib.pyplot.imshow",
"pydensecrf.densecrf.DenseCRF2D",
"os.path.exists",
"os.listdir",
"numpy.mean",
"matplotlib.pyplot.close",
"numpy.exp",
"os.mkdir",
"matplotlib.pyplot.axis",
"numpy.maximum",
"numpy... | [((1290, 1309), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))\n', (1298, 1309), True, 'import numpy as np\n'), ((2223, 2239), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (2231, 2239), True, 'import numpy as np\n'), ((2253, 2272), 'numpy.zeros', 'np.zeros', (['(h, w, 2)'], {}), '((h, w, 2))... |
import numpy as np
import matplotlib.pyplot as plt
import os
from pyburst.grids import grid_analyser, grid_strings, grid_tools
# resolution tests
y_factors = {'dt': 3600,
'fluence': 1e39,
'peak': 1e38,
}
y_labels = {'dt': '$\Delta t$',
'rate': 'Burst rate',
... | [
"matplotlib.pyplot.savefig",
"pyburst.grids.grid_tools.enumerate_params",
"numpy.where",
"os.path.join",
"matplotlib.pyplot.close",
"pyburst.grids.grid_strings.plots_path",
"matplotlib.pyplot.tight_layout",
"pyburst.grids.grid_analyser.Kgrid",
"numpy.full",
"matplotlib.pyplot.subplots",
"matplot... | [((1303, 1345), 'pyburst.grids.grid_tools.enumerate_params', 'grid_tools.enumerate_params', (['unique_subset'], {}), '(unique_subset)\n', (1330, 1345), False, 'from pyburst.grids import grid_analyser, grid_strings, grid_tools\n'), ((2387, 2436), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n', '(2)'], {'sharex': '(... |
# Copyright (c) 2020-present, Assistive Robotics Lab
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from transformers.training_utils import fit
from transformers.transformers import (
InferenceTransformerEncoder,
... | [
"torch.manual_seed",
"torch.optim.lr_scheduler.MultiStepLR",
"argparse.ArgumentParser",
"common.logging.logger.info",
"torch.nn.L1Loss",
"torch.cuda.device_count",
"transformers.training_utils.fit",
"torch.nn.DataParallel",
"torch.cuda.is_available",
"common.losses.QuatDistance",
"numpy.random.s... | [((541, 562), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (558, 562), False, 'import torch\n'), ((563, 581), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (577, 581), True, 'import numpy as np\n'), ((833, 858), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '(... |
#!/usr/bin/env nemesis
"""
This script creates a spatial database for the initial stress and state
variables for a Maxwell plane strain material.
"""
sim = "gravity_vardensity"
materials = ["crust","mantle"]
import numpy
import h5py
from spatialdata.spatialdb.SimpleIOAscii import SimpleIOAscii
from spatialdata.geoco... | [
"spatialdata.spatialdb.SimpleIOAscii.SimpleIOAscii",
"h5py.File",
"numpy.array",
"numpy.zeros",
"spatialdata.geocoords.CSCart.CSCart"
] | [((351, 359), 'spatialdata.geocoords.CSCart.CSCart', 'CSCart', ([], {}), '()\n', (357, 359), False, 'from spatialdata.geocoords.CSCart import CSCart\n'), ((659, 903), 'numpy.array', 'numpy.array', (['[[0.62200847, 0.16666667, 0.0446582, 0.16666667], [0.16666667, 0.62200847, \n 0.16666667, 0.0446582], [0.16666667, 0.... |
import inspect
from collections import namedtuple
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split
from sklearn.exceptions import NotFittedError
from uq360.algorithms.posthocuq import PostHocUQ
class MetamodelRegression(PostHocUQ):
"""... | [
"numpy.abs",
"collections.namedtuple",
"numpy.hstack",
"sklearn.model_selection.train_test_split",
"numpy.asarray",
"numpy.expand_dims",
"inspect.isclass",
"sklearn.ensemble.GradientBoostingRegressor"
] | [((6132, 6145), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (6142, 6145), True, 'import numpy as np\n'), ((6158, 6171), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (6168, 6171), True, 'import numpy as np\n'), ((7884, 7916), 'numpy.hstack', 'np.hstack', (['[X, y_hat_meta_prime]'], {}), '([X, y_hat_meta... |
import unittest
from collections import defaultdict
import numpy as np
import pandas as pd
from ife.io.io import ImageReader
class TestMomentFeatures(unittest.TestCase):
def test_moment_output_type(self) -> None:
features = ImageReader.read_from_single_file("ife/data/small_rgb.jpg")
moment = fe... | [
"ife.io.io.ImageReader.read_from_single_file",
"numpy.zeros"
] | [((240, 299), 'ife.io.io.ImageReader.read_from_single_file', 'ImageReader.read_from_single_file', (['"""ife/data/small_rgb.jpg"""'], {}), "('ife/data/small_rgb.jpg')\n", (273, 299), False, 'from ife.io.io import ImageReader\n'), ((945, 1004), 'ife.io.io.ImageReader.read_from_single_file', 'ImageReader.read_from_single_... |
import numpy as np
from math import pi
import torch
from pykeops.torch import LazyTensor
from plyfile import PlyData, PlyElement
from helper import *
import torch.nn as nn
import torch.nn.functional as F
# from matplotlib import pyplot as plt
from pykeops.torch.cluster import grid_cluster, cluster_ranges_centroids, fr... | [
"torch.nn.ReLU",
"numpy.sqrt",
"torch.max",
"math.sqrt",
"torch.sum",
"torch.nn.GroupNorm",
"pyvtk.PointData",
"pyvtk.VtkData",
"pyvtk.CellData",
"torch.zeros_like",
"torch.nn.init.uniform_",
"pykeops.torch.LazyTensor",
"torch.randn",
"torch.ones_like",
"torch.nn.LeakyReLU",
"torch.Ten... | [((2722, 2736), 'pyvtk.VtkData', 'VtkData', (['*data'], {}), '(*data)\n', (2729, 2736), False, 'from pyvtk import PolyData, PointData, CellData, Scalars, Vectors, VtkData, PointData\n'), ((5780, 5805), 'pykeops.torch.LazyTensor', 'LazyTensor', (['x[:, None, :]'], {}), '(x[:, None, :])\n', (5790, 5805), False, 'from pyk... |
from random import randint, seed
import numpy as np
from os import path, mkdir
from maze_utils import generate_grid
seed_number = 69
training_folder = "training"
testing_folder = "testing"
tot_elem_training = 100 # number of matrices to generate
tot_elem_testing = 20 # number of matrices to generate
max_w = 10 ... | [
"os.path.exists",
"maze_utils.generate_grid",
"random.seed",
"os.mkdir",
"numpy.random.seed",
"numpy.savetxt",
"random.randint"
] | [((707, 734), 'numpy.random.seed', 'np.random.seed', (['seed_number'], {}), '(seed_number)\n', (721, 734), True, 'import numpy as np\n'), ((739, 756), 'random.seed', 'seed', (['seed_number'], {}), '(seed_number)\n', (743, 756), False, 'from random import randint, seed\n'), ((1101, 1128), 'os.path.exists', 'path.exists'... |
# coding: utf-8
# $ \newcommand{\cat}[2][\phantom{i}]{\ket{C^{#2}_{#1\alpha}}} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\bra}[1]{\langle#1|} $
# $ \newcommand{\braket}[2]{\langle#1|#2\rangle} $
# $\newcommand{\au}{\hat{a}^\dagger}$
# $\newcommand{\ad}{\hat{a}}$
# $\newcommand{\bu}{\hat{b}^\dagger}$
# ... | [
"numpy.sqrt"
] | [((2879, 2890), 'numpy.sqrt', 'sqrt', (['(Δ * χ)'], {}), '(Δ * χ)\n', (2883, 2890), False, 'from numpy import sqrt\n')] |
import numpy as np
class NumpyDynamic:
def __init__(self, dtype, array_size=(100,)):
self.data = np.zeros(array_size, dtype)
self.array_size = list(array_size)
self.size = 0
def add(self, x):
if self.size == self.array_size[0]:
self.array_size[0] *= 2
... | [
"numpy.zeros"
] | [((112, 139), 'numpy.zeros', 'np.zeros', (['array_size', 'dtype'], {}), '(array_size, dtype)\n', (120, 139), True, 'import numpy as np\n'), ((330, 372), 'numpy.zeros', 'np.zeros', (['self.array_size', 'self.data.dtype'], {}), '(self.array_size, self.data.dtype)\n', (338, 372), True, 'import numpy as np\n')] |
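The NumpyDynamic row truncates inside add; the np.zeros(self.array_size, self.data.dtype) call in extract_api suggests the standard grow-and-copy completion. A plausible full version, where the copy lines and the finalize helper are my assumptions:

```python
import numpy as np

class NumpyDynamic:
    def __init__(self, dtype, array_size=(100,)):
        self.data = np.zeros(array_size, dtype)
        self.array_size = list(array_size)
        self.size = 0

    def add(self, x):
        if self.size == self.array_size[0]:
            self.array_size[0] *= 2  # double capacity when full
            new_data = np.zeros(self.array_size, self.data.dtype)
            new_data[:self.size] = self.data  # copy existing elements over
            self.data = new_data
        self.data[self.size] = x
        self.size += 1

    def finalize(self):
        return self.data[:self.size]  # only the filled entries
```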
import numpy as np
from abc import ABCMeta, abstractmethod
class Node(object):
"""Represents state in MCTS search tree.
Args:
state (object): The environment state corresponding to this node in the search tree.
Note:
Node object is immutable. Node is left without exit edges (empty dict)... | [
"numpy.sqrt"
] | [((2113, 2134), 'numpy.sqrt', 'np.sqrt', (['state_visits'], {}), '(state_visits)\n', (2120, 2134), True, 'import numpy as np\n')] |
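The Node row's lone np.sqrt(state_visits) call is the hallmark of a UCB-style selection rule. A PUCT-flavored sketch of what such a score can look like; the exact formula the module uses is an assumption:

```python
import numpy as np

def ucb_score(edge_value, edge_visits, state_visits, prior, c_puct=1.0):
    # Exploitation term plus a prior-weighted exploration bonus that
    # decays as the edge is visited more often.
    return edge_value + c_puct * prior * np.sqrt(state_visits) / (1 + edge_visits)
```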
import numpy as np
import time
import pytest
import jax.numpy as jnp
import jax.config as config
import torch
import tensorflow as tf
from tensornetwork.linalg import linalg
from tensornetwork import backends
from tensornetwork.backends.numpy import numpy_backend
from tensornetwork.backends.jax import jax_backend
#pyli... | [
"jax.config.update",
"tensornetwork.linalg.linalg.zeros",
"tensornetwork.linalg.linalg.randn",
"numpy.testing.assert_allclose",
"tensornetwork.backends.backend_factory.get_backend",
"tensornetwork.linalg.linalg.eye",
"numpy.random.seed",
"tensornetwork.linalg.linalg.ones",
"time.time",
"tensornetw... | [((342, 379), 'jax.config.update', 'config.update', (['"""jax_enable_x64"""', '(True)'], {}), "('jax_enable_x64', True)\n", (355, 379), True, 'import jax.config as config\n'), ((1806, 1851), 'tensornetwork.backends.backend_factory.get_backend', 'backends.backend_factory.get_backend', (['backend'], {}), '(backend)\n', (... |
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import librosa
import os
import sys
import time
from datetime import datetime
from pathlib import Path
from src.python.audio_transforms import *
from src.python.model_predict import *
from src.python.graphics import plot_graph
# Hardcoding a few variable... | [
"pathlib.Path",
"src.python.graphics.plot_graph",
"numpy.array",
"datetime.datetime.now",
"librosa.effects.trim",
"librosa.load"
] | [((1090, 1129), 'pathlib.Path', 'Path', (['DIR_ROOT', '"""demo_files"""', '"""results"""'], {}), "(DIR_ROOT, 'demo_files', 'results')\n", (1094, 1129), False, 'from pathlib import Path\n'), ((3100, 3117), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (3108, 3117), True, 'import numpy as np\n'), ((481, 48... |
import unittest
import numpy as np
from xcube.webapi.controllers.time_series import get_time_series_info, get_time_series_for_point, \
get_time_series_for_geometry, get_time_series_for_geometry_collection
from ..helpers import new_test_service_context
class TimeSeriesControllerTest(unittest.TestCase):
def ... | [
"xcube.webapi.controllers.time_series.get_time_series_for_point",
"xcube.webapi.controllers.time_series.get_time_series_info",
"numpy.datetime64"
] | [((441, 514), 'xcube.webapi.controllers.time_series.get_time_series_for_point', 'get_time_series_for_point', (['ctx', '"""demo"""', '"""conc_tsm"""'], {'lon': '(-150.0)', 'lat': '(-30.0)'}), "(ctx, 'demo', 'conc_tsm', lon=-150.0, lat=-30.0)\n", (466, 514), False, 'from xcube.webapi.controllers.time_series import get_ti... |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : eval-referential.py
# Author : <NAME>, <NAME>
# Email : <EMAIL>, <EMAIL>
# Date : 30.07.2019
# Last Modified Date: 16.10.2019
# Last Modified By : Chi Han, Jiayuan Mao
#
# This file is part of the VCML codebase
# ... | [
"jacinle.io.load_json",
"IPython.core.ultratb.FormattedTB",
"jacinle.utils.meter.GroupMeters",
"jacinle.random.randint",
"jacinle.cli.argument.JacArgumentParser",
"jacinle.random.choice_list",
"jacinle.io.load",
"numpy.fmin",
"jacinle.utils.tqdm.tqdm_gofor"
] | [((890, 960), 'IPython.core.ultratb.FormattedTB', 'ultratb.FormattedTB', ([], {'mode': '"""Plain"""', 'color_scheme': '"""Linux"""', 'call_pdb': '(True)'}), "(mode='Plain', color_scheme='Linux', call_pdb=True)\n", (909, 960), False, 'from IPython.core import ultratb\n'), ((976, 995), 'jacinle.cli.argument.JacArgumentPa... |
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import confusion_matrix, roc_auc_score
from category_encoders import MEstimateEncoder
import numpy as np
from collections import defaultdict
import os
from sklearn.metrics import roc_au... | [
"numpy.log",
"sklearn.metrics.roc_auc_score",
"numpy.arange",
"numpy.histogram",
"numpy.max",
"numpy.empty",
"numpy.min",
"pandas.DataFrame",
"sklearn.metrics.confusion_matrix",
"numpy.abs",
"sklearn.model_selection.train_test_split",
"sklearn.pipeline.Pipeline",
"sklearn.ensemble.GradientBo... | [((492, 539), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('encoder', enc), ('model', modelo)]"], {}), "([('encoder', enc), ('model', modelo)])\n", (500, 539), False, 'from sklearn.pipeline import Pipeline\n'), ((2336, 2365), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['preds', 'true'], {}), '(preds, tru... |
import numpy as np
import numpy.linalg as LA
from .solve_R1 import problem_R1, Classo_R1, pathlasso_R1
from .solve_R2 import problem_R2, Classo_R2, pathlasso_R2
from .solve_R3 import problem_R3, Classo_R3, pathlasso_R3
from .solve_R4 import problem_R4, Classo_R4, pathlasso_R4
from .path_alg import solve_path, pathalgo... | [
"numpy.vdot",
"numpy.array",
"numpy.linspace",
"numpy.mean"
] | [((9722, 9762), 'numpy.array', 'np.array', (['[(beta / ww) for beta in BETA]'], {}), '([(beta / ww) for beta in BETA])\n', (9730, 9762), True, 'import numpy as np\n'), ((9864, 9878), 'numpy.array', 'np.array', (['BETA'], {}), '(BETA)\n', (9872, 9878), True, 'import numpy as np\n'), ((5733, 5757), 'numpy.linspace', 'np.... |
import argparse
import torch
from torch.utils.data import DataLoader
import sys, os
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../../"))
from deep_audio_features.dataloading.dataloading import FeatureExtractorDataset
from deep_audio_features.models.cnn import load_cnn
from deep_a... | [
"deep_audio_features.models.cnn.load_cnn",
"deep_audio_features.utils.model_editing.drop_layers",
"argparse.ArgumentParser",
"deep_audio_features.dataloading.dataloading.FeatureExtractorDataset",
"deep_audio_features.lib.training.test",
"os.path.realpath",
"numpy.array",
"torch.cuda.is_available",
"... | [((1363, 1382), 'deep_audio_features.models.cnn.load_cnn', 'load_cnn', (['modelpath'], {}), '(modelpath)\n', (1371, 1382), False, 'from deep_audio_features.models.cnn import load_cnn\n'), ((1621, 1655), 'deep_audio_features.utils.model_editing.drop_layers', 'drop_layers', (['model', 'layers_dropped'], {}), '(model, lay... |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... | [
"numpy.array",
"deepspeech.io.utility.pad_sequence",
"deepspeech.utils.log.Log"
] | [((804, 817), 'deepspeech.utils.log.Log', 'Log', (['__name__'], {}), '(__name__)\n', (807, 817), False, 'from deepspeech.utils.log import Log\n'), ((2330, 2362), 'numpy.array', 'np.array', (['tokens'], {'dtype': 'np.int64'}), '(tokens, dtype=np.int64)\n', (2338, 2362), True, 'import numpy as np\n'), ((2484, 2523), 'dee... |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # Lab #3 - Text Prediction
#
# * <NAME> - 17315
# * <NAME> - 17509
# * <NAME> - 17088
# %%
from keras.layers import Embedding
from keras.layers import LSTM
from keras.layers import Dense
from keras.mode... | [
"keras.preprocessing.text.Tokenizer",
"random.shuffle",
"nltk.corpus.stopwords.words",
"nltk.download",
"keras.utils.to_categorical",
"keras.models.Sequential",
"numpy.array",
"keras.layers.LSTM",
"keras.layers.Dense",
"pandas.DataFrame",
"re.sub",
"keras.preprocessing.sequence.pad_sequences",... | [((684, 710), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (697, 710), False, 'import nltk\n'), ((767, 793), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (782, 793), False, 'from nltk.corpus import stopwords\n'), ((6048, 6077), 'random.sh... |
import hashlib
import json
import math
import os
import dill
import base64
from sys import exit
import requests
from bson import ObjectId
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
#from cryptography.hazmat.primitives.asymmetric import padding
#from cryptography... | [
"bson.ObjectId.is_valid",
"numpy.ones",
"hashlib.md5",
"requests_toolbelt.MultipartEncoderMonitor",
"math.pow",
"tqdm.tqdm",
"os.environ.get",
"json.dumps",
"math.log",
"os.path.dirname",
"dill.dumps"
] | [((3041, 3058), 'math.pow', 'math.pow', (['(1024)', 'i'], {}), '(1024, i)\n', (3049, 3058), False, 'import math\n'), ((7038, 7146), 'tqdm.tqdm', 'tqdm', ([], {'desc': 'f"""{NEURO_AI_STR} Uploading"""', 'unit': '"""B"""', 'unit_scale': '(True)', 'total': 'encoder_len', 'unit_divisor': '(1024)'}), "(desc=f'{NEURO_AI_STR}... |
""" Code to implement ScaleFactor:: decorator supported
in gtlike.
The gtlike feature is documented here:
https://confluence.slac.stanford.edu/display/ST/Science+Tools+Development+Notes?focusedCommentId=103582318#comment-103582318
Author: <NAME>
"""
import operator
from copy import deepcopy
... | [
"doctest.testmod",
"uw.like.Models.Constant",
"numpy.concatenate",
"copy.deepcopy"
] | [((5021, 5049), 'copy.deepcopy', 'deepcopy', (['model_class.gtlike'], {}), '(model_class.gtlike)\n', (5029, 5049), False, 'from copy import deepcopy\n'), ((6931, 6948), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (6946, 6948), False, 'import doctest\n'), ((5311, 5339), 'uw.like.Models.Constant', 'Constant',... |
# This is a class because it stores its model parameters and has a 'prediction' function which returns predictions for input data
import numpy as np
from baseModel import baseModel, ModellingError as me
from datetime import datetime
import pandas as pd
class ModellingError(me): pass
class ConstantMonthlyModel(baseMode... | [
"pandas.DataFrame.from_records",
"pandas.DatetimeIndex",
"numpy.random.randn"
] | [((823, 854), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['data'], {}), '(data)\n', (848, 854), True, 'import pandas as pd\n'), ((958, 989), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["data_pd['ts']"], {}), "(data_pd['ts'])\n", (974, 989), True, 'import pandas as pd\n'), ((1453, 1491), 'numpy.ran... |
# Copyright 2018 <NAME>, <NAME>.
# (Strongly inspired by original Google BERT code and Hugging Face's code)
""" Fine-tuning on A Classification Task with pretrained Transformer """
import itertools
import csv
import fire
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import toke... | [
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"fire.Fire",
"torch.nn.Tanh",
"torch.exp",
"os.walk",
"utils.truncate_tokens_pair",
"pandas.DataFrame",
"tokenization.FullTokenizer",
"torch.utils.data.Dataset.__init__",
"models.Config.from_json",
"utils.set_seeds",
"nump... | [((563, 601), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': '"""\t"""', 'dtype': 'str'}), "(path, sep='\\t', dtype=str)\n", (574, 601), True, 'import pandas as pd\n'), ((7755, 7788), 'train.Config.from_json', 'train.Config.from_json', (['train_cfg'], {}), '(train_cfg)\n', (7777, 7788), False, 'import train\n'), ... |
# Training to a set of multiple objects (e.g. ShapeNet or DTU)
# tensorboard logs available in logs/<expname>
import sys
import os
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src"))
)
import warnings
import trainlib
from model import make_model, loss
from render import NeRF... | [
"util.get_cuda",
"torch.from_numpy",
"torch.nn.MSELoss",
"data.get_split_dataset",
"torchvision.utils.save_image",
"render.NeRFRenderer.from_conf",
"os.path.exists",
"util.gen_rays",
"torch.randint",
"model.loss.get_rgb_loss",
"util.batched_index_select_nd",
"numpy.random.choice",
"util.psnr... | [((630, 670), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""'}), "(action='ignore')\n", (653, 670), False, 'import warnings\n'), ((2238, 2313), 'util.args.parse_args', 'util.args.parse_args', (['extra_args'], {'training': '(True)', 'default_ray_batch_size': '(128)'}), '(extra_args, ... |
#
# File:
# color4.py
#
# Synopsis:
#  Draws sixteen sample color boxes with RGB labels.
#
# Category:
# Colors
#
# Author:
# <NAME>
#
# Date of initial publication:
# January, 2006
#
# Description:
# This example draws sixteen color boxes using the RGB
# values for named colors. The boxes are... | [
"Ngl.polyline_ndc",
"Ngl.polygon_ndc",
"Ngl.Resources",
"Ngl.end",
"Ngl.open_wks",
"Ngl.text_ndc",
"numpy.zeros",
"Ngl.frame"
] | [((1636, 1651), 'Ngl.Resources', 'Ngl.Resources', ([], {}), '()\n', (1649, 1651), False, 'import Ngl\n'), ((1772, 1811), 'Ngl.open_wks', 'Ngl.open_wks', (['wks_type', '"""color4"""', 'rlist'], {}), "(wks_type, 'color4', rlist)\n", (1784, 1811), False, 'import Ngl\n'), ((2069, 2088), 'numpy.zeros', 'numpy.zeros', (['(5)... |
import numpy as np
import matplotlib.pyplot as plt
import freqent.freqentn as fen
import dynamicstructurefactor.sqw as sqw
from itertools import product
import os
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
savepath = '/media/daniel/storage11/Dropbox/LLM_Danny/frequencySpaceDissipation/tests/freqentn_te... | [
"numpy.sqrt",
"numpy.asarray",
"freqent.freqentn._nd_gauss_smooth",
"matplotlib.pyplot.close",
"freqent.freqentn.corr_matrix",
"numpy.linspace",
"numpy.zeros",
"numpy.cos",
"numpy.meshgrid",
"dynamicstructurefactor.sqw.azimuthal_average_3D",
"matplotlib.pyplot.subplots",
"numpy.arange",
"mat... | [((326, 342), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (335, 342), True, 'import matplotlib.pyplot as plt\n'), ((1853, 1889), 'numpy.linspace', 'np.linspace', (['(-xmax / 2)', '(xmax / 2)', 'nx'], {}), '(-xmax / 2, xmax / 2, nx)\n', (1864, 1889), True, 'import numpy as np\n'), ((1897, 1... |
# -*- coding: utf-8 -*-
""""
Bandidos estocásticos: introducción, algoritmos y experimentos
TFG Informática
Sección 8.4.4
Figuras 26, 27 y 28
Autor: <NAME>
"""
import math
import random
import scipy.stats as stats
import matplotlib.pyplot as plt
import numpy as np
def computemTeor(n,Delta):
if Del... | [
"matplotlib.pyplot.ylabel",
"scipy.stats.norm",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"math.sqrt",
"math.log",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.empty",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((486, 506), 'numpy.empty', 'np.empty', (['(n // 2 + 1)'], {}), '(n // 2 + 1)\n', (494, 506), True, 'import numpy as np\n'), ((512, 528), 'scipy.stats.norm', 'stats.norm', (['(0)', '(1)'], {}), '(0, 1)\n', (522, 528), True, 'import scipy.stats as stats\n'), ((1994, 2010), 'scipy.stats.norm', 'stats.norm', (['(0)', '(1... |
"""
LCCS Level 3 Classification
| Class name | Code | Numeric code |
|----------------------------------|-----|-----|
| Cultivated Terrestrial Vegetated | A11 | 111 |
| Natural Terrestrial Vegetated | A12 | 112 |
| Cultivated Aquatic Vegetated | A23 | 123 |
| Natural Aquatic Vegetated | A24 | 124 |
| Art... | [
"numpy.uint8",
"numpy.zeros",
"logging.warning",
"numpy.zeros_like"
] | [((1476, 1533), 'numpy.zeros_like', 'numpy.zeros_like', (['classification_array'], {'dtype': 'numpy.uint8'}), '(classification_array, dtype=numpy.uint8)\n', (1492, 1533), False, 'import numpy\n'), ((1546, 1567), 'numpy.zeros_like', 'numpy.zeros_like', (['red'], {}), '(red)\n', (1562, 1567), False, 'import numpy\n'), ((... |
from manimlib.imports import *
from srcs.utils import run
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg
from sklearn import svm # sklearn = scikit-learn
from sklearn.datasets import make_moons
def mplfig_to_npimage(fig):
""" Converts a matplotlib ... | [
"numpy.arange",
"numpy.sinc",
"matplotlib.pyplot.close",
"sklearn.datasets.make_moons",
"numpy.linspace",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"numpy.sin",
"numpy.frombuffer",
"matplotlib.pyplot.subplots",
"srcs.utils.run",
"sklearn.svm.SVC"
] | [((452, 472), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvasAgg', (['fig'], {}), '(fig)\n', (467, 472), False, 'from matplotlib.backends.backend_agg import FigureCanvasAgg\n'), ((758, 792), 'numpy.frombuffer', 'np.frombuffer', (['buf'], {'dtype': 'np.uint8'}), '(buf, dtype=np.uint8)\n', (771, 792), Tr... |
import copy
import datetime
import os
import random
import traceback
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision.utils import save_image
from inference.inference_utils import get_trange, get_tqdm
def init_random_seed(value=0):
random.seed(value)
np.random.seed(va... | [
"torch.manual_seed",
"traceback.format_exc",
"torch.utils.data.DataLoader",
"os.path.join",
"random.seed",
"torch.is_tensor",
"datetime.datetime.now",
"numpy.random.seed",
"numpy.concatenate",
"copy.deepcopy",
"torch.no_grad",
"torch.cuda.manual_seed",
"inference.inference_utils.get_trange",... | [((280, 298), 'random.seed', 'random.seed', (['value'], {}), '(value)\n', (291, 298), False, 'import random\n'), ((303, 324), 'numpy.random.seed', 'np.random.seed', (['value'], {}), '(value)\n', (317, 324), True, 'import numpy as np\n'), ((329, 353), 'torch.manual_seed', 'torch.manual_seed', (['value'], {}), '(value)\n... |
#!/usr/bin/env python3
import numpy as np
import h5py
import matplotlib.pyplot as plt
# import plotly.graph_objects as go
#========= Configuration ===========
DIR ="../data"
file_name = "particle"#"rhoNeutral" #"P"
h5 = h5py.File('../data/'+file_name+'.hdf5','r')
Lx = h5.attrs["Lx"]
Ly = h5.attrs["Ly"]
Lz = h5.at... | [
"matplotlib.pyplot.plot",
"h5py.File",
"numpy.array",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((225, 273), 'h5py.File', 'h5py.File', (["('../data/' + file_name + '.hdf5')", '"""r"""'], {}), "('../data/' + file_name + '.hdf5', 'r')\n", (234, 273), False, 'import h5py\n'), ((407, 453), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': 'Nt', 'step': '(1)', 'dtype': 'int'}), '(start=0, stop=Nt, step=1, dt... |
# -*- coding: utf-8 -*-
"""
Iris classification example, practice using the high-level API
Algorithms: Neural Network
Reference: https://www.tensorflow.org/get_started/tflearn
Date: Jun 14, 2017
@author: <NAME>
@Library: tensorflow - high-level API with tf.contrib.learn
"""
from __future__ import absolute_import
fro... | [
"os.path.exists",
"tensorflow.contrib.learn.DNNClassifier",
"tensorflow.contrib.layers.real_valued_column",
"tensorflow.contrib.learn.datasets.base.load_csv_with_header",
"numpy.array",
"tensorflow.constant"
] | [((1176, 1303), 'tensorflow.contrib.learn.datasets.base.load_csv_with_header', 'tf.contrib.learn.datasets.base.load_csv_with_header', ([], {'filename': 'IRIS_TRAINING', 'target_dtype': 'np.int', 'features_dtype': 'np.float32'}), '(filename=IRIS_TRAINING,\n target_dtype=np.int, features_dtype=np.float32)\n', (1227, 1... |
# import scipy.signal
from gym.spaces import Box, Discrete
import numpy as np
import torch
from torch import nn
import IPython
# from torch.nn import Parameter
import torch.nn.functional as F
from torch.distributions import Independent, OneHotCategorical, Categorical
from torch.distributions.normal import Normal
# # fr... | [
"torch.as_tensor",
"torch.distributions.Categorical",
"torch.nn.Tanh",
"torch.nn.Sequential",
"torch.exp",
"torch.tanh",
"torch.nn.init.uniform_",
"torch.nn.Embedding",
"torch.nn.functional.mse_loss",
"torch.distributions.Normal",
"numpy.ones",
"torch.randn_like",
"torch.nn.functional.one_ho... | [((628, 650), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (641, 650), False, 'from torch import nn\n'), ((1513, 1539), 'torch.distributions.Categorical', 'Categorical', ([], {'logits': 'logits'}), '(logits=logits)\n', (1524, 1539), False, 'from torch.distributions import Independent, OneHo... |
"""This module contains the code related to the DAG and the scheduler."""
from pathlib import Path
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from networkx.drawing import nx_pydot... | [
"networkx.relabel_nodes",
"matplotlib.pyplot.savefig",
"networkx.topological_sort",
"pathlib.Path",
"networkx.drawing.nx_pydot.pydot_layout",
"networkx.DiGraph",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"networkx.draw_networkx_nodes",
"matplotlib.pyplot.close",
"numpy.array",
"netw... | [((5769, 5802), 'networkx.topological_sort', 'nx.topological_sort', (['reversed_dag'], {}), '(reversed_dag)\n', (5788, 5802), True, 'import networkx as nx\n'), ((6347, 6377), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(16, 12)'}), '(figsize=(16, 12))\n', (6359, 6377), True, 'import matplotlib.pyplo... |
#!/usr/bin/env python
# coding: utf-8
# ## E2E Xgboost MLFLOW
# In[45]:
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, pandas_udf,udf,lit
import azure.synapse.ml.predict as pcontext
import azure.synapse.ml.predict.utils._logger as synapse_predict_logger
import numpy as np
import panda... | [
"mlflow.pyfunc.save_model",
"numpy.random.rand",
"azure.synapse.ml.predict.bind_model",
"numpy.random.randint",
"pandas.DataFrame",
"xgboost.DMatrix",
"xgboost.XGBRFRegressor"
] | [((500, 521), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (514, 521), True, 'import numpy as np\n'), ((571, 599), 'numpy.random.randint', 'np.random.randint', (['(1)'], {'size': '(5)'}), '(1, size=5)\n', (588, 599), True, 'import numpy as np\n'), ((626, 656), 'xgboost.DMatrix', 'xgb.DMatr... |
import numpy as np
import matplotlib.pyplot as plt
import g_functions as g_f
R1 = 2
R2 = .6
M = 500
Delta = .1
NB_POINTS = 2**10
EPSILON_IMAG = 1e-8
parameters = {
'M' : M,
'R1' : R1,
'R2' : R2,
'NB_POINTS' : NB_POINTS,
'EPSILON_IMAG' : EPSILON_IMAG,
've... | [
"g_functions.find_rho",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"g_functions.denoiser",
"numpy.zeros",
"g_functions.find_spectrum",
"g_functions.make_sample",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((398, 432), 'g_functions.make_sample', 'g_f.make_sample', (['parameters', 'Delta'], {}), '(parameters, Delta)\n', (413, 432), True, 'import g_functions as g_f\n'), ((474, 505), 'g_functions.find_rho', 'g_f.find_rho', (['parameters', 'Delta'], {}), '(parameters, Delta)\n', (486, 505), True, 'import g_functions as g_f\... |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import datetime as dt
from statsmodels.stats.multitest import fdrcorrection
from pylab import savefig
# FUNCTIONS YOU CAN USE:
# analyses(filepath) spits out a nifty heatmap to let you check ... | [
"numpy.linalg.pinv",
"pandas.read_csv",
"numpy.isfinite",
"numpy.sin",
"statsmodels.api.OLS",
"pandas.to_datetime",
"matplotlib.pyplot.twinx",
"numpy.datetime64",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"statsmodels.stats.multitest.fdrcorrection",
"matplotlib.pyplot.savefig",
"seaborn.... | [((1308, 1329), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {}), '(filepath)\n', (1319, 1329), True, 'import pandas as pd\n'), ((2167, 2189), 'numpy.sin', 'np.sin', (['time_delta_rad'], {}), '(time_delta_rad)\n', (2173, 2189), True, 'import numpy as np\n'), ((2217, 2239), 'numpy.cos', 'np.cos', (['time_delta_rad']... |
"""Implement the Unit class."""
import numpy as np
from .. import config, constants
__all__ = ["Pixels", "Degrees", "Munits", "Percent"]
class _PixelUnits:
def __mul__(self, val):
return val * config.frame_width / config.pixel_width
def __rmul__(self, val):
return val * config.frame_width ... | [
"numpy.array_equal"
] | [((399, 437), 'numpy.array_equal', 'np.array_equal', (['axis', 'constants.X_AXIS'], {}), '(axis, constants.X_AXIS)\n', (413, 437), True, 'import numpy as np\n'), ((495, 533), 'numpy.array_equal', 'np.array_equal', (['axis', 'constants.Y_AXIS'], {}), '(axis, constants.Y_AXIS)\n', (509, 533), True, 'import numpy as np\n'... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
2D linear elasticity example
Solve the equilibrium equation -\nabla \cdot \sigma(x) = f(x) for x\in\Omega
with the strain-displacement equation:
\epsilon = 1/2(\nabla u + \nabla u^T)
and the constitutive law:
\sigma = 2*\mu*\epsilon + \lambda*(\nabla\cdot u)I,... | [
"numpy.sqrt",
"utils.Geom_examples.QuarterAnnulus",
"numpy.array",
"tensorflow.keras.layers.Dense",
"numpy.arctan2",
"utils.tfp_loss.tfp_function_factory",
"tensorflow.dynamic_stitch",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"numpy.concatenate",
"tensorflow.convert_to_tensor",
"numpy... | [((1429, 1447), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1443, 1447), True, 'import numpy as np\n'), ((1448, 1470), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (1466, 1470), True, 'import tensorflow as tf\n'), ((2429, 2495), 'utils.Geom_examples.QuarterAnnulus'... |
import datetime
import os
import sys
from cmath import inf
from typing import Any
import hypothesis.extra.numpy as xps
import hypothesis.strategies as st
import numpy
import pytest
from hypothesis import assume, given
from eopf.product.utils import (
apply_xpath,
conv,
convert_to_unix_time,
is_date,
... | [
"numpy.iinfo",
"eopf.product.utils.convert_to_unix_time",
"hypothesis.extra.numpy.integer_dtypes",
"eopf.product.utils.conv",
"hypothesis.extra.numpy.boolean_dtypes",
"datetime.datetime",
"eopf.product.utils.is_date",
"numpy.int64",
"hypothesis.strategies.booleans",
"hypothesis.strategies.text",
... | [((7718, 7759), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""EPSILON"""', '[0.1]'], {}), "('EPSILON', [0.1])\n", (7741, 7759), False, 'import pytest\n'), ((457, 523), 'os.path.join', 'os.path.join', (['EMBEDED_TEST_DATA_FOLDER', '"""snippet_xfdumanifest.xml"""'], {}), "(EMBEDED_TEST_DATA_FOLDER, 'snippet... |
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
from sklearn import feature_selection as fs
from sklearn import naive_bayes
from sklearn import model_selection
from sklearn import metrics
from sklearn import linear_model
from sklearn import svm
from imblearn.under_sampling import Ne... | [
"numpy.unique",
"sklearn.feature_selection.VarianceThreshold",
"pandas.read_csv",
"imblearn.under_sampling.NeighbourhoodCleaningRule",
"sklearn.feature_selection.SelectFromModel",
"imblearn.over_sampling.SMOTE",
"sklearn.svm.LinearSVC",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_scor... | [((714, 752), 'sklearn.feature_selection.mutual_info_regression', 'fs.mutual_info_regression', (['features', 'y'], {}), '(features, y)\n', (739, 752), True, 'from sklearn import feature_selection as fs\n'), ((1027, 1049), 'sklearn.feature_selection.VarianceThreshold', 'fs.VarianceThreshold', ([], {}), '()\n', (1047, 10... |
import os
import sys
import numpy as np
import pandas as pd
import tensorflow as tf
from losses import focal_loss,weighted_binary_crossentropy
from utils import Dataset
class DeepFM(object):
def __init__(self, params):
self.feature_size = params['feature_size']
self.field_size = params['field_size'... | [
"tensorflow.local_variables_initializer",
"numpy.sqrt",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.reduce_sum",
"tensorflow.gradients",
"numpy.array",
"tensorflow.control_dependencies",
"tensorflow.clip_by_global_norm",
"tensorflow.nn.embedding_lookup",
"tensorflow.random_normal",
"... | [((945, 1011), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""feature_index"""'}), "(tf.int32, shape=[None, None], name='feature_index')\n", (959, 1011), True, 'import tensorflow as tf\n'), ((1038, 1106), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'sh... |
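Across all of these rows the pattern is the same: a code string, the list of fully qualified APIs it uses, and the character positions of each call. As a closing illustration of how such call sites could be located — a minimal sketch with Python's standard ast module, not the pipeline that actually produced these rows; the helper name find_api_calls is hypothetical, and it reports line/column positions where the dataset stores character offsets:

import ast

def find_api_calls(source):
    """Return (qualified_name, lineno, col) for each resolvable call in source."""
    tree = ast.parse(source)
    # Map imported names to canonical dotted paths, e.g. "np" -> "numpy",
    # or "seed" -> "random.seed" for "from random import seed".
    aliases = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for a in node.names:
                aliases[a.asname or a.name] = a.name
        elif isinstance(node, ast.ImportFrom) and node.module:
            for a in node.names:
                aliases[a.asname or a.name] = node.module + "." + a.name
    calls = []
    for node in ast.walk(tree):
        if not isinstance(node, ast.Call):
            continue
        # Rebuild the dotted call name as written, e.g. "np.random.seed".
        parts, func = [], node.func
        while isinstance(func, ast.Attribute):
            parts.append(func.attr)
            func = func.value
        if isinstance(func, ast.Name):
            parts.append(func.id)
            head, _, rest = ".".join(reversed(parts)).partition(".")
            qualified = aliases.get(head, head) + ("." + rest if rest else "")
            calls.append((qualified, node.lineno, node.col_offset))
    return calls

print(find_api_calls("import numpy as np\nnp.random.seed(42)\n"))
# -> [('numpy.random.seed', 2, 0)]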