| code | apis | extract_api |
|---|---|---|
# Useful starting lines
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# %load_ext autoreload
# %autoreload 2
from sklearn import linear_model
# from __future__ import absolute_import
from labs.ex03.template import helpers
from labs.ex04.template.costs import compute_rmse, compute_mse
from la... | [
"numpy.random.seed",
"numpy.logspace",
"labs.ex04.template.plots.cross_validation_visualization_for_degree",
"labs.ex04.template.costs.compute_mse_for_ridge",
"numpy.sin",
"labs.ex04.template.plots.bias_variance_decomposition_visualization",
"labs.ex04.template.ridge_regression.ridge_regression",
"lab... | [((904, 923), 'labs.ex03.template.helpers.load_data', 'helpers.load_data', ([], {}), '()\n', (921, 923), False, 'from labs.ex03.template import helpers\n'), ((1067, 1087), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1081, 1087), True, 'import numpy as np\n'), ((1102, 1132), 'numpy.random.permuta... |
"""Vertical structure functions for ROMS
:func:`sdepth`
Depth of s-levels
:func:`zslice`
Slice a 3D field in s-coordinates to fixed depth
:func:`multi_zslice`
Slice a 3D field to several depth levels
:func:`z_average`
Vertical average of a 3D field
:func:`s_stretch`
Compute vertical stretching arrays Cs_r or... | [
"numpy.outer",
"numpy.sum",
"numpy.tanh",
"numpy.abs",
"numpy.asarray",
"numpy.arange",
"xarray.DataArray",
"numpy.linspace",
"numpy.array",
"numpy.cosh",
"numpy.exp",
"numpy.sinh"
] | [((1334, 1347), 'numpy.asarray', 'np.asarray', (['H'], {}), '(H)\n', (1344, 1347), True, 'import numpy as np\n'), ((1462, 1475), 'numpy.asarray', 'np.asarray', (['C'], {}), '(C)\n', (1472, 1475), True, 'import numpy as np\n'), ((3411, 3424), 'numpy.asarray', 'np.asarray', (['F'], {}), '(F)\n', (3421, 3424), True, 'impo... |
import numpy as np
import sys
# for calculating the loss
from sklearn.metrics import log_loss
from sklearn.metrics import make_scorer
# import three machine learning models
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
#for stan... | [
"numpy.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"logging.StreamHandler",
"sklearn.preprocessing.MinMaxScaler",
"logging.Formatter",
"sklearn.metrics.make_scorer",
"random.seed",
"numpy.array",
"sklearn.svm.SVC",
"configparser.ConfigParser",
"logging.getLogger"
] | [((675, 701), 'logging.getLogger', 'logging.getLogger', (['"""cumul"""'], {}), "('cumul')\n", (692, 701), False, 'import logging\n'), ((702, 719), 'random.seed', 'random.seed', (['(1123)'], {}), '(1123)\n', (713, 719), False, 'import random\n'), ((720, 740), 'numpy.random.seed', 'np.random.seed', (['(1123)'], {}), '(11... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from cnocr import CnOcr
# Size of the invoice images generated later, set according to the standard VAT invoice layout of 240mm x 140mm
height_resize = 1400
width_resize = 2400
# Instantiate CnOcr objects for different purposes
ocr = CnOcr(name='') # mixed characters
ocr_numbers = CnOcr(name='numbers', cand_alphabet='0123456789.') # digits only
ocr_UpperSerial =... | [
"cv2.GaussianBlur",
"cv2.approxPolyDP",
"cv2.getPerspectiveTransform",
"cv2.arcLength",
"numpy.argmax",
"numpy.ones",
"numpy.argmin",
"cv2.rectangle",
"cv2.imencode",
"cv2.imshow",
"cv2.line",
"cv2.warpPerspective",
"cv2.contourArea",
"cv2.dilate",
"cv2.imwrite",
"cv2.resize",
"cnocr... | [((208, 222), 'cnocr.CnOcr', 'CnOcr', ([], {'name': '""""""'}), "(name='')\n", (213, 222), False, 'from cnocr import CnOcr\n'), ((245, 295), 'cnocr.CnOcr', 'CnOcr', ([], {'name': '"""numbers"""', 'cand_alphabet': '"""0123456789."""'}), "(name='numbers', cand_alphabet='0123456789.')\n", (250, 295), False, 'from cnocr im... |
import numpy as np
# create array data
predict = np.array([[1,2,2,1],
[4.5,2.5,10,0.5],
[6,6,8,4],
[6.26,6.26,8.26,4.26]],np.double)
truth = np.array([[1,4,3,3],
[1.2,2.2,2.2,1.2],
[5,2,8,1],
[6.1,6.1,8.1,4.1],... | [
"numpy.zeros",
"numpy.any",
"numpy.array",
"numpy.argmax"
] | [((51, 152), 'numpy.array', 'np.array', (['[[1, 2, 2, 1], [4.5, 2.5, 10, 0.5], [6, 6, 8, 4], [6.26, 6.26, 8.26, 4.26]]', 'np.double'], {}), '([[1, 2, 2, 1], [4.5, 2.5, 10, 0.5], [6, 6, 8, 4], [6.26, 6.26, \n 8.26, 4.26]], np.double)\n', (59, 152), True, 'import numpy as np\n'), ((202, 322), 'numpy.array', 'np.array'... |
from kb import KB, TRAIN_LABEL, DEV_LABEL, TEST_LABEL
import random
import numpy as np
class SampleKB:
def __init__(self, num_relations, num_entities,
arities=[0.0, 1.0, 0.0],
fb_densities=[0.0, 0.0, 0.0],
arg_densities=[0., 0.1, 0.0],
fact_prob=0... | [
"numpy.random.seed",
"argparse.ArgumentParser",
"random.uniform",
"random.sample",
"kb.KB",
"random.seed",
"os.path.join",
"numpy.random.shuffle"
] | [((7344, 7364), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7358, 7364), True, 'import numpy as np\n'), ((7398, 7493), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""create artificial dataset (train+test) with rules (all arity 2)"""'], {}), "(\n 'create artificial dataset (train+... |
'''
@author: <NAME>
'''
import time
import numpy as np
import matplotlib.pyplot as plt
from algorithms import primes1, primes2, primes3, primes4, primes5, primes6, primes7, primes8
ubounds = range(0, 10000, 100)
num = len(ubounds)
results = []
for algorithm in (primes1, primes2, primes3, primes4, primes5, primes6... | [
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"time.time",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((812, 848), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Upper bound for primes"""'], {}), "('Upper bound for primes')\n", (822, 848), True, 'import matplotlib.pyplot as plt\n'), ((849, 897), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time in seconds to generate primes"""'], {}), "('Time in seconds to generat... |
"""
echopype data model inherited from base class EchoData for EK60 data.
"""
import datetime as dt
import numpy as np
import xarray as xr
from .echo_data import EchoData
class EchoDataEK60(EchoData):
"""Class for manipulating EK60 echo data that is already converted to netCDF."""
def __init__(self, file_p... | [
"numpy.log10",
"xarray.open_dataset",
"datetime.datetime.now"
] | [((956, 1008), 'xarray.open_dataset', 'xr.open_dataset', (['self.file_path'], {'group': '"""Environment"""'}), "(self.file_path, group='Environment')\n", (971, 1008), True, 'import xarray as xr\n'), ((1027, 1072), 'xarray.open_dataset', 'xr.open_dataset', (['self.file_path'], {'group': '"""Beam"""'}), "(self.file_path,... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 31 14:42:37 2019
@author: owenmadin
"""
import numpy
from bayesiantesting.kernels.bayes import ThermodynamicIntegration
from bayesiantesting.models.continuous import GaussianModel
def main():
priors = {"uniform": ("uniform", numpy.array([-5... | [
"bayesiantesting.kernels.bayes.ThermodynamicIntegration",
"numpy.array",
"bayesiantesting.models.continuous.GaussianModel"
] | [((377, 420), 'bayesiantesting.models.continuous.GaussianModel', 'GaussianModel', (['"""gaussian"""', 'priors', '(0.0)', '(1.0)'], {}), "('gaussian', priors, 0.0, 1.0)\n", (390, 420), False, 'from bayesiantesting.models.continuous import GaussianModel\n'), ((575, 712), 'bayesiantesting.kernels.bayes.ThermodynamicIntegr... |
from PIL import Image
import numpy as np
img = Image.open('cifar.png')
pic = np.array(img)
# One random offset per colour channel, broadcast over every pixel
noise = np.random.randint(-10, 10, pic.shape[-1])
print(noise.shape)
pic = np.clip(pic + noise, 0, 255)  # clip so the uint8 cast below cannot wrap around
pic = pic.astype(np.uint8)
asd = Image.fromarray(pic)
"PIL.Image.fromarray",
"numpy.random.randint",
"numpy.array",
"PIL.Image.open"
] | [((47, 70), 'PIL.Image.open', 'Image.open', (['"""cifar.png"""'], {}), "('cifar.png')\n", (57, 70), False, 'from PIL import Image\n'), ((77, 90), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (85, 90), True, 'import numpy as np\n'), ((99, 140), 'numpy.random.randint', 'np.random.randint', (['(-10)', '(10)', 'pic... |
import torch
from torch.autograd import Variable
from scattering.scattering1d.utils import pad1D, modulus, subsample_fourier
from scattering.scattering1d.utils import compute_border_indices
import numpy as np
import pytest
def test_pad1D(random_state=42):
"""
Tests the correctness and differentiability of pad... | [
"numpy.abs",
"torch.sqrt",
"numpy.ones",
"torch.randn",
"scattering.scattering1d.utils.subsample_fourier",
"numpy.fft.fft",
"numpy.random.RandomState",
"pytest.raises",
"scattering.scattering1d.utils.modulus",
"numpy.max",
"torch.zeros",
"scattering.scattering1d.utils.pad1D",
"numpy.fft.ifft... | [((335, 366), 'torch.manual_seed', 'torch.manual_seed', (['random_state'], {}), '(random_state)\n', (352, 366), False, 'import torch\n'), ((2321, 2352), 'torch.manual_seed', 'torch.manual_seed', (['random_state'], {}), '(random_state)\n', (2338, 2352), False, 'import torch\n'), ((2463, 2473), 'scattering.scattering1d.u... |
from pathlib import Path
import numpy as np
from .config import Config
from .spin import Spin
def load(path: Path) -> Config:
with path.open() as file:
lines = file.readlines()
global_optimum, best_solution = lines[0].split(' ')
global_optimum = float(global_optimum.strip())
b... | [
"numpy.array"
] | [((445, 468), 'numpy.array', 'np.array', (['best_solution'], {}), '(best_solution)\n', (453, 468), True, 'import numpy as np\n')] |
import lyse
import runmanager.remote as rm
import numpy as np
import mloop_config
import sys
import logging
import os
from labscript_utils.setup_logging import LOG_PATH
try:
from labscript_utils import check_version
except ImportError:
raise ImportError('Require labscript_utils > 2.1.0')
check_v... | [
"numpy.isnan",
"labscript_utils.check_version",
"logging.Formatter",
"lyse.data",
"mloop_config.get",
"os.path.join",
"logging.FileHandler",
"lyse.routine_storage.queue.put",
"Queue.Queue",
"runmanager.remote.get_globals",
"threading.Thread",
"lyse.routine_storage.optimisation.is_alive",
"lo... | [((313, 350), 'labscript_utils.check_version', 'check_version', (['"""lyse"""', '"""2.5.0"""', '"""4.0"""'], {}), "('lyse', '2.5.0', '4.0')\n", (326, 350), False, 'from labscript_utils import check_version\n'), ((352, 394), 'labscript_utils.check_version', 'check_version', (['"""zprocess"""', '"""2.13.1"""', '"""4.0"""... |
from .util import Audio
from abc import ABC, abstractmethod
import numpy as np
from scipy import fft, signal
from IPython.display import display
from bokeh.plotting import figure, show
from bokeh.layouts import gridplot
from bokeh.models.mappers import LinearColorMapper
from bokeh.models.ranges import DataRange1d
from ... | [
"bokeh.io.output_notebook",
"bokeh.plotting.figure",
"numpy.abs",
"numpy.asarray",
"scipy.fft.rfft",
"numpy.ndim",
"bokeh.models.tools.HoverTool",
"bokeh.models.mappers.LinearColorMapper",
"numpy.log10",
"scipy.signal.stft"
] | [((431, 448), 'bokeh.io.output_notebook', 'output_notebook', ([], {}), '()\n', (446, 448), False, 'from bokeh.io import output_notebook\n'), ((2962, 3132), 'bokeh.plotting.figure', 'figure', ([], {'width': '(800)', 'height': '(400)', 'x_axis_label': '"""time [s]"""', 'y_axis_label': '"""amplitude"""', 'tools': '"""pan,... |
import pandas as pd
import numpy as np
from urllib.parse import urlparse
import io
import gc
import re
import string
from utils import *
import tensorflow as tf
def load_vectors(fname,count_words):
fin = io.open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
n, d = map(int, fin.readline()... | [
"tensorflow.keras.preprocessing.text.Tokenizer",
"numpy.zeros",
"numpy.mean",
"numpy.array",
"numpy.random.multivariate_normal",
"numpy.reshape",
"io.open",
"numpy.cov",
"pandas.concat"
] | [((213, 281), 'io.open', 'io.open', (['fname', '"""r"""'], {'encoding': '"""utf-8"""', 'newline': '"""\n"""', 'errors': '"""ignore"""'}), "(fname, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n", (220, 281), False, 'import io\n'), ((1759, 1831), 'pandas.concat', 'pd.concat', (['[entity1, mention_dt, url_dt1, ... |
#!/usr/bin/env python3
import os, time, json
import numpy as np
import pandas as pd
from pprint import pprint
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.colors import LogNorm
import scipy.signal as signal
import argparse
import pdb
import tinydb as db
f... | [
"argparse.ArgumentParser",
"numpy.amin",
"matplotlib.pyplot.clf",
"numpy.histogram",
"matplotlib.colors.LogNorm",
"numpy.arange",
"numpy.exp",
"numpy.mean",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.hlines",
"pandas.DataFrame",
"matplotlib.pyplot.locator_p... | [((532, 555), 'pygama.utils.set_plot_style', 'set_plot_style', (['"""clint"""'], {}), "('clint')\n", (546, 555), False, 'from pygama.utils import set_plot_style\n'), ((669, 732), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""waveform viewer for mj60"""'}), "(description='waveform viewer... |
import numpy as np
import torch
import torchvision.transforms as transforms
import torch.utils.data as data
import os
import pickle
import numpy as np
import nltk
from PIL import Image
import cv2
import glob
import random
# deprecated
# def get_data_direct(img_size, texture_size,
# imgs_fn = None, ... | [
"numpy.uint8",
"cv2.waitKey",
"cv2.imwrite",
"random.sample",
"random.shuffle",
"numpy.zeros",
"numpy.transpose",
"numpy.argmin",
"PIL.Image.open",
"random.choice",
"numpy.asarray",
"numpy.random.randint",
"numpy.array",
"numpy.arange",
"numpy.random.choice",
"cv2.imshow",
"os.path.j... | [((3733, 3770), 'numpy.transpose', 'np.transpose', (['imgs_data', '[0, 3, 1, 2]'], {}), '(imgs_data, [0, 3, 1, 2])\n', (3745, 3770), True, 'import numpy as np\n'), ((9622, 9654), 'cv2.imwrite', 'cv2.imwrite', (['"""test_img.png"""', 'img'], {}), "('test_img.png', img)\n", (9633, 9654), False, 'import cv2\n'), ((10027, ... |
from gudhi.wasserstein import wasserstein_distance
import numpy as np
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): <NAME>
Copyright (C) 2019 Inria
... | [
"numpy.array",
"gudhi.wasserstein.wasserstein_distance",
"numpy.sqrt"
] | [((531, 582), 'numpy.array', 'np.array', (['[[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]]'], {}), '([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]])\n', (539, 582), True, 'import numpy as np\n'), ((595, 631), 'numpy.array', 'np.array', (['[[2.8, 4.45], [9.5, 14.1]]'], {}), '([[2.8, 4.45], [9.5, 14.1]])\n', (603, 631), True, 'imp... |
import numpy as np
import itertools
from graph_nets import utils_tf
from root_gnn.src.datasets.base import DataSet
n_node_features = 6
max_nodes = 3 # including the particle that decays
def num_particles(event):
return len(event) // n_node_features
def make_graph(event, debug=False):
# each particle contains... | [
"numpy.zeros",
"numpy.array",
"graph_nets.utils_tf.data_dicts_to_graphs_tuple"
] | [((1265, 1300), 'numpy.array', 'np.array', (['[x[0] for x in all_edges]'], {}), '([x[0] for x in all_edges])\n', (1273, 1300), True, 'import numpy as np\n'), ((1317, 1352), 'numpy.array', 'np.array', (['[x[1] for x in all_edges]'], {}), '([x[1] for x in all_edges])\n', (1325, 1352), True, 'import numpy as np\n'), ((207... |
# Load the necessary libraries
import matplotlib.pyplot as plt
import numpy
import pandas
import sklearn.cluster as cluster
import sklearn.metrics as metrics
bikeshare = pandas.read_csv('C:\\Users\\minlam\\Documents\\IIT\\Machine Learning\\Data\\BikeSharingDemand_Train.csv',
delimit... | [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"sklearn.cluster.KMeans",
"numpy.zeros",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.tree.export_graphviz",
"sklearn.metrics.silhouette_score",
"graphviz.Source",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
... | [((178, 308), 'pandas.read_csv', 'pandas.read_csv', (['"""C:\\\\Users\\\\minlam\\\\Documents\\\\IIT\\\\Machine Learning\\\\Data\\\\BikeSharingDemand_Train.csv"""'], {'delimiter': '""","""'}), "(\n 'C:\\\\Users\\\\minlam\\\\Documents\\\\IIT\\\\Machine Learning\\\\Data\\\\BikeSharingDemand_Train.csv'\n , delimiter=... |
from tfc import utfc
from tfc.utils import TFCDictRobust, egrad, NllsClass, MakePlot
import numpy as onp
import jax.numpy as np
from jax import vmap, jacfwd, jit, lax
import tqdm
import pickle
from scipy.optimize import fsolve
from scipy.integrate import simps
from time import process_time as timer
## TEST PARAMETE... | [
"jax.numpy.array",
"jax.numpy.dot",
"tfc.utils.egrad",
"tfc.utils.TFCDictRobust",
"time.process_time",
"scipy.optimize.fsolve",
"jax.numpy.finfo",
"jax.numpy.sqrt",
"numpy.ones",
"tfc.utils.NllsClass",
"jax.numpy.linalg.norm",
"jax.numpy.hstack",
"jax.numpy.zeros",
"jax.numpy.abs",
"tfc.... | [((754, 797), 'tfc.utfc', 'utfc', (['N', 'nCx', 'ms'], {'basis': '"""CP"""', 'x0': '(-1)', 'xf': '(1.0)'}), "(N, nCx, ms, basis='CP', x0=-1, xf=1.0)\n", (758, 797), False, 'from tfc import utfc\n'), ((804, 847), 'tfc.utfc', 'utfc', (['N', 'nCy', 'mc'], {'basis': '"""CP"""', 'x0': '(-1)', 'xf': '(1.0)'}), "(N, nCy, mc, ... |
"""
Data
================
data storage and manipulation classes; these should be sufficient to run the game without a display
"""
from enum import Enum
import numpy
class Facing(Enum):
YP = 0
XP = 1
ZN = 2
YN = 3
XN = 4
ZP = 5
# gives a directional delta array in hex coordinates for given Facing
de... | [
"numpy.add",
"numpy.subtract"
] | [((1398, 1433), 'numpy.add', 'numpy.add', (['self.position', 'direction'], {}), '(self.position, direction)\n', (1407, 1433), False, 'import numpy\n'), ((1442, 1482), 'numpy.add', 'numpy.add', (['self.momentum_next', 'direction'], {}), '(self.momentum_next, direction)\n', (1451, 1482), False, 'import numpy\n'), ((1867,... |
from icecube.icetray import OMKey
from icecube.simclasses import I3MapModuleKeyI3ExtraGeometryItemCylinder, I3ExtraGeometryItemCylinder
from icecube.dataclasses import I3Position, ModuleKey
from I3Tray import I3Units
import numpy as np
from os.path import expandvars
from_cable_shadow = expandvars("$I3_BUILD/ice-mod... | [
"icecube.simclasses.I3MapModuleKeyI3ExtraGeometryItemCylinder",
"numpy.radians",
"os.path.expandvars",
"icecube.dataclasses.I3Position",
"numpy.loadtxt"
] | [((291, 396), 'os.path.expandvars', 'expandvars', (['"""$I3_BUILD/ice-models/resources/models/cable_position/orientation.cable_shadow.txt"""'], {}), "(\n '$I3_BUILD/ice-models/resources/models/cable_position/orientation.cable_shadow.txt'\n )\n", (301, 396), False, 'from os.path import expandvars\n'), ((399, 496),... |
import itertools
import operator
import os
import pickle
import re
import sys
import time
import cv2
from keras import backend as K
from keras.layers import Input
from keras.models import Model
import skvideo.io
from keras_frcnn import roi_helpers
import keras_frcnn.resnet as nn
import numpy as np
video_folder = '..... | [
"numpy.argmax",
"os.popen",
"os.walk",
"keras.models.Model",
"keras.backend.image_dim_ordering",
"pickle.load",
"numpy.random.randint",
"cv2.rectangle",
"keras.layers.Input",
"sys.setrecursionlimit",
"os.path.join",
"os.path.abspath",
"numpy.transpose",
"numpy.max",
"re.findall",
"cv2.... | [((375, 425), 'os.path.abspath', 'os.path.abspath', (["(video_folder + videoName + '.mp4')"], {}), "(video_folder + videoName + '.mp4')\n", (390, 425), False, 'import os\n'), ((446, 508), 'os.path.abspath', 'os.path.abspath', (["(video_folder + 'OUTPUT/' + videoName + '.mp4')"], {}), "(video_folder + 'OUTPUT/' + videoN... |
import numpy as np # type: ignore
city_num = 20
file_path = "./coordinates/"
output_file = "random_" + str(city_num) + "_cities.csv"
if __name__ == "__main__":
    # sample city coordinates from a "continuous uniform" distribution over [0, 1)
np_cities = np.random.random((city_num, 2))
np.savetxt(file_path + output_file, np_cities, delimiter="... | [
"numpy.savetxt",
"numpy.random.random"
] | [((226, 257), 'numpy.random.random', 'np.random.random', (['(city_num, 2)'], {}), '((city_num, 2))\n', (242, 257), True, 'import numpy as np\n'), ((262, 323), 'numpy.savetxt', 'np.savetxt', (['(file_path + output_file)', 'np_cities'], {'delimiter': '""","""'}), "(file_path + output_file, np_cities, delimiter=',')\n", (... |
"""Contains DeepSpeech2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import time
import logging
import gzip
import copy
import numpy as np
import inspect
from utils.decoder.swig_wrapper import Scorer
from utils.decoder.swig_... | [
"copy.deepcopy",
"gzip.open",
"numpy.shape",
"inspect.isgeneratorfunction",
"utils.decoder.swig_wrapper.Scorer",
"utils.decoder.swig_wrapper.ctc_beam_search_decoder_batch"
] | [((3843, 4070), 'utils.decoder.swig_wrapper.ctc_beam_search_decoder_batch', 'ctc_beam_search_decoder_batch', ([], {'probs_split': 'probs_split', 'vocabulary': 'vocab_list', 'beam_size': 'beam_size', 'num_processes': 'num_processes', 'ext_scoring_func': 'self._ext_scorer', 'cutoff_prob': 'cutoff_prob', 'cutoff_top_n': '... |
import os
import pathlib
import re
import time
import sys
import json
import cv2
import h5py
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib as mpl
from scipy.s... | [
"PyQt5.QtCore.pyqtSignal",
"numpy.sum",
"numpy.abs",
"numpy.argmax",
"matplotlib.pyplot.axes",
"numpy.empty",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtGui.QColor",
"numpy.iinfo",
"numpy.clip",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"PyQt5.QtWidgets.QVBoxLayout",
"matplotlib.pyplot.fig... | [((1200, 1216), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1209, 1216), True, 'import matplotlib.pyplot as plt\n'), ((2367, 2474), 'numpy.array', 'np.array', (['[[0, 0, 0, 0.3], [0, 0, 1, 1], [0, 0.7, 0, 1], [1, 0, 0, 1], [0.7, 0.5, 0, 1]]'], {'dtype': '"""float"""'}), "([[0, 0, 0, 0.3],... |
import io
import os
import unittest
import numpy as np
from sklearn.linear_model import LogisticRegression
from dragnet import Extractor
from dragnet.blocks import TagCountNoCSSReadabilityBlockifier
from dragnet.util import get_and_union_features
from dragnet.compat import str_cast
with io.open(os.path.join('test',... | [
"unittest.main",
"dragnet.Extractor",
"numpy.flatnonzero",
"dragnet.util.get_and_union_features",
"sklearn.linear_model.LogisticRegression",
"dragnet.compat.str_cast",
"numpy.array",
"dragnet.blocks.TagCountNoCSSReadabilityBlockifier",
"os.path.join"
] | [((2451, 2466), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2464, 2466), False, 'import unittest\n'), ((300, 356), 'os.path.join', 'os.path.join', (['"""test"""', '"""datafiles"""', '"""models_testing.html"""'], {}), "('test', 'datafiles', 'models_testing.html')\n", (312, 356), False, 'import os\n'), ((521, 55... |
"""Copyright © 2020-present, Swisscom (Schweiz) AG.
All rights reserved."""
from .feature import Feature
from scipy.stats import entropy
import numpy as np
class KLDivergence(Feature):
r"""
A feature that computes the KL divergence between the
logits of each data points given by a classifier mean logits
... | [
"numpy.repeat"
] | [((1168, 1231), 'numpy.repeat', 'np.repeat', (['mean_logit[..., np.newaxis]', 'logits.shape[1]'], {'axis': '(1)'}), '(mean_logit[..., np.newaxis], logits.shape[1], axis=1)\n', (1177, 1231), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import time
from pathlib import Path
from experiments.evaluation import calculate_metrics
from causal_estimators.ipw_estimator import IPWEstimator
from causal_estimators.standardization_estimator import \
StandardizationEstimator, StratifiedStandardizationEstimator
from exper... | [
"sklearn.preprocessing.StandardScaler",
"numpy.logspace",
"experiments.evaluation.calculate_metrics",
"sklearn.tree.DecisionTreeClassifier",
"loading.load_from_folder",
"pathlib.Path",
"sklearn.svm.SVC",
"sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis",
"causal_estimators.standardizatio... | [((1465, 1536), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'UndefinedMetricWarning'}), "(action='ignore', category=UndefinedMetricWarning)\n", (1486, 1536), False, 'import warnings\n'), ((1754, 1769), 'pathlib.Path', 'Path', (['"""results"""'], {}), "('results')\n", (1... |
import os
import json
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils as utils
import sys
import argparse
import matplotlib
import pdb
import numpy as np
import time
import random
import re
import time
import matplotlib.pyplot as plt
from tqdm import tqdm
from tqdm import trange
from s... | [
"numpy.random.seed",
"argparse.ArgumentParser",
"random.sample",
"torch.manual_seed",
"torch.cuda.manual_seed",
"random.seed"
] | [((674, 696), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (685, 696), False, 'import random\n'), ((749, 774), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (763, 774), True, 'import numpy as np\n'), ((779, 807), 'torch.manual_seed', 'torch.manual_seed', (['args.see... |
import numpy as np
import torch
class UnityEnv():
"""Unity Reacher Environment Wrapper
https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Learning-Environment-Examples.md
"""
def __init__(self, env_file='data/Reacher.exe', no_graphics=True, mlagents=False):
if mlagents:
... | [
"numpy.array",
"unityagents.UnityEnvironment",
"numpy.clip"
] | [((468, 529), 'unityagents.UnityEnvironment', 'UnityEnvironment', ([], {'file_name': 'env_file', 'no_graphics': 'no_graphics'}), '(file_name=env_file, no_graphics=no_graphics)\n', (484, 529), False, 'from unityagents import UnityEnvironment\n'), ((1213, 1236), 'numpy.clip', 'np.clip', (['actions', '(-1)', '(1)'], {}), ... |
import matplotlib.pyplot as plt
import numpy as np
import re
import os
import sys
from matplotlib import rcParams
from cycler import cycler
import itertools
if len(sys.argv) < 2:
print("Especifique la carpeta con resultados con la siguiente sintaxis:")
print("python %s carpeta_resultados" % sys.argv[0])
e... | [
"matplotlib.pyplot.title",
"cycler.cycler",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"re.match",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.rc",
"numpy.arange",
"itertools.cycle",
"matplotlib.pyplot.xlabel",
"matp... | [((753, 779), 'os.listdir', 'os.listdir', (['results_folder'], {}), '(results_folder)\n', (763, 779), False, 'import os\n'), ((1396, 1458), 'itertools.cycle', 'itertools.cycle', (["('s', 'X', '+', 'o', '*', '>', 'h', 'd', '.')"], {}), "(('s', 'X', '+', 'o', '*', '>', 'h', 'd', '.'))\n", (1411, 1458), False, 'import ite... |
import csv
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
from PIL import Image
# for testing purposes, remove this later!
from sys import exit
"""Data visualization on the Airbnb New York dataset from Kaggle.
The dataset provides 16 pieces of data in the followi... | [
"matplotlib.pyplot.title",
"csv.reader",
"numpy.clip",
"numpy.arange",
"matplotlib.pyplot.gca",
"numpy.unique",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.average",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ba... | [((2392, 2422), 'PIL.Image.open', 'Image.open', (['"""new_york_map.png"""'], {}), "('new_york_map.png')\n", (2402, 2422), False, 'from PIL import Image\n'), ((2551, 2599), 'numpy.unique', 'np.unique', (["data['room_type']"], {'return_counts': '(True)'}), "(data['room_type'], return_counts=True)\n", (2560, 2599), True, ... |
# -*- coding: utf-8 -*-
import cv2
import argparse
import time
import numpy as np
from training import Model
classes = []
FRAME_SIZE = 256
font = cv2.FONT_HERSHEY_SIMPLEX
switch = False
def detect(image):
crop_image = image[112:112 + FRAME_SIZE, 192:192 + FRAME_SIZE]
result = model.predict(crop_image)
in... | [
"cv2.putText",
"argparse.ArgumentParser",
"numpy.argmax",
"cv2.waitKey",
"cv2.imwrite",
"time.time",
"cv2.VideoCapture",
"training.Model",
"cv2.destroyWindow",
"cv2.rectangle",
"cv2.imshow",
"cv2.namedWindow"
] | [((326, 343), 'numpy.argmax', 'np.argmax', (['result'], {}), '(result)\n', (335, 343), True, 'import numpy as np\n'), ((348, 419), 'cv2.putText', 'cv2.putText', (['image', 'classes[index]', '(192, 112)', 'font', '(1)', '(0, 255, 0)', '(2)'], {}), '(image, classes[index], (192, 112), font, 1, (0, 255, 0), 2)\n', (359, 4... |
# This script automates running MachLine for the Weber and Brebner results
import numpy as np
import json
import subprocess
import time
import multiprocessing as mp
import os
# Record and print the time required to run MachLine
start_time = time.time()
def mach_iter(AoA, Node, formulation, freestream):
... | [
"json.dump",
"json.loads",
"time.time",
"numpy.sin",
"subprocess.call",
"numpy.cos",
"multiprocessing.Pool",
"os.chdir"
] | [((252, 263), 'time.time', 'time.time', ([], {}), '()\n', (261, 263), False, 'import time\n'), ((2816, 2839), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (2826, 2839), False, 'import json\n'), ((3313, 3334), 'os.chdir', 'os.chdir', (['"""../../../"""'], {}), "('../../../')\n", (3321, 3334), Fa... |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.3
# kernelspec:
# display_name: wikirecs
# language: python
# name: wikirecs
# ---
... | [
"matplotlib.pyplot.title",
"matplotlib.rc",
"numpy.sum",
"wikirecs.display_recs_with_history",
"wikirecs.print_user_history",
"recommenders.ImplicitCollaborativeRecommender",
"recommenders.MostRecentRecommender",
"recommenders.MostFrequentRecommender",
"matplotlib.pyplot.figure",
"numpy.arange",
... | [((1071, 1088), 'itables.javascript.load_datatables', 'load_datatables', ([], {}), '()\n', (1086, 1088), False, 'from itables.javascript import load_datatables\n'), ((1109, 1147), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(100)'], {}), "('display.max_rows', 100)\n", (1122, 1147), True, 'import ... |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 23 12:03:59 2017
@author: Kevin
"""
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import LeaveOneGroupOut,GridSearchCV
dataPath = 'UTDallas/'
dataName = 'UTD'
nJobs = 12 # Numb... | [
"sklearn.metrics.roc_auc_score",
"numpy.shape",
"numpy.where",
"numpy.arange",
"numpy.loadtxt",
"sklearn.neural_network.MLPClassifier",
"sklearn.model_selection.LeaveOneGroupOut"
] | [((480, 539), 'numpy.loadtxt', 'np.loadtxt', (["(dataPath + dataName + '_all.csv')"], {'delimiter': '""","""'}), "(dataPath + dataName + '_all.csv', delimiter=',')\n", (490, 539), True, 'import numpy as np\n'), ((549, 608), 'numpy.loadtxt', 'np.loadtxt', (["(dataPath + dataName + '_acc.csv')"], {'delimiter': '""","""'}... |
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
# vertices: frames x meshVerNum x 3
# trifaces: facePolygonNum x 3 = 22800 x 3
def ComputeNormal(vertices, trifaces):
if vertices.shape[0] > 5000:
print('ComputeNormal: Warning: too big to compute {0}'.format(vertices.shape) )
... | [
"numpy.ravel",
"numpy.cross",
"numpy.zeros",
"numpy.max",
"sklearn.preprocessing.normalize",
"numpy.reshape",
"torch.reshape",
"torch.nn.functional.normalize",
"numpy.unique",
"torch.from_numpy"
] | [((620, 642), 'numpy.reshape', 'np.reshape', (['U', '[-1, 3]'], {}), '(U, [-1, 3])\n', (630, 642), True, 'import numpy as np\n'), ((650, 672), 'numpy.reshape', 'np.reshape', (['V', '[-1, 3]'], {}), '(V, [-1, 3])\n', (660, 672), True, 'import numpy as np\n'), ((690, 704), 'numpy.cross', 'np.cross', (['U', 'V'], {}), '(U... |
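The extract above shows the core of the routine: after reshaping the per-face edge vectors U and V to `[-1, 3]`, the face normal is their cross product. A standalone sketch of that idea, not the author's exact function (which also accumulates per-vertex normals):

```python
import numpy as np

def face_normals(vertices, trifaces):
    # vertices: (numVerts, 3) positions; trifaces: (numFaces, 3) vertex indices
    v0 = vertices[trifaces[:, 0]]
    v1 = vertices[trifaces[:, 1]]
    v2 = vertices[trifaces[:, 2]]
    n = np.cross(v1 - v0, v2 - v0)  # un-normalized per-face normals
    return n / (np.linalg.norm(n, axis=1, keepdims=True) + 1e-12)  # unit length
```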
import os
import numpy as np
import cv2
import sys
#sys.path.insert(0, '/home/kumarak/Desktop/campus_temp/pred2/')
#import get_dataset_colormap
read="./all_at_100_nocol/"
gtread=open("./thinglabels.txt").readlines()
gt={}
#print(gtread)
for i in gtread:
gt[int(i.split(':')[0])]=i.split(':')[1][1:-1]
#print(gt)
#map=... | [
"cv2.imread",
"os.listdir",
"numpy.unique"
] | [((435, 451), 'os.listdir', 'os.listdir', (['read'], {}), '(read)\n', (445, 451), False, 'import os\n'), ((508, 535), 'cv2.imread', 'cv2.imread', (['(read + filename)'], {}), '(read + filename)\n', (518, 535), False, 'import cv2\n'), ((560, 574), 'numpy.unique', 'np.unique', (['img'], {}), '(img)\n', (569, 574), True, ... |
import cv2
import numpy as np
img = cv2.imread('imagem.jpg')
##img = cv2.imread('imagem3.jpg',0)
cv2.imshow('imagem',img)
img = cv2.GaussianBlur(img, (7, 5), 0)
cv2.imshow('imagemblur',img)
gray_img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
circles = cv2.HoughCircles(gray_img,cv2.HOUGH_GRADIENT,1,30,
... | [
"cv2.GaussianBlur",
"cv2.HoughCircles",
"cv2.circle",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imread",
"numpy.around",
"cv2.imshow"
] | [((40, 64), 'cv2.imread', 'cv2.imread', (['"""imagem.jpg"""'], {}), "('imagem.jpg')\n", (50, 64), False, 'import cv2\n'), ((103, 128), 'cv2.imshow', 'cv2.imshow', (['"""imagem"""', 'img'], {}), "('imagem', img)\n", (113, 128), False, 'import cv2\n'), ((135, 167), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(7, 5)... |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from unittest import mock
import numpy as np
import pandas as pd
from ax.core.arm import Arm
f... | [
"ax.core.arm.Arm",
"ax.metrics.chemistry.ChemistryMetric",
"ax.utils.testing.core_stubs.get_trial",
"numpy.isnan"
] | [((1119, 1181), 'ax.metrics.chemistry.ChemistryMetric', 'ChemistryMetric', ([], {'name': '"""test_metric"""', 'problem_type': 'problem_type'}), "(name='test_metric', problem_type=problem_type)\n", (1134, 1181), False, 'from ax.metrics.chemistry import ChemistryMetric, ChemistryProblemType\n'), ((2811, 2822), 'ax.utils.... |
import rospy
from sensor_msgs.msg import PointCloud2
from sensor_msgs import point_cloud2
from geometry_msgs.msg import PoseArray, Pose
from tf.transformations import euler_from_quaternion
import time
import math
import struct
import ctypes
from scipy import ndimage
import matplotlib.pyplot as plt
from nav_msgs.msg im... | [
"rospy.Subscriber",
"sensor_msgs.point_cloud2.read_points",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axes",
"numpy.asarray",
"numpy.zeros",
"rospy.Publisher",
"time.time",
"numpy.append",
"numpy.sin",
"numpy.array",
"rospy.init_node",
"numpy.cos",
"tf.transformations.euler_from_quatern... | [((602, 658), 'rospy.Publisher', 'rospy.Publisher', (['"""/build_map3D"""', 'PoseArray'], {'queue_size': '(1)'}), "('/build_map3D', PoseArray, queue_size=1)\n", (617, 658), False, 'import rospy\n'), ((751, 762), 'time.time', 'time.time', ([], {}), '()\n', (760, 762), False, 'import time\n'), ((810, 839), 'rospy.init_no... |
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... | [
"tensorflow.cond",
"numpy.argmax",
"tensorflow.identity",
"rlgraph.get_backend",
"torch.cat",
"tensorflow.assign",
"tensorflow.one_hot",
"rlgraph.components.ContainerMerger",
"tensorflow.concat",
"tensorflow.no_op",
"torch.exp",
"tensorflow.exp",
"rlgraph.spaces.BoolBox",
"rlgraph.utils.de... | [((1376, 1389), 'rlgraph.get_backend', 'get_backend', ([], {}), '()\n', (1387, 1389), False, 'from rlgraph import get_backend\n'), ((5665, 5700), 'rlgraph.utils.decorators.rlgraph_api', 'rlgraph_api', ([], {'must_be_complete': '(False)'}), '(must_be_complete=False)\n', (5676, 5700), False, 'from rlgraph.utils.decorator... |
import os
from PIL import Image
import numpy as np
## Compute the mean and variance of an image dataset
root_path = '../train_data'
_filename = os.listdir(root_path)
filename = []
for _file in _filename:
if not _file.endswith('.txt'):
filename.append(_file)
# running sums for the per-channel means
R_channel_m = 0
G_channel_m = 0
B_channel_m = 0
# running sums for the per-channel variances
R_channel_s = 0
G_ch... | [
"numpy.sum",
"numpy.power",
"numpy.array",
"os.path.join",
"os.listdir",
"numpy.sqrt"
] | [((112, 133), 'os.listdir', 'os.listdir', (['root_path'], {}), '(root_path)\n', (122, 133), False, 'import os\n'), ((1446, 1472), 'numpy.sqrt', 'np.sqrt', (['(B_channel_s / num)'], {}), '(B_channel_s / num)\n', (1453, 1472), True, 'import numpy as np\n'), ((1479, 1505), 'numpy.sqrt', 'np.sqrt', (['(G_channel_s / num)']... |
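With the comments translated, the intent is clear: accumulate per-channel sums for the mean and per-channel squared sums for the variance, then divide by the total pixel count and take the square root for the standard deviation (the `np.sqrt(B_channel_s / num)` calls in the extract). A compact one-pass variant of the same computation, assuming every image loads as an H x W x 3 array:

```python
import os
import numpy as np
from PIL import Image

root_path = '../train_data'
files = [f for f in os.listdir(root_path) if not f.endswith('.txt')]

channel_sum = np.zeros(3)
channel_sq_sum = np.zeros(3)
num = 0
for name in files:
    img = np.array(Image.open(os.path.join(root_path, name)), dtype=np.float64)
    channel_sum += img.sum(axis=(0, 1))          # sum of pixel values per channel
    channel_sq_sum += np.power(img, 2).sum(axis=(0, 1))
    num += img.shape[0] * img.shape[1]           # total pixel count

mean = channel_sum / num
std = np.sqrt(channel_sq_sum / num - mean ** 2)   # Var[X] = E[X^2] - E[X]^2
```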
import argparse
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils import data
from skimage import color
from PIL import Image
import matplotlib.pyplot as plt
from cnn_model import Model
# from cnn_model2... | [
"cnn_model.Model",
"matplotlib.pyplot.show",
"keras.datasets.cifar10.load_data",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"sklearn.model_selection.train_test_split",
"torch.cat",
"torch.Tensor",
"numpy.array",
"torch.utils.data.TensorDataset",
"torch.nn.functional.interpolate",
... | [((2261, 2280), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (2278, 2280), False, 'from keras.datasets import cifar10\n'), ((2357, 2411), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, ran... |
import myutils
from torch.nn import Module, Parameter
import torch.nn.functional as F
import torch
import torch.nn as nn
import numpy as np
class TripletLoss(Module):
def __init__(self, instance, margin=1.0):
super(TripletLoss, self).__init__()
self.margin = margin
self.instance = instance
... | [
"torch.mean",
"torch.t",
"torch.from_numpy",
"torch.eye",
"torch.where",
"torch.exp",
"numpy.where",
"torch.max",
"torch.arange",
"torch.nn.functional.log_softmax",
"torch.pow",
"torch.zeros",
"torch.tensor",
"torch.no_grad",
"torch.sum",
"torch.min",
"numpy.unique",
"torch.transpo... | [((564, 601), 'torch.arange', 'torch.arange', (['(0)', 'nB'], {'dtype': 'torch.long'}), '(0, nB, dtype=torch.long)\n', (576, 601), False, 'import torch\n'), ((2262, 2299), 'torch.arange', 'torch.arange', (['(0)', 'nB'], {'dtype': 'torch.long'}), '(0, nB, dtype=torch.long)\n', (2274, 2299), False, 'import torch\n'), ((3... |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# moldynplot.relaxation.py
#
# Copyright (C) 2012-2017 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Processes NMR relaxation and related data
"""
###########... | [
"numpy.abs",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.argmin",
"numpy.isnan",
"numpy.exp",
"multiprocessing.Queue",
"numpy.random.normal",
"pandas.DataFrame",
"numpy.std",
"numpy.loadtxt",
"pandas.concat",
"re.match",
"scipy.optimize.curve_fit",
"os.path.expandvars",
"panda... | [((1673, 1681), 'multiprocessing.Queue', 'Queue', (['(1)'], {}), '(1)\n', (1678, 1681), False, 'from multiprocessing import Queue, Process\n'), ((1698, 1705), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (1703, 1705), False, 'from multiprocessing import Queue, Process\n'), ((5319, 5349), 'pandas.DataFrame.from_i... |
import numpy as np
import matplotlib.pyplot as plt
import VoigtFit
import pickle
### Fit DLA towards quasar Q1313+1441
### Observed in X-shooter P089.A-0068
z_DLA = 1.7941
logNHI = 21.3, 0.1 # value, uncertainty
# If log(NHI) is not known use:
#logNHI = None
#### Load UVB and VIS data:
UVB_fname = 'data/test_UVB_1... | [
"VoigtFit.DataSet",
"numpy.loadtxt",
"VoigtFit.SaveDataSet"
] | [((424, 458), 'numpy.loadtxt', 'np.loadtxt', (['UVB_fname'], {'unpack': '(True)'}), '(UVB_fname, unpack=True)\n', (434, 458), True, 'import numpy as np\n'), ((487, 521), 'numpy.loadtxt', 'np.loadtxt', (['VIS_fname'], {'unpack': '(True)'}), '(VIS_fname, unpack=True)\n', (497, 521), True, 'import numpy as np\n'), ((533, ... |
from time import time
import os
import numpy as np
from scipy.stats import multivariate_normal
from experiments.lnpdfs.create_target_lnpfs import build_Goodwin_grad
from sampler.SVGD.python.svgd import SVGD as SVGD
unknown_params = [1, 2] + np.arange(4, 12).tolist()
num_dimensions = len(unknown_params)
seed=1
target_l... | [
"numpy.atleast_2d",
"os.makedirs",
"os.path.dirname",
"numpy.zeros",
"os.path.exists",
"time.time",
"numpy.array",
"numpy.arange",
"numpy.eye",
"numpy.savez",
"sampler.SVGD.python.svgd.SVGD",
"numpy.sqrt"
] | [((711, 731), 'numpy.atleast_2d', 'np.atleast_2d', (['theta'], {}), '(theta)\n', (724, 731), True, 'import numpy as np\n'), ((1126, 1132), 'time.time', 'time', ([], {}), '()\n', (1130, 1132), False, 'from time import time\n'), ((1227, 1233), 'time.time', 'time', ([], {}), '()\n', (1231, 1233), False, 'from time import ... |
import numpy as np
import hypers as hp
class TestLearning:
def setup(self):
self.n3 = np.random.rand(10, 10, 30)
self.n4 = np.random.rand(10, 10, 10, 30)
self.n5 = np.random.rand(10, 10, 10, 2, 30)
self.h3 = hp.hparray(self.n3)
self.h4 = hp.hparray(self.n4)
self.h5... | [
"numpy.random.rand",
"hypers.hparray"
] | [((100, 126), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)', '(30)'], {}), '(10, 10, 30)\n', (114, 126), True, 'import numpy as np\n'), ((145, 175), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)', '(10)', '(30)'], {}), '(10, 10, 10, 30)\n', (159, 175), True, 'import numpy as np\n'), ((194, 227), 'num... |
#! /usr/bin/env python
"""
runcalsaa.py - Module to perform SAA correction in the CALNIC pipeline
(After CALNICA, before CALNICB) by running the PEDSUB, BEP, and SAACLEAN tasks.
PEDSUB is run only to improve the calculations of the SAA persistence and BEP
signature; no pedestal correction is actually applied to the fin... | [
"pyraf.iraf.flprcache",
"os.remove",
"os.rename",
"nictools.nic_rem_persist.NicRemPersist",
"os.path.exists",
"time.strftime",
"pyraf.iraf.pedsub",
"pyraf.iraf.saaclean.unlearn",
"time.time",
"os.path.isfile",
"numpy.where",
"time.localtime",
"astropy.io.fits.open",
"pyraf.iraf.pedsub.unle... | [((3892, 3912), 'astropy.io.fits.open', 'pyfits.open', (['calname'], {}), '(calname)\n', (3903, 3912), True, 'from astropy.io import fits as pyfits\n'), ((6895, 6922), 'os.rename', 'os.rename', (['F_Final', 'calname'], {}), '(F_Final, calname)\n', (6904, 6922), False, 'import os, time, sys\n'), ((7553, 7588), 'astropy.... |
import gym
import numpy as np
from gym_UR3.envs.mujoco import MujocoUR3Env
import time
def main():
env = gym.make('UR3-v0')
Da = env.action_space.shape[0]
obs=env.reset()
start = time.time()
for i in range(100):
env.reset()
print('{}th episode'.format(i+1))
for j i... | [
"numpy.random.uniform",
"numpy.zeros",
"gym.make",
"time.time"
] | [((115, 133), 'gym.make', 'gym.make', (['"""UR3-v0"""'], {}), "('UR3-v0')\n", (123, 133), False, 'import gym\n'), ((201, 212), 'time.time', 'time.time', ([], {}), '()\n', (210, 212), False, 'import time\n'), ((581, 592), 'time.time', 'time.time', ([], {}), '()\n', (590, 592), False, 'import time\n'), ((425, 436), 'nump... |
import os
import numpy as np
from . import __file__ as filepath
__all__ = ["Inoue14"]
class Inoue14(object):
def __init__(self, scale_tau=1.):
"""
IGM absorption from Inoue et al. (2014)
Parameters
----------
scale_tau : float
Parameter multiplied to t... | [
"numpy.zeros_like",
"scipy.interpolate.CubicSpline",
"os.path.dirname",
"numpy.exp",
"numpy.loadtxt",
"os.path.join"
] | [((692, 726), 'os.path.join', 'os.path.join', (['path', '"""LAFcoeff.txt"""'], {}), "(path, 'LAFcoeff.txt')\n", (704, 726), False, 'import os\n'), ((746, 780), 'os.path.join', 'os.path.join', (['path', '"""DLAcoeff.txt"""'], {}), "(path, 'DLAcoeff.txt')\n", (758, 780), False, 'import os\n'), ((801, 834), 'numpy.loadtxt... |
"""
Work in progress for reading some other kind of complex NITF.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "<NAME>"
import logging
from typing import Union, Tuple, List, Optional, Callable, Sequence
import copy
from datetime import datetime
import numpy
from scipy.constants import foot
from sarpy.geomet... | [
"sarpy.io.general.format_function.ComplexFormatFunction._forward_magnitude_theta",
"sarpy.io.complex.sicd_elements.GeoData.SCPType",
"sarpy.io.complex.sicd_elements.RadarCollection.RadarCollectionType",
"sarpy.io.general.format_function.ComplexFormatFunction",
"logging.error",
"sarpy.io.complex.sicd_eleme... | [((2044, 2071), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2061, 2071), False, 'import logging\n'), ((24348, 24410), 'sarpy.io.complex.sicd_elements.SICD.SICDType', 'SICDType', ([], {'CollectionInfo': 'collection_info', 'ImageData': 'image_data'}), '(CollectionInfo=collection_info, I... |
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
import torch
import time
from torch.autograd import Variable
import captcha_setting
import my_dataset
from captcha_cnn_model import CNN
def main():
    print('Starting prediction on the images')
cnn = CNN()
cnn.eval()
cnn.load_state_dict(torch.load('model.pkl'))
... | [
"pandas.DataFrame",
"torch.autograd.Variable",
"torch.load",
"numpy.hstack",
"time.time",
"captcha_cnn_model.CNN",
"my_dataset.get_predict_data_loader"
] | [((246, 251), 'captcha_cnn_model.CNN', 'CNN', ([], {}), '()\n', (249, 251), False, 'from captcha_cnn_model import CNN\n'), ((383, 419), 'my_dataset.get_predict_data_loader', 'my_dataset.get_predict_data_loader', ([], {}), '()\n', (417, 419), False, 'import my_dataset\n'), ((1442, 1461), 'numpy.hstack', 'np.hstack', (['... |
#!/usr/bin/python3
# Script to shape the desired output to be processed (MMODES)
# the datatable way
# @author: <NAME>
# Creation: 09/06/2019
import os
import re
import numpy as np
import datatable as dt
from datatable import f
def log(cons, media):
'''
    Writes information about the consortium object to a file
''... | [
"os.path.isfile",
"datatable.fread",
"numpy.linspace",
"re.compile"
] | [((359, 396), 're.compile', 're.compile', (['"""#+ SIMULATION (\\\\d+) #+"""'], {}), "('#+ SIMULATION (\\\\d+) #+')\n", (369, 396), False, 'import re\n'), ((404, 424), 'os.path.isfile', 'os.path.isfile', (['logf'], {}), '(logf)\n', (418, 424), False, 'import os\n'), ((1072, 1103), 'numpy.linspace', 'np.linspace', (['(d... |
"""
Utility routines for the maximum entropy module.
Most of them are either Python replacements for the corresponding Fortran
routines or wrappers around matrices to allow the maxent module to
manipulate ndarrays, scipy sparse matrices, and PySparse matrices through a
common interface.
Perhaps the logsumexp() function belon... | [
"math.exp",
"numpy.log",
"numpy.std",
"numpy.empty",
"numpy.asarray",
"cmath.log",
"numpy.transpose",
"numpy.ones",
"numpy.arange",
"numpy.array",
"numpy.reshape",
"cmath.exp",
"numpy.dot",
"math.log",
"builtins.range",
"doctest.testmod"
] | [((3174, 3199), 'numpy.empty', 'np.empty', (['n'], {'dtype': 'object'}), '(n, dtype=object)\n', (3182, 3199), True, 'import numpy as np\n'), ((6991, 7002), 'math.log', 'math.log', (['s'], {}), '(s)\n', (6999, 7002), False, 'import math\n'), ((25128, 25145), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (25143... |
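The logsumexp mentioned in the docstring is the standard stable evaluation of log(sum(exp(x))): shifting by m = max(x) leaves the value unchanged but keeps the exponentials from overflowing. A minimal sketch, not the module's own implementation:

```python
import numpy as np

def logsumexp(x):
    # log(sum(exp(x))) computed stably: shift by the max so no exponential overflows
    m = np.max(x)
    return m + np.log(np.sum(np.exp(x - m)))
```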
import numpy as np
def softmax(x, axis=None):
    # Shift by the max for numerical stability; softmax is invariant to this shift.
    x_max = np.max(x, axis=axis, keepdims=True)
    e_x = np.exp(x - x_max)
    # Normalize so the outputs along `axis` sum to one.
    e_sum = np.sum(e_x, axis=axis, keepdims=True)
    return e_x / e_sum
"numpy.max",
"numpy.sum",
"numpy.exp"
] | [((57, 92), 'numpy.max', 'np.max', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (63, 92), True, 'import numpy as np\n'), ((101, 116), 'numpy.exp', 'np.exp', (['(x - max)'], {}), '(x - max)\n', (107, 116), True, 'import numpy as np\n'), ((127, 164), 'numpy.sum', 'np.sum', (['e_x'],... |
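Subtracting the per-axis maximum before exponentiating leaves the result unchanged but prevents `np.exp` from overflowing. A quick check of the function above on inputs that would break a naive implementation:

```python
x = np.array([1000.0, 1000.1, 999.9])  # naive np.exp(x) overflows to inf
print(softmax(x))                     # ~[0.332, 0.367, 0.301], sums to 1
```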
import cv2
import numpy as np
import statistics as stat
class optical_braille_recognition():
def __init__(self) -> None:
pass
def make_histogram_y(self, img):
'''
        Organizes the horizontal projection data of the image
        Input:
            img -> image array
... | [
"numpy.zeros",
"cv2.warpAffine",
"numpy.max",
"numpy.array",
"numpy.arange",
"statistics.mode",
"cv2.getRotationMatrix2D"
] | [((467, 483), 'numpy.zeros', 'np.zeros', (['height'], {}), '(height)\n', (475, 483), True, 'import numpy as np\n'), ((1098, 1113), 'numpy.zeros', 'np.zeros', (['width'], {}), '(width)\n', (1106, 1113), True, 'import numpy as np\n'), ((2554, 2573), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (2562, ... |
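The translated docstring describes a projection profile: for every image row, accumulate how much "ink" it contains, so rows of braille dots show up as peaks. A standalone sketch of the idea (the class itself works on a cv2 image array and also rotates it first):

```python
import numpy as np

def horizontal_projection(img, threshold=128):
    # img: 2-D grayscale array; dark pixels (dots) assumed below the threshold
    return (img < threshold).sum(axis=1)  # one dark-pixel count per image row
```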
import heapq as hq
import math
import numpy as np
from models.geometry_utils import *
# TODO: Generalize to 3D?
class Node:
def __init__(self, pos, parent=None, g_cost=math.inf, f_cost=math.inf):
self.pos = pos
self.parent = parent
self.g_cost = g_cost
self.f_cost =... | [
"heapq.heappush",
"math.ceil",
"math.floor",
"heapq.heappop",
"heapq._siftdown",
"heapq._siftup",
"numpy.array",
"numpy.linalg.norm"
] | [((1065, 1117), 'math.ceil', 'math.ceil', (['((bounds[1][0] - bounds[0][0]) / cell_size)'], {}), '((bounds[1][0] - bounds[0][0]) / cell_size)\n', (1074, 1117), False, 'import math\n'), ((1137, 1189), 'math.ceil', 'math.ceil', (['((bounds[1][1] - bounds[0][1]) / cell_size)'], {}), '((bounds[1][1] - bounds[0][1]) / cell_... |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from os.path import join, dirname, exists
from os import makedirs, pardir
FOLDER_REAL_DATA = join(dirname(__file__), 'real_data')
FOLDER_SIMULATOR_INPUT = join(dirname(__file__), 'simulator_input')
FOLDER_REAL_DATA_ANALYSIS = join(FOLDER_REAL_DATA,... | [
"numpy.sum",
"pandas.read_csv",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.figure",
"os.path.join",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"os.path.dirname",
"os.path.exists",
"numpy.cumsum",
"numpy.intersect1d",
"matplotlib.pyplot.xticks",
"numpy.average",
"matplotlib.pyplot.lege... | [((298, 332), 'os.path.join', 'join', (['FOLDER_REAL_DATA', '"""analysis"""'], {}), "(FOLDER_REAL_DATA, 'analysis')\n", (302, 332), False, 'from os.path import join, dirname, exists\n'), ((357, 392), 'os.path.join', 'join', (['pardir', '"""experiments/results"""'], {}), "(pardir, 'experiments/results')\n", (361, 392), ... |
import numpy as np
from numpy import linalg as la
import invprob.sparse as sparse
def fb_lasso(A, y, reg_param, iter_nb, x_ini=None, inertia=False, verbose=False):
''' Use the Forward-Backward algorithm to find a minimizer of:
reg_param*norm(x,1) + 0.5*norm(Ax-y,2)**2
Eventually outputs the f... | [
"numpy.abs",
"numpy.zeros",
"numpy.ones",
"numpy.arange",
"numpy.linalg.norm"
] | [((628, 645), 'numpy.zeros', 'np.zeros', (['iter_nb'], {}), '(iter_nb)\n', (636, 645), True, 'import numpy as np\n'), ((665, 682), 'numpy.zeros', 'np.zeros', (['iter_nb'], {}), '(iter_nb)\n', (673, 682), True, 'import numpy as np\n'), ((719, 750), 'numpy.zeros', 'np.zeros', (['(A.shape[1], iter_nb)'], {}), '((A.shape[1... |
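For the objective in the docstring, reg_param*norm(x,1) + 0.5*norm(Ax-y,2)**2, each forward-backward iteration is a gradient step on the smooth quadratic term followed by the proximal map of the l1 term, which is soft-thresholding. A minimal sketch of one iteration, not the library's exact code; a safe step size is at most 1/||A||_2^2, the inverse Lipschitz constant of the gradient:

```python
import numpy as np

def soft_threshold(x, t):
    # Proximal operator of t * ||x||_1
    return np.sign(x) * np.maximum(np.abs(x) - t, 0.0)

def fb_step(x, A, y, reg_param, step):
    grad = A.T @ (A @ x - y)  # gradient of 0.5 * ||Ax - y||_2^2
    return soft_threshold(x - step * grad, step * reg_param)
```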
#!/usr/bin/env python3
import argparse, os, sys, time, shutil, tqdm
import warnings, json, gzip
import numpy as np
import copy
from sklearn.model_selection import GroupKFold
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader, ... | [
"numpy.isin",
"numpy.random.seed",
"argparse.ArgumentParser",
"misc_utils.evaluator",
"os.path.isfile",
"torch.device",
"time.asctime",
"torch.nn.MSELoss",
"torch.nn.BCELoss",
"torch.utils.data.DataLoader",
"torch.matmul",
"functools.partial",
"copy.deepcopy",
"tqdm.tqdm",
"torch.manual_... | [((409, 445), 'functools.partial', 'functools.partial', (['print'], {'flush': '(True)'}), '(print, flush=True)\n', (426, 445), False, 'import functools\n'), ((1081, 1103), 'os.path.isfile', 'os.path.isfile', (['in_dir'], {}), '(in_dir)\n', (1095, 1103), False, 'import argparse, os, sys, time, shutil, tqdm\n'), ((1739, ... |
""" TensorMONK :: layers :: Activations """
__all__ = ["Activations"]
import torch
import torch.nn as nn
import torch.nn.functional as F
def maxout(tensor: torch.Tensor) -> torch.Tensor:
if not tensor.size(1) % 2 == 0:
raise ValueError("MaxOut: tensor.size(1) must be divisible by n_splits"
... | [
"torch.ones",
"torch.nn.functional.selu",
"torch.nn.functional.prelu",
"torch.nn.functional.relu6",
"torch.nn.functional.gelu",
"torch.sigmoid",
"torch.nn.functional.leaky_relu",
"torch.nn.functional.relu",
"torch.nn.functional.elu",
"torch.nn.functional.softplus",
"numpy.prod",
"torch.tanh"
] | [((4037, 4051), 'torch.nn.functional.relu', 'F.relu', (['tensor'], {}), '(tensor)\n', (4043, 4051), True, 'import torch.nn.functional as F\n'), ((4112, 4127), 'torch.nn.functional.relu6', 'F.relu6', (['tensor'], {}), '(tensor)\n', (4119, 4127), True, 'import torch.nn.functional as F\n'), ((4187, 4222), 'torch.nn.functi... |
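The divisibility check in `maxout` above guards the standard two-way maxout (Goodfellow et al., 2013): split the channel dimension in half and keep the elementwise maximum. A minimal sketch of how such a function typically continues; the original body is truncated, so this is an illustration, not the module's code:

```python
import torch

def maxout_sketch(tensor: torch.Tensor) -> torch.Tensor:
    # Split channels (dim 1) into two halves and take the elementwise max.
    if tensor.size(1) % 2 != 0:
        raise ValueError("maxout: tensor.size(1) must be divisible by 2")
    a, b = tensor.chunk(2, dim=1)
    return torch.max(a, b)
```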
from sedac_gpw_parser import population
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.colors as colors
import os
file_lons = np.arange(-180, 180, 40)
file_lats = np.arange(90, -20, -50)
DATA_FOLDER = os.path.expanduser("~") + "/.srtm30/"
def get_population_data(country_id):
pop = ... | [
"numpy.sum",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.round",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.zeros_like",
"numpy.isfinite",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"numpy.nansum",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot... | [((154, 178), 'numpy.arange', 'np.arange', (['(-180)', '(180)', '(40)'], {}), '(-180, 180, 40)\n', (163, 178), True, 'import numpy as np\n'), ((191, 214), 'numpy.arange', 'np.arange', (['(90)', '(-20)', '(-50)'], {}), '(90, -20, -50)\n', (200, 214), True, 'import numpy as np\n'), ((229, 252), 'os.path.expanduser', 'os.... |
from __future__ import division, print_function
from typing import List, Tuple, Callable
import numpy as np
import scipy
import matplotlib.pyplot as plt
class Perceptron:
def __init__(self, nb_features=2, max_iteration=10, margin=1e-4):
'''
Args :
nb_features : Number of feature... | [
"numpy.dot",
"numpy.linalg.norm",
"numpy.random.shuffle"
] | [((1280, 1304), 'numpy.linalg.norm', 'np.linalg.norm', (['features'], {}), '(features)\n', (1294, 1304), True, 'import numpy as np\n'), ((1444, 1466), 'numpy.random.shuffle', 'np.random.shuffle', (['seq'], {}), '(seq)\n', (1461, 1466), True, 'import numpy as np\n'), ((1516, 1543), 'numpy.dot', 'np.dot', (['self.w', 'fe... |
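The extract shows the ingredients of the training loop: normalize each sample (np.linalg.norm), visit samples in shuffled order (np.random.shuffle), and score with np.dot. The classic perceptron update adds y*x to the weights whenever a sample falls on the wrong side of (or inside) the margin. A minimal sketch, independent of the class above and using hypothetical helper names:

```python
import numpy as np

def perceptron_train(X, y, max_iteration=10, margin=1e-4):
    # X: (n_samples, n_features); y: labels in {-1, +1}
    w = np.zeros(X.shape[1])
    for _ in range(max_iteration):
        seq = np.arange(len(X))
        np.random.shuffle(seq)                        # random visiting order
        for i in seq:
            x = X[i] / (np.linalg.norm(X[i]) + 1e-12)   # unit-normalize the sample
            if y[i] * np.dot(w, x) <= margin:         # wrong side or inside margin
                w += y[i] * x                         # perceptron update
    return w
```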
import numpy as np
import os
import time
np.set_printoptions(threshold=np.inf)
def input(fname):
day_dir = os.path.realpath(__file__).split('/')[:-1]
fname = os.path.join('/',*day_dir, fname)
data = []
with open(fname) as f:
for line in f:
data.append(line.strip())
return data... | [
"numpy.set_printoptions",
"os.path.realpath",
"time.time",
"numpy.array",
"os.path.join"
] | [((42, 79), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (61, 79), True, 'import numpy as np\n'), ((1513, 1524), 'time.time', 'time.time', ([], {}), '()\n', (1522, 1524), False, 'import time\n'), ((1629, 1640), 'time.time', 'time.time', ([], {}), '()\n', (1638,... |
from keras.models import load_model
# from matplotlib.font_manager import FontProperties
import cv2
import numpy as np
import exptBikeNYC
size = 10
model = exptBikeNYC.build_model(False)
model.load_weights('MODEL/c3.p3.t3.resunit4.lr0.0002.best.h5')
f = open("area.csv", "r")
# temporary storage for the number of people at one time step
person_num = []
# stores the number of people at each time step, size (n... | [
"exptBikeNYC.build_model",
"numpy.array"
] | [((156, 186), 'exptBikeNYC.build_model', 'exptBikeNYC.build_model', (['(False)'], {}), '(False)\n', (179, 186), False, 'import exptBikeNYC\n'), ((1330, 1347), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (1338, 1347), True, 'import numpy as np\n'), ((1260, 1278), 'numpy.array', 'np.array', (['train_x1']... |
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from datetime import datetime
LOGDIR = '/tmp/17springAI/mnist/objectiveFunc/' + datetime.now().strftime('%Y%m%d-%H%M%S') + '/'
def activation(act_func, logit):
if act_func == "relu":
return tf.nn.relu(log... | [
"tensorflow.contrib.keras.losses.mean_squared_error",
"tensorflow.reset_default_graph",
"tensorflow.reshape",
"tensorflow.train.AdamOptimizer",
"tensorflow.matmul",
"numpy.mean",
"tensorflow.GPUOptions",
"tensorflow.truncated_normal",
"tensorflow.nn.softmax",
"tensorflow.nn.relu",
"tensorflow.nn... | [((4916, 4971), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""./MNIST_data"""'], {'one_hot': '(True)'}), "('./MNIST_data', one_hot=True)\n", (4941, 4971), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((601, 635), 'tensorflow.summary.histog... |
import unittest
import numpy as np
from rastervision.core.class_map import (ClassItem, ClassMap)
from rastervision.evaluations.segmentation_evaluation import (
SegmentationEvaluation)
from rastervision.label_stores.segmentation_raster_file import (
SegmentationInputRasterFile)
from rastervision.label_stores.s... | [
"unittest.main",
"numpy.ones",
"rastervision.core.class_map.ClassItem",
"rastervision.label_stores.segmentation_raster_file.SegmentationInputRasterFile",
"rastervision.label_stores.segmentation_raster_file_test.TestingRasterSource",
"rastervision.evaluations.segmentation_evaluation.SegmentationEvaluation"... | [((2129, 2144), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2142, 2144), False, 'import unittest\n'), ((658, 692), 'numpy.ones', 'np.ones', (['(5, 5, 3)'], {'dtype': 'np.uint8'}), '((5, 5, 3), dtype=np.uint8)\n', (665, 692), True, 'import numpy as np\n'), ((773, 807), 'rastervision.label_stores.segmentation_ra... |
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline (scaling)
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (12, 8)
# Normally distributed x and y vectors with mean 0 and standard deviation 1
x = np.random.normal(0, 1, 200)
y = np.random.normal(0, 1, 200)
X = np.vstack((x, y)) # 2xn
# Scaling
sx, sy ... | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.style.use",
"numpy.array",
"numpy.random.normal",
"numpy.vstack"
] | [((76, 99), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (89, 99), True, 'import matplotlib.pyplot as plt\n'), ((219, 246), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(200)'], {}), '(0, 1, 200)\n', (235, 246), True, 'import numpy as np\n'), ((251, 278), 'numpy... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 16:22:17 2021
@author: mike_ubuntu
"""
import numpy as np
from functools import reduce
import copy
import gc
import time
import datetime
import pickle
import warnings
import epde.globals as global_var
import torch
from epde.decorators import H... | [
"epde.interface.token_family.TF_Pool",
"numpy.abs",
"numpy.maximum",
"epde.supplementary.Population_Sort",
"numpy.ones",
"gc.collect",
"numpy.isclose",
"numpy.random.randint",
"numpy.arange",
"numpy.mean",
"torch.flatten",
"numpy.multiply",
"numpy.copy",
"numpy.std",
"numpy.ndim",
"num... | [((728, 742), 'numpy.copy', 'np.copy', (['Input'], {}), '(Input)\n', (735, 742), True, 'import numpy as np\n'), ((21638, 21697), 'epde.decorators.Reset_equation_status', 'Reset_equation_status', ([], {'reset_input': '(False)', 'reset_output': '(True)'}), '(reset_input=False, reset_output=True)\n', (21659, 21697), False... |
""" Functionality to analyse bias triangles
@author: amjzwerver
"""
#%%
import numpy as np
import qcodes
import qtt
import qtt.pgeometry
import matplotlib.pyplot as plt
from qcodes.plots.qcmatplotlib import MatPlot
from qtt.data import diffDataset
def plotAnalysedLines(clicked_pts, linePoints1_2, linePt3_vert, li... | [
"numpy.abs",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.figure",
"numpy.linalg.norm",
"matplotlib.pyplot.gca",
"numpy.round",
"qtt.pgeometry.intersect2lines",
"qtt.pgeometry.plot2Dline",
"qtt.pgeometry.dehom",
"qtt.pgeometry.fitPlane",
"qtt.data.diffDataset",
"matplotlib.pyplot.get_fignums",
... | [((918, 974), 'qtt.pgeometry.plot2Dline', 'qtt.pgeometry.plot2Dline', (['linePoints1_2', '""":c"""'], {'alpha': '(0.5)'}), "(linePoints1_2, ':c', alpha=0.5)\n", (942, 974), False, 'import qtt\n'), ((980, 1035), 'qtt.pgeometry.plot2Dline', 'qtt.pgeometry.plot2Dline', (['linePt3_vert', '""":b"""'], {'alpha': '(0.4)'}), "... |
import face_recognition
import cv2
import numpy as np
import os
import re
from itertools import chain
known_people_folder = './database'
def scan_known_people(known_people_folder):
known_names = []
known_face_encodings = []
for file in image_files_in_folder(known_people_folder):
basename = os.path.... | [
"face_recognition.compare_faces",
"numpy.argmin",
"cv2.rectangle",
"cv2.imshow",
"os.path.join",
"cv2.cvtColor",
"face_recognition.face_encodings",
"cv2.destroyAllWindows",
"cv2.resize",
"face_recognition.face_distance",
"os.path.basename",
"cv2.waitKey",
"re.match",
"os.listdir",
"cv2.p... | [((1610, 1629), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1626, 1629), False, 'import cv2\n'), ((5245, 5268), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5266, 5268), False, 'import cv2\n'), ((2053, 2091), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HLS'], {... |
import os
import sys
import h5py
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--root', help='path to root directory')
args = parser.parse_args()
root = args.root
fname = os.path.join(root, 'metadata/train.txt')
flist = [os.path.join(root, 'h5', line.strip())
fo... | [
"h5py.File",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.median",
"numpy.savetxt",
"numpy.zeros",
"os.path.join"
] | [((79, 104), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (102, 104), False, 'import argparse\n'), ((220, 260), 'os.path.join', 'os.path.join', (['root', '"""metadata/train.txt"""'], {}), "(root, 'metadata/train.txt')\n", (232, 260), False, 'import os\n'), ((357, 402), 'os.path.join', 'os.pat... |
import argparse
import time
import numpy as np
import pyvisa
# Parse folder path, file name, and measurement parameters from command line
# arguments. Remember to include the "python" keyword before the call to the
# python file from the command line, e.g. python example.py "arg1" "arg2".
# Folder paths must use for... | [
"numpy.absolute",
"pyvisa.ResourceManager",
"argparse.ArgumentParser",
"time.time",
"numpy.array"
] | [((366, 456), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Measure and save max power point tracking data"""'}), "(description=\n 'Measure and save max power point tracking data')\n", (389, 456), False, 'import argparse\n'), ((1727, 1747), 'numpy.absolute', 'np.absolute', (['V_start... |
from __future__ import division
from __future__ import print_function
from evaluation import get_roc_score, clustering_latent_space
from input_data import load_adj_feature
from kcore import compute_kcore, expand_embedding
from model import *
from optimizer import OptimizerAE, OptimizerVAE
from preprocessing impo... | [
"numpy.save",
"kcore.expand_embedding",
"numpy.std",
"tensorflow.global_variables_initializer",
"tensorflow.placeholder_with_default",
"tensorflow.sparse_tensor_to_dense",
"tensorflow.Session",
"kcore.compute_kcore",
"time.time",
"scipy.sparse.triu",
"numpy.mean",
"input_data.load_adj_feature"... | [((3122, 3209), 'input_data.load_adj_feature', 'load_adj_feature', (['"""../Cross-talk/Fegs_1.npy"""', '"""../Cross-talk/Cross-talk_Matrix.txt"""'], {}), "('../Cross-talk/Fegs_1.npy',\n '../Cross-talk/Cross-talk_Matrix.txt')\n", (3138, 3209), False, 'from input_data import load_adj_feature\n'), ((4033, 4044), 'time.... |
import numpy as np
import pandas as pd
from dstk.preprocessing import (onehot_encode,
mark_binary,
nan_to_binary,
num_to_str)
# Create test data
df = pd.DataFrame()
df['numeric1'] = [0, 1, 0, 0, 1, 1]
df['numeric2'] = [1.0... | [
"pandas.DataFrame",
"dstk.preprocessing.nan_to_binary",
"numpy.isnan",
"dstk.preprocessing.onehot_encode",
"dstk.preprocessing.num_to_str",
"dstk.preprocessing.mark_binary"
] | [((248, 262), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (260, 262), True, 'import pandas as pd\n'), ((597, 625), 'dstk.preprocessing.num_to_str', 'num_to_str', (['df', "['numeric1']"], {}), "(df, ['numeric1'])\n", (607, 625), False, 'from dstk.preprocessing import onehot_encode, mark_binary, nan_to_binary, ... |
"""Utilities for multiprocessing."""
from contextlib import contextmanager
import logging
import time
from dask.distributed import Client, LocalCluster, progress
from dask_jobqueue import PBSCluster
import numpy as np
_logger = logging.getLogger(__name__)
def map_function(function, function_args, pbs=False, **clust... | [
"dask.distributed.Client",
"dask.distributed.LocalCluster",
"time.sleep",
"numpy.shape",
"dask.distributed.progress",
"numpy.array",
"dask_jobqueue.PBSCluster",
"logging.getLogger"
] | [((230, 257), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (247, 257), False, 'import logging\n'), ((2557, 2572), 'dask.distributed.Client', 'Client', (['cluster'], {}), '(cluster)\n', (2563, 2572), False, 'from dask.distributed import Client, LocalCluster, progress\n'), ((2619, 2632), ... |
import numpy as np
import gym
import gym_carsim
from gym import spaces
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
ENV_NAME = ... | [
"rl.memory.SequentialMemory",
"rl.agents.dqn.DQNAgent",
"numpy.random.seed",
"gym.make",
"keras.layers.Activation",
"keras.layers.Flatten",
"rl.policy.BoltzmannQPolicy",
"keras.optimizers.Adam",
"gym.ObservationWrapper.__init__",
"keras.layers.Dense",
"gym.spaces.Box",
"keras.models.Sequential... | [((1015, 1033), 'gym.make', 'gym.make', (['ENV_NAME'], {}), '(ENV_NAME)\n', (1023, 1033), False, 'import gym\n'), ((1061, 1085), 'numpy.random.seed', 'np.random.seed', (['(98283476)'], {}), '(98283476)\n', (1075, 1085), True, 'import numpy as np\n'), ((1422, 1434), 'keras.models.Sequential', 'Sequential', ([], {}), '()... |
from pyml.tree.regression import DecisionTreeRegressor
from pyml.metrics.pairwise import euclidean_distance
import numpy as np
# TODO: choose between squared error, absolute error, and Huber loss
class GradientBoostingRegression():
def __init__(self,
learning_rate=0.1,
base_estimator=DecisionTreeRegressor,
n_estimators=500... | [
"numpy.zeros",
"pyml.metrics.pairwise.euclidean_distance",
"numpy.array"
] | [((2317, 2533), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5, 6, 7, 8], [2, 3, 4, 5, 6, 7, 8, 9], [3, 4, 5, 6, 7, 8, 9, \n 10], [4, 5, 6, 7, 8, 9, 10, 11], [5, 6, 7, 8, 9, 10, 11, 12], [6, 7, 8,\n 9, 10, 11, 12, 13], [7, 8, 9, 10, 11, 12, 13, 14]]'], {}), '([[1, 2, 3, 4, 5, 6, 7, 8], [2, 3, 4, 5, 6, 7, 8, 9], [3... |
# was stanza.models.pos.model
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence, PackedSequence
from biaffine import BiaffineScorer
from hlstm import HighwayLSTM
from dropout import WordDropout
... | [
"torch.nn.Dropout",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.from_numpy",
"biaffine.BiaffineScorer",
"torch.nn.ModuleList",
"dropout.WordDropout",
"torch.nn.CrossEntropyLoss",
"torch.cat",
"torch.nn.utils.rnn.PackedSequence",
"torch.zeros",
"torch.randn",
"torch.nn.Linear",
"hlstm.H... | [((2077, 2308), 'hlstm.HighwayLSTM', 'HighwayLSTM', (['input_size', "self.args['tag_hidden_dim']", "self.args['tag_num_layers']"], {'batch_first': '(True)', 'bidirectional': '(True)', 'dropout': "self.args['dropout']", 'rec_dropout': "self.args['tag_rec_dropout']", 'highway_func': 'torch.tanh'}), "(input_size, self.arg... |
"""Unit tests for orbitpy.util module.
"""
import unittest
import numpy as np
from numpy.core.numeric import tensordot
from instrupy.util import Orientation
from instrupy import Instrument
from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft
import orbitpy.util
import propcov
from util.spacecrafts import sp... | [
"orbitpy.util.OrbitState.from_json",
"orbitpy.util.OrbitState.state_from_dict",
"propcov.Rvector6",
"numpy.deg2rad",
"orbitpy.util.SpacecraftBus.from_json",
"propcov.AbsoluteDate.fromJulianDate",
"instrupy.Instrument.from_json",
"orbitpy.util.OrbitState.date_from_dict",
"orbitpy.util.Spacecraft.from... | [((442, 515), 'orbitpy.util.OrbitState.date_from_dict', 'OrbitState.date_from_dict', (["{'@type': 'JULIAN_DATE_UT1', 'jd': 2459270.75}"], {}), "({'@type': 'JULIAN_DATE_UT1', 'jd': 2459270.75})\n", (467, 515), False, 'from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft\n'), ((582, 713), 'orbitpy.util.OrbitSta... |
import numpy as np
import matplotlib.pyplot as plt
#plt.rc('font', family='serif')
#plt.rc('text', usetex=True)
sol1err = np.fromfile('../out/sol1err')
sol2err = np.fromfile('../out/sol2err')
L2err = np.sqrt(sol2err**2 + sol1err**2)
h = np.fromfile('../out/h')
x = np.sort(h)
fig, ax = plt.subplots(1,1)
for i in ran... | [
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"numpy.fromfile",
"numpy.sort",
"numpy.log10",
"matplotlib.pyplot.subplots",
"numpy.sqrt"
] | [((124, 153), 'numpy.fromfile', 'np.fromfile', (['"""../out/sol1err"""'], {}), "('../out/sol1err')\n", (135, 153), True, 'import numpy as np\n'), ((164, 193), 'numpy.fromfile', 'np.fromfile', (['"""../out/sol2err"""'], {}), "('../out/sol2err')\n", (175, 193), True, 'import numpy as np\n'), ((202, 238), 'numpy.sqrt', 'n... |
import math
import random
import numpy
from tools import *
'''
Parametric optimizers that search for an optimal TSP solution.
Method 1: stochastic hill-climbing search
Method 2: random search, used as a benchmark
'''
# Initialize the population, a collection of paths
def createPath(m):
n = numpy.arange(1,m+1)
numpy.ra... | [
"numpy.arange",
"numpy.random.shuffle"
] | [((290, 312), 'numpy.arange', 'numpy.arange', (['(1)', '(m + 1)'], {}), '(1, m + 1)\n', (302, 312), False, 'import numpy\n'), ((312, 335), 'numpy.random.shuffle', 'numpy.random.shuffle', (['n'], {}), '(n)\n', (332, 335), False, 'import numpy\n'), ((840, 860), 'numpy.arange', 'numpy.arange', (['v.size'], {}), '(v.size)\... |
import cv2
import numpy as np
import os
# import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import pathlib
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from IPython.display import display
from obje... | [
"cv2.waitKey",
"tensorflow.convert_to_tensor",
"numpy.asarray",
"cv2.imshow",
"object_detection.utils.label_map_util.create_category_index_from_labelmap",
"cv2.VideoCapture",
"object_detection.utils.ops.reframe_box_masks_to_image_masks",
"tensorflow.cast",
"numpy.array",
"cv2.destroyAllWindows",
... | [((801, 894), 'object_detection.utils.label_map_util.create_category_index_from_labelmap', 'label_map_util.create_category_index_from_labelmap', (['PATH_TO_LABELS'], {'use_display_name': '(True)'}), '(PATH_TO_LABELS,\n use_display_name=True)\n', (851, 894), False, 'from object_detection.utils import label_map_util\n... |
"""
"""
from configparser import ConfigParser, SectionProxy
from os import path
import os
from typing import List, Tuple, Any, Optional, Dict
import numpy as np
import tqdm
from general_utils.config import config_util, config_parser_singleton
from general_utils.exportation import csv_exportation
from ge... | [
"os.mkdir",
"data_providing_module.data_provider_registry.registry.register_consumer",
"data_providing_module.configurable_registry.config_registry.register_configurable",
"os.path.exists",
"numpy.where",
"stock_data_analysis_module.ml_models.evolutionary_computation.TradingPopulation",
"general_utils.c... | [((3132, 3178), 'numpy.where', 'np.where', (['(actual_predictions == 1)', '(True)', '(False)'], {}), '(actual_predictions == 1, True, False)\n', (3140, 3178), True, 'import numpy as np\n'), ((6217, 6282), 'data_providing_module.configurable_registry.config_registry.register_configurable', 'configurable_registry.config_... |
import numpy as np
from pydex.core.designer import Designer
def simulate(ti_controls, model_parameters):
return np.array([
np.exp(model_parameters[0] * ti_controls[0])
])
designer = Designer()
designer.simulate = simulate
reso = 21j
tic = np.mgrid[0:1:reso]
designer.ti_controls_candidates = np.arra... | [
"numpy.random.seed",
"pydex.core.designer.Designer",
"numpy.array",
"numpy.exp",
"numpy.random.normal"
] | [((202, 212), 'pydex.core.designer.Designer', 'Designer', ([], {}), '()\n', (210, 212), False, 'from pydex.core.designer import Designer\n'), ((332, 351), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (346, 351), True, 'import numpy as np\n'), ((392, 444), 'numpy.random.normal', 'np.random.normal',... |
import subprocess
from PIL import Image
import torchvision.transforms as transforms
import torch
import functools
import random
import math
import cv2
import numpy as np
import os
# Object annotation class:
class BodyPart:
def __init__(self, name, xmin, ymin, xmax, ymax, x, y, w, h):
self.name = name
... | [
"torch.nn.Dropout",
"cv2.bitwise_and",
"numpy.ones",
"numpy.clip",
"cv2.ellipse",
"cv2.rectangle",
"torchvision.transforms.Normalize",
"torch.no_grad",
"cv2.inRange",
"os.path.join",
"random.randint",
"cv2.dilate",
"cv2.cvtColor",
"cv2.imwrite",
"torch.load",
"torch.nn.ReflectionPad2d"... | [((2016, 2030), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (2025, 2030), False, 'import cv2\n'), ((2855, 2878), 'cv2.merge', 'cv2.merge', (['out_channels'], {}), '(out_channels)\n', (2864, 2878), False, 'import cv2\n'), ((3377, 3430), 'numpy.ma.array', 'np.ma.array', (['matrix'], {'mask': 'mask', 'fill_value':... |
'''
Created on 10-Jul-2018
@author: <NAME>
'''
# We will use seaborn to create plots
import seaborn as sns
# Matplotlib will help us to draw the plots
import matplotlib.pyplot as plt
sns.set(color_codes=True)
# Import pandas to manage data set
import pandas as pd
# Import NumPy for all mathematics operations on ... | [
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.subplots",
"seaborn.countplot",
"seaborn.distplot",
"numpy.random.permutation",
"seaborn.set"
] | [((187, 212), 'seaborn.set', 'sns.set', ([], {'color_codes': '(True)'}), '(color_codes=True)\n', (194, 212), True, 'import seaborn as sns\n'), ((493, 530), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'delimiter': '""","""'}), "(file_name, delimiter=',')\n", (504, 530), True, 'import pandas as pd\n'), ((1634, 166... |
import os
import inspect
from tqdm import tqdm
import numpy as np
import typing
import cv2
import torchvision
import torch
from PIL import Image
from torch.utils.data import Dataset, DataLoader
# project root (resolved correctly even if this file is called from elsewhere)
CRT_ABS_PATH = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# ke... | [
"os.mkdir",
"numpy.load",
"torch.cat",
"numpy.arange",
"torchvision.transforms.Normalize",
"os.path.join",
"torch.utils.data.DataLoader",
"torch.load",
"os.path.exists",
"torch.is_tensor",
"cv2.resize",
"numpy.random.shuffle",
"tqdm.tqdm",
"numpy.save",
"inspect.currentframe",
"torchvi... | [((1773, 1876), 'os.path.join', 'os.path.join', (['CRT_ABS_PATH', "PATH_TO_DATASET['EI339']", "DATASET_MAPPING_FN['EI339']['train']['data']"], {}), "(CRT_ABS_PATH, PATH_TO_DATASET['EI339'], DATASET_MAPPING_FN[\n 'EI339']['train']['data'])\n", (1785, 1876), False, 'import os\n'), ((1906, 2010), 'os.path.join', 'os.pa... |
# -*- coding: utf-8 -*-
import cv2
import os
import numpy as np
from sklearn.model_selection import train_test_split
import random
import tensorflow as tf
def read_data(img_path, image_h = 64, image_w = 64):
image_data = []
label_data = []
image = cv2.imread(img_path)
#cv2.namedWindow("Image... | [
"tensorflow.random_uniform",
"numpy.dot",
"tensorflow.summary.scalar",
"tensorflow.subtract",
"random.randint",
"cv2.copyMakeBorder",
"cv2.imread",
"tensorflow.placeholder",
"tensorflow.zeros",
"numpy.array",
"tensorflow.matmul",
"tensorflow.summary.histogram",
"numpy.random.rand",
"tensor... | [((1750, 1808), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.001)'], {'name': '"""Optimizer"""'}), "(0.001, name='Optimizer')\n", (1783, 1808), True, 'import tensorflow as tf\n'), ((1982, 2009), 'tensorflow.summary.merge', 'tf.summary.merge', (['summaries'], {}), '(summaries)\... |
import logging
import os
from abc import ABC
import gin
import MinkowskiEngine as ME
import numpy as np
import open3d as o3d
import torch
from src.models import get_model
class BaseFeatureExtractor(ABC):
def __init__(self):
logging.info(f"Initialize {self.__class__.__name__}")
def extract_feature(s... | [
"torch.ones",
"MinkowskiEngine.SparseTensor",
"torch.load",
"MinkowskiEngine.utils.sparse_quantize",
"os.path.exists",
"open3d.geometry.PointCloud",
"numpy.asarray",
"logging.info",
"open3d.geometry.KDTreeSearchParamHybrid",
"gin.configurable",
"MinkowskiEngine.utils.batched_coordinates",
"src... | [((420, 438), 'gin.configurable', 'gin.configurable', ([], {}), '()\n', (436, 438), False, 'import gin\n'), ((2034, 2052), 'gin.configurable', 'gin.configurable', ([], {}), '()\n', (2050, 2052), False, 'import gin\n'), ((3102, 3120), 'gin.configurable', 'gin.configurable', ([], {}), '()\n', (3118, 3120), False, 'import... |
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from xgboost import XGBRegressor
import os
from django.conf import settings
import numpy as np
from functools import lru_cache
RANDOM_STATE = 42
def get_path(course, file):
return os.path.join(settings.PROJECT_ROOT, '..', 'pandas_api', 'static', ... | [
"sklearn.preprocessing.MinMaxScaler",
"os.path.isfile",
"numpy.histogram",
"xgboost.XGBRegressor",
"functools.lru_cache",
"os.path.join"
] | [((344, 365), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (353, 365), False, 'from functools import lru_cache\n'), ((254, 344), 'os.path.join', 'os.path.join', (['settings.PROJECT_ROOT', '""".."""', '"""pandas_api"""', '"""static"""', '"""mit"""', 'course', 'file'], {}), "(settings.... |
# -*- coding: utf-8 -*-
"""
Functions for mapping AHBA microarray dataset to atlases and parcellations
in MNI space
"""
from functools import reduce
from nilearn._utils import check_niimg_3d
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
from abagen import datasets, io, process, ... | [
"abagen.utils.check_metric",
"abagen.utils.xyz_to_ijk",
"abagen.io.read_probes",
"abagen.process.drop_mismatch_samples",
"numpy.diag",
"numpy.unique",
"pandas.DataFrame",
"abagen.utils.closest_centroid",
"abagen.process.get_stable_probes",
"abagen.process.normalize_expression",
"nilearn._utils.c... | [((1758, 1821), 'abagen.utils.expand_roi', 'utils.expand_roi', (['sample'], {'dilation': 'tolerance', 'return_array': '(True)'}), '(sample, dilation=tolerance, return_array=True)\n', (1774, 1821), False, 'from abagen import datasets, io, process, utils\n'), ((2095, 2135), 'numpy.unique', 'np.unique', (['nz_labels'], {'... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# CODE DESCRIPTION HERE
Created on 2019-03-05 16:38
@author: ncook
Version 0.0.1
"""
import numpy as np
import os
from apero import core
from apero import lang
from apero.core import constants
from apero.science import preprocessing as pp
from apero.io import drs_im... | [
"apero.core.setup",
"apero.science.preprocessing.median_one_over_f_noise",
"apero.science.preprocessing.correct_top_bottom",
"apero.core.run",
"apero.science.preprocessing.quality_control",
"apero.science.preprocessing.get_hot_pixels",
"apero.io.drs_image.rotate_image",
"os.path.exists",
"apero.io.d... | [((686, 716), 'apero.core.constants.load', 'constants.load', (['__INSTRUMENT__'], {}), '(__INSTRUMENT__)\n', (700, 716), False, 'from apero.core import constants\n'), ((2269, 2314), 'apero.core.setup', 'core.setup', (['__NAME__', '__INSTRUMENT__', 'fkwargs'], {}), '(__NAME__, __INSTRUMENT__, fkwargs)\n', (2279, 2314), ... |
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model, metrics, preprocessing
from sklearn.model_selection import train_test_split
import itertools
import typing
class LinearRegression():
def __init__(self, n_features, optimiser):
np.random.se... | [
"numpy.random.seed",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"matplotlib.pyplot.show",
"sklearn.model_selection.train_test_split",
"numpy.sum",
"sklearn.datasets.fetch_california_housing",
"numpy.append",
"matplotlib.pyplot.figure",
"numpy.mean",
... | [((3275, 3292), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (3289, 3292), True, 'import numpy as np\n'), ((3300, 3350), 'sklearn.datasets.fetch_california_housing', 'datasets.fetch_california_housing', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (3333, 3350), False, 'from sklearn import da... |
# <NAME> and <NAME>
# Created: 6/05/2013
# Last Updated: 6/14/2013
# For JCAP
import numpy as np
from PyQt4 import QtCore
from dictionary_helpers import *
import date_helpers
import filename_handler
import datareader
# global dictionary holds all processed (z, x, y, rate) data for the experiment
DEP_DATA = []
zndec ... | [
"date_helpers.dateObjFloat",
"numpy.sin",
"numpy.array",
"datareader.DataReader",
"numpy.cos",
"numpy.round",
"PyQt4.QtCore.pyqtSignal"
] | [((538, 561), 'PyQt4.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['list'], {}), '(list)\n', (555, 561), False, 'from PyQt4 import QtCore\n'), ((631, 655), 'PyQt4.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['tuple'], {}), '(tuple)\n', (648, 655), False, 'from PyQt4 import QtCore\n'), ((671, 693), 'PyQt4.QtCore.pyqtSignal',... |
from __future__ import print_function
import numpy as np
from ._PLSbase import plsbase as pls_base
from .utilities import nanmatprod, isValid
from .engines import pls as pls_engine
class pls(pls_base):
"""
This is the classic multivariate NIPALS PLS algorithm.
Parameters:
        X: {N, P} array-like
... | [
"numpy.nansum",
"numpy.sum",
"numpy.power",
"numpy.square",
"numpy.isnan",
"numpy.linalg.inv"
] | [((2506, 2538), 'numpy.linalg.inv', 'np.linalg.inv', (['(self.P.T @ self.W)'], {}), '(self.P.T @ self.W)\n', (2519, 2538), True, 'import numpy as np\n'), ((3002, 3035), 'numpy.power', 'np.power', (['self.Xstd', 'self.scaling'], {}), '(self.Xstd, self.scaling)\n', (3010, 3035), True, 'import numpy as np\n'), ((3285, 330... |