| code | apis | extract_api |
|---|---|---|
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from collections import OrderedDict
import numpy as np
from edgeml_pytorch.trainer.drocc_trainer import DROCCTra... | [
"numpy.mean",
"edgeml_pytorch.trainer.drocc_trainer.DROCCTrainer",
"torch.nn.ReLU",
"os.path.exists",
"numpy.ones",
"torch.set_printoptions",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.std",
"os.path.join",
"torch.from_numpy",
"torch.is_tensor",
"torch.tensor",
"torch.cuda.is_availab... | [((2779, 2807), 'numpy.ones', 'np.ones', (['train_data.shape[0]'], {}), '(train_data.shape[0])\n', (2786, 2807), True, 'import numpy as np\n'), ((3052, 3074), 'numpy.mean', 'np.mean', (['train_data', '(0)'], {}), '(train_data, 0)\n', (3059, 3074), True, 'import numpy as np\n'), ((3082, 3103), 'numpy.std', 'np.std', (['... |
"""
This Python code demonstrates an edge-based active contour model as an application of the
Distance Regularized Level Set Evolution (DRLSE) formulation in the following paper:
<NAME>, <NAME>, <NAME>, <NAME>, "Distance Regularized Level Set Evolution and Its Application to Image Segmentation",
IEEE Trans. Ima... | [
"numpy.ones",
"numpy.max",
"skimage.io.imread",
"lv_set.find_lsf.find_lsf",
"numpy.min",
"lv_set.show_fig.draw_all"
] | [((2340, 2358), 'lv_set.find_lsf.find_lsf', 'find_lsf', ([], {}), '(**params)\n', (2348, 2358), False, 'from lv_set.find_lsf import find_lsf\n'), ((2387, 2419), 'lv_set.show_fig.draw_all', 'draw_all', (['phi', "params['img']", '(10)'], {}), "(phi, params['img'], 10)\n", (2395, 2419), False, 'from lv_set.show_fig import... |
#!/usr/bin/env python
# coding: utf-8
# # Lab 02
#
# ## Solving a system of nonlinear equations
#
# ### <NAME>, Б01-818
#
# IV.12.7.д
# $$\begin{cases} x^7 - 5x^2y^4 + 1510 = 0 \\ y^3 - 3x^4y - 105 = 0 \end{cases}$$
# $$\begin{cases} x_{n+1} = \sqrt{\frac{x_n^7 + 1510}{5y_n^4}} \\ y_{n+1} = \sqrt[3]{3x_{n}^4y_{n}... | [
"logging.getLogger",
"numpy.abs",
"numpy.sqrt",
"pandas.DataFrame",
"numpy.cbrt"
] | [((3476, 3495), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3493, 3495), False, 'import logging\n'), ((3984, 4097), 'pandas.DataFrame', 'pd.DataFrame', (["{'Начальное приближение': x_init_vec_fpi, 'Результат': fpi_results,\n 'Итераций': fpi_iterations}"], {}), "({'Начальное приближение': x_init_vec_... |
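The fixed-point scheme above follows directly from the two equations; the truncated y-update is recovered from $y^3 - 3x^4y - 105 = 0$, i.e. $y_{n+1} = \sqrt[3]{3x_n^4 y_n + 105}$. A minimal sketch of the iteration, consistent with the `numpy.sqrt`/`numpy.cbrt`/`numpy.abs` calls in the API list; the tolerance and iteration cap are assumptions:

```python
import numpy as np

def fpi(x0, y0, eps=1e-9, max_iter=10000):
    # Fixed-point iteration for the system above
    x, y = x0, y0
    for _ in range(max_iter):
        x_new = np.sqrt((x**7 + 1510) / (5 * y**4))  # from x^7 - 5x^2 y^4 + 1510 = 0
        y_new = np.cbrt(3 * x**4 * y + 105)           # from y^3 - 3x^4 y - 105 = 0
        if np.abs(x_new - x) < eps and np.abs(y_new - y) < eps:
            break
        x, y = x_new, y_new
    return x, y
```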
import numpy as np
import unittest
from itertools import product
from ml_techniques.svm import *
class PermutationDataTest(unittest.TestCase):
def testpropershape(self):
data = np.random.random((10, 4))
labels = np.random.randint(0, 2, 10)*2-1
data_per = permut_data(data)
self.a... | [
"numpy.random.random",
"itertools.product",
"numpy.random.randint",
"numpy.random.randn",
"numpy.random.permutation"
] | [((193, 218), 'numpy.random.random', 'np.random.random', (['(10, 4)'], {}), '((10, 4))\n', (209, 218), True, 'import numpy as np\n'), ((3285, 3315), 'numpy.random.random', 'np.random.random', (['(n, n_feats)'], {}), '((n, n_feats))\n', (3301, 3315), True, 'import numpy as np\n'), ((5136, 5166), 'numpy.random.random', '... |
"""
The CPTPState class and supporting functionality.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Govern... | [
"numpy.trace",
"numpy.sqrt",
"pygsti.modelmembers.states.densestate.DenseState.__init__",
"numpy.rollaxis",
"numpy.array",
"numpy.einsum",
"numpy.imag",
"numpy.take",
"numpy.real",
"numpy.dot",
"numpy.empty",
"pygsti.evotypes.Evotype.cast",
"pygsti.modelmembers.states.state.State._to_vector"... | [((2705, 2727), 'pygsti.modelmembers.states.state.State._to_vector', '_State._to_vector', (['vec'], {}), '(vec)\n', (2722, 2727), True, 'from pygsti.modelmembers.states.state import State as _State\n'), ((2903, 2937), 'numpy.rollaxis', '_np.rollaxis', (['self.basis_mxs', '(0)', '(3)'], {}), '(self.basis_mxs, 0, 3)\n', ... |
import rospy
import numpy as np
import cv2
class ScalarStable(object):
"""Represents a stabilized scalar"""
def __init__(self,
x=.0,
vx=.0,
p_cov=.03, m_cov=.01,
time=None):
"""ScalarStabilized constructor"""
self.x = x
... | [
"numpy.array",
"rospy.Time",
"numpy.float32",
"cv2.KalmanFilter"
] | [((411, 433), 'cv2.KalmanFilter', 'cv2.KalmanFilter', (['(2)', '(1)'], {}), '(2, 1)\n', (427, 433), False, 'import cv2\n'), ((522, 552), 'numpy.array', 'np.array', (['[[1, 1]]', 'np.float32'], {}), '([[1, 1]], np.float32)\n', (530, 552), True, 'import numpy as np\n'), ((1073, 1116), 'numpy.array', 'np.array', (['[[self... |
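The constructor wraps a `cv2.KalmanFilter(2, 1)`: a two-state (value, velocity) filter with one scalar measurement, with `p_cov` and `m_cov` as process and measurement noise covariances. A minimal sketch of such a filter; the exact matrices ScalarStable installs are assumptions:

```python
import cv2
import numpy as np

kf = cv2.KalmanFilter(2, 1)                                    # state = [x, vx], one scalar reading
kf.transitionMatrix = np.array([[1, 1], [0, 1]], np.float32)  # constant-velocity model
kf.measurementMatrix = np.array([[1, 0]], np.float32)          # we observe x only
kf.processNoiseCov = 0.03 * np.eye(2, dtype=np.float32)
kf.measurementNoiseCov = 0.01 * np.eye(1, dtype=np.float32)

prediction = kf.predict()                      # prior estimate of [x, vx]
kf.correct(np.array([[0.5]], np.float32))     # fold in a noisy scalar reading
```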
#!/usr/bin/env python
import os
import sys
import sqlite3
import pandas as pd
import numpy as np
from scraper import create_data_folder, read_config
from collections import OrderedDict
def main():
"""
Mainly for debugging purposes.
"""
config_file = read_config()
# Pick a file
try:
c... | [
"pandas.read_sql_query",
"scraper.read_config",
"collections.OrderedDict",
"os.listdir",
"sqlite3.connect",
"pandas.read_csv",
"os.path.join",
"pandas.set_option",
"sys.exc_info",
"scraper.create_data_folder",
"numpy.savetxt",
"pandas.DataFrame"
] | [((269, 282), 'scraper.read_config', 'read_config', ([], {}), '()\n', (280, 282), False, 'from scraper import create_data_folder, read_config\n'), ((701, 755), 'scraper.create_data_folder', 'create_data_folder', (["config_file['extracted_data_path']"], {}), "(config_file['extracted_data_path'])\n", (719, 755), False, '... |
"""Defines procedures for training, and evaluation automatic camfi annotation models,
and for using them for making automatic annotations (inference). Depends on camfi.util,
camfi.datamodel.autoannotation, camfi.datamodel.geometry, camfi.datamode.via, as well
as ._torchutils and ._models."""
from datetime import datet... | [
"numpy.count_nonzero",
"numpy.array",
"camfi.datamodel.via.ViaRegionAttributes",
"numpy.arange",
"camfi.datamodel.geometry.CircleShapeAttributes",
"pathlib.Path",
"numpy.where",
"camfi.datamodel.autoannotation.Prediction.from_tensor_dict",
"torch.hub.load_state_dict_from_url",
"scipy.sparse.csgrap... | [((3085, 3091), 'pathlib.Path', 'Path', ([], {}), '()\n', (3089, 3091), False, 'from pathlib import Path\n'), ((4578, 4598), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (4590, 4598), False, 'import torch\n'), ((4729, 4838), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'b... |
"""
Analyses skewness for continuous features
Options:
A. Log
B. Yeo-Johnson
C. QuantileTransformer
"""
import json
import pandas as pd
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import power_transform, quantile_transform
from pathlib import Path
p = Path(__f... | [
"numpy.reshape",
"pandas.concat",
"src.logger.LOGGER.info",
"pathlib.Path"
] | [((464, 488), 'src.logger.LOGGER.info', 'LOGGER.info', (['"""Load data"""'], {}), "('Load data')\n", (475, 488), False, 'from src.logger import LOGGER\n'), ((576, 615), 'src.logger.LOGGER.info', 'LOGGER.info', (['"""Process data - Logarithm"""'], {}), "('Process data - Logarithm')\n", (587, 615), False, 'from src.logge... |
import deepchem as dc
import numpy as np
import os
def test_numpy_dataset_get_shape():
"""Test that get_shape works for numpy datasets."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints,... | [
"deepchem.data.DiskDataset.from_numpy",
"deepchem.data.CSVLoader",
"numpy.random.rand",
"os.path.join",
"numpy.array",
"numpy.random.randint",
"deepchem.feat.DummyFeaturizer",
"deepchem.data.NumpyDataset",
"os.path.abspath",
"deepchem.data.DiskDataset"
] | [((227, 271), 'numpy.random.rand', 'np.random.rand', (['num_datapoints', 'num_features'], {}), '(num_datapoints, num_features)\n', (241, 271), True, 'import numpy as np\n'), ((278, 332), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(num_datapoints, num_tasks)'}), '(2, size=(num_datapoints, num_tasks... |
import models
import os
import copy
import torch
import torch.nn as nn
from lifelines import KaplanMeierFitter as KMFitter
import pycox
import numpy as np
# local
import catdist
import data_utils
import _concordance
import _nll
import _saver
def str_to_bool(arg):
"""Convert an argument string into its boolean val... | [
"numpy.unique",
"catdist.CatDist",
"torch.zeros_like",
"torch.isnan",
"torch.zeros"
] | [((1717, 1778), 'catdist.CatDist', 'catdist.CatDist', ([], {'logits': 'None', 'args': 'args', 'probs': 'Gprobs', 'k': 'None'}), '(logits=None, args=args, probs=Gprobs, k=None)\n', (1732, 1778), False, 'import catdist\n'), ((699, 713), 'torch.isnan', 'torch.isnan', (['x'], {}), '(x)\n', (710, 713), False, 'import torch\... |
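`str_to_bool` is the usual argparse helper for boolean command-line flags. Its body is truncated above; a common implementation, where the accepted spellings are an assumption:

```python
import argparse

def str_to_bool(arg):
    """Convert an argument string into its boolean value."""
    if arg.lower() in ('yes', 'true', 't', '1'):
        return True
    if arg.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % arg)
```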
#!/usr/bin/env python
# coding: utf-8
__author__ = '<NAME>'
__copyright__ = 'Copyright 2017-2020, <NAME>'
__license__ = 'MIT'
__version__ = '0.5'
__email__ = '<EMAIL>'
__status__ = 'Development'
__description__ = 'Tkinter based GUI, visualizing PASCAL VOC object detection annotation'
"""
Changelog:
- 2020-06-16 11:3... | [
"logging.getLogger",
"Tkinter.Entry",
"cv2.rectangle",
"logging.StreamHandler",
"colorsys.hsv_to_rgb",
"numpy.array",
"PIL.ImageDraw.Draw",
"Tkinter.Scrollbar",
"Tkinter.LabelFrame",
"Tkinter.Label",
"os.path.exists",
"os.listdir",
"Tkinter.Tk.__init__",
"Tkinter.Listbox",
"Tkinter.Strin... | [((2452, 2471), 'PIL.Image.fromarray', 'Image.fromarray', (['im'], {}), '(im)\n', (2467, 2471), False, 'from PIL import Image, ImageTk, ImageFont, ImageDraw\n'), ((2483, 2505), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im_pil'], {}), '(im_pil)\n', (2497, 2505), False, 'from PIL import Image, ImageTk, ImageFont, ImageD... |
__author__ = 'multiangle'
# This file implements the Huffman tree machinery; it is used for the hierarchical-softmax optimization scheme in word2vec
'''
Why hierarchical softmax at all? Simply put: the vocabulary is huge, so a full softmax over tens of thousands of word classes is expensive, with far too many parameters to update per step to be trainable. Hierarchical softmax instead evaluates only a handful of sigmoids along a word's path and updates very few parameters, which greatly speeds up training.
What is a Huffman tree? Simply put: word frequencies are counted over the training text and a binary tree is built along weighted shortest paths, so high-frequency words sit near the root and low-frequency words sit deeper. Each Huffman code identifies one word's unique path and is never a prefix of another word's code.
'''
impo... | [
"numpy.zeros"
] | [((4947, 4974), 'numpy.zeros', 'np.zeros', (['[1, self.vec_len]'], {}), '([1, self.vec_len])\n', (4955, 4974), True, 'import numpy as np\n')] |
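To make the prefix-free property concrete, here is a minimal, self-contained Huffman-code builder over a word-frequency table. This is a generic textbook construction, not the module's own implementation:

```python
import heapq

def build_huffman(word_freq):
    """Build Huffman codes from a {word: frequency} dict.
    High-frequency words get short codes; no code is a prefix of another."""
    heap = [[freq, [word, '']] for word, freq in word_freq.items()]
    heapq.heapify(heap)
    while len(heap) > 1:
        lo = heapq.heappop(heap)   # two least-frequent subtrees
        hi = heapq.heappop(heap)
        for pair in lo[1:]:
            pair[1] = '0' + pair[1]   # left branch prepends 0
        for pair in hi[1:]:
            pair[1] = '1' + pair[1]   # right branch prepends 1
        heapq.heappush(heap, [lo[0] + hi[0]] + lo[1:] + hi[1:])
    return {word: code for word, code in heap[0][1:]}

print(build_huffman({'the': 100, 'cat': 5, 'sat': 7}))
```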
# -*- coding: UTF-8 -*-
"""
Source processing routines
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import warnings
from collections import OrderedDict
from astropy.cosmology import default_cosmology
import numpy as np
import os
import pysynphot as S
import astropy.table as at
fro... | [
"os.path.exists",
"collections.OrderedDict",
"numpy.isscalar",
"os.path.join",
"pysynphot.ArraySpectrum",
"numpy.array",
"astropy.cosmology.default_cosmology.get",
"warnings.warn",
"astropy.table.Table.read"
] | [((732, 745), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (743, 745), False, 'from collections import OrderedDict\n'), ((753, 777), 'numpy.isscalar', 'np.isscalar', (['sourcenames'], {}), '(sourcenames)\n', (764, 777), True, 'import numpy as np\n'), ((2274, 2316), 'os.path.join', 'os.path.join', (['"""s... |
import pandas as pd
import numpy as np
import pickle
from sklearn.metrics.pairwise import linear_kernel
from sklearn.feature_extraction.text import TfidfVectorizer
import csv
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
... | [
"kivy.uix.label.Label",
"numpy.ndarray.argsort",
"kivy.uix.textinput.TextInput",
"csv.reader"
] | [((1260, 1286), 'kivy.uix.textinput.TextInput', 'TextInput', ([], {'multiline': '(False)'}), '(multiline=False)\n', (1269, 1286), False, 'from kivy.uix.textinput import TextInput\n'), ((1398, 1439), 'kivy.uix.textinput.TextInput', 'TextInput', ([], {'password': '(True)', 'multiline': '(False)'}), '(password=True, multi... |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 14:14:45 2020
@author: Nikki
"""
import numpy as np
import cv2
import transform as tform
import sys
import math
import scipy.spatial
import markers
###---------------------------------------------------------------------------
# Allows video to be initialized usin... | [
"math.sqrt",
"cv2.imshow",
"math.cos",
"numpy.array",
"cv2.ellipse",
"sys.exc_info",
"cv2.destroyAllWindows",
"cv2.setMouseCallback",
"markers.aot_1_markers",
"numpy.asarray",
"markers.mrb3_markers",
"cv2.addWeighted",
"markers.aot_3_markers",
"cv2.waitKey",
"transform.get_best_transform... | [((1885, 1915), 'transform.get_best_transform', 'tform.get_best_transform', (['x', 'y'], {}), '(x, y)\n', (1909, 1915), True, 'import transform as tform\n'), ((1930, 1960), 'transform.get_best_transform', 'tform.get_best_transform', (['y', 'x'], {}), '(y, x)\n', (1954, 1960), True, 'import transform as tform\n'), ((300... |
import numpy as np
from src import const
#TODO: should be imported from aguirregabiria_simple.py
def period_profit(p: np.ndarray, lambdas: np.ndarray, betas_transition=const.betas_transition):
"""
Correct expected period return profit. See ReadMe for derivation
"""
constant_part = (p-const.c) * np.e *... | [
"numpy.log",
"numpy.array",
"numpy.allclose"
] | [((518, 538), 'numpy.array', 'np.array', (['[1.4, 1.2]'], {}), '([1.4, 1.2])\n', (526, 538), True, 'import numpy as np\n'), ((553, 578), 'numpy.array', 'np.array', (['[0.5, 0.4, 0.1]'], {}), '([0.5, 0.4, 0.1])\n', (561, 578), True, 'import numpy as np\n'), ((598, 791), 'numpy.array', 'np.array', (['[[np.e ** (-3.0 * 0.... |
import os.path
from scipy.optimize import fsolve
import math
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
import utils_Florian as utils
def equations(p, t_peak, t_half):
x, y = p
return (0.5 * (math.exp(-x * t_peak) - math.exp(-y * t_peak)) - (math.exp(-x * t_half) - math.exp... | [
"scipy.optimize.fsolve",
"numpy.abs",
"numpy.argmax",
"numpy.exp",
"numpy.linspace",
"numpy.argmin",
"pandas.DataFrame",
"math.exp"
] | [((404, 418), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (416, 418), True, 'import pandas as pd\n'), ((1088, 1155), 'scipy.optimize.fsolve', 'fsolve', (['equations', 'initial_conditions[alpha]'], {'args': '(t_peak, t_half)'}), '(equations, initial_conditions[alpha], args=(t_peak, t_half))\n', (1094, 1155), F... |
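`equations` encodes the timing constraints of a double-exponential pulse: the two unknown rate constants (x, y) must reproduce the observed peak time and half-decay time, so root-finding recovers them, as the `fsolve` call in the extract shows. A usage sketch; the timing values and initial guess are hypothetical:

```python
from scipy.optimize import fsolve

# solve for the rise/decay rates (x, y) given observed timings
t_peak, t_half = 0.05, 0.2     # hypothetical timing values, in seconds
x, y = fsolve(equations, (10.0, 100.0), args=(t_peak, t_half))
```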
import re
import string
import numpy as np
from tqdm import tqdm
from typing import List
from docqa.triviaqa.read_data import TriviaQaQuestion
from docqa.triviaqa.trivia_qa_eval import normalize_answer, f1_score
from docqa.utils import flatten_iterable, split
"""
Tools for turning the aliases and answer strings from... | [
"docqa.triviaqa.trivia_qa_eval.normalize_answer",
"docqa.utils.flatten_iterable",
"re.compile",
"tqdm.tqdm",
"trivia_qa.build_span_corpus.TriviaQaWebDataset",
"data_processing.text_utils.NltkAndPunctTokenizer",
"numpy.array",
"numpy.zeros",
"multiprocessing.Pool",
"docqa.triviaqa.trivia_qa_eval.f1... | [((10899, 10919), 'trivia_qa.build_span_corpus.TriviaQaWebDataset', 'TriviaQaWebDataset', ([], {}), '()\n', (10917, 10919), False, 'from trivia_qa.build_span_corpus import TriviaQaWebDataset\n'), ((5407, 5429), 'docqa.utils.flatten_iterable', 'flatten_iterable', (['para'], {}), '(para)\n', (5423, 5429), False, 'from do... |
import nifty.tools as nt
import numpy as np
import z5py
from elf.label_multiset import deserialize_multiset
from tqdm import trange
def check_serialization(mset1, mset2):
if len(mset1) != len(mset2):
print("Serialization sizes disagree:", len(mset1), len(mset2))
return False
if not np.array_... | [
"numpy.unique",
"elf.label_multiset.deserialize_multiset",
"nifty.tools.blocking",
"z5py.File",
"numpy.array_equal",
"tqdm.trange"
] | [((1798, 1840), 'nifty.tools.blocking', 'nt.blocking', (['[0, 0, 0]', 'roi_end', '[1, 1, 1]'], {}), '([0, 0, 0], roi_end, [1, 1, 1])\n', (1809, 1840), True, 'import nifty.tools as nt\n'), ((1861, 1892), 'tqdm.trange', 'trange', (['blocking.numberOfBlocks'], {}), '(blocking.numberOfBlocks)\n', (1867, 1892), False, 'from... |
import numpy as np
from pandas import read_csv
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from config import *
from lib.preprocess.read_data import DataReader
from lib.scaler.preprocessing_data.data_normalizer import DataNormalizer
class DataPreprocessor:... | [
"lib.scaler.preprocessing_data.data_normalizer.DataNormalizer",
"lib.preprocess.read_data.DataReader",
"numpy.column_stack",
"numpy.array",
"numpy.concatenate"
] | [((717, 729), 'lib.preprocess.read_data.DataReader', 'DataReader', ([], {}), '()\n', (727, 729), False, 'from lib.preprocess.read_data import DataReader\n'), ((2432, 2461), 'lib.scaler.preprocessing_data.data_normalizer.DataNormalizer', 'DataNormalizer', (['scaler_method'], {}), '(scaler_method)\n', (2446, 2461), False... |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 ts=4 sts=4 sw=4 et tw=80 :
#
# Compare an image file and its associated uncertainty image.
#
# <NAME>
# Created: 2021-06-03
# Last modified: 2021-06-03
#--------------------------------------------------------------------------
#*********************************... | [
"logging.basicConfig",
"logging.getLogger",
"numpy.__version__.split",
"numpy.abs",
"os.getenv",
"matplotlib.pyplot.gcf",
"theil_sen.linefit",
"imp.reload",
"sys.stderr.write",
"astropy.io.fits.getdata",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.isnan",
"sys.exit",
"matplotlib... | [((514, 553), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (533, 553), False, 'import logging\n'), ((563, 590), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (580, 590), False, 'import logging\n'), ((8752, 8782), 'astropy.io.fit... |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... | [
"numpy.allclose",
"paddle.nn.LayerList",
"paddle.jit.ProgramTranslator.get_instance",
"paddle.ones",
"unittest.main"
] | [((2204, 2247), 'paddle.jit.ProgramTranslator.get_instance', 'paddle.jit.ProgramTranslator.get_instance', ([], {}), '()\n', (2245, 2247), False, 'import paddle\n'), ((2290, 2330), 'paddle.ones', 'paddle.ones', ([], {'shape': '[2, 3]', 'dtype': '"""int32"""'}), "(shape=[2, 3], dtype='int32')\n", (2301, 2330), False, 'im... |
#!/usr/bin/env python
# Copyright (c) 2017, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list ... | [
"numpy.int64",
"numpy.uint32"
] | [((1673, 1696), 'numpy.int64', 'numpy.int64', (['(1073741824)'], {}), '(1073741824)\n', (1684, 1696), False, 'import numpy\n'), ((1721, 1739), 'numpy.int64', 'numpy.int64', (['(16384)'], {}), '(16384)\n', (1732, 1739), False, 'import numpy\n'), ((1765, 1788), 'numpy.int64', 'numpy.int64', (['(2147483648)'], {}), '(2147... |
import itertools
import os
import random
import numpy as np
import pandas as pd
from tqdm import tqdm
def _get_steps():
hdf_subdir = "augmentation/"
steps = {"step_name": ["prototypical", "single_sources", "mixtures"]}
steps_df = pd.DataFrame(steps)
steps_df["hdf_path"] = hdf_subdir + steps_df["step_... | [
"numpy.average",
"tqdm.tqdm",
"os.path.join",
"pandas.Categorical",
"numpy.append",
"numpy.array",
"numpy.linspace",
"numpy.geomspace",
"pandas.DataFrame",
"pandas.concat",
"numpy.poly1d",
"pandas.read_hdf"
] | [((245, 264), 'pandas.DataFrame', 'pd.DataFrame', (['steps'], {}), '(steps)\n', (257, 264), True, 'import pandas as pd\n'), ((401, 490), 'pandas.Categorical', 'pd.Categorical', (["steps_df['step_name']", "['prototypical', 'single_sources', 'mixtures']"], {}), "(steps_df['step_name'], ['prototypical', 'single_sources',\... |
import pytest
import numpy as np
import os
import pyarrow as pa
import pyarrow.feather as feather
import pandas as pd
from app.services.preprocessor import PreProcessor
from typing import List
@pytest.fixture
def preprocessor() -> PreProcessor:
return PreProcessor("datasets/csvs/train.csv", "datasets/csvs/build... | [
"numpy.random.rand",
"app.services.preprocessor.PreProcessor",
"numpy.savetxt",
"pandas.DataFrame",
"pyarrow.feather.write_feather",
"os.remove"
] | [((260, 330), 'app.services.preprocessor.PreProcessor', 'PreProcessor', (['"""datasets/csvs/train.csv"""', '"""datasets/csvs/building1.csv"""'], {}), "('datasets/csvs/train.csv', 'datasets/csvs/building1.csv')\n", (272, 330), False, 'from app.services.preprocessor import PreProcessor\n'), ((384, 406), 'numpy.random.ran... |
#!/usr/bin/env python
#
# // SPDX-License-Identifier: BSD-3-CLAUSE
#
# (C) Copyright 2018, Xilinx, Inc.
#
import tensorflow as tf
import numpy as np
from xfdnn_compiler_tensorflow import TFFrontend
#from xfdnn.tools.compile.frontends.frontend_caffe import CaffeFrontend
from tensorflow.python.platform import gfile
impo... | [
"tensorflow.image.decode_png",
"xfdnn_compiler_tensorflow.TFFrontend",
"tensorflow.image.decode_bmp",
"tensorflow.Session",
"tensorflow.image.resize_bilinear",
"xdnn_opt.CPUTransform",
"xdnn_opt.FPGATransform",
"tensorflow.GraphDef",
"tensorflow.python.platform.gfile.FastGFile",
"tensorflow.image.... | [((1993, 2013), 'numpy.concatenate', 'np.concatenate', (['pred'], {}), '(pred)\n', (2007, 2013), True, 'import numpy as np\n'), ((2743, 2756), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2754, 2756), True, 'import tensorflow as tf\n'), ((2889, 2906), 'xfdnn_compiler_tensorflow.TFFrontend', 'TFFrontend', ([... |
from copy import deepcopy
import numpy as np
def complete_mol(self, labels):
"""
Take a cell and complete certain molecules
The objective is to end up with a unit cell where the molecules of interest
are complete. The rest of the atoms of the cell must remain intact. Note that
the input atoms are... | [
"copy.deepcopy",
"numpy.cross",
"numpy.linalg.norm",
"numpy.array",
"numpy.dot",
"fromage.utils.mol.Mol"
] | [((769, 828), 'copy.deepcopy', 'deepcopy', (['[a for a in self.atoms if a not in scattered_mol]'], {}), '([a for a in self.atoms if a not in scattered_mol])\n', (777, 828), False, 'from copy import deepcopy\n'), ((2143, 2158), 'numpy.array', 'np.array', (['trans'], {}), '(trans)\n', (2151, 2158), True, 'import numpy as... |
# -*- coding: utf-8 -*-
# @Time : 2020/2/15 16:10
# @Author : <NAME>
# @Email : <EMAIL>
# @File : utils.py
# @Software: PyCharm
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.core.framework import summary_pb2
class AverageMeter(object):
def __init__(self):
self.re... | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"tensorflow.core.framework.summary_pb2.Summary.Value",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.min",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",... | [((1003, 1036), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'train_accuracy', '"""r-"""'], {}), "(x, train_accuracy, 'r-')\n", (1011, 1036), True, 'import matplotlib.pyplot as plt\n'), ((1042, 1074), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'val_accuracy', '"""b--"""'], {}), "(x, val_accuracy, 'b--')\n", (1050, 10... |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import math
import numpy as np
def count_words(filename):
counter = collections.Counter()
wi... | [
"numpy.savez",
"argparse.ArgumentParser",
"math.log",
"collections.Counter",
"numpy.cumsum",
"numpy.arange"
] | [((291, 312), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (310, 312), False, 'import collections\n'), ((2254, 2266), 'numpy.cumsum', 'np.cumsum', (['v'], {}), '(v)\n', (2263, 2266), True, 'import numpy as np\n'), ((2443, 2502), 'numpy.savez', 'np.savez', (["(args.output + '-cdf_base.npz')"], {'cdf':... |
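`count_words` is truncated above; a minimal completion plus the cumulative-frequency step suggested by the `np.cumsum` call in the extract. The file path and whitespace tokenization are assumptions:

```python
import collections
import numpy as np

def count_words(filename):
    counter = collections.Counter()
    with open(filename) as fd:
        for line in fd:
            counter.update(line.strip().split())   # whitespace tokenization
    return counter

counter = count_words('corpus.txt')                  # hypothetical corpus path
v = np.array(sorted(counter.values(), reverse=True))
cdf = np.cumsum(v) / v.sum()                       # coverage of the top-k words
```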
# ---
# jupyter:
# jupytext:
# formats: jupyter_scripts//ipynb,scripts//py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 1.0.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s... | [
"pandas.Series",
"numpy.abs",
"numpy.where",
"numpy.delete",
"pandas.Timedelta",
"numpy.log",
"numpy.floor",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.isnan",
"numpy.vstack",
"numpy.isfinite",
"numpy.nanmax",
"numpy.argmin",
"numpy.percentile",
"numpy.nansum",
"numpy.a... | [((3125, 3144), 'pandas.Timedelta', 'pd.Timedelta', (['"""12h"""'], {}), "('12h')\n", (3137, 3144), True, 'import pandas as pd\n'), ((4823, 4856), 'pandas.Series', 'pd.Series', (['dX'], {'index': 'X.index[:-1]'}), '(dX, index=X.index[:-1])\n', (4832, 4856), True, 'import pandas as pd\n'), ((5690, 5707), 'numpy.array', ... |
# +
import argparse
import os
import pickle
import sys
sys.path.append("..")
import numpy as np
import torchvision
import torchvision.transforms as T
import torch.utils.data as torch_data
from tqdm import tqdm
from models.classifiers import EvalCompoundResNet
# -
def parse_args():
parser = argparse.ArgumentPar... | [
"os.path.exists",
"pickle.dump",
"argparse.ArgumentParser",
"pickle.load",
"numpy.max",
"torchvision.datasets.ImageFolder",
"models.classifiers.EvalCompoundResNet",
"os.path.basename",
"torch.utils.data.DataLoader",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"sys.path.... | [((55, 76), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (70, 76), False, 'import sys\n'), ((300, 325), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (323, 325), False, 'import argparse\n'), ((1629, 1690), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.Image... |
import math
from math import pi
import numpy as np
import open3d as o3d
import matplotlib.pyplot as plt
import cv2
import toml
from .cameraparam import CameraParam
from .fitted_line import FittedLine
from .ransac_fit import ransac_line_fit, ransac_ground_fit
from .util import check_all_false
# TODO: output random s... | [
"numpy.clip",
"math.acos",
"math.cos",
"numpy.array",
"numpy.argsort",
"cv2.ellipse",
"numpy.linalg.norm",
"numpy.arange",
"open3d.geometry.TriangleMesh.create_cylinder",
"numpy.asarray",
"numpy.max",
"numpy.exp",
"numpy.stack",
"numpy.linspace",
"numpy.dot",
"open3d.geometry.TriangleM... | [((897, 918), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab20"""'], {}), "('tab20')\n", (909, 918), True, 'import matplotlib.pyplot as plt\n'), ((3106, 3144), 'numpy.array', 'np.array', (['pcd.points'], {'dtype': 'np.float32'}), '(pcd.points, dtype=np.float32)\n', (3114, 3144), True, 'import numpy as np\n'), ... |
import cv2
import os
from os import listdir, makedirs
from os.path import isfile, join, exists
import numpy as np
import time
import math
DEBUG = True
FACTOR = 2
RESO_X = int(576 / FACTOR)
RESO_Y = int(640 / FACTOR)
CONF_VAL = 0
THRESHOLD = 0
UPPER_BOUND = 230
LOWER_BOUND = 150
def get_file_index(filename):
i... | [
"cv2.rectangle",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.dnn.NMSBoxes",
"os.path.exists",
"numpy.mean",
"cv2.resizeWindow",
"os.listdir",
"cv2.waitKey",
"cv2.dnn.blobFromImage",
"numpy.argmax",
"time.time",
"cv2.namedWindow",
"cv2.imread",
"cv2.imwrite",
"os.makedi... | [((400, 441), 'cv2.namedWindow', 'cv2.namedWindow', (['"""RGB"""', 'cv2.WINDOW_NORMAL'], {}), "('RGB', cv2.WINDOW_NORMAL)\n", (415, 441), False, 'import cv2\n'), ((446, 489), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Depth"""', 'cv2.WINDOW_NORMAL'], {}), "('Depth', cv2.WINDOW_NORMAL)\n", (461, 489), False, 'import cv... |
# coding: utf-8
# # Using Dropout
# Let's see how we can use dropout for early stopping
from concept_dependency_graph import ConceptDependencyGraph
import data_generator as dg
from student import *
import simple_mdp as sm
import dynamics_model_class as dmc
import numpy as np
import dataset_utils
import tensorflow a... | [
"numpy.savez",
"dynamics_model_class.DynamicsModel",
"dataset_utils.preprocess_data_for_rnn",
"concept_dependency_graph.ConceptDependencyGraph",
"numpy.array",
"numpy.zeros",
"simple_mdp.percent_all_seen",
"numpy.random.seed",
"simple_mdp.percent_complete",
"copy.copy",
"time.time",
"simple_md... | [((951, 975), 'concept_dependency_graph.ConceptDependencyGraph', 'ConceptDependencyGraph', ([], {}), '()\n', (973, 975), False, 'from concept_dependency_graph import ConceptDependencyGraph\n'), ((1640, 1683), 'dataset_utils.preprocess_data_for_rnn', 'dataset_utils.preprocess_data_for_rnn', (['data'], {}), '(data)\n', (... |
from __future__ import print_function
import os
import time
import tensorflow as tf
import numpy as np
import sys
from zoneout_wrapper import ZoneoutWrapper
class SequencePredictor():
def add_placeholders(self):
"""Generates placeholder variables to represent the input tensors
"""
self.inp... | [
"tensorflow.shape",
"tensorflow.get_variable",
"numpy.log",
"tensorflow.nn.rnn_cell.DropoutWrapper",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.placeholder",
"tensorflow.nn.rnn_cell.GRUCell",
"tensorflow.nn.dynamic_rnn",
"tensorflow.matmul",
"tensorflow.train.AdamOptimiz... | [((338, 410), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None, self.config.max_length)', 'name': '"""x"""'}), "(tf.int32, shape=(None, self.config.max_length), name='x')\n", (352, 410), True, 'import tensorflow as tf\n'), ((445, 517), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32']... |
import os
import logging
import json
from nnattack.variables import auto_var, get_file_name
from params import (
compare_attacks,
compare_defense,
#compare_nns,
nn_k1_robustness,
nn_k3_robustness,
nn_k1_approx_robustness_figs,
dt_robustness_figs,
rf_robustness_figs,
nn_k1_robustn... | [
"nnattack.variables.auto_var.set_intermidiate_variable",
"nnattack.variables.auto_var.get_var",
"nnattack.variables.auto_var.run_grid_params",
"os.path.exists",
"numpy.where",
"params.dt_robustness",
"sklearn.preprocessing.MinMaxScaler",
"nnattack.variables.get_file_name",
"nnattack.variables.auto_v... | [((595, 635), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (614, 635), False, 'import logging\n'), ((653, 683), 'os.environ.get', 'os.environ.get', (['"""DEBUG"""', '(False)'], {}), "('DEBUG', False)\n", (667, 683), False, 'import os\n'), ((1680, 1738), 'nna... |
import torch
import os
from tqdm import tqdm
import numpy as np
from multiprocessing.pool import Pool
from itertools import islice, cycle
from utils.logging import logger
from utils.misc import ensure_dir
class Vocab(object):
def __init__(self):
self.tok2idx = {}
self.idx2tok = []
self.ad... | [
"os.path.exists",
"itertools.cycle",
"itertools.islice",
"utils.misc.ensure_dir",
"os.path.join",
"numpy.asarray",
"torch.save",
"multiprocessing.pool.Pool",
"utils.logging.logger.info"
] | [((2338, 2358), 'utils.misc.ensure_dir', 'ensure_dir', (['save_dir'], {}), '(save_dir)\n', (2348, 2358), False, 'from utils.misc import ensure_dir\n'), ((2449, 2506), 'utils.logging.logger.info', 'logger.info', (["('Building %s shard %d' % (mode, shard_index))"], {}), "('Building %s shard %d' % (mode, shard_index))\n",... |
import os
import time
import argparse
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from config import get_config, export_config
from model.textcnn import TextCNN
from model.textrnn import TextRNN
from sklearn.model_selection import train_test_split
from dataloader import Word2VecEmbeddings, Doc2Vec... | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"model.textcnn.TextCNN",
"numpy.array",
"tensorflow.set_random_seed",
"dataloader.DataLoader",
"model.textrnn.TextRNN",
"matplotlib.pyplot.imshow",
"config.export_config",
"dataloader.Doc2VecEmbeddings",
"argparse.ArgumentParser",
"numpy.de... | [((388, 476), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""train/test movie review classification model"""'}), "(description=\n 'train/test movie review classification model')\n", (411, 476), False, 'import argparse\n'), ((791, 803), 'config.get_config', 'get_config', ([], {}), '()\... |
import numpy as np
import microdf as mdf
def gini(df, col, w=None, negatives=None):
"""Calculates Gini index.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:param negatives: An optional string indicating how to treat negati... | [
"numpy.amin",
"numpy.sort",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"microdf.weighted_sum",
"numpy.cumsum",
"microdf.weighted_quantile"
] | [((1888, 1936), 'microdf.weighted_quantile', 'mdf.weighted_quantile', (['df', 'col', 'w', '(1 - top_x_pct)'], {}), '(df, col, w, 1 - top_x_pct)\n', (1909, 1936), True, 'import microdf as mdf\n'), ((1957, 2007), 'microdf.weighted_sum', 'mdf.weighted_sum', (['df[df[col] >= threshold]', 'col', 'w'], {}), '(df[df[col] >= t... |
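For reference, a self-contained weighted Gini computation via the Lorenz curve, consistent with the `np.argsort`/`np.cumsum`/`np.sum` calls in the API list; the negative-value handling the docstring mentions is omitted here:

```python
import numpy as np

def weighted_gini(values, weights=None):
    # Gini index from the Lorenz curve; a minimal sketch
    values = np.asarray(values, dtype=float)
    weights = np.ones_like(values) if weights is None else np.asarray(weights, dtype=float)
    order = np.argsort(values)
    values, weights = values[order], weights[order]
    F = np.cumsum(weights) / np.sum(weights)   # cumulative population share
    L = np.cumsum(values * weights)
    L = L / L[-1]                              # cumulative value share
    F0 = np.concatenate(([0.0], F))
    L0 = np.concatenate(([0.0], L))
    # trapezoid rule: Gini = 1 - 2 * area under the Lorenz curve
    return 1.0 - np.sum((F0[1:] - F0[:-1]) * (L0[1:] + L0[:-1]))
```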
import math
import numpy as np
from numba import cuda, float32
from numba.cuda.testing import unittest
import numba.cuda.random
from numba.cuda.testing import skip_on_cudasim, CUDATestCase
from numba.cuda.random import \
xoroshiro128p_uniform_float32, xoroshiro128p_normal_float32, \
xoroshiro128p_uniform_flo... | [
"numba.cuda.random.create_xoroshiro128p_states",
"numba.cuda.random.xoroshiro128p_normal_float64",
"numba.cuda.random.xoroshiro128p_uniform_float32",
"numpy.unique",
"numba.cuda.grid",
"numba.cuda.random.xoroshiro128p_normal_float32",
"math.sqrt",
"numba.cuda.random.xoroshiro128p_uniform_float64",
"... | [((512, 524), 'numba.cuda.grid', 'cuda.grid', (['(1)'], {}), '(1)\n', (521, 524), False, 'from numba import cuda, float32\n'), ((891, 903), 'numba.cuda.grid', 'cuda.grid', (['(1)'], {}), '(1)\n', (900, 903), False, 'from numba import cuda, float32\n'), ((2636, 2688), 'numba.cuda.testing.skip_on_cudasim', 'skip_on_cudas... |
# -*- coding: utf-8 -*-
# Functions and Script to extract data
import blocksci
import pandas as pd
import numpy as np
import networkx as nx
import multiprocessing as mp
import itertools
import random
import time
import string
import pickle
import csv
import gc
import os, sys
from functools import partial
#********... | [
"networkx.number_of_selfloops",
"networkx.induced_subgraph",
"multiprocessing.cpu_count",
"numpy.array",
"blocksci.Blockchain",
"os.path.exists",
"networkx.info",
"itertools.chain.from_iterable",
"networkx.number_of_nodes",
"pandas.DataFrame",
"sys.stdout.flush",
"networkx.MultiDiGraph",
"ra... | [((8556, 8570), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (8568, 8570), True, 'import multiprocessing as mp\n'), ((8579, 8622), 'blocksci.Blockchain', 'blocksci.Blockchain', (['"""/home/ubuntu/bitcoin"""'], {}), "('/home/ubuntu/bitcoin')\n", (8598, 8622), False, 'import blocksci\n'), ((9788, 9799),... |
import datetime
import numpy as np
import libpySat as pySat
from astropy import _erfa as erfa
from scipy.misc import derivative
from scipy import interpolate
class TransformPolarMotion:
def __init__(self,fxp,fyp):
self.fxp=fxp
self.fyp=fyp
self.epochSave = datetime.datetime.now()
... | [
"astropy._erfa.sp00",
"libpySat.UTC2MJD",
"libpySat.pySatTime.UTC2MJD",
"scipy.misc.derivative",
"datetime.datetime.now",
"libpySat.RotationMatrix3DZ",
"libpySat.RotationMatrix3DY",
"numpy.matmul",
"libpySat.RotationMatrix3DX",
"numpy.matrix"
] | [((288, 311), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (309, 311), False, 'import datetime\n'), ((335, 392), 'numpy.matrix', 'np.matrix', (['([0, 0, 0], [0, 0, 0], [0, 0, 0])'], {'dtype': 'float'}), '(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)\n', (344, 392), True, 'import numpy as np\n'... |
import numpy as np
import pandas as pd
class WetChickenBaselinePolicy:
def __init__(self, env, gamma, method='heuristic', epsilon=0.1, convergence=0.1, learning_rate=0.1, max_nb_it=999,
order_epsilon=3, order_learning_rate=3):
self.env = env
self.gamma = gamma
self.nb_stat... | [
"numpy.ones",
"numpy.random.choice",
"numpy.argmax",
"numpy.max",
"numpy.zeros"
] | [((7130, 7141), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (7138, 7141), True, 'import numpy as np\n'), ((394, 436), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (401, 436), True, 'import numpy as np\n'), ((859, 902), 'numpy.zeros', 'np.zeros', (... |
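The `epsilon` and `learning_rate` parameters, together with the `np.random.choice`/`np.argmax` calls in the API list, point to an epsilon-greedy policy over a tabular Q function. A minimal sketch of that action rule; the surrounding iteration scheme is not visible in the sample:

```python
import numpy as np

def epsilon_greedy_action(q, state, epsilon, nb_actions):
    # explore uniformly with probability epsilon, otherwise exploit
    if np.random.random() < epsilon:
        return np.random.choice(nb_actions)
    return np.argmax(q[state])
```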
import matplotlib.pyplot as plt
import networkx as nx
import numpy
plt.ion()
# test
def plot_neural_network(mek):
G = nx.DiGraph(numpy.transpose(mek.nn.links))
mylabels = dict(zip(range(len(mek.nn.neurons)),
[to_string(i)+'\n#'
+ str(ix)+'' for (ix, i) in enum... | [
"networkx.layout.spring_layout",
"networkx.relabel_nodes",
"matplotlib.pyplot.gca",
"networkx.draw_networkx_nodes",
"networkx.draw_networkx_labels",
"matplotlib.pyplot.ion",
"numpy.transpose",
"networkx.draw_networkx_edges",
"matplotlib.pyplot.show"
] | [((68, 77), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (75, 77), True, 'import matplotlib.pyplot as plt\n'), ((354, 383), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['G', 'mylabels'], {}), '(G, mylabels)\n', (370, 383), True, 'import networkx as nx\n'), ((394, 425), 'networkx.layout.spring_layout', 'nx.l... |
"""
Overview
--------
general info about this module
Classes and Inheritance Structure
----------------------------------------------
.. inheritance-diagram::
Summary
---------
.. autosummary::
list of the module you want
Module API
----------
"""
from __future__ import absolute_import, division, print... | [
"numpy.float",
"astropy.coordinates.Angle",
"astropy.time.TimeDelta",
"builtins.super",
"builtins.str",
"numpy.append",
"astropy.time.Time",
"ast.literal_eval",
"numpy.int"
] | [((2855, 2881), 'numpy.append', 'np.append', (['self.msk', '(False)'], {}), '(self.msk, False)\n', (2864, 2881), True, 'import numpy as np\n'), ((12208, 12241), 'astropy.time.Time', 'astropyTime', (['value'], {'format': 'format'}), '(value, format=format)\n', (12219, 12241), True, 'from astropy.time import Time as astr... |
# -*- coding: utf-8 -*-
import numpy as np
from scipy.signal import fftconvolve
def energy(traces, duration, dt=1):
"""
    Compute a mean-squared energy measurement for each point of a
seismic section.
:param traces: The data array to use for calculating MS energy.
Must be 1D or 2D n... | [
"numpy.zeros",
"scipy.signal.fftconvolve",
"numpy.ones",
"numpy.ndim"
] | [((651, 673), 'numpy.zeros', 'np.zeros', (['traces.shape'], {}), '(traces.shape)\n', (659, 673), True, 'import numpy as np\n'), ((752, 770), 'numpy.ones', 'np.ones', (['n_samples'], {}), '(n_samples)\n', (759, 770), True, 'import numpy as np\n'), ((779, 794), 'numpy.ndim', 'np.ndim', (['signal'], {}), '(signal)\n', (78... |
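The MS energy at each sample is the average of squared amplitudes over a sliding window, which the `fftconvolve` import implements as a convolution with a boxcar kernel. A minimal 1-D sketch; the edge handling (`mode='same'`) is an assumption:

```python
import numpy as np
from scipy.signal import fftconvolve

def ms_energy(trace, duration, dt=1):
    n_samples = int(duration / dt)
    window = np.ones(n_samples) / n_samples   # boxcar averaging kernel
    return fftconvolve(trace ** 2, window, mode='same')
```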
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
# Copyright (C) 2003-2018 European Synchrotron Radiation Facility, Grenoble,
# France
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of... | [
"logging.getLogger",
"scipy.optimize.optimize.fmin",
"numpy.log",
"numpy.ascontiguousarray",
"scipy.interpolate.interp1d",
"numpy.array",
"scipy.optimize.optimize.fminbound",
"fabio.open",
"fabio.factory",
"numpy.where",
"numpy.sort",
"scipy.ndimage.label",
"numpy.maximum",
"weakref.WeakKe... | [((2111, 2138), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2128, 2138), False, 'import logging\n'), ((11662, 11705), 'scipy.ndimage.morphology.binary_dilation', 'ndimage.morphology.binary_dilation', (['invalid'], {}), '(invalid)\n', (11696, 11705), False, 'from scipy import ndimage\n... |
import numpy as np
from scipy.misc import toimage
from scipy.ndimage.filters import gaussian_filter
from os import mkdir
from os.path import dirname, join
from time import time
from keras.models import Model
from keras.layers import Dense
from keras import backend as K
from keras.applications.vgg16 import VGG16
# de... | [
"numpy.clip",
"keras.applications.vgg16.VGG16",
"scipy.ndimage.filters.gaussian_filter",
"keras.backend.learning_phase",
"keras.backend.gradients",
"numpy.array",
"numpy.linalg.norm",
"keras.layers.Dense",
"keras.backend.image_data_format",
"os.mkdir",
"keras.models.Model",
"numpy.random.norma... | [((710, 746), 'numpy.array', 'np.array', (['[103.939, 116.779, 123.68]'], {}), '([103.939, 116.779, 123.68])\n', (718, 746), True, 'import numpy as np\n'), ((372, 389), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (379, 389), False, 'from os.path import dirname, join\n'), ((407, 425), 'os.mkdir', '... |
import json
import logging
import os
import sys
from argparse import ArgumentParser
import re
import numpy as np
import pandas as pd
import torch
from transformers import GPT2Tokenizer
from src.data.cleaning import mask_not_na, inds_unique, mask_long_enough
from src.data.nli import TransformersSeqPairDataset
from src... | [
"logging.getLogger",
"src.data.nli.TransformersSeqPairDataset",
"logging.StreamHandler",
"pandas.read_csv",
"src.data.cleaning.inds_unique",
"logging.info",
"os.path.exists",
"argparse.ArgumentParser",
"src.data.cleaning.mask_not_na",
"numpy.random.seed",
"numpy.random.permutation",
"src.data.... | [((380, 396), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (394, 396), False, 'from argparse import ArgumentParser\n'), ((1810, 1829), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1827, 1829), False, 'import logging\n'), ((2809, 2842), 'pandas.read_csv', 'pd.read_csv', (['args.paraphra... |
import numpy as np
from pyspark.sql import SparkSession
from pyspark import SparkContext, SparkConf
from TransEmodule import utils
def check_entities(x, map):
if x in map:
return map[x]
else:
return None
def calculate_rankings(rank_list):
flat = rank_list.map(lambda x: x[0]).persist()
... | [
"numpy.mean",
"TransEmodule.utils.get_id_by_value",
"numpy.where",
"TransEmodule.utils.load_dataset",
"pyspark.SparkConf",
"numpy.square",
"numpy.argsort",
"TransEmodule.utils.restore",
"numpy.concatenate",
"pyspark.SparkContext"
] | [((4227, 4325), 'TransEmodule.utils.restore', 'utils.restore', (['"""/home/ubuntu/entity_embedding_999.pkl"""', '"""/home/ubuntu/label_embedding_999.pkl"""'], {}), "('/home/ubuntu/entity_embedding_999.pkl',\n '/home/ubuntu/label_embedding_999.pkl')\n", (4240, 4325), False, 'from TransEmodule import utils\n'), ((4429... |
from unittest import TestCase
import numpy as np
import dianna
import dianna.visualization
from dianna.methods import LIME
from tests.test_onnx_runner import generate_data
from tests.utils import ModelRunner
from tests.utils import run_model
class LimeOnImages(TestCase):
def test_lime_function(self):
np.... | [
"tests.test_onnx_runner.generate_data",
"numpy.allclose",
"dianna.explain_text",
"numpy.random.random",
"dianna.methods.LIME",
"tests.utils.ModelRunner",
"numpy.random.seed",
"dianna.explain_image",
"numpy.load"
] | [((1889, 1949), 'tests.utils.ModelRunner', 'ModelRunner', (['model_path', 'word_vector_file'], {'max_filter_size': '(5)'}), '(model_path, word_vector_file, max_filter_size=5)\n', (1900, 1949), False, 'from tests.utils import ModelRunner\n'), ((2487, 2534), 'numpy.allclose', 'np.allclose', (['scores', 'expected_scores']... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 21 16:52:10 2017
@author: margauxmouchene
"""
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from landlab import RasterModelGrid
from landlab.components import (
FlowAccumulator,
FlowDirectorSteepest,
Tr... | [
"landlab.components.FlowAccumulator",
"landlab.RasterModelGrid",
"landlab.components.TransportLengthHillslopeDiffuser",
"numpy.array",
"numpy.testing.assert_almost_equal",
"pytest.raises",
"landlab.components.FlowDirectorSteepest"
] | [((408, 433), 'landlab.RasterModelGrid', 'RasterModelGrid', (['(10, 10)'], {}), '((10, 10))\n', (423, 433), False, 'from landlab import RasterModelGrid\n'), ((535, 575), 'landlab.components.FlowAccumulator', 'FlowAccumulator', (['mg'], {'flow_director': '"""MFD"""'}), "(mg, flow_director='MFD')\n", (550, 575), False, '... |
#!/usr/bin/env python3
import logging
import numpy as np
import time
import torch
import cv2
logger = logging.getLogger(__name__)
def retry_load_images(image_paths, retry=10, backend="pytorch"):
"""
    Load images, retrying failed loads up to a fixed number of times.
Args:
image_paths (li... | [
"logging.getLogger",
"time.sleep",
"numpy.stack",
"cv2.imread",
"torch.linspace"
] | [((105, 132), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (122, 132), False, 'import logging\n'), ((595, 617), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (605, 617), False, 'import cv2\n'), ((896, 911), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (9... |
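A minimal sketch of the retry loop the docstring describes, consistent with the `cv2.imread`/`time.sleep`/`numpy.stack` calls in the API list; the backoff interval and error type are assumptions:

```python
import time
import cv2
import numpy as np

def retry_load_images(image_paths, retry=10):
    for _ in range(retry):
        imgs = [cv2.imread(p) for p in image_paths]
        if all(img is not None for img in imgs):   # cv2.imread returns None on failure
            return np.stack(imgs)                 # (N, H, W, C) batch
        time.sleep(1.0)
    raise RuntimeError("Failed to load images: {}".format(image_paths))
```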
"""Shelving Filter Cascade with Adjustable Transition Slope and Bandwidth
<NAME>, <NAME>, <NAME>
In: Proc. of 148th AES Convention, Virtual Vienna, May 2020, Paper 10339
http://www.aes.org/e-lib/browse.cfm?elib=20756
"""
import numpy as np
from scipy.signal import tf2sos, freqs
from matplotlib import rcParams
def ha... | [
"numpy.prod",
"numpy.log10",
"numpy.sqrt",
"numpy.linalg.multi_dot",
"numpy.array",
"scipy.signal.freqs",
"numpy.arange",
"numpy.divmod",
"numpy.exp",
"numpy.concatenate",
"numpy.abs",
"numpy.ceil",
"scipy.signal.tf2sos",
"numpy.ones",
"numpy.squeeze",
"numpy.sign",
"numpy.log2",
"... | [((676, 686), 'numpy.sign', 'np.sign', (['G'], {}), '(G)\n', (683, 686), True, 'import numpy as np\n'), ((8566, 8591), 'numpy.zeros', 'np.zeros', (['(num_biquad, 6)'], {}), '((num_biquad, 6))\n', (8574, 8591), True, 'import numpy as np\n'), ((9018, 9043), 'numpy.zeros', 'np.zeros', (['(num_biquad, 6)'], {}), '((num_biq... |
import inspect
import sys
import numpy as np
import attrdict
from mtwaffle import graphs
from mtwaffle import mt
class Site(attrdict.AttrDict):
index_map = {
'xx': [0, 0],
'xy': [0, 1],
'yx': [1, 0],
'yy': [1, 1]
}
EXCLUDED_CALLABLES = ('between_freqs', )
def __in... | [
"mtwaffle.graphs.plot_impedance_tensors",
"mtwaffle.mt.callables.items",
"mtwaffle.graphs.plot_mohr_imp",
"numpy.asarray",
"mtwaffle.graphs.plot_ptensell",
"inspect.signature",
"mtwaffle.graphs.plot_mohr_ptensor",
"mtwaffle.graphs.plot_ptensell_filled",
"mtwaffle.graphs.plot_res_phase"
] | [((450, 467), 'numpy.asarray', 'np.asarray', (['freqs'], {}), '(freqs)\n', (460, 467), True, 'import numpy as np\n'), ((486, 500), 'numpy.asarray', 'np.asarray', (['zs'], {}), '(zs)\n', (496, 500), True, 'import numpy as np\n'), ((1758, 1778), 'mtwaffle.mt.callables.items', 'mt.callables.items', ([], {}), '()\n', (1776... |
# Copyright 2021 Dakewe Biotech Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by... | [
"torchvision.transforms.ToTensor",
"torch.from_numpy",
"numpy.asarray",
"cv2.cvtColor"
] | [((1328, 1366), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (1340, 1366), False, 'import cv2\n'), ((1098, 1136), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (1110, 1136), False, 'import cv2\n'), ((1783, 1... |
import fmtrack
import os
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pickle
import pyvista
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
... | [
"numpy.argsort",
"pyvista.Arrow",
"sklearn.gaussian_process.GaussianProcessRegressor",
"numpy.mean",
"pyvista.PolyData",
"numpy.sort",
"numpy.asarray",
"matplotlib.pyplot.style.use",
"sklearn.neighbors.KernelDensity",
"numpy.max",
"numpy.exp",
"numpy.linspace",
"numpy.vstack",
"numpy.min",... | [((1131, 1218), 'numpy.loadtxt', 'np.loadtxt', (["(root_directory + '/Gel_cell_coords/' + file_prefix_1 + '_cell_mesh.txt')"], {}), "(root_directory + '/Gel_cell_coords/' + file_prefix_1 +\n '_cell_mesh.txt')\n", (1141, 1218), True, 'import numpy as np\n'), ((1230, 1320), 'numpy.loadtxt', 'np.loadtxt', (["(root_dire... |
import numpy as np
train_ratings_path = "./../Data/netflix/TrainingRatings.txt"
test_ratings_path = "./../Data/netflix/TestingRatings.txt"
map_users={}
map_titles={}
data_matrix = np.empty((28978,1821),dtype=np.float32)
data_matrix[:] = np.nan
with open(train_ratings_path,'r') as reader:
counter_titles=0
c... | [
"numpy.nanmean",
"numpy.sqrt",
"numpy.empty",
"numpy.isnan"
] | [((184, 225), 'numpy.empty', 'np.empty', (['(28978, 1821)'], {'dtype': 'np.float32'}), '((28978, 1821), dtype=np.float32)\n', (192, 225), True, 'import numpy as np\n'), ((730, 761), 'numpy.nanmean', 'np.nanmean', (['data_matrix'], {'axis': '(1)'}), '(data_matrix, axis=1)\n', (740, 761), True, 'import numpy as np\n'), (... |
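Unrated entries are stored as NaN, so per-user statistics must skip them; the `np.nanmean` call in the extract does exactly that. A short sketch of the mean-centering step common in user-based collaborative filtering; the centering itself is an assumption, not visible in the sample:

```python
import numpy as np

user_means = np.nanmean(data_matrix, axis=1)        # mean over rated titles only
centered = data_matrix - user_means[:, np.newaxis]
```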
import random
from precise.skaters.managerutil.managertesting import manager_test_run
from precise.skaters.managers.equalmanagers import equal_daily_long_manager, equal_long_manager
from precise.skaters.managers.equalmanagers import equal_weekly_long_manager, equal_weekly_buy_and_hold_long_manager
from precise.skaterto... | [
"random.choice",
"precise.skatertools.data.equityhistorical.random_cached_equity_dense",
"numpy.testing.assert_array_almost_equal",
"precise.skaters.managerutil.managertesting.manager_test_run"
] | [((540, 568), 'random.choice', 'random.choice', (['LONG_MANAGERS'], {}), '(LONG_MANAGERS)\n', (553, 568), False, 'import random\n'), ((573, 598), 'precise.skaters.managerutil.managertesting.manager_test_run', 'manager_test_run', ([], {'mgr': 'mgr'}), '(mgr=mgr)\n', (589, 598), False, 'from precise.skaters.managerutil.m... |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import cv2
from Text_B_ocr_crnn_model_file.crnn.util import resizeNormalize, strLabelConverter
class CRNN:
def __init__(self, alphabet=None):
self.alphabet = alphabet
def load_weights(self, path):
ocrPath = path
ocrPat... | [
"cv2.dnn.readNetFromTensorflow",
"numpy.argmax",
"numpy.array",
"Text_B_ocr_crnn_model_file.crnn.util.strLabelConverter",
"Text_B_ocr_crnn_model_file.crnn.util.resizeNormalize"
] | [((378, 428), 'cv2.dnn.readNetFromTensorflow', 'cv2.dnn.readNetFromTensorflow', (['ocrPath', 'ocrPathtxt'], {}), '(ocrPath, ocrPathtxt)\n', (407, 428), False, 'import cv2\n'), ((476, 502), 'Text_B_ocr_crnn_model_file.crnn.util.resizeNormalize', 'resizeNormalize', (['image', '(32)'], {}), '(image, 32)\n', (491, 502), Fa... |
## @package teetool
# This module contains the Visual_2d class
#
# See Visual_2d class for more details
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
import teetool as tt
## Visual_2d class generates the 2d output using Matplotlib
#
# Even 3-dimensional trajectories can... | [
"matplotlib.pyplot.contourf",
"numpy.ones_like",
"numpy.greater",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.contour",
"numpy.min",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((710, 749), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'facecolor': '"""white"""'}), "(facecolor='white', **kwargs)\n", (720, 749), True, 'import matplotlib.pyplot as plt\n'), ((6461, 6546), 'numpy.array', 'np.array', (['[[x_lo, y_lo], [x_hi, y_lo], [x_hi, y_hi], [x_lo, y_hi], [x_lo, y_lo]]'], {}), '([[x_lo, y_l... |
import kmeans
import json
import numpy as np
NUM_GAUSSIANS = 32
DO_KMEANS = False
DEBUG = True
mixture_weights = [1.0/NUM_GAUSSIANS] * NUM_GAUSSIANS
if DEBUG:
print ("mixture_weights: ", mixture_weights)
print("Loading parsed data...")
traindata_processed_file = open("parsed_data/data1.universalenrollparsed", "... | [
"numpy.array",
"json.dumps",
"kmeans.do_kmeans",
"numpy.var"
] | [((811, 825), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (819, 825), True, 'import numpy as np\n'), ((842, 865), 'numpy.var', 'np.var', (['data_np'], {'axis': '(0)'}), '(data_np, axis=0)\n', (848, 865), True, 'import numpy as np\n'), ((481, 507), 'kmeans.do_kmeans', 'kmeans.do_kmeans', (['data', '(32)'], {}... |
import tensorflow as tf
import numpy as np
from gpflow.params import DataHolder, Minibatch
from gpflow import autoflow, params_as_tensors, ParamList
from gpflow.models.model import Model
from gpflow.mean_functions import Identity, Linear
from gpflow.mean_functions import Zero
from gpflow.quadrature import mvhermgauss... | [
"numpy.eye",
"gpflow.mean_functions.Zero",
"gpflow.mean_functions.Linear",
"numpy.zeros",
"numpy.concatenate",
"numpy.linalg.svd",
"numpy.random.randn",
"gpflow.mean_functions.Identity"
] | [((555, 561), 'gpflow.mean_functions.Zero', 'Zero', ([], {}), '()\n', (559, 561), False, 'from gpflow.mean_functions import Zero\n'), ((1833, 1839), 'gpflow.mean_functions.Zero', 'Zero', ([], {}), '()\n', (1837, 1839), False, 'from gpflow.mean_functions import Zero\n'), ((2598, 2625), 'numpy.concatenate', 'np.concatena... |
import sys
sys.path.append('../')
import config
import pymysql.cursors
import pandas as pd
import numpy as np
from scipy import io as scipyio
from tempfile import SpooledTemporaryFile
from scipy.sparse import vstack as vstack_sparse_matrices
# Function to reassemble the p matrix from the vectors
def reconstitute_vect... | [
"scipy.io.mmread",
"tempfile.SpooledTemporaryFile",
"numpy.array",
"pandas.DataFrame",
"scipy.sparse.vstack",
"pandas.read_sql",
"sys.path.append"
] | [((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((2329, 2372), 'scipy.sparse.vstack', 'vstack_sparse_matrices', (['listOfSparseVectors'], {}), '(listOfSparseVectors)\n', (2351, 2372), True, 'from scipy.sparse import vstack as vstack_sparse_matrices\n... |
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
print("Python Version:", torch.__version__)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1,... | [
"numpy.mean",
"torch.nn.functional.nll_loss",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.nn.Linear",
"numpy.std",
"torch.nn.functional.log_softmax",
"torch.no_grad",
"torch.nn.functional.max_pool2d",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor"
] | [((2190, 2203), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (2197, 2203), True, 'import numpy as np\n'), ((2204, 2216), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (2210, 2216), True, 'import numpy as np\n'), ((308, 330), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(20)', '(5)', '(1)'], {}), '(1, 20, 5, 1... |
'''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
... | [
"pandas.read_csv",
"sklearn.metrics.auc",
"sys.exit",
"train_cvopt.train_cvopt",
"ludwig.api.LudwigModel.load",
"train_mlblocks.train_mlblocks",
"pyfiglet.Figlet",
"matplotlib.pyplot.xlabel",
"platform.system",
"os.mkdir",
"train_neuraxle.train_neuraxle",
"sklearn.metrics.mean_absolute_error",... | [((2550, 2568), 'pyfiglet.Figlet', 'Figlet', ([], {'font': '"""doh"""'}), "(font='doh')\n", (2556, 2568), False, 'from pyfiglet import Figlet\n'), ((2600, 2619), 'pyfiglet.Figlet', 'Figlet', ([], {'font': '"""doom"""'}), "(font='doom')\n", (2606, 2619), False, 'from pyfiglet import Figlet\n'), ((16731, 16742), 'os.getc... |
import numpy as np
import cvxpy as cvx
import util
def set_contains_array(S, a):
"""
:param S: list of np.ndarray
:param a: np.ndarray
:return: contains, 0 or 1
"""
contains = 0
for b in S:
        if not (a - b).any(): # (a - b) is all zeros, i.e. a equals b, so a is in S
contains = 1
return contains
... | [
"cvxpy.Variable",
"cvxpy.Problem",
"numpy.ones",
"numpy.random.random",
"numpy.random.choice",
"numpy.argmax",
"numpy.sum",
"numpy.dot",
"numpy.zeros",
"numpy.random.seed",
"numpy.ravel",
"numpy.all",
"time.time",
"cvxpy.Maximize"
] | [((1555, 1569), 'cvxpy.Variable', 'cvx.Variable', ([], {}), '()\n', (1567, 1569), True, 'import cvxpy as cvx\n'), ((1578, 1593), 'cvxpy.Variable', 'cvx.Variable', (['S'], {}), '(S)\n', (1590, 1593), True, 'import cvxpy as cvx\n'), ((1610, 1625), 'cvxpy.Maximize', 'cvx.Maximize', (['d'], {}), '(d)\n', (1622, 1625), True... |
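For reference, the membership test in `set_contains_array` can be written more directly with `np.array_equal`, with the same behavior for equal-shaped arrays; a minimal equivalent:

```python
import numpy as np

def set_contains_array(S, a):
    # 1 if some array in S equals `a` elementwise, else 0
    return int(any(np.array_equal(a, b) for b in S))
```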
# -*- coding: utf-8 -*-
# @author : wanglei
# @date : 2021/2/19 1:47 PM
# @description :
import numpy as np
"""
感应器对象
"""
class Perceptron(object):
"""
该方法为感应器的初始化方法
eta:学习速率
n_iter:学习次数(迭代次数)
"""
def __init__(self, eta=0.01, n_iter=10):
self.eta = eta
self.n_iter = n_iter
... | [
"numpy.where",
"numpy.dot",
"numpy.zeros",
"pandas.read_csv"
] | [((1812, 1869), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/a1/Downloads/iris.data"""'], {'header': 'None'}), "('/Users/a1/Downloads/iris.data', header=None)\n", (1823, 1869), True, 'import pandas as pd\n'), ((1969, 2004), 'numpy.where', 'np.where', (["(y == 'Iris-setosa')", '(-1)', '(1)'], {}), "(y == 'Iris-setosa'... |
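The Perceptron snippet stops at __init__; a hedged sketch of the usual Rosenblatt fit/predict pair, matching the np.zeros, np.dot and np.where calls in the API list (the method bodies are assumptions in the standard textbook form):

import numpy as np

class Perceptron(object):
    def __init__(self, eta=0.01, n_iter=10):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        self.w_ = np.zeros(1 + X.shape[1])  # w_[0] is the bias term
        self.errors_ = []
        for _ in range(self.n_iter):
            errors = 0
            for xi, target in zip(X, y):
                update = self.eta * (target - self.predict(xi))
                self.w_[1:] += update * xi
                self.w_[0] += update
                errors += int(update != 0.0)
            self.errors_.append(errors)
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        return np.where(self.net_input(X) >= 0.0, 1, -1)

# Usage on a trivially separable 1-D problem.
p = Perceptron().fit(np.array([[0.0], [1.0]]), np.array([-1, 1]))
print(p.predict(np.array([[2.0]])))  # [1]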
########################################################################
# Copyright 2021, UChicago Argonne, LLC
#
# Licensed under the BSD-3 License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a
# copy of the License at
#
# https://opensource.org/licenses/BSD-... | [
"numpy.array",
"numpy.zeros"
] | [((8731, 8944), 'numpy.array', 'np.array', (["[Cf_sc['laminar'] * asm_obj.bundle_params['de'] / asm_obj.params['de'] ** 2,\n Cf_sc['turbulent'] * asm_obj.bundle_params['de'] ** _M['turbulent'] / \n asm_obj.params['de'] ** (_M['turbulent'] + 1)]"], {}), "([Cf_sc['laminar'] * asm_obj.bundle_params['de'] / asm_obj.p... |
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from rl.dataset import ReplayBuffer, RandomSampler
from rl.base_agent import BaseAgent
from rl.policies.mlp_actor_critic import MlpActor, MlpCritic
from util.logger import logger
from util.mpi import mpi_average
from util.pytorch import ... | [
"util.pytorch.compute_gradient_norm",
"rl.dataset.ReplayBuffer",
"util.pytorch.obs2tensor",
"env.action_spec.ActionSpec",
"torch.min",
"rl.policies.mlp_actor_critic.MlpActor",
"numpy.isfinite",
"util.pytorch.sync_networks",
"rl.policies.mlp_actor_critic.MlpCritic",
"util.pytorch.count_parameters",... | [((2111, 2166), 'rl.policies.mlp_actor_critic.MlpActor', 'MlpActor', (['config', 'ob_space', 'ac_space'], {'tanh_policy': '(False)'}), '(config, ob_space, ac_space, tanh_policy=False)\n', (2119, 2166), False, 'from rl.policies.mlp_actor_critic import MlpActor, MlpCritic\n'), ((2193, 2248), 'rl.policies.mlp_actor_critic... |
import math
import numpy as np
import cv2
import json
import argparse
def augment_homogeneous(V, augment):
""" Augment a 3xN array of vectors into a 4xN array of homogeneous coordinates
Args:
        V (np.array 3xN): Array of vectors
augment (float): The value to fill in for the W coordinate
Retu... | [
"numpy.sqrt",
"cv2.remap",
"math.sqrt",
"math.cos",
"numpy.array",
"numpy.linalg.norm",
"numpy.sin",
"scipy.interpolate.interp2d",
"numpy.flip",
"math.tan",
"argparse.ArgumentParser",
"numpy.linspace",
"numpy.meshgrid",
"numpy.maximum",
"numpy.ones",
"numpy.cos",
"cv2.imread",
"num... | [((397, 422), 'numpy.zeros', 'np.zeros', (['(4, V.shape[1])'], {}), '((4, V.shape[1]))\n', (405, 422), True, 'import numpy as np\n'), ((823, 856), 'numpy.linalg.norm', 'np.linalg.norm', (['V[0:3, :]'], {'axis': '(0)'}), '(V[0:3, :], axis=0)\n', (837, 856), True, 'import numpy as np\n'), ((912, 922), 'numpy.copy', 'np.c... |
# Copyright 2019, FBPIC contributors
# Authors: <NAME>, <NAME>
# License: 3-Clause-BSD-LBNL
"""
This file is part of the Fourier-Bessel Particle-In-Cell code (FB-PIC)
It defines the picmi Simulation interface
"""
import numpy as np
from scipy.constants import c, e, m_e
from .particle_charge_and_mass import particle_ch... | [
"picmistandard.PICMI_Simulation.add_laser",
"picmistandard.PICMI_Simulation.add_diagnostic",
"fbpic.openpmd_diag.FieldDiagnostic",
"fbpic.lpa_utils.bunch.add_particle_bunch_gaussian",
"fbpic.lpa_utils.laser.GaussianLaser",
"fbpic.lpa_utils.laser.add_laser_pulse",
"fbpic.openpmd_diag.ParticleDiagnostic",... | [((3469, 3526), 'picmistandard.PICMI_Simulation.add_laser', 'PICMI_Simulation.add_laser', (['self', 'laser', 'injection_method'], {}), '(self, laser, injection_method)\n', (3495, 3526), False, 'from picmistandard import PICMI_Simulation, PICMI_CylindricalGrid\n'), ((4589, 4700), 'fbpic.lpa_utils.laser.add_laser_pulse',... |
import math
import numpy as np
import os
import pickle
from pymatgen.core.surface import SlabGenerator, get_symmetrically_distinct_miller_indices
from pymatgen.io.ase import AseAtomsAdaptor
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from .constants import MAX_MILLER, COVALENT_MATERIALS_MPIDS
class Bu... | [
"pymatgen.io.ase.AseAtomsAdaptor.get_atoms",
"numpy.cross",
"numpy.random.choice",
"pickle.load",
"pymatgen.core.surface.SlabGenerator",
"pymatgen.core.surface.get_symmetrically_distinct_miller_indices",
"pymatgen.symmetry.analyzer.SpacegroupAnalyzer",
"pymatgen.io.ase.AseAtomsAdaptor.get_structure"
] | [((5132, 5177), 'numpy.random.choice', 'np.random.choice', (['possible_n_elems'], {'p': 'weights'}), '(possible_n_elems, p=weights)\n', (5148, 5177), True, 'import numpy as np\n'), ((7442, 7508), 'pymatgen.core.surface.get_symmetrically_distinct_miller_indices', 'get_symmetrically_distinct_miller_indices', (['bulk_stru... |
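A hedged sketch of the slab-enumeration pattern the imports point at, pairing get_symmetrically_distinct_miller_indices with SlabGenerator (the bulk structure and size settings are illustrative):

from pymatgen.core import Structure, Lattice
from pymatgen.core.surface import (SlabGenerator,
                                   get_symmetrically_distinct_miller_indices)

# Illustrative bulk: conventional fcc Cu.
bulk = Structure(Lattice.cubic(3.6), ['Cu'] * 4,
                [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]])
for miller in get_symmetrically_distinct_miller_indices(bulk, 1):
    gen = SlabGenerator(bulk, miller, min_slab_size=7.0, min_vacuum_size=20.0)
    print(miller, len(gen.get_slabs()))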
#! /usr/bin/env python
# Copyright 2018 The Yawn Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless re... | [
"tensorflow.estimator.RunConfig",
"argparse.ArgumentParser",
"data.stochastic_quantized_sine_wave.get_numpy_data",
"model.wavenet_model.WaveNetModel",
"tensorflow.logging.set_verbosity",
"numpy.diff",
"tensorflow.estimator.inputs.numpy_input_fn",
"numpy.array",
"numpy.random.randint",
"tensorflow.... | [((1939, 1986), 'data.stochastic_quantized_sine_wave.get_numpy_data', 'get_numpy_data', (['(2000)', 'quantization'], {'scale': 'scale'}), '(2000, quantization, scale=scale)\n', (1953, 1986), False, 'from data.stochastic_quantized_sine_wave import get_numpy_data\n'), ((2092, 2324), 'model.wavenet_model.WaveNetModel', 'W... |
"""Evaluate outcome (+CATE) of datasets"""
from scipy.stats import spearmanr
import numpy as np
def safe_spearmanr(arr_a, arr_b):
"Compute the spearman-R correlation, but 0 if all equal"
if np.all(arr_a[0] == arr_a) or np.all(arr_b[0] == arr_b):
return 0
return spearmanr(arr_a, arr_b).correlation... | [
"scipy.stats.spearmanr",
"numpy.all"
] | [((201, 226), 'numpy.all', 'np.all', (['(arr_a[0] == arr_a)'], {}), '(arr_a[0] == arr_a)\n', (207, 226), True, 'import numpy as np\n'), ((230, 255), 'numpy.all', 'np.all', (['(arr_b[0] == arr_b)'], {}), '(arr_b[0] == arr_b)\n', (236, 255), True, 'import numpy as np\n'), ((285, 308), 'scipy.stats.spearmanr', 'spearmanr'... |
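safe_spearmanr is shown almost completely; restated here with a quick check of the constant-array guard (spearmanr would produce NaN in that case):

import numpy as np
from scipy.stats import spearmanr

def safe_spearmanr(arr_a, arr_b):
    # Return 0 when either array is constant; spearmanr is undefined there.
    if np.all(arr_a[0] == arr_a) or np.all(arr_b[0] == arr_b):
        return 0
    return spearmanr(arr_a, arr_b).correlation

print(safe_spearmanr(np.array([1, 2, 3]), np.array([1, 1, 1])))  # 0
print(safe_spearmanr(np.array([1, 2, 3]), np.array([4, 5, 6])))  # 1.0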
import random
import torch
import numpy as np
import time
import os
from model.net import Net
from model.loss import Loss
from torch.autograd import Variable
import itertools
import pandas as pd
from main.dataset import LunaDataSet
from torch.utils.data import DataLoader
from configs import VAL_PCT, TOTAL_EPOCHS, DEFA... | [
"numpy.mean",
"os.makedirs",
"torch.utils.data.DataLoader",
"random.Random",
"pandas.read_csv",
"torch.load",
"numpy.asarray",
"os.path.join",
"model.net.Net",
"numpy.sum",
"torch.cuda.is_available",
"model.loss.Loss",
"time.time",
"glob.glob",
"main.dataset.LunaDataSet"
] | [((726, 737), 'time.time', 'time.time', ([], {}), '()\n', (735, 737), False, 'import time\n'), ((1470, 1501), 'numpy.asarray', 'np.asarray', (['metrics', 'np.float32'], {}), '(metrics, np.float32)\n', (1480, 1501), True, 'import numpy as np\n'), ((1967, 1978), 'time.time', 'time.time', ([], {}), '()\n', (1976, 1978), F... |
#!/usr/bin/python3
"""Plot histograms of images. Possible nans and infinities are ignored."""
import argparse
from collections import OrderedDict
import logging
import numpy as np
import pylab as pl
from scipy import interpolate
import dwi.files
import dwi.util
def parse_args():
"""Parse command-line argument... | [
"logging.basicConfig",
"numpy.mean",
"numpy.histogram",
"collections.OrderedDict",
"argparse.ArgumentParser",
"pylab.plot",
"pylab.savefig",
"numpy.asarray",
"logging.warning",
"numpy.any",
"pylab.close",
"pylab.figure",
"numpy.isfinite",
"scipy.interpolate.spline",
"numpy.percentile",
... | [((334, 378), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (357, 378), False, 'import argparse\n'), ((1056, 1069), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (1066, 1069), True, 'import numpy as np\n'), ((1346, 1387), 'numpy.histogram', 'np... |
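The np.isfinite / np.histogram pairing in the API list is the core of the "ignore NaNs and infinities" behavior; a minimal illustration:

import numpy as np

a = np.array([1.0, 2.0, np.nan, 3.0, np.inf, 2.0])
finite = a[np.isfinite(a)]  # drop NaNs and infinities, as the docstring says
hist, bin_edges = np.histogram(finite, bins=4)
print(hist)  # [1 0 2 1]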
import os
from tqdm import tqdm
import numpy as np
import pandas as pd
import pdb
import cv2
import time
import json
import torch
import random
import scipy
import logging
import traceback
from datetime import datetime
# from config import HOME
from tensorboard_logger import log_value, log... | [
"logging.getLogger",
"os.path.exists",
"logging.StreamHandler",
"matplotlib.pyplot.plot",
"os.path.join",
"tensorboard_logger.log_images",
"random.seed",
"time.time",
"matplotlib.pyplot.close",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"os.mkdir",
"matplotli... | [((366, 391), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (384, 391), True, 'from matplotlib import pyplot as plt\n'), ((668, 691), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (689, 691), False, 'import logging\n'), ((705, 732), 'logging.getLogger'... |
import numpy as np
from scipy.ndimage import convolve, maximum_filter
def gauss2d(sigma, fsize):
""" Create a 2D Gaussian filter
Args:
sigma: width of the Gaussian filter
fsize: (w, h) dimensions of the filter
Returns:
*normalized* Gaussian filter as (h, w) np.array
"""
m,... | [
"numpy.logical_and",
"scipy.ndimage.convolve",
"numpy.exp",
"numpy.array",
"numpy.sum",
"numpy.nonzero",
"scipy.ndimage.maximum_filter",
"numpy.meshgrid",
"numpy.zeros_like",
"numpy.arange"
] | [((339, 369), 'numpy.arange', 'np.arange', (['(-m / 2 + 0.5)', '(m / 2)'], {}), '(-m / 2 + 0.5, m / 2)\n', (348, 369), True, 'import numpy as np\n'), ((378, 408), 'numpy.arange', 'np.arange', (['(-n / 2 + 0.5)', '(n / 2)'], {}), '(-n / 2 + 0.5, n / 2)\n', (387, 408), True, 'import numpy as np\n'), ((422, 452), 'numpy.m... |
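gauss2d breaks off right after the grid setup that the extraction tuples reveal; a hedged completion using the np.meshgrid, np.exp and np.sum calls from the API list (the body past the two np.arange lines is an assumption):

import numpy as np

def gauss2d(sigma, fsize):
    # Build a *normalized* (h, w) Gaussian filter; this body is an assumed completion.
    w, h = fsize
    x = np.arange(-w / 2 + 0.5, w / 2)
    y = np.arange(-h / 2 + 0.5, h / 2)
    X, Y = np.meshgrid(x, y)
    g = np.exp(-(X ** 2 + Y ** 2) / (2 * sigma ** 2))
    return g / np.sum(g)  # weights sum to 1

print(gauss2d(1.0, (3, 3)).sum())  # 1.0 up to float rounding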
from builtins import range
from builtins import object
import numpy as np
from comp411.layers import *
from comp411.layer_utils import *
class ThreeLayerNet(object):
"""
A three-layer fully-connected neural network with Leaky ReLU nonlinearity and
softmax loss that uses a modular layer design. We assume ... | [
"numpy.hstack",
"numpy.sum",
"builtins.range",
"numpy.zeros",
"numpy.random.randn"
] | [((2953, 2976), 'numpy.zeros', 'np.zeros', (['hidden_dim[0]'], {}), '(hidden_dim[0])\n', (2961, 2976), True, 'import numpy as np\n'), ((3005, 3028), 'numpy.zeros', 'np.zeros', (['hidden_dim[1]'], {}), '(hidden_dim[1])\n', (3013, 3028), True, 'import numpy as np\n'), ((3057, 3078), 'numpy.zeros', 'np.zeros', (['num_clas... |
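The extraction tuples give the bias initializations (np.zeros(hidden_dim[0]) and friends); a hedged standalone sketch of the matching parameter initialization with np.random.randn (the function wrapper and weight_scale default are assumptions):

import numpy as np

def init_three_layer_params(input_dim, hidden_dim, num_classes, weight_scale=1e-3):
    # hidden_dim is a pair (H1, H2); weights are small Gaussians, biases zeros.
    return {
        'W1': weight_scale * np.random.randn(input_dim, hidden_dim[0]),
        'b1': np.zeros(hidden_dim[0]),
        'W2': weight_scale * np.random.randn(hidden_dim[0], hidden_dim[1]),
        'b2': np.zeros(hidden_dim[1]),
        'W3': weight_scale * np.random.randn(hidden_dim[1], num_classes),
        'b3': np.zeros(num_classes),
    }

params = init_three_layer_params(3072, (100, 50), 10)
print(params['W2'].shape, params['b3'].shape)  # (100, 50) (10,)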
# coding: utf-8
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import numpy as np
def card_num_distribution():
"""Plot `Std-CardNumDistribution.png`."""
total = np.fromstring('12 14 12 12 13 12 12 12 12 12 12 14 12', sep=' ')
jb = np.fromstring('0 6 6 6 6 8 9 11 11 ... | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xticks",
"numpy.arange",
"matplotlib.ticker.PercentFormatter",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.bar",
"matplotlib.rc",
"matplotlib.pyplot.ylim",
"numpy.fromstring",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.... | [((210, 274), 'numpy.fromstring', 'np.fromstring', (['"""12 14 12 12 13 12 12 12 12 12 12 14 12"""'], {'sep': '""" """'}), "('12 14 12 12 13 12 12 12 12 12 12 14 12', sep=' ')\n", (223, 274), True, 'import numpy as np\n'), ((285, 339), 'numpy.fromstring', 'np.fromstring', (['"""0 6 6 6 6 8 9 11 11 10 7 3 2"""'], {'sep'... |
#!/usr/bin/env python
#
# Copyright (c) 2017 10X Genomics, Inc. All rights reserved.
#
import cellranger.analysis.io as analysis_io
import cellranger.analysis.constants as analysis_constants
import cellranger.h5_constants as h5_constants
import cellranger.io as cr_io
import cellranger.analysis.stats as analysis_stats
... | [
"cellranger.analysis.io.load_h5_iter",
"numpy.sqrt",
"cellranger.analysis.io.save_matrix_csv",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"numpy.where",
"cellranger.io.makedirs",
"numpy.empty",
"numpy.random.seed",
"numpy.ceil",
"collections.namedtuple",
"cellranger.analysis.stats.norma... | [((790, 922), 'collections.namedtuple', 'collections.namedtuple', (['"""PCA"""', "['transformed_pca_matrix', 'components', 'variance_explained', 'dispersion',\n 'features_selected']"], {}), "('PCA', ['transformed_pca_matrix', 'components',\n 'variance_explained', 'dispersion', 'features_selected'])\n", (812, 922)... |
# udi dataset process module
# modified from nuscenes_dataset.py
import json
import pickle
import time
import random
from copy import deepcopy
from functools import partial
from pathlib import Path
import subprocess
import fire
import numpy as np
import os
from second.core import box_np_ops
from second.core import... | [
"subprocess.check_output",
"json.loads",
"os.listdir",
"pickle.dump",
"fire.Fire",
"pathlib.Path",
"json.dumps",
"udi_eval.Box",
"pickle.load",
"numpy.array",
"numpy.concatenate",
"json.load",
"pyquaternion.Quaternion",
"json.dump",
"second.utils.progress_bar.progress_bar_iter"
] | [((9955, 9982), 'os.listdir', 'os.listdir', (['lidar_root_path'], {}), '(lidar_root_path)\n', (9965, 9982), False, 'import os\n'), ((10004, 10023), 'second.utils.progress_bar.progress_bar_iter', 'prog_bar', (['filenames'], {}), '(filenames)\n', (10012, 10023), True, 'from second.utils.progress_bar import progress_bar_i... |
#!/usr/bin/env python
"""
Configure folder for Multicolor testing.
Hazen 01/18
"""
import argparse
import inspect
import numpy
import os
import pickle
import subprocess
import storm_analysis
import storm_analysis.sa_library.parameters as parameters
import storm_analysis.sa_library.sa_h5py as saH5Py
import storm_anal... | [
"storm_analysis.simulator.camera.SCMOS",
"storm_analysis.simulator.drift.DriftFromFile",
"numpy.array",
"numpy.arange",
"storm_analysis.simulator.simulate.Simulate",
"storm_analysis.sa_library.sa_h5py.loadLocalizations",
"storm_analysis.simulator.psf.GaussianPSF",
"inspect.getfile",
"storm_analysis.... | [((832, 860), 'storm_analysis.sa_library.parameters.ParametersSCMOS', 'parameters.ParametersSCMOS', ([], {}), '()\n', (858, 860), True, 'import storm_analysis.sa_library.parameters as parameters\n'), ((1922, 1958), 'storm_analysis.sa_library.parameters.ParametersMultiplaneArb', 'parameters.ParametersMultiplaneArb', ([]... |
import numpy as np
import scipy.spatial as spatial
def bilinear_interpolate(img, coords):
""" Interpolates over every image channel
http://en.wikipedia.org/wiki/Bilinear_interpolation
:param img: max 3 channel image
:param coords: 2 x _m_ array. 1st row = xcoords, 2nd row = ycoords
:returns: array of interp... | [
"matplotlib.pyplot.imshow",
"numpy.ones",
"numpy.int32",
"numpy.min",
"numpy.max",
"matplotlib.pyplot.subplot",
"numpy.zeros",
"numpy.linalg.inv",
"functools.partial",
"locator.weighted_average_points",
"blender.weighted_average",
"scipy.spatial.Delaunay",
"numpy.vstack",
"blender.mask_fro... | [((381, 397), 'numpy.int32', 'np.int32', (['coords'], {}), '(coords)\n', (389, 397), True, 'import numpy as np\n'), ((906, 926), 'numpy.min', 'np.min', (['points[:, 0]'], {}), '(points[:, 0])\n', (912, 926), True, 'import numpy as np\n'), ((970, 990), 'numpy.min', 'np.min', (['points[:, 1]'], {}), '(points[:, 1])\n', (... |
import face_embedding
import argparse
import cv2
import numpy as np
parser = argparse.ArgumentParser(description='face model test')
# general
parser.add_argument('--image-size', default='112,112', help='')
parser.add_argument('--model', default='../models/model-r34-amf/model,0', help='path to load model.')
parser.add_... | [
"face_embedding.FaceModel",
"cv2.imread",
"argparse.ArgumentParser",
"numpy.square"
] | [((78, 132), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""face model test"""'}), "(description='face model test')\n", (101, 132), False, 'import argparse\n'), ((718, 748), 'face_embedding.FaceModel', 'face_embedding.FaceModel', (['args'], {}), '(args)\n', (742, 748), False, 'import fac... |
import os
import datetime
import h5py
import numpy as np
DEFAULT_DTYPE = np.dtype([
('datetime', np.int64),
('open', np.float),
('close', np.float),
('high', np.float),
('low', np.float),
('limit_up', np.float),
('limit_down', np.float),
('volume', np.float),
('total_turnover', np.f... | [
"numpy.dtype",
"numpy.asarray",
"os.path.basename",
"h5py.File"
] | [((74, 361), 'numpy.dtype', 'np.dtype', (["[('datetime', np.int64), ('open', np.float), ('close', np.float), ('high',\n np.float), ('low', np.float), ('limit_up', np.float), ('limit_down', np\n .float), ('volume', np.float), ('total_turnover', np.float), (\n 'settlement', np.float), ('prev_settlement', np.floa... |
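A hedged sketch of writing bars with a structured dtype like the one above via h5py (the file name and per-instrument dataset layout are assumptions; note the row's np.float alias was removed in NumPy 1.24+, so the sketch uses plain float):

import h5py
import numpy as np

BAR_DTYPE = np.dtype([('datetime', np.int64), ('open', float), ('close', float)])

bars = np.asarray([(20200102, 10.0, 10.5), (20200103, 10.5, 10.2)], dtype=BAR_DTYPE)
with h5py.File('bars.h5', 'w') as f:           # hypothetical file name
    f.create_dataset('AG2006', data=bars)       # assumed one-dataset-per-instrument layout
with h5py.File('bars.h5', 'r') as f:
    print(f['AG2006']['close'])  # [10.5 10.2]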
import sys, os
import time
import getopt
import pprint
try:
    # doesn't exist on macOS
from shmem import PyShmemClient
except:
pass
from psana import dgram
from psana.event import Event
from psana.detector import detectors
from psana.psexp.event_manager import TransitionId
import numpy as np
def dumpDict(di... | [
"getopt.getopt",
"shmem.PyShmemClient",
"psana.dgram.Dgram",
"os.close",
"os.open",
"numpy.asarray",
"os.getppid",
"numpy.array",
"os.popen",
"os.path.basename",
"os.getpid",
"sys.exit",
"psana.event.Event",
"sys.stdout.write"
] | [((9560, 9597), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""hvd:f:"""'], {}), "(sys.argv[1:], 'hvd:f:')\n", (9573, 9597), False, 'import getopt\n'), ((9898, 9909), 'os.getpid', 'os.getpid', ([], {}), '()\n', (9907, 9909), False, 'import sys, os\n'), ((9919, 9931), 'os.getppid', 'os.getppid', ([], {}), '()\n... |
# Copyright (c) 2020, <NAME>, University of Washington
# This file is part of rcwa_tf
# Written by <NAME> (Email: <EMAIL>)
import tensorflow as tf
import numpy as np
def convmat(A, P, Q):
'''
This function computes a convolution matrix for a real space matrix `A` that
represents either a relative per... | [
"tensorflow.eye",
"tensorflow.tile",
"numpy.floor",
"tensorflow.concat",
"numpy.linspace",
"tensorflow.reshape",
"tensorflow.linalg.matmul",
"tensorflow.signal.fft2d"
] | [((1189, 1206), 'numpy.floor', 'np.floor', (['(P / 2.0)'], {}), '(P / 2.0)\n', (1197, 1206), True, 'import numpy as np\n'), ((1219, 1236), 'numpy.floor', 'np.floor', (['(P / 2.0)'], {}), '(P / 2.0)\n', (1227, 1236), True, 'import numpy as np\n'), ((1277, 1306), 'numpy.linspace', 'np.linspace', (['(-p_max)', 'p_max', 'P... |
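The tuples reveal the harmonic bookkeeping: orders run from -floor(P/2) to +floor(P/2) on each axis. A tiny illustration of those indices (the convolution-matrix assembly itself is omitted):

import numpy as np

P, Q = 3, 3
p_max, q_max = np.floor(P / 2.0), np.floor(Q / 2.0)
# Spatial-harmonic orders along each axis; an entry of the convolution matrix
# picks the Fourier coefficient of A at the *difference* of two such orders.
p = np.linspace(-p_max, p_max, P)
q = np.linspace(-q_max, q_max, Q)
print(p, q)  # [-1.  0.  1.] [-1.  0.  1.]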
import pandas as pd
import json
import os
import os.path as osp
import numpy as np
"""
python -m spinup.run hyper_search <files> -ae <start from which epoch>
generate a file that ranks the experiments by performance;
use it to quickly find good hyperparameters during a hyperparameter search
upload thi... | [
"os.listdir",
"numpy.convolve",
"numpy.ones",
"argparse.ArgumentParser",
"numpy.asarray",
"os.path.join",
"numpy.argsort",
"os.path.dirname",
"numpy.zeros",
"os.path.isdir",
"numpy.isnan",
"json.load",
"pandas.concat",
"os.walk"
] | [((1541, 1561), 'numpy.zeros', 'np.zeros', (['n_settings'], {}), '(n_settings)\n', (1549, 1561), True, 'import numpy as np\n'), ((1577, 1597), 'numpy.zeros', 'np.zeros', (['n_settings'], {}), '(n_settings)\n', (1585, 1597), True, 'import numpy as np\n'), ((3264, 3279), 'os.walk', 'os.walk', (['logdir'], {}), '(logdir)\... |
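The np.convolve / np.ones pair in the API list is the usual moving-average smoother for per-epoch scores; a one-liner version (the score vector is illustrative):

import numpy as np

scores = np.array([1.0, 2.0, 3.0, 4.0, 5.0])   # illustrative per-epoch scores
window = 3
smoothed = np.convolve(scores, np.ones(window) / window, mode='valid')
print(smoothed)  # [2. 3. 4.]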
import copy
import json
import numpy as np
import pandas as pd
import basicDeltaOperations as op
import calcIsotopologues as ci
import fragmentAndSimulate as fas
import solveSystem as ss
'''
This is a set of functions to quickly initialize methionine molecules based on input delta values and to simulate its fragmenta... | [
"numpy.array",
"fragmentAndSimulate.predictMNFragmentExpt",
"copy.deepcopy",
"solveSystem.processM1MCResults",
"calcIsotopologues.inputToAtomDict",
"solveSystem.OValueCorrectTheoretical",
"solveSystem.updateSiteSpecificDfM1MC",
"json.dumps",
"solveSystem.M1MonteCarlo",
"pandas.DataFrame",
"basic... | [((3681, 3712), 'pandas.DataFrame', 'pd.DataFrame', (['l'], {'columns': 'IDList'}), '(l, columns=IDList)\n', (3693, 3712), True, 'import pandas as pd\n'), ((8014, 8092), 'calcIsotopologues.inputToAtomDict', 'ci.inputToAtomDict', (['molecularDataFrame'], {'disable': 'disableProgress', 'M1Only': 'M1Only'}), '(molecularDa... |
"""
Demonstrate the type 1 NUFFT using cuFINUFFT
"""
import numpy as np
import pycuda.autoinit
from pycuda.gpuarray import GPUArray, to_gpu
from cufinufft import cufinufft
# Set up parameters for problem.
N1, N2 = 59, 61 # Size of uniform grid
M = 100 # Number of nonuniform p... | [
"numpy.random.standard_normal",
"numpy.abs",
"pycuda.gpuarray.GPUArray",
"numpy.exp",
"cufinufft.cufinufft",
"numpy.random.uniform",
"pycuda.gpuarray.to_gpu"
] | [((592, 632), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi'], {'size': 'M'}), '(-np.pi, np.pi, size=M)\n', (609, 632), True, 'import numpy as np\n'), ((638, 678), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi'], {'size': 'M'}), '(-np.pi, np.pi, size=M)\n', (655, 678), True, 'imp... |
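A CPU-side sanity check for the same transform, assuming the standard type 1 NUFFT definition f[n1, n2] = sum_j c_j * exp(i*(n1*kx_j + n2*ky_j)) over a symmetric mode grid (the mode ordering is an assumption); the direct O(M*N1*N2) sum below is only practical for small sizes:

import numpy as np

np.random.seed(0)
N1, N2, M = 8, 9, 50
kx = np.random.uniform(-np.pi, np.pi, size=M)
ky = np.random.uniform(-np.pi, np.pi, size=M)
c = np.random.standard_normal(M) + 1j * np.random.standard_normal(M)

# Direct evaluation over the symmetric mode grid.
n1 = np.arange(-(N1 // 2), (N1 - 1) // 2 + 1)
n2 = np.arange(-(N2 // 2), (N2 - 1) // 2 + 1)
f = np.exp(1j * (n1[:, None, None] * kx + n2[None, :, None] * ky)).dot(c)
print(f.shape)  # (8, 9)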
from openff.toolkit.typing.engines.smirnoff.forcefield import ForceField
from simtk import openmm, unit
from scipy.stats import distributions
import copy
import numpy as np
import os
from smt.sampling_methods import LHS
def vary_parameters_lhc(filename, num_samples, output_directory):
forcefield = ForceField(file... | [
"smt.sampling_methods.LHS",
"numpy.asarray",
"os.makedirs",
"openff.toolkit.typing.engines.smirnoff.forcefield.ForceField"
] | [((305, 357), 'openff.toolkit.typing.engines.smirnoff.forcefield.ForceField', 'ForceField', (['filename'], {'allow_cosmetic_attributes': '(True)'}), '(filename, allow_cosmetic_attributes=True)\n', (315, 357), False, 'from openff.toolkit.typing.engines.smirnoff.forcefield import ForceField\n'), ((556, 580), 'numpy.asarr... |
import argparse
from collections import defaultdict
import pickle
import re
import lightgbm as lgb
import pandas as pd
import numpy as np
import xgboost as xgb
from ..data_utils import SEG_FP, get_encoded_classes
from ..utils import print_metrics
from ..metric import get_metrics
from .blend import (
score_predict... | [
"numpy.mean",
"pickle.dump",
"argparse.ArgumentParser",
"re.compile",
"xgboost.train",
"pandas.read_csv",
"lightgbm.train",
"lightgbm.Booster",
"pickle.load",
"lightgbm.Dataset",
"collections.defaultdict",
"xgboost.DMatrix"
] | [((406, 431), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (429, 431), False, 'import argparse\n'), ((2124, 2151), 're.compile', 're.compile', (['"""^top_\\\\d+_cls"""'], {}), "('^top_\\\\d+_cls')\n", (2134, 2151), False, 'import re\n'), ((6836, 6872), 'lightgbm.Dataset', 'lgb.Dataset', (['tr... |
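A hedged sketch of the lgb.Dataset / lgb.train pairing from the API list (features, labels and parameters are illustrative):

import numpy as np
import lightgbm as lgb

X = np.random.rand(200, 4)
y = (X[:, 0] + X[:, 1] > 1).astype(int)

train_data = lgb.Dataset(X, label=y)
params = {'objective': 'binary', 'verbose': -1}  # illustrative parameters
booster = lgb.train(params, train_data, num_boost_round=20)
print(booster.predict(X[:3]))  # per-row probabilities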
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 2 21:55:56 2015
@author: aidanrocke
"""
import numpy as np
def compressSequenceNFast(posture_seq, newStart, nMax):
"""
# COMPRESSSEQUENCE Recursively finds the most compressive subsequence in
# posture_seq and creates and replaces it with a new number. Thi... | [
"numpy.shape"
] | [((1731, 1752), 'numpy.shape', 'np.shape', (['posture_seq'], {}), '(posture_seq)\n', (1739, 1752), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
### block that grows in fiber direction triggered by fiber stretch and remodels to softer material
# TODO: Somehow, this does not converge quadratically at the end (seemingly independent of remodeling,
# but likely attributable to the growth in fiber direction) ---> check the linearization terms!
... | [
"traceback.format_exc",
"pathlib.Path",
"results_check.results_check_node",
"numpy.array",
"sys.exit",
"results_check.success_check"
] | [((4321, 4442), 'results_check.results_check_node', 'results_check.results_check_node', (['problem.mp.u', 'check_node', 'u_corr', 'problem.mp.V_u', 'problem.mp.comm'], {'tol': 'tol', 'nm': '"""u"""'}), "(problem.mp.u, check_node, u_corr, problem.\n mp.V_u, problem.mp.comm, tol=tol, nm='u')\n", (4353, 4442), False, '... |
import mne
import numpy as np
import pandas as pd
from mne.beamformer import make_dics, apply_dics_csd
from config import dics_settings, fname, args
from megset.config import fname as megset_fname
from megset.config import freq_range
subject = args.subject
print(f'Running analysis for subject {subject}')
mne.set_log_... | [
"numpy.arccos",
"mne.set_log_level",
"numpy.arange",
"megset.config.fname.fwd",
"megset.config.fname.ecd",
"mne.beamformer.apply_dics_csd",
"numpy.log",
"mne.channels.equalize_channels",
"numpy.linalg.norm",
"mne.time_frequency.csd_morlet",
"config.fname.dics_megset_results",
"pandas.DataFrame... | [((308, 332), 'mne.set_log_level', 'mne.set_log_level', (['(False)'], {}), '(False)\n', (325, 332), False, 'import mne\n'), ((1091, 1122), 'numpy.arange', 'np.arange', (['*freq_range[subject]'], {}), '(*freq_range[subject])\n', (1100, 1122), True, 'import numpy as np\n'), ((1129, 1203), 'mne.time_frequency.csd_morlet',... |
import csv
from sklearn.cluster import MiniBatchKMeans
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import pickle
import numpy as np
import sklearn.metrics as metrics
from yellowbrick.cluster import InterclusterDistance
from scipy.optimize import curve_fit
import umap.umap_ as umap
from col... | [
"csv.field_size_limit",
"csv.DictWriter",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"yellowbrick.cluster.InterclusterDistance",
"sklearn.metrics.silhouette_samples",
"scipy.stats.sem",
"numpy.arange",
"numpy.mean",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"numpy.exp",... | [((73, 96), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (87, 96), False, 'import matplotlib\n'), ((2181, 2273), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'selected_k', 'init': 'centers', 'verbose': '(0)', 'max_no_improvement': 'None'}), '(n_clusters=selected... |
import numpy as np
import matplotlib.pyplot as plt
import cv2
class LaneIdentifier:
def __init__(self, smooth_factor, filter):
self.left_lane_inds = []
self.right_lane_inds = []
self.lane_gap = []
self.binary_warped = None
self.window_height = None
self.leftx_current... | [
"numpy.polyfit",
"numpy.hstack",
"numpy.array",
"numpy.mean",
"numpy.delete",
"matplotlib.pyplot.plot",
"cv2.addWeighted",
"numpy.linspace",
"numpy.vstack",
"numpy.concatenate",
"matplotlib.pyplot.ylim",
"numpy.argmax",
"numpy.int_",
"matplotlib.pyplot.xlim",
"numpy.int",
"numpy.dstack... | [((841, 893), 'numpy.int', 'np.int', (['(self.binary_warped.shape[0] // self.nwindows)'], {}), '(self.binary_warped.shape[0] // self.nwindows)\n', (847, 893), True, 'import numpy as np\n'), ((954, 974), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (962, 974), True, 'import numpy as np\n'), ((999, ... |