| prompt | completion | api |
|---|---|---|
# Copyright 2020 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Utilities for fetching and munging public data and forecasts."""
import numpy as np
import pandas as pd
imp... | pd.to_datetime(df.time) | pandas.to_datetime |
""""""
__author__ = "<NAME>"
__copyright__ = "WeatherBrain"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import pandas
def load_temperature_raw():
"""This methid loads the raw temperature data from
text files and recompiled the data into a dataframe.
:return: Dataframe wit... | pandas.DataFrame(data, columns=['date', 'hPa_one', 'hPa_two', 'hPa_three']) | pandas.DataFrame |
'''
This file includes all the locally differentially private mechanisms we designed for the SIGMOD work.
I am aware that this code could be cleaned up a bit and that there is some redundancy, but this helps keep the code plug-n-play.
I can simply copy a class and use it in a different context.
http://dimacs.rutgers.edu/~graha... | pd.DataFrame(columns=["irr_l1_std", "mrr_l1_std", "iht_l1_std", "mht_l1_std", "ips_l1_std", "mps_l1_std","iolh_l1_std","icms_l1_std","icmsht_l1_std"]) | pandas.DataFrame |
"""
Tests for the choice_tools.py file.
"""
import unittest
import os
import warnings
from collections import OrderedDict
from copy import deepcopy
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.sparse import csr_matrix, isspmatrix_csr
import pylogit.choice_tools as ct
import pylogit.ba... | pd.DataFrame(wide_data) | pandas.DataFrame |
"""Exhastuve grid search for parameters for TSNE and UMAP"""
import argparse
import itertools
import hdbscan
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.spatial.distance import pdist, squareform
f... | pd.read_csv(args.distance_matrix, index_col=0) | pandas.read_csv |
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserWarning
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import Categorical, DataFram... | CategoricalDtype() | pandas.core.dtypes.dtypes.CategoricalDtype |
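The completion above references the internal path `pandas.core.dtypes.dtypes.CategoricalDtype`; the same class is publicly exposed as `pandas.api.types.CategoricalDtype`. A minimal sketch of dtype specification during parsing, using made-up column data:

```python
import pandas as pd
from io import StringIO
from pandas.api.types import CategoricalDtype

# Parse the "size" column directly as an ordered categorical
# instead of converting it after the read.
csv_data = StringIO("size\nsmall\nlarge\nmedium\n")
size_dtype = CategoricalDtype(categories=["small", "medium", "large"], ordered=True)
df = pd.read_csv(csv_data, dtype={"size": size_dtype})
print(df["size"].dtype)  # category, with the ordering small < medium < large
```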
import concurrent.futures
import multiprocessing
import scipy
import pandas as pd
import random
import re
import time
import pickle
import os
from .DataManager import DataManager
from .Agent import Agent
from colorama import Fore
from .Utils import get_seconds
from sklearn.feature_extraction.text import TfidfVectorizer... | pd.to_datetime('2020-03-29T00:00:00Z') | pandas.to_datetime |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
from collections.abc import Iterable
from datetime import datetime, timedelta
from unittest import TestCase
import numpy as np
import... | pd.DataFrame({"time": previous_seq, "value": upper_values}) | pandas.DataFrame |
#################################################
#created the 04/05/2018 09:52 by <NAME>#
#################################################
#-*- coding: utf-8 -*-
'''
'''
'''
Possible improvements:
'''
import warnings
warnings.filterwarnings('ignore')
#################################################
########### ... | pd.DataFrame() | pandas.DataFrame |
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline: IPython magic functions cannot be used!!!
import seaborn as sns
from sklearn import preprocessing
import folium
from config.settings import DATA_DIR, TEMPLATES
from config.settings import STATICFILES_DIRS
# Read the data file into a dataframe... | pd.read_excel(DATA_DIR[0] + '/city_pop.xlsx') | pandas.read_excel |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
to_datetime,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
class TestRollingTS:
# rolling time-series friendly
# xref GH13327
def set... | date_range("20130101", periods=5, freq="s") | pandas.date_range |
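The surrounding test class (`TestRollingTS`, xref GH13327) exercises time-based rolling windows, which is what the second-frequency `date_range` in the completion enables. A minimal sketch with made-up values:

```python
import pandas as pd

# A regular 1-second DatetimeIndex lets rolling() accept a time
# offset ("3s") instead of a fixed row count.
idx = pd.date_range("20130101", periods=5, freq="s")
s = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0], index=idx)
print(s.rolling("3s").sum())  # each window covers 3 seconds of data
```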
# run_experiment
# Basics
import pandas as pd
import numpy as np
import datetime
import pickle
import typer
import os
# Import paths
from globals import DATA_MODELLING_FOLDER, EVALUATION_RESULTS, full_feat_models, overlapping_feat_models, full_feat_models_rfe
# Import sklearn processing/pipeline
from sklearn.pipelin... | pd.DataFrame(feat_importance_dict) | pandas.DataFrame |
import logging
import os
import re
import shutil
import subprocess
from builtins import object, range, str, zip
from collections import OrderedDict, defaultdict
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from editdistance import eval as editdist # Alternative library: python-levenshtein
cl... | pd.reset_option("display.max_rows") | pandas.reset_option |
import re, random, os, json
import pandas as pd
import numpy as np
import scipy as sp
import seaborn as sns
from bokeh import mpl
from bokeh.plotting import output_file, show
from sklearn.feature_extraction.text import TfidfVectorizer
from classifier import Classifier, label2domain, manifestolabels
MANIFESTO_FOLDER = ... | pd.DataFrame(most_distant_statements, columns=['party', 'domain', 'most_distant_to_other_parties', 'distance']) | pandas.DataFrame |
# Import dependencies
def scrapeData():
import urllib.request, json
from bson.json_util import dumps, loads
import os, ssl
import pymongo
import itertools
import pandas as pd
# ### 2021
# In[2]:
if (not os.environ.get('PYTHONHTTPSVERIFY', '') and
getattr(ssl, '_create_unv... | pd.DataFrame(final_data) | pandas.DataFrame |
# ---------------------------------------------------------------------------- #
# World Cup: Stats scanner
# Ver: 0.01
# ---------------------------------------------------------------------------- #
#
# Code by <NAME>
#
# ---------------------------------------------------------------------------- #
import os
impor... | pd.isnull(rating_dict[team][player]) | pandas.isnull |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 11:30:13 2020
This script calculates damage on a yearly basis
@author: acn980
"""
import os, sys, glob
import pandas as pd
import numpy as np
import warnings
import scipy
import matplotlib.pyplot as plt
import subprocess
warnings.filterwarnings("ignore")
sys.path.i... | pd.read_csv(fn_skew, parse_dates = True, date_parser= date_parser, index_col = 'Date') | pandas.read_csv |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or a... | pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) | pandas.IntervalIndex.from_tuples |
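A minimal sketch of the `IntervalIndex.from_tuples` call in the completion; intervals are right-closed by default:

```python
import pandas as pd

# Non-overlapping intervals built from (left, right) tuples.
idx = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
print(idx.get_loc(2.5))  # 1 -- 2.5 falls inside (2, 3]
print(0.5 in idx[0])     # True -- membership test on a single Interval
```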
from datetime import time
import numpy as np
from pandas.compat._optional import import_optional_dependency
from pandas.io.excel._base import _BaseExcelReader
from pandas.core.dtypes.missing import isnull
class _XlrdReader(_BaseExcelReader):
def __init__(self, filepath_or_buffer):
"""Reader using xlrd ... | import_optional_dependency("xlrd", extra=err_msg) | pandas.compat._optional.import_optional_dependency |
import os
import pandas as pd
from collections import defaultdict
import argparse
from pattern.text.en import singularize
# Dictionary used to store subject counts
subject_counts = defaultdict(lambda:0)
# Reads in the data
def read_data(filename):
print("Reading in {}".format(filename))
df = pd.read_csv(filename, ... | pd.DataFrame(columns=['doi', 'subjects', 'title']) | pandas.DataFrame |
import gc
import os
import time
import boto3
import dask
import fsspec
import joblib
import numpy as np
import pandas as pd
import rasterio as rio
import rioxarray
import utm
import xarray as xr
import xgboost as xgb
from pyproj import CRS
from rasterio.session import AWSSession
from s3fs import S3FileSystem
import c... | pd.DataFrame([[np.nan, np.nan, np.nan]], columns=['x', 'y', 'biomass']) | pandas.DataFrame |
import re
import json
import subprocess
import itertools
from multiprocessing import Pool
import urllib
import pandas as pd
from bs4 import BeautifulSoup
def get_schools(county, year, grade):
"""Get all the schools in a county for a year and grade"""
url = "https://app.azdhs.gov/IDRReportStats/H... | pd.DataFrame(group) | pandas.DataFrame |
import logging
import io
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import sandy
__author__ = "<NAME>"
__all__ = [
"Samples",
]
np.random.seed(1)
minimal_testcase = np.random.randn(4, 3)
def cov33csv(func):
def inne... | pd.read_csv(file, **kwargs) | pandas.read_csv |
#!/usr/bin/env python
"""
Copyright 2019 Johns Hopkins University (Author: <NAME>)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
Evals PLDA LLR
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import sys
i... | pd.merge(ext_segments_in.segments, df_map) | pandas.merge |
#!/usr/bin/env python
# coding: utf-8
# ## Visualize a representation of the spherized LINCS Cell Painting dataset
# In[1]:
import umap
import pathlib
import numpy as np
import pandas as pd
import plotnine as gg
from pycytominer.cyto_utils import infer_cp_features
# In[2]:
np.random.seed(9876)
# In[3]:
pro... | pd.read_csv(file) | pandas.read_csv |
def convert_to_perlodes(TaXon_table_xlsx, operational_taxon_list, path_to_outdirs):
import PySimpleGUI as sg
import pandas as pd
from pandas import DataFrame
import numpy as np
from pathlib import Path
#get the taxonomy from the operational taxon list
operational_taxon_list_df = pd.read_ex... | pd.DataFrame(perlodes_input_list) | pandas.DataFrame |
"""
This file contains functions that allow running adaptive
selection in parallel.
@author: <NAME>
"""
from typing import List, Any, Optional
import pandas as pd
from sklearn.base import clone
# It can serialize class methods and lambda functions.
import pathos.multiprocessing as mp
def add_partition_key(
... | pd.concat(predictions) | pandas.concat |
from __future__ import division
import numpy as np
import pandas as pd
from base.uber_model import UberModel, ModelSharedInputs
from .iec_functions import IecFunctions
class IecInputs(ModelSharedInputs):
"""
Input class for IEC.
"""
def __init__(self):
"""Class representing the inputs for IEC"... | pd.Series([], dtype="float", name="out_z_score_f") | pandas.Series |
from .microfaune_package.microfaune.detection import RNNDetector
from .microfaune_package.microfaune import audio
import matplotlib.pyplot as plt
import pandas as pd
import scipy.signal as scipy_signal
import numpy as np
import seaborn as sns
from .IsoAutio import *
def local_line_graph(
local_scores,
... | pd.DataFrame() | pandas.DataFrame |
import copy
from io import StringIO
import numpy as np
import pandas as pd
from django import forms
from django.core.exceptions import ValidationError
from django.forms.widgets import RadioSelect, Select, Textarea, TextInput
from pandas.errors import ParserError
from core.utils.util import md5_hash
from .models impo... | pd.read_excel(data, dtype=str) | pandas.read_excel |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 16:06:04 2020
@author: ryancrisanti
"""
from .prediction import Prediction
from .account import Account
from .utilities import format_name, save, load
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
class ... | pd.DataFrame(self.tot_acnt_value) | pandas.DataFrame |
"""
Backs up ToodleDo
"""
import sys
import os
import requests
import yaml
import pandas as pd
from getpass import getpass
from requests_oauthlib import OAuth2Session
import requests
import urllib
import json
import logging
# TODO modify redirection URI? Localhost is a bit weird, there might be something running ther... | pd.DataFrame(i["outline"]["children"]) | pandas.DataFrame |
import pandas as pd
from pathlib import Path
from utils import Config
from sklearn.model_selection import train_test_split
# dataset
data_dir = Path("data")
train = | pd.read_csv(data_dir / "kor_pair_train.csv") | pandas.read_csv |
from django.shortcuts import render
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from core import forms
import numpy as np
import pandas as pd
# Create your views here.
class HomePageView(TemplateView):
template_name = 'core/index.html'
form_class = forms.... | pd.DataFrame(normalized_table, index=index, columns=focus_header) | pandas.DataFrame |
import torch
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.utils import resample
from sklearn.metrics import mean_squared_error
import math
# from .models.DeepCOVID import DeepCOVID
from models.DeepCOVID import DeepCOVID
# params
#N_SAMPLES = 20
#N=3 # stochastic repetitions for ea... | pd.concat([main,rest], axis =1) | pandas.concat |
# coding: utf-8
# In[ ]:
import pandas as pd
import numpy as np
import sklearn
from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
# In[ ]:
def filtering(level): # filter class data based on level which students response
db=pd.read_... | pd.DataFrame(pref,columns=['class_size','tuition','careerOfTeacher','ageDistribution']) | pandas.DataFrame |
# coding: utf-8
# # Parameter Calibration
# This notebook describes a mathematical framework for selecting policy parameters - namely the emissions intensity baseline and permit price. Please be aware of the following key assumptions underlying this model:
#
# * Generators bid into the market at their short-ru... | pd.DataFrame(r['Solution'][0]) | pandas.DataFrame |
"""Utility functions, mostly for internal use."""
import os
import colorsys
import warnings
from urllib.request import urlopen, urlretrieve
from http.client import HTTPException
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib as mpl
import matplotlib.colors as mplcol
import matplotlib.... | pd.Categorical(df["class"], ["First", "Second", "Third"]) | pandas.Categorical |
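`pd.Categorical` with an explicit category list, as in the completion, pins an order that later grouping and plotting will respect. A minimal sketch with made-up values:

```python
import pandas as pd

# Explicit categories fix the order regardless of how values
# appear in the data; values outside the list would become NaN.
values = ["Third", "First", "Second", "First"]
cat = pd.Categorical(values, ["First", "Second", "Third"])
print(pd.Series(cat).value_counts(sort=False))  # counts in First/Second/Third order
```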
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
from datetime import datetime
from sys import stdout
from sklearn.preprocessing import scale
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, Constant... | pd.DataFrame() | pandas.DataFrame |
"""
Provide the groupby split-apply-combine paradigm. Define the GroupBy
class providing the base-class of operations.
The SeriesGroupBy and DataFrameGroupBy sub-classes
(defined in pandas.core.groupby.generic)
expose these user-facing objects to provide specific functionality.
"""
from contextlib import contextmanager... | concat(values, axis=self.axis) | pandas.concat |
import os
import pickle
import pandas as pd
from collections import Counter
from numpy.random import choice
import random
import re
import simplejson
data_dir = '/home/hsinghal/workspace/DB_AS_A_SERVICE/input_data'
store_into = '/home/hsinghal/workspace/DB_AS_A_SERVICE/custom_scripts'
# ----------------------------... | pd.DataFrame.from_records(all_results) | pandas.DataFrame.from_records |
import streamlit as st
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
from matplotlib import pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from s... | pd.DataFrame(X) | pandas.DataFrame |
#!/usr/bin/env python3
import os
from collections import defaultdict, namedtuple
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegressionCV
from sklearn import metrics
from functools import partial
import gc
import pickle as pkl
import gzip
import json
from datetime import datetime
fr... | pd.concat([df_scores, df_sc], axis=1) | pandas.concat |
#!/usr/bin/env python3
import os
import sys
import re
import pandas as pd, geopandas as gpd
import numpy as np
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
from functools import reduce
from multiprocessing import Pool
from os.path import isfile, join
import shutil
import warnings
from pathlib ... | pd.read_csv(recurr_file, dtype={'feature_id': str}) | pandas.read_csv |
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in wr... | pd.Timestamp("2016-01-07", tz='UTC') | pandas.Timestamp |
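A minimal sketch of the timezone-aware `pd.Timestamp` construction shown in the completion; converting the zone changes the display, not the instant:

```python
import pandas as pd

# tz-aware timestamp pinned to UTC at construction time.
ts = pd.Timestamp("2016-01-07", tz="UTC")
print(ts.tz_convert("US/Eastern"))  # 2016-01-06 19:00:00-05:00, same instant
```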
#!/usr/bin/env python
import os
import sys
import pandas as pd
import numpy as np
import time
from sqlalchemy import create_engine
from itertools import repeat
import multiprocessing
import tqdm
import genes
import eqtls
def find_snps(
inter_df,
gene_info_df,
tissues,
output_dir,
... | pd.DataFrame() | pandas.DataFrame |
# Author: <NAME>
import pandas as pd
import sys
def create_means_contralateral_average(means_input_file, contralateral_means_output_file):
means_df = | pd.read_csv(means_input_file) | pandas.read_csv |
"""
Tests whether ColumnPropagation works
"""
from inspect import cleandoc
import pandas
from pandas import DataFrame
from mlinspect._pipeline_inspector import PipelineInspector
from mlinspect.inspections import ColumnPropagation
def test_propagation_merge():
"""
Tests whether ColumnPropagation works for jo... | DataFrame([['cat_a', 1, 2, 'cat_a'], ['cat_b', 2, 2, 'cat_b']], columns=['A', 'B', 'C', 'mlinspect_A']) | pandas.DataFrame |
import pandas as pd
from functools import reduce
from pathlib import Path
def merge_benefits(cps, year, data_path, export=True):
"""
Merge the benefit variables onto the CPS files. TaxData uses the
following variables imputed by C-TAM:
Medicaid: MedicaidX
Medicare: MedicareX
Veterans Benefits: ... | pd.merge(left, right, on="peridnum", how="left") | pandas.merge |
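A minimal sketch of the left merge in the completion, using made-up `peridnum` keys and one of the benefit columns named above:

```python
import pandas as pd

# A left join keeps every record from `left`; keys with no match
# in `right` get NaN in the merged benefit column.
left = pd.DataFrame({"peridnum": [1, 2, 3]})
right = pd.DataFrame({"peridnum": [2, 3], "MedicaidX": [100.0, 250.0]})
print(pd.merge(left, right, on="peridnum", how="left"))
```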
import pandas as pd
from skimage.measure import regionprops
from .compute_fsd_features import compute_fsd_features
from .compute_gradient_features import compute_gradient_features
from .compute_haralick_features import compute_haralick_features
from .compute_intensity_features import compute_intensity_features
from .c... | pd.concat(feature_list, axis=1) | pandas.concat |
### mkwc_util.py : Contains utilities for extracting and processing data from the MKWC website
### Author : <NAME>
### Date : 6/1/2021
import os
import numpy as np
import pandas as pd
from . import times
### NOTE: Seeing data is STORED by UT, with data in HST
### CFHT data is STORED by HST, with data in HST
mkwc_url... | pd.concat(all_data) | pandas.concat |
# Generate max, mean, and std from computed feature value comparison
from __future__ import print_function
import csv
import pandas as pd
# input_file = 'output.csv'
# output_file = 'validation.csv'
input_file = 'output1.csv'
output_file = 'validation1.csv'
df1 = | pd.read_csv(input_file) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split
# Load the dataset
def read_data(base_info_path,
... | pd.read_csv(entprise_info_path) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 18 11:31:56 2021
@author: nguy0936
I increased the number of layers from conv1 to embedding to see if more layers
could result in better performance. I did this only for set 1 - Hallett
"""
# load packages
import pandas as pd
import umap
import matplotlib.pyplot as plt
... | pd.concat(lowd_frames, axis=1) | pandas.concat |
# Import python modules
import os, sys
# data handling libraries
import pandas as pd
import numpy as np
import pickle
import json
import dask
from multiprocessing import Pool
# graphical control libraries
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
# shape and layer libraries
import fiona... | pd.concat([flow_cfs, flow_cms, flow_mmday],axis=1) | pandas.concat |
"""
Authors: <NAME>, <NAME>, <NAME>, <NAME>
"""
import pandas as pd
import numpy as np
from scipy.linalg import eig
import matplotlib.pyplot as plt
import quantecon as qe
# == model parameters == #
a_0 = 100
a_1 = 0.5
ρ = 0.9
σ_d = 0.05
β = 0.95
c = 2
γ = 50.0
θ = 0.002
ac = (a_0 - c) / 2.0
# == Define LQ matrice... | pd.DataFrame(index=θs, columns=('value', 'entropy')) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
import scipy.io as sio
import matconv
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import Normalize
'''################### Set direcotories and open files #####################'''
bhalla_paths = matconv.set_pat... | pd.concat([stim_time, hist_time, glm_df], axis=1) | pandas.concat |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
import hail as hl
'''Module with helper functions used in both projects (and for mt + tRNA).'''
def find_subset(df, column_name, factor, condition):
'''Returns df subsetted by factor in specified column (==, !=, <, >, ... | pd.merge(all_genes, df, how="inner", on="gene") | pandas.merge |
import util
import numpy as np
import pandas as pd
# model_1 = pd.read_csv('fold1_boostdt.csv')
# model_1 = pd.read_csv('small_boostdt.csv')
model_2 = pd.read_csv('~/Desktop/predictions_stiebels/full/predictions_xgboost_fold1.csv', names = ["pred"])
model_1 = pd.read_csv('~/Desktop/predictions_stiebels/full/prediction... | pd.read_csv('large_boostdt.csv') | pandas.read_csv |
"""
This module contains methods related to validation of csv data contained
in the CSVFile model.
"""
from collections import namedtuple
from marshmallow import fields, post_dump, Schema, validate
from pandas import Index, to_numeric
from viime.cache import region
SEVERITY_VALUES = ['error', 'warning']
CONTEXT_VALU... | to_numeric(raw_table.iloc[:, index], errors='coerce') | pandas.to_numeric |
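`to_numeric` with `errors='coerce'`, as in the completion, converts unparseable cells to NaN instead of raising, so a validator can count the failures. A minimal sketch:

```python
import pandas as pd

# Failed parses become NaN rather than exceptions.
col = pd.Series(["1.5", "2", "n/a", "3.0"])
parsed = pd.to_numeric(col, errors="coerce")
print(int(parsed.isna().sum()))  # 1 cell failed to parse
```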
import os
import unittest
import pandas as pd
from context import technical as ti
# Change working directory
# This enable running tests from repository root
if os.getcwd() != os.path.abspath(os.path.dirname(__file__)):
os.chdir('tests/')
# Test results
class ResultsRSI(unittest.TestCase):
# Input data
te... | pd.testing.assert_series_equal(self.results_rsi, results, check_names=False) | pandas.testing.assert_series_equal |
import numpy as np
import pandas as pd
import scipy as sc
import scipy.spatial as spatial
from anndata import AnnData
from .het import create_grids
def lr(
adata: AnnData,
use_lr: str = "cci_lr",
distance: float = None,
verbose: bool = True,
) -> AnnData:
"""Calculate the proportion of known liga... | pd.DataFrame(df, index=adata.obs_names, columns=adata.var_names) | pandas.DataFrame |
# routes related to the boba run monitor
import os
import time
import pandas as pd
import numpy as np
from flask import jsonify, request
from .util import read_csv, read_json, write_json
from bobaserver import app, socketio, scheduler
from bobaserver.bobastats import sampling, sensitivity
import bobaserver.common as c... | pd.read_csv(fn, na_filter=False) | pandas.read_csv |
import pandas as pd
import numpy as np
import argparse
import random
def create_context_to_id_map(df, df_sent):
context_to_id = {}
c_context_id = 0
context_ids = []
relevant_sentence_ids_arr = []
df = df.reset_index()
for index, row in df.iterrows():
# add the relevant sentences to the ... | pd.read_pickle(args.sent_data_path) | pandas.read_pickle |
"""Functions for testing by means of pytest
"""
import sys
sys.path.append("/home/daniel/Schreibtisch/Projekte/avalanche-risk")
import pandas as pd
import numpy as np
from model.functions_model import preprocess_X_values, get_shifted_features
import pytest
@pytest.fixture
def df():
df = | pd.DataFrame([["a", "1"], ["b", "2"], ["c", "3"], ["d", "4"]], index = [1, 2, 3, 4], columns = ["A", "B"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error as MSE
from sklearn import preprocessing
import math
import re
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^inter... | pd.read_csv('baseballdatabank-master/core/Batting.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import csv
from collections import defaultdict
import numpy as np
import re
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.tokenize.regexp import RegexpTokenizer
import pandas as pd
def clean_tokens(tokens, to... | pd.DataFrame(test['Text']) | pandas.DataFrame |
import datetime
import os
import tempfile
from collections import OrderedDict
import boto3
import pandas as pd
import pytest
import yaml
from moto import mock_s3
from numpy.testing import assert_almost_equal
from pandas.testing import assert_frame_equal
from unittest import mock
from triage.component.catwalk.storage ... | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
'''
(c) 2014 <NAME> and <NAME>
This module contains functions for parsing various ldsc-defined file formats.
'''
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import logging
def series_eq(x, y):
'''Compare series, return False if lengths not equal.'''
return len(x) == len(y) and (x... | pd.concat(ldscore_array, axis=1) | pandas.concat |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import re
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn import preprocessing, model_select... | pd.read_csv('../input/test.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-.
"""
doi of according publication [preprint]:
https://doi.org/10.5194/hess-2021-403
Contact: <EMAIL>
ORCID: 0000-0002-0585-9549
https://github.com/AndreasWunsch/CNN_KarstSpringModeling/
MIT License
large parts opf the code from <NAME> (https://github.com/andersonsam/cnn_lstm_era)
see also: A... | pd.read_csv(fileName,header=None) | pandas.read_csv |
from __future__ import division
from __future__ import print_function
# Preprocessing of Option Quotes
# ==============================
#
# This notebook demonstrates the preprocessing of equity options, in preparation for the estimation of the parameters of a stochastic model.
# A number of preliminary calculations m... | pandas.read_pickle('../data/df_SPX_24jan2011.pkl') | pandas.read_pickle |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to i... | pd.Index(["a", "b", "a"]) | pandas.Index |
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
#Custom Transformer Class
class NewFeatureTransformer(BaseEstimator, TransformerMixin):
def fit(self, x, y=None):
return self
def transform(self, x):
x['ratio'] = x['thalach']/x['trestbps']
... | pd.DataFrame(x.loc[:, 'ratio']) | pandas.DataFrame |
import torch
from lib import utils
from lib.dataloaders.dataloader import Dataset
from lib.metrics import metrics_torch, metrics_np
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from model.pytorch import supervisor
from model.pytorch.engine import Evaluat... | pd.DataFrame({'real12':y12,'pred12':yhat12, 'real3': y3, 'pred3':yhat3}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import sandy
from sandy.core.endf6 import _FormattedFile
__author__ = "<NAME>"
__all__ = [
"Errorr",
]
pd.options.display.float_format = '{:.5e}'.format
class Errorr(_FormattedFile):
"""
Container for ERRORR file text grouped by MAT, MF and MT numbers.... | pd.IntervalIndex.from_breaks(eg) | pandas.IntervalIndex.from_breaks |
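`IntervalIndex.from_breaks`, as in the completion, builds contiguous bins from a boundary array, a natural fit for the energy-group grids an ERRORR file describes. A minimal sketch with hypothetical boundaries:

```python
import pandas as pd

# Hypothetical energy-group boundaries (eV); each consecutive
# pair of breaks becomes one contiguous interval.
eg = [1e-5, 1.0, 1e3, 1e6]
idx = pd.IntervalIndex.from_breaks(eg)
print(len(idx))  # 3 groups from 4 breaks
```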
import pandas as pd
import glob
import os
import re
import phyphy
from ete3 import Tree
import numpy as np
absrel = glob.glob("families_absrel/logs/*.ABSREL.log")
family_list = []
branch_list = []
pvalue_list = []
for file in absrel:
with open(file) as myfile:
for line in myfile:
if re.search... | pd.read_csv(infile, sep='\t', names=col_names) | pandas.read_csv |
# Import python modules
import os, sys
# data handling libraries
import pandas as pd
import numpy as np
import pickle
import json
import dask
from multiprocessing import Pool
# graphical control libraries
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
# shape and layer libraries
import fiona... | pd.notnull(dataset) | pandas.notnull |
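`pd.notnull` returns an element-wise validity mask, the inverse of `pd.isnull`. A minimal sketch with made-up data:

```python
import numpy as np
import pandas as pd

# Element-wise mask of non-missing values.
df = pd.DataFrame({"flow": [1.2, np.nan, 3.4]})
print(int(pd.notnull(df["flow"]).sum()))  # 2 valid observations
```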
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from skle... | pd.DataFrame(self.maes, index=index_as_array_sup) | pandas.DataFrame |
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELE... | tm.assert_series_equal(s, s2) | pandas._testing.assert_series_equal |
import pandas as pd
import numpy as np
import unittest
import decipy.executors as exe
import decipy.normalizers as norm
import decipy.weigtings as wgt
matrix = np.array([
[4, 3, 2, 4],
[5, 4, 3, 7],
[6, 5, 5, 3],
])
alts = ['A1', 'A2', 'A3']
crits = ['C1', 'C2', 'C3', 'C4']
beneficial = [True, True, True, ... | pd.DataFrame(matrix, index=alts, columns=crits) | pandas.DataFrame |
import librosa
import numpy as np
import pandas as pd
from os import listdir
from os.path import isfile, join
from audioread import NoBackendError
def extract_features(path, label, emotionId, startid):
"""
Extract features of the audio files under the path directory, using the librosa library
:param path: file path
:param label: emotion type
:param startid: starting sequence number
... | pd.Series() | pandas.Series |
import os
import pprint as pp
from collections import OrderedDict, defaultdict
import diff_viewer
import pandas as pd
import streamlit as st
from datasets import load_from_disk
DATASET_DIR_PATH_BEFORE_CLEAN_SELECT = os.getenv("DATASET_DIR_PATH_BEFORE_CLEAN_SELECT")
OPERATION_TYPES = [
"Applied filter",
"Appli... | pd.DataFrame(data) | pandas.DataFrame |
# Procurement Charts - chart data
# -*- coding: latin-1 -*-
# A set of functions to calculate the chart data for procurement
# dashboards
import pandas as pd
import numpy as np
import sys
import settings
def generate_overview(df):
"""
Generate an overview of the whole dataset.
:param df:
Pandas datafra... | pd.value_counts(binned) | pandas.value_counts |
import pandas as pd
import numpy as np
# Mainly for time-series momentum and hp6-8, which cannot be grouped
adjust_price=pd.read_csv("../adjust_price/adjust_price.csv")
adjust_price=adjust_price.set_index('date')
cat_list=pd.read_csv("../data_extraction/cat_list.csv",header=None)
cat_list=pd.Series(cat_list[0])
# Create a new dataframe to record position information
port=pd.DataFrame(index=adjust_pri... | pd.DataFrame(index=check_vol.index,columns=check_vol.columns) | pandas.DataFrame |
from unittest import TestCase
from unittest.mock import ANY, Mock, call, patch
import pandas as pd
from mlblocks import MLPipeline
from orion import benchmark
from orion.evaluation import CONTEXTUAL_METRICS as METRICS
from orion.evaluation import contextual_confusion_matrix
def test__sort_leaderboard_rank():
ra... | pd.testing.assert_frame_equal(returned, expected_return) | pandas.testing.assert_frame_equal |
from collections import OrderedDict
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import Index, MultiIndex, date_range
import pandas.util.testing as tm
def test_constructor_singl... | pd.MultiIndex.from_frame(df, names=names_in) | pandas.MultiIndex.from_frame |
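`MultiIndex.from_frame`, as in the completion, turns each DataFrame column into one index level; the `names` argument relabels the levels. A minimal sketch:

```python
import pandas as pd

# Each column becomes one level of the resulting MultiIndex.
df = pd.DataFrame({"a": [1, 1, 2], "b": ["x", "y", "x"]})
mi = pd.MultiIndex.from_frame(df, names=["first", "second"])
print(mi.names)  # ['first', 'second']
```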
"""
Contains all functions that are needed in intermediary steps in order to obtain
certain tables and figures of the thesis.
"""
import os
import pickle
import numpy as np
import pandas as pd
import scipy.io
from ruspy.estimation.estimation import estimate
from ruspy.estimation.estimation_transitions import create_tr... | pd.DataFrame(index=index_table, columns=sensitivity_results.columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'CT_Viewer.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
import os
from moviepy.editor import ImageSequenceClip
from PyQt5 import QtCore, QtGui, QtWidgets
import pyli... | pandas.read_csv(dst_dir + patient_id + "_annos_pos_lidc.csv") | pandas.read_csv |
import pandas as pd
def find_ms(df):
subset_index = df[['BMI', 'Systolic', 'Diastolic',
'Triglyceride', 'HDL-C', 'Glucose',
'Total Cholesterol', 'Gender']].dropna().index
df = df.loc[subset_index]  # .ix was removed in modern pandas; use .loc
df_bmi_lo = df.loc[df['BMI']<25.0,:]
df_bmi_hi = df.loc[df[... | pd.concat([df_bmi_lo, male_df_bmi_hi, female_df_bmi_hi]) | pandas.concat |
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : Data Mining #
# File : \mymain.py #
# Python : 3.9.1 ... | pd.set_option('display.width', None) | pandas.set_option |
"""
The SamplesFrame class is an extended Pandas DataFrame, offering additional methods
for validation of hydrochemical data, calculation of relevant ratios and classifications.
"""
import logging
import numpy as np
import pandas as pd
from phreeqpython import PhreeqPython
from hgc.constants import constants
from hgc... | pd.Series(index=df_in.index,dtype='float64') | pandas.Series |
import tensorflow as tf
import tensorflow_graphics.geometry.transformation as tfg
import numpy as np
import pandas as pd
import random
import datetime
from tensorflow.keras import Input
from typing import Generator, Tuple, Dict
from pandas import DataFrame
from string import Template
from loguru import logger
from ap... | pd.DataFrame(data=new_acc, columns=["iphoneAccX", "iphoneAccY", "iphoneAccZ"]) | pandas.DataFrame |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: rz
@email:
"""
#%% imports
import itertools, time, copy
from tqdm import tqdm
import numpy as np
import pandas as pd
import torch
from torch.autograd import Variable
import Levenshtein as Lev
from sklearn import metrics
from .etdata import ETData
from .u... | pd.concat((evt_gt, etdata_gt.evt.loc[set_gt, 'evt'])) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
import argparse
from fastai.vision import *
from tqdm import tqdm
from pathlib import Path
import pandas as pd
import os
import sys
from fastai.callbacks import CSVLogger
# suppress anoying and irrelevant warning, see https://forums.fast.ai/t/warnings-when-trying-to-make-an-imag... | pd.read_csv("nifti/image_list.tsv", sep="\t", header=None, names=["pid","file"]) | pandas.read_csv |
import pandas as pd
import os.path
import datetime
data_path=os.path.dirname(__file__)+'/'
#combined_df = pd.read_csv(data_path+"combined_duty-b2nb_nb2b.csv")
combined_df = pd.read_csv(data_path+"combined_duty-b2b.csv")
#combined_df=combined_df.head(91)
j=1
final_df=pd.DataFrame()
combined_df["pairID"]=""
toggle=... | pd.to_datetime(combined_df['OrgUTC']) | pandas.to_datetime |
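A minimal sketch of the `pd.to_datetime` column conversion in the completion, with made-up `OrgUTC` strings:

```python
import pandas as pd

# Parse string timestamps into datetime64; pass errors="coerce"
# if malformed entries should become NaT instead of raising.
df = pd.DataFrame({"OrgUTC": ["2021-06-01 12:00", "2021-06-02 08:30"]})
df["OrgUTC"] = pd.to_datetime(df["OrgUTC"])
print(df["OrgUTC"].dt.hour.tolist())  # [12, 8]
```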
import pymongo
from PyQt5 import QtCore
import pandas as pd
import time
from bson.objectid import ObjectId
from nspyre.utils import get_mongo_client
import traceback
class DropEvent():
"""Represents a drop of a collection in a certain database"""
def __init__(self, db, col):
self.db, self.col = db, co... | pd.Series(doc) | pandas.Series |
# -*- coding: utf-8 -*-
"""Primary wepy simulation database driver and access API using the
HDF5 format.
The HDF5 Format Specification
=============================
As part of the wepy framework this module provides a fully-featured
API for creating and accessing data generated in weighted ensemble
simulations run w... | pd.DataFrame(records) | pandas.DataFrame |
"""
Binary Transport
================
Example of binary transport in pydeck. This notebook renders 10k points via the web sockets within
a Jupyter notebook if you run with ``generate_vis(notebook_display=True)``
Since binary transfer relies on Jupyter's kernel communication,
note that the .html in the pydeck document... | pd.DataFrame.from_records(node_positions) | pandas.DataFrame.from_records |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense, LSTM, GRU, Dropout
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras impo... | pd.DataFrame(self.model_results) | pandas.DataFrame |