| prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from covsirphy.util.term import Term
from covsirphy.loading.db_base import _RemoteDatabase
class _CSJapan(_RemoteDatabase):
"""
Access "COVID-19 Dataset in Japan.
https://github.com/lisphilar/covid19-sir/tree/master/data
Args:
... | pd.concat([c_df, e_df, p_df], axis=0, ignore_index=True, sort=True) | pandas.concat |
from datetime import datetime, timedelta
import operator
import pickle
import unittest
import numpy as np
from pandas.core.index import Index, Factor, MultiIndex, NULL_INDEX
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as tm
import pandas._tseries as tseries
class TestIndex(unittest... | Index(['a', 'b', 'c']) | pandas.core.index.Index |
import pandas as pd
#import html5lib as html5lib
# TODO: Load up the table, and extract the dataset
# out of it. If you're having issues with this, look
# carefully at the sample code provided in the reading
#
# .. your code here ..
#df = pd.read_html('http://espn.go.com/nhl/statistics/player/_/stat/points/sort/points... | pd.to_numeric(df.iloc[:,1],errors='coerce') | pandas.to_numeric |
import pandas as pd
import pandas.testing as tm
print(pd.__version__)
s = | pd.Series([1, 2, 3]) | pandas.Series |
import sys
import time
from pathlib import Path
import pandas as pd
import numpy as np
import xgboost as xgb
import lightgbm as lgb
import catboost
import mlflow
import hydra
import pickle
import shutil
import pprint
import warnings
from typing import List, Tuple, Any
from omegaconf.dictconfig import DictConfig
from sk... | pd.read_pickle(f'{DATA_DIR}/{f.path}') | pandas.read_pickle |
import keras
import numpy as np
import pandas as pd
import re
import nltk
from nltk.corpus import stopwords
import spacy
nlp = spacy.load('en_core_web_sm')
import warnings
# from Contractions import contraction_mapping
pd.set_option("display.max_colwidth", 200)
warnings.filterwarnings("ignore")
data =... | pd.read_csv("NewsSum.csv") | pandas.read_csv |
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymo... | assert_frame_equal(move_df, expected) | pandas.testing.assert_frame_equal |
#=======================================================================================================================
#
# ALLSorts v2 - The ALLSorts pipeline
# Author: <NAME>
# License: MIT
#
# Note: Inherited from Sklearn Pipeline
#
#==========================================================================... | pd.concat([probabilities, compare], join="inner") | pandas.concat |
from dataclasses import replace
import datetime as dt
from functools import partial
import inspect
from pathlib import Path
import re
import types
import uuid
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from solarforecastarbiter import datamodel
from solarforecastarbiter.io impor... | pd.Timedelta('24h') | pandas.Timedelta |
from pprint import pprint
import json
import requests
import pandas as pd
import os
import datetime as dt
from datetime import datetime
from configparser import ConfigParser
import base64
path_to_batches = "batches/"
batch_files = ['ct_river_area.json', 'ledgelight.json', 'lyme_oldlyme.json']
add_style = "yes"
export... | pd.DataFrame(columns=['town', 'reported_date', 'age_group', 'initiated', 'vaccinated', 'change']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 11 10:22:36 2018
@author: Prodipta
"""
import sys
import pandas as pd
import os
import datetime
import requests
import json
import shutil
## TODO: This is a hack, install the correct version
#zp_path = "C:/Users/academy.academy-72/Documents/python/zipline/"
#sys.path.ins... | pd.to_datetime(date) | pandas.to_datetime |
#!/usr/bin python3
"""
<Description of the programme>
Author: <NAME> <<EMAIL>>
Created: 05 Nov 2020
License: MIT
Contributors: <NAME>
"""
# Imports
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python:
import logging
# 3rd party:
from pandas import ... | to_datetime(d.loc[:, col], format=format) | pandas.to_datetime |
import unittest
from pathlib import Path
import sys
import tkinter
import numpy as np
import pandas as pd
sys.path.append(str(Path(__file__).parent.parent))
sys.path.append(str(Path(__file__).parent.parent / "src"))
import src.score as score
import src.database as database
import src.const as const
class TestDatabas... | pd.DataFrame({"x1": [1, 2, 3]}) | pandas.DataFrame |
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a co... | pd.DataFrame({'x': [1.0], 'y': [2.0], 'z': [3.0]}) | pandas.DataFrame |
from skyfield.api import load
import numpy as np
import math
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from skyfield.api import utc
from scipy.optimize import brentq # machine learning
from datetime import timedelta, datetime
import pytz
# Custom helper functions
from definitions impor... | pd.DataFrame(data_tmp) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from scipy.stats import pearsonr
# from mpl_toolkits.axes_grid1 import host_subplot
# import mpl_toolkits.axisartist as AA
# import matplotlib
import matplotlib.pyplot as plt
import matplotlib.t... | pd.concat(Nuba_Efi_348) | pandas.concat |
import numpy as np
import pandas as pd
from pandas import DataFrame
from sklearn import preprocessing
from sklearn.base import BaseEstimator
def calc_canceling_fund(estimated_vacation_time,
cancelling_policy_code,
original_selling_amount,
normali... | pd.to_datetime(full_data["booking_datetime"]) | pandas.to_datetime |
from warnings import filterwarnings
filterwarnings("ignore")
from nltk.tokenize import RegexpTokenizer
from nltk.stem import WordNetLemmatizer,PorterStemmer
from nltk.corpus import stopwords
import nltk
import json
import urllib
import re
import pandas as pd
from vaderSentiment.vaderSentiment import SentimentIntensi... | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
import numpy as np
import pandas as pd
import dstk
import random
# Create test data
# Class creation test dataset
df_create = | pd.DataFrame() | pandas.DataFrame |
__version__ = '0.0.1'
__author__ = '<NAME>, 2020'
import re
import numpy as np
import pandas as pd
fields = {
'AC': 'activating compound',
'AP': 'application',
'CF': 'cofactor',
'CL': 'cloned',
'CR': 'crystallization',
'EN': 'engineering',
'EXP': 'expression',
'GI': 'general informatio... | pd.DataFrame.from_dict(data, orient='index', columns=['']) | pandas.DataFrame.from_dict |
from copy import deepcopy
import datetime
import inspect
import pydoc
import numpy as np
import pytest
from pandas.compat import PY37
from pandas.util._test_decorators import async_mark, skip_if_no
import pandas as pd
from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range
... | tm.assert_index_equal(with_prefix.columns, expected) | pandas._testing.assert_index_equal |
import numpy as np
import pandas as pd
from mip import Model, xsum, minimize, CONTINUOUS, OptimizationStatus, BINARY, CBC, GUROBI, LP_Method
class InterfaceToSolver:
"""A wrapper for the mip model class, allows interaction with mip using pd.DataFrames."""
def __init__(self, solver_name='CBC'):
self.v... | pd.concat(generic_constraints) | pandas.concat |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from xgboost import XGBRegressor, plot_importance
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler, Normalizer
from sklearn.feature_selection import SelectKB... | pd.DataFrame({'Missing_Ratio': na_rate}) | pandas.DataFrame |
import numpy as np
import pandas as pd
def declat_mine(df, minsup):
frequent = {'support': [], 'itemset': []}
prefix = []
for col in df.columns:
d_col = set(df[df[col] == 0].index)
support = df.shape[0] - len(d_col)
if support >= minsup:
prefix.append((set(col), d_col, s... | pd.DataFrame(frequent) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import sys, os
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src')
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src/ct')
import pandas as pd
from tqdm import tqdm
filepath_hist = 'H:/cloud/cloud_data/Projects/DISCHARGEMaster/data/discharge_master/discharge_master_01092020/disch... | pd.read_pickle(filepath_hist) | pandas.read_pickle |
#
# extract and plot GMSL rate vs T values from AR5 and SROCC
#
# <NAME> 2021
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import hadcrut5
import pickle
from scipy.stats.stats import pearsonr
#--------------read google sheet:
sheet_id = '1b2CXW2D9ZFfJ4HDD42WpccF8xSzGVzzsEGie5yZBHCw'
shee... | pd.read_csv(url, error_bad_lines=False) | pandas.read_csv |
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=UserWarning) # to surpress future warnings
import pandas as pd
import sys
import textstat
import numpy as numpy
import math
import gensim
from pprint import pprint
from string import ascii_lowe... | pd.DataFrame(tester) | pandas.DataFrame |
from timeit import default_timer as timer
from collections import defaultdict
from tqdm import tqdm
import pandas as pd
#from evaluation_config import eval_runs
tqdm.pandas(desc="progess: ")
def add_scores(scores, list_of_param_dicts):
for param_dict in list_of_param_dicts:
for key, value in zip(param_dic... | pd.DataFrame() | pandas.DataFrame |
from pyulog import ULog
import pandas as pd
def getVioData(ulog: ULog) -> pd.DataFrame:
vehicle_visual_odometry = ulog.get_dataset("vehicle_visual_odometry").data
vio = | pd.DataFrame({'timestamp': vehicle_visual_odometry['timestamp'],
'sensor' : 'vio',
'x': vehicle_visual_odometry["x"],
'y': vehicle_visual_odometry["y"],
'z': vehicle_visual_odometry["z"],
'qw': vehicle_visual_odometry["q[0]"],
'qx': vehicle_visual_odometry["q[1]"],
'qy': vehicle_visual_odometry["q[2]"],
... | pandas.DataFrame |
# coding: utf-8
# Copyright 2020 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in... | pd.DataFrame() | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import tsplib95
import networkx as nx
from tqdm import tqdm
import sys
import re
def prepare_testset_FINDER(data_dir, scale_factor=0.000001):
graph_list = []
atoi = lambda text : int(text) if text.isdigit() else text
natural_keys = lambda text : [atoi(c) fo... | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
"""Convert EURECOM data dump file into a train sets.
"""
import ast
import os
import numpy as np
import pandas as pd
from collections import defaultdict
import langid
RANDOM_NUMBER = 621323849
RANDOM_NUMBER2 = 581085259
FNAME = "data/total_post.csv"
COLS = [
"obj",
"museum",
"place_... | pd.isna(x) | pandas.isna |
import importlib
import json
import os
import pdb
import sys
import fnet
import pandas as pd
import tifffile
import numpy as np
from fnet.transforms import normalize
def pearson_loss(x, y):
#x = output
#y = target
vx = x - torch.mean(x)
vy = y - torch.mean(y)
cost = torch.sum(vx * vy) / (torch.sq... | pd.read_csv(val_path) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from __future__ import division
from datetime import datetime
from sklearn import linear_model
import pandas as pd
import numpy as np
import scipy.stats as st
import statsmodels.distributions.empirical_distribution as edis
import seaborn a... | pd.read_csv('Synthetic_streamflows/Willamette_hist_streamflow.csv',header=0) | pandas.read_csv |
import pandas as pd
from tabulate import tabulate
from sklearn.model_selection import train_test_split
def beautiful_nan_table(dataframe):
nans = dataframe.isna().sum().to_frame().rename(columns={0:"Number of Null Values"}).T
print(tabulate(nans, nans.columns, tablefmt="fancy_grid"))
def train_valiadate_tes... | pd.Series(input_list) | pandas.Series |
"""
concavity_automator comports multiple scripts automating concavity constraining method for landscape
"""
import lsdtopytools as lsd
import numpy as np
import numba as nb
import pandas as pd
from matplotlib import pyplot as plt
import sys
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.colle... | pd.read_feather("%s_XY.feather"%(name)) | pandas.read_feather |
import streamlit as st
import pandas as pd
from utils import *
from modules import *
import os
import numpy as np
import altair as alt
import plotly.graph_objects as go
absolute_path = os.path.abspath(__file__)
path = os.path.dirname(absolute_path)
ipl_ball = pd.read_csv(path+'/2008_2021_updated_ball.csv')
ipl_ma... | pd.DataFrame() | pandas.DataFrame |
"""
This script creates a boolean mask based on rules
1. is it boreal forest zone
2. In 2000, was there sufficent forest
"""
#==============================================================================
__title__ = "Hansen Active fire"
__author__ = "<NAME>"
__version__ = "v1.0(20.11.2019)"
__email__ = "<EMAIL>"
#=... | pd.Timestamp.now() | pandas.Timestamp.now |
# -*- coding: utf-8 -*-
# Run this app with `python app.py` and
# visit http://127.0.0.1:8050/ in your web browser.
#AppAutomater.py has App graphs and data
#Graphs.py has all graphs
#Data.py has all data processing stuff
#Downloader.py is used to download files daily
import dash
import dash_core_components... | pd.DataFrame(df["data"]) | pandas.DataFrame |
from __future__ import print_function
import os
import datetime
import sys
import pandas as pd
import numpy as np
import requests
import copy
# import pytz
import seaborn as sns
from urllib.parse import quote
import monetio.obs.obs_util as obs_util
"""
NAME: cems_api.py
PGRMMER: <NAME> ORG: ARL
This code written at... | pd.DataFrame() | pandas.DataFrame |
# Copyright (C) 2016 <NAME> <<EMAIL>>
# All rights reserved.
# This file is part of the Python Automatic Forecasting (PyAF) library and is made available under
# the terms of the 3 Clause BSD license
import pandas as pd
import numpy as np
import datetime
from datetime import date
# from memory_profiler import profi... | pd.read_csv(filename) | pandas.read_csv |
# MIT License
#
# Copyright (c) 2020-2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merg... | pd.read_csv(datafile,comment='#',delimiter=',') | pandas.read_csv |
import os
import pandas as pd
import requests
import mixpanel as mp
MIXPANEL_API_KEY = os.environ.get('MIXPANEL_API_KEY')
MIXPANEL_API_SECRET = os.environ.get('MIXPANEL_API_SECRET')
keys = (MIXPANEL_API_KEY, MIXPANEL_API_SECRET)
DATA_LOCATION = './data/ppe-responses.csv'
HOSPITALS_LOCATION = './data/hospital_location... | pd.DatetimeIndex(gb_df['time']) | pandas.DatetimeIndex |
#%%
import os
from pyteomics import mzid, mzml
import pandas as pd
import numpy as np
import glob
"""
Identically as how we did with the training data set, we randomly divided the test files into different
folders, then we generated different data frames and stored all of them in one single hdf file as our
validation ... | pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities}) | pandas.DataFrame |
import glob, pandas as pd, time, datetime
| pd.set_option('mode.chained_assignment', None) | pandas.set_option |
"""
data_curation_functions.py
Extract Kevin's functions for curation of public datasets
Modify them to match Jonathan's curation methods in notebook
01/30/2020
"""
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib_venn import venn3
import seaborn as sns
impor... | pd.concat(lst) | pandas.concat |
import requests
import json
import traceback
import sqlite3
import server.app.decode_fbs as decode_fbs
import scanpy as sc
import anndata as ad
import pandas as pd
import numpy as np
import diffxpy.api as de
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import seaborn as sns
import matplo... | pd.read_sql_query("select gene,log2fc,pval,qval from DEG where contrast=? and tags=?;", conn,params=comGrp) | pandas.read_sql_query |
# -*- coding: utf-8 -*-
"""Copy of final.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1JsZAdNd67Fcn-S5prbt1w33R4wxE_9ep
"""
# Commented out IPython magic to ensure Python compatibility.
import pandas as pd
import numpy as np
import matplotlib.... | pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) | pandas.concat |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
def prepare_titanic(test_size=0.3, random_state=123):
print('Download or read from disk.')
ds = tfds.load('titanic', split='train')
# Turn DataSe... | pd.get_dummies(df['embarked'], prefix='embarked') | pandas.get_dummies |
import pandas as pd
import numpy as np
import pickle
from .utils import *
def predNextDays(optmod_name, opt_mod, var_name, pred_days):
pred = (opt_mod[optmod_name]['mod_data'][var_name])[opt_mod[optmod_name]['i_start'] + opt_mod[optmod_name]['period'] -1 :opt_mod[optmod_name]['i_start'] + opt_mod[optmod_name]['per... | pd.Series(mod_Gc) | pandas.Series |
import os
import math
import copy
import random
import calendar
import csv
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
import sqlite3
import seaborn as sns
#from atnresilience import ... | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 23 19:41:56 2021
@author: u0117123
"""
#Import modules
import pandas as pd
import numpy as np
import sklearn
from sklearn.linear_model import LogisticRegression
#Input variables
Validation_Area="Tervuren"
#Referece objects with features path
refObjectPat... | pd.concat(rfe_features_append) | pandas.concat |
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, c... | pd.DataFrame(sig_list) | pandas.DataFrame |
import pandas as pd #import necassary packages
import statsmodels.api as sms
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import numpy as np
import pickle
df = pd.read_csv('us_bank_wages/us_bank_wages.txt', delimiter="\t") #read the csv-file
df.drop('Unnamed: 0',... | pd.get_dummies(df['EDUC'], prefix='edu', drop_first=True) | pandas.get_dummies |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 27 10:23:59 2021
@author: alber
"""
import re
import os
import pandas as pd
import numpy as np
import spacy
import pickle
import lightgbm as lgb
import imblearn
from sklearn import preprocessing
from sklearn.semi_supervised import (
LabelPropagation,
LabelSpread... | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import cvxpy as cvx
#### file to make the simulation of people that we can work with
class Person():
""" Person (parent?) class -- will define how the person takes in a points signal and puts out an energy signal
baseline_energy = a list or dataframe of values. This is data... | pd.DataFrame(energy_output) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # 🏁 Wrap-up quiz
#
# **This quiz requires some programming to be answered.**
#
# Open the dataset `bike_rides.csv` with the following commands:
# In[1]:
import pandas as pd
cycling = pd.read_csv("../datasets/bike_rides.csv", index_col=0,
parse_dates=... | pd.Series(HGB_pred) | pandas.Series |
#!/usr/local/bin/python
import argparse
import os
import sys
import pandas as pd
import numpy as np
import time
pd.options.mode.chained_assignment = None
parser = argparse.ArgumentParser(prog='snvScore')
parser.add_argument('SampleBED',type=str,help='Path to the mosdepth per-base BED output')
parser.add_argument('SNVG... | pd.concat([snv_cov,snvg_part]) | pandas.concat |
"""
Import as:
import core.test.test_statistics as cttsta
"""
import logging
from typing import List
import numpy as np
import pandas as pd
import pytest
import core.artificial_signal_generators as casgen
import core.finance as cfinan
import core.signal_processing as csproc
import core.statistics as cstati
import h... | pd.Series([]) | pandas.Series |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not u... | pandas.concat(y, axis=0) | pandas.concat |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def postgres_url() -> str:
conn = os.environ["POSTGRES_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(postgres_url: str) -> None:
... | assert_frame_equal(df, expected, check_names=True) | pandas.testing.assert_frame_equal |
# coding: utf-8
"""Main estimation code.
"""
import re
import numpy as np
import pandas as pd
from scipy.stats.mstats import gmean
from statsmodels.base.model import GenericLikelihoodModel
from numba import jit
_norm_pdf_C = np.sqrt(2 * np.pi)
@jit(nopython=True)
def _norm_pdf(x):
return np.exp(-x ** 2 / 2)... | pd.DataFrame({asf_index_loc: asf_loc, 1 - asf_index_loc: other_index}) | pandas.DataFrame |
import numpy as np
import matplotlib.pyplot as plt
import pyvista as pv
import pandas as pd
from skimage import measure
from scipy.integrate import simps
from scipy.interpolate import griddata
import geopandas as gpd
from shapely.geometry import MultiPolygon, Polygon
from zmapio import ZMAPGrid
def poly_area(x,y):
... | pd.concat(list_df_sorted, axis=0) | pandas.concat |
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomCon... | pd.to_datetime('2020-02-01') | pandas.to_datetime |
# This example requires pandas, numpy, sklearn, scipy
# Inspired by an MLFlow tutorial:
# https://github.com/databricks/mlflow/blob/master/example/tutorial/train.py
import datetime
import itertools
import logging
import sys
from typing import Tuple
import numpy as np
import pandas as pd
from pandas import DataFram... | DataFrame.sample(data, frac=0.2, random_state=task_target_date.day) | pandas.DataFrame.sample |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
... | tm.assert_frame_equal(result_no_copy, expected) | pandas._testing.assert_frame_equal |
class Preprocessing:
#Assumption 1 - Data Columns For Train & Test Will Be Same
#Assumption 2 - Ordinal & Bit Switches Will Not Be Pushed In Nominal Function
#Assumption 3 - Train Categorical Will Be SuperSet & Test Will Be SubSet, Else Model To Be ReCreated
def LoadData(self, FileName, HeaderMissing="... | pd.DataFrame(FlattenedData) | pandas.DataFrame |
import pandas as pd
from sklearn import model_selection as skl
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
data = pd.read_csv('insurance.csv')
dframe = data.copy()
dframe['region'].fillna(method='bfill', inplace=True)
bmi_median_val = round(dframe['bmi... | pd.get_dummies(sample) | pandas.get_dummies |
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
impo... | date_range("1/1/2000", periods=10) | pandas.date_range |
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import scipy.stats as stats
import os
import matplotlib.pyplot as plt
import traceback
import statsmodels.api as sm
import statsmodels.formula.api as smf
import statsmodels
import bambi as bmb
import arviz as az
import sklearn
... | pd.set_option('display.max_rows', None) | pandas.set_option |
import calendar
from datetime import date, datetime, time
import locale
import unicodedata
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.timezones import maybe_get_tz
from pandas.core.dtypes.common import is_integer_dtype, is_list_like
import pandas as pd
from pandas import (
DataFrame, ... | DatetimeIndex(s.values, freq='infer') | pandas.DatetimeIndex |
import re
import os
import pandas as pd
import numpy as np
def readGas(DataPath, building, building_num, write_data, datafile, floor_area):
dateparse = lambda x: pd.datetime.strptime(x, '%d-%b-%y')
print('importing gas data from:', DataPath + building + '/Data/' + datafile + '_SubmeteringData.csv')
if bui... | pd.concat([df[['GF_AC', '1st_AC', '2nd_AC', '3rd_AC']]], axis=1) | pandas.concat |
import pandas as pd
import networkx as nx
import pytest
from kgextension.feature_selection import hill_climbing_filter, hierarchy_based_filter, tree_based_filter
from kgextension.generator import specific_relation_generator, direct_type_generator
class TestHillCLimbingFilter:
def test1_high_beta(self):
i... | pd.read_csv("test/data/feature_selection/hill_climbing_test1_input.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import logging
import operator
from abc import abstractmethod
from enum import Enum
from typing import Dict, Generator, List, Tuple, Union
import numpy as np
import pandas as... | pd.DataFrame(None, columns=self._meta_fields) | pandas.DataFrame |
'''
BSD 3-Clause License
Copyright (c) 2021, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions ... | pd.to_datetime(data[timecol]) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Analyze CSV file into scores.
Created on Sat Feb 12 22:15:29 2022 // @hk_nien
"""
from pathlib import Path
import os
import re
import sys
import pandas as pd
import numpy as np
PCODES = dict([
# Regio Noord
(1011, 'Amsterdam'),
(1625, 'Hoorn|Zwaag'),
... | pd.Timedelta(f'{hm}:00') | pandas.Timedelta |
import numpy as np
import pandas as pd
cjxx1 = | pd.read_csv('../SourceData/bks_cjxx_out1-1.csv',usecols = ['xh','xn','xqm','ksrq','kch','kxh','kccj','xf','kcsxdm','xdfsdm']) | pandas.read_csv |
import os
import matplotlib.cm as mcm
import matplotlib.pyplot as plt
import pandas as pd
import pytest
from bevel.plotting import _DivergentBarPlotter
from bevel.plotting import divergent_stacked_bar
from pandas.testing import assert_frame_equal
@pytest.fixture
def sample_data_even():
a, b, c = 'a', 'b', 'c'
... | assert_frame_equal(actual, expected) | pandas.testing.assert_frame_equal |
import datetime
import pandas as pd
import plotly.express as px
import streamlit as st
def clean_dataframe(df):
df = df.drop(columns=[0])
df.rename(
columns={
1: "errand_date",
2: "scrape_time",
3: "rekyl_id",
4: "status",
5: "reporter",
... | pd.isnull(row.scrape_time.Avslutad) | pandas.isnull |
import scanpy as sc
import numpy as np
import scipy as sp
from skmisc.loess import loess
from statsmodels.stats.multitest import multipletests
from scipy.stats import rankdata
import pandas as pd
import time
def score_cell(data,
gene_list,
gene_weight=None,
suffix='',
... | pd.concat([df_cell, temp_df], axis=0) | pandas.concat |
from flask import Flask, render_template, request, Response, send_file
import matplotlib
import io
import base64
from PIL import Image
from textwrap import wrap
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import matplotlib.colors as pltcolors
import matplotlib.ticker as ticker
from mp... | pd.merge(gdf, df, right_on='id', left_on='id') | pandas.merge |
# -*- coding: utf-8 -*-
from spider.https import Http
from spider.jsonparse import JsonParse
from spider.setting import headers
from spider.setting import cookies
import time
import logging
import pandas as pd
from bs4 import BeautifulSoup
class Spider:
def __init__(self,kdList, cityList):
self.kdList = kd... | pd.concat([self.df, df2], ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 30 10:31:31 2021
@author: Administrator
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 22 11:25:22 2021
@author: Administrator
"""
import h5py
# from pyram.PyRAM import PyRAM
from scipy import interpolate
import pandas as pd
import numpy as np... | pd.DataFrame(sspdic) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
#import statsmodels as stat
#import statsmodels.formula.api as smf
#import statsmodels.api as sm
#import matplotlib.pyplot as plt
#import nibabel.gifti as gio
#from statsmodels.stats.outliers_influence import OLSInfluence
from itertools import ... | pd.DataFrame(all_global_info_values) | pandas.DataFrame |
import pandas as pd
import os
csvfile = os.path.join(os.path.dirname(__file__),
"../../data/penguins_lter.csv")
main_db = | pd.read_csv(csvfile, sep=";") | pandas.read_csv |
import pathlib
import numpy as np
import pandas as pd
from ..designMethods.en_13001_3_3 import ENComputation, LoadCollectivePrediction, MARSInput
from .output import ResultWriter
from ..designMethods.en_13001_3_3.input_error_check import InputFileError
class MainApplication():
def __init__(self) -> Non... | pd.Series(wheel_geometries) | pandas.Series |
"""
Written by <NAME> and contributed to by <NAME>.
Using the NOAA rubrics Dr Habermann created, and his work
conceptualizing the documentation language so that rubrics using
recommendations from other earth science communities can be applied
to multiple metadata dialects as a part of the USGeo BEDI and
NSF DIBBs proje... | pd.Series(LevelOrder) | pandas.Series |
def load_gene_exp_to_df(inst_path):
'''
Loads gene expression data from 10x in sparse matrix format and returns a
Pandas dataframe
'''
import pandas as pd
from scipy import io
from scipy import sparse
from ast import literal_eval as make_tuple
# matrix
Matrix = io.mmread( inst_path + 'matrix.mtx')... | pd.Series(ini_genes) | pandas.Series |
"""
Static data imports
Written by <NAME> <EMAIL>
(C) 2014-2017 <NAME>
Released under Apache 2.0 license. More info at http://www.apache.org/licenses/LICENSE-2.0
"""
import pandas
from pandas import read_csv
#import os
#print os.path.dirname(os.path.abspath(__file__))
# Main folders
#UATPATH = 'O:\\G... | read_csv(DEFPATH+'CCY.csv',index_col=0) | pandas.read_csv |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not u... | pandas.DataFrame(data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
#
from cached_property import cached_property
from functools import lru_cache
from .backend import DataBackend
from ..utils import get_str_date_from_int, get_int_date
import pymongo
import QUANTAXIS as qa
from QUANTAXIS.QAFetch import QATdx as QATdx
import pandas as pd
import datetime
# XSH... | pd.DataFrame(L) | pandas.DataFrame |
from transformer_rankers.eval import results_analyses_tools
from transformer_rankers.utils import utils
from IPython import embed
import pandas as pd
import numpy as np
import scipy.stats
import argparse
import logging
import json
import traceback
import os
import sys
logging.basicConfig(
level=logging.INFO,
... | pd.read_csv(run_folder+"/predictions.csv") | pandas.read_csv |
import json
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
def update_graph(
graph_id,
graph_title,
y_train_index,
y_val_index,
run_log_json,
yaxis_title,
):
def smooth(scalars, weight=0.6):
last = scalars[... | pd.read_json(run_log_json, orient="split") | pandas.read_json |
import pandas as pd
from tornado.ioloop import IOLoop
import yaml
from jinja2 import Template
from bokeh.application.handlers import FunctionHandler
from bokeh.application import Application
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, Slider, Div
from bokeh.plotting import figure
from b... | pd.read_csv('data.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
#########################################################################################################
'''
Feature Engineering
'''
def create_name_feat(train, test):
for i in [train, test]:
i['Name_Len'] = i['Name'].apply(lambda x: len(x))
i['Name_Title'] =... | pd.isnull(x) | pandas.isnull |
"""
make_allvar_report
allvar_periodogram_checkplot
allvar_plot_timeseries_vecs
plot_rotationcheck
"""
from glob import glob
import os, pickle, shutil, multiprocessing
import numpy as np, pandas as pd, matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from numpy import array as... | pd.isnull(lsp) | pandas.isnull |
from .base import GenericPreprocessor
import numpy as np
import pandas as pd
class ZTFLightcurvePreprocessor(GenericPreprocessor):
def __init__(self, stream=False):
super().__init__()
self.not_null_columns = [
'mjd',
'fid',
'magpsf',
'sigmapsf',
... | pd.to_numeric(x, errors='coerce') | pandas.to_numeric |
import os
import lmfit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ImagingReso._utilities import ev_to_s
from cycler import cycler
from lmfit import Model
from lmfit.models import LinearModel
from scipy.interpolate import interp1d
import ResoFit._utilities as fit_util
from ResoFit.mode... | pd.DataFrame() | pandas.DataFrame |
# read content function
## read content based on user & task inputs
## NOTE: might need to think of some parrellal solutions for this function
import pandas as pd
from sika.task_bypass.tasktypes.read.http_request import http_request, http_request_dynamic
from IPython import embed
def read_content(db, stage_name, task... | pd.DataFrame([base_url]) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
from sktime.transformers.series_as_features.summarize import PlateauFinder
@pytest.mark.parametrize("value", [np.nan, -10, 10, -0.5, 0.5])
def test_PlateauFinder(value):
# generate test data
value = np.nan
X = pd.DataFrame(pd.Series([
pd.Series... | pd.Series([value, value, 3, 3, value, 2, 2, 3]) | pandas.Series |