| prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
|---|---|---|
import pandas as pd
from pandas import Period, offsets
from pandas.util import testing as tm
from pandas.tseries.frequencies import _period_code_map
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
... | Period(freq='D', year=2006, month=9, day=30) | pandas.Period |
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import random
MAXLIFE = 120
SCALE = 1
RESCALE = 1
true_rul = []
test_engine_id = 0
training_engine_id = 0
def kink_RUL(cycle_list, max_cycle):
'''
Piecewise linear functi... | pd.concat([test_FD001, test_FD003]) | pandas.concat |
import multiprocessing
import numpy as np
import pandas as pd
from kts.validation.leaderboard import leaderboard as lb
from kts.core.backend.memory import load
from kts.core.base_constructors import merge, wrap_stl_function, empty_like
def _apply_df(args):
"""
Args:
args:
Returns:
"""
d... | pd.DataFrame(data=pred, columns=col_names) | pandas.DataFrame |
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Apache License, version 2.0.
# If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0.
# SP... | pd.testing.assert_index_equal(exp, index) | pandas.testing.assert_index_equal |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categor... | Series(["a", "b", "c", "a"]) | pandas.Series |
import numpy as np
import pandas as pd
import datetime as dt
import os
import zipfile
from datetime import datetime, timedelta
from urllib.parse import urlparse
study_prefix = "U01"
def get_user_id_from_filename(f):
#Get user id from from file name
return(f.split(".")[3])
def get_file_names_from_zip(z, file_... | pd.to_datetime(new_obs_df.index) | pandas.to_datetime |
import copy
import pickle as pickle
import os
import sys
import time
import pandas as pd
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
from numpy import array, float32
import seaborn as sns
FRAME = 10
TWINDOW = 300
TDELTA = 600 # 300
MIN_FRAME = ... | pd.to_datetime(pd_frame[xvars[0]], unit='s') | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# In[66]:
# Import required packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
# Load in the data
df = pd.read_csv("InterestsSurvey.csv")
# Check for missing values
null = df.isnull().sum()
print('number of missing... | pd.DataFrame(pcs, columns=df.columns[1:]) | pandas.DataFrame |
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d, CubicSpline
from pandas.tseries.offsets import DateOffset
from pennies.market.curves import DiscountCurveWithNodes
from pennies.market.market import RatesTermStructure
# 1. Define Valuation Date
dt_val = pd.to_datetime('today')
# 2. Defin... | DateOffset(months=24) | pandas.tseries.offsets.DateOffset |
import re
import pandas as pd
from src.mapping.columns.column_label_catalog import ColumnLabelCatalog
from src.mapping.columns.column_name_classifier import ColumnNameClassifier
class PseudocolumnGenerator:
def __init__(self, data_source, concatenation_delimiter: str = " "):
"""Initialize new pseudocolu... | pd.isnull(name_pieces[i]) | pandas.isnull |
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ im... | pd.Series([10., 11., 12., 11., 10.], index=price.index) | pandas.Series |
# Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
import copy
import logging
import numpy as np
import pandas as pd
from typing import Any, Callable, Dict, List, Optional, Union
from sklearn.utils import check_consistent_length
import warnings
from functools import wra... | pd.DataFrame.from_dict(features) | pandas.DataFrame.from_dict |
import unittest
import pandas as pd
from pandas.util.testing import assert_series_equal
import numpy as np
from easyframes.easyframes import hhkit
class TestStataMerge(unittest.TestCase):
def setUp(self):
"""
df_original = pd.read_csv('sample_hh_dataset.csv')
df = df_original.copy()
print(df.to_dict())
... | pd.DataFrame(
{'hh': {0: 2, 1: 4, 2: 5, 3: 6, 4: 7},
'has_fence': {0: 1, 1: 0, 2: 1, 3: 1, 4: 0}
}) | pandas.DataFrame |
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import time
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.metrics import confusion_matrix
import itertools
from keras.utils import p... | pd.Series(x_train) | pandas.Series |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import logging
import numpy as np
import pandas as pd
class TerrplantFunctions(object):
"""
Function class for Stir.
"""
def __init__(self):
"""Class representing the functions for Sip"""
super(Terrplan... | pd.DataFrame.min(df, axis=1) | pandas.DataFrame.min |
# %% [markdown]
# ##
import os
import warnings
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import seaborn as sns
from joblib import Parallel, delayed
from sklearn.exceptions import ConvergenceWarning
from sklearn.manifold im... | pd.DataFrame(data=umap_euc) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 15 11:01:20 2020
@author: Ray
@email: <EMAIL>
@wechat: RayTing0305
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
import string
import scipy.stats as stats
'''
relax a little bit
test a quiz
'''
sdata = {'Ohi... | pd.Timestamp('20130102') | pandas.Timestamp |
import pandas as pd
import pytest
from eli5 import explain_prediction_df
from sklearn.datasets import load_boston
from pandas.testing import assert_series_equal
import ttrees
import build_model
def test_prediction_decomposition_eqal_eli5():
"""Test that the prediction decomposition outputs from xgb.explainer.dec... | pd.DataFrame(boston["data"], columns=boston["feature_names"]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Benchmark Results
# This notebook visualizes the output from the different models on different classification problems
# In[1]:
import collections
import glob
import json
import os
import numpy as np
import pandas as pd
from plotnine import *
from saged.utils import split... | pd.concat([tissue_metrics, new_df]) | pandas.concat |
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELE... | tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
#!/usr/bin/env python
# coding: utf-8
#
# EPY: stripped_notebook: {"metadata": {"kernelspec": {"display_name": "starfish", "language": "python", "name": "starfish"}, "language_info": {"codemirror_mode": {"name": "ipython", "version": 3}, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert... | pd.merge(cnts_benchmark, cnts_starfish, on='gene', how='left') | pandas.merge |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from... | pd.array([1, 1, 1, 1, 1], dtype="Int64") | pandas.array |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as... | pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_C_S1') | pandas.read_excel |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
import string
from collections import OrderedDict
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.uuid... | pd.Series([1], dtype=np.int64) | pandas.Series |
import streamlit as st
import pandas as pd
import yfinance as yf
import datetime
import os
from pathlib import Path
import requests
import hvplot.pandas
import numpy as np
import matplotlib.pyplot as plt
from MCForecastTools_2Mod import MCSimulation
import plotly.express as px
from statsmodels.tsa.arima_model import ... | pd.DataFrame(fighting) | pandas.DataFrame |
import numpy as np
import pandas as pd
class AdjacencyMatrix:
def __init__(self, base_path):
self.base_path = base_path
def get_folder_data(self, folder):
news_df = pd.read_csv(self.base_path + folder + "/News.txt", header=None)
news_list = list(news_df[0])
users_df = | pd.read_csv(self.base_path + folder + "/User.txt", header=None) | pandas.read_csv |
import os
import pandas as pd
import numpy as np
import gsw
def string_converter(value):
'''To deal with Courtney CTD codes'''
return value.split(":")[-1].strip()
def int_converter(value):
'''To deal with Courtney CTD codes'''
return int(float(value.split(":")[-1].strip()))
def float_converter(value):
... | pd.read_csv(bottle_file, skiprows=[0,1,2,3,4,5,6,7,8,9,11], skipfooter=1, engine='python') | pandas.read_csv |
from numpy import mean
import pandas as pd
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plot
import matplotlib.mlab as mlab
import matplotlib.pylab as lab
import matplotlib.patches as patches
import matplotlib.ticker as plticker
from matplotlib import rcParams
from matplotlib import gridspec... | pd.Series(samies[1:]+[-20]) | pandas.Series |
"""
This is an upgraded version of Ceshine's LGBM starter script, simply adding more
average features and weekly average features on it.
"""
from datetime import date, timedelta
import gc
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder... | pd.concat([promo_2017_train, promo_2017_test], axis=1) | pandas.concat |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: li
@file: factor_operation_capacity.py
@time: 2019-05-30
"""
import gc
import sys
sys.path.append('../')
sys.path.append('../../')
sys.path.append('../../../')
import six, pdb
import pandas as pd
from pandas.io.json import json_normalize
from utili... | pd.merge(factor_derivation, management, how='outer', on="security_code") | pandas.merge |
import datetime
import re
from itertools import islice
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from dateutil.parser import parse as d
from utils_pandas import daterange
from utils_pandas import export
from utils_scraping import any_in
from utils_scraping import camelot_cache
from utils_sc... | pd.DataFrame(columns=["Date", "Province"]) | pandas.DataFrame |
import os
import pandas as pd
import streamlit.components.v1 as components
parent_dir = os.path.dirname(os.path.abspath(__file__))
build_dir = os.path.join(parent_dir, "frontend/build")
assert os.path.exists(build_dir)
_component_func = components.declare_component("st_datatable", path=build_dir)
def st_datatable(d... | pd.DataFrame() | pandas.DataFrame |
import logging
import pandas as pd
from datetime import timedelta, date
from ..models import Index, Quote, Quote_CSI300, Hvlc_report, Ublb_cross,\
Hvlc_strategy, Hvlc_report_history
from ..utils.utils import gen_id, latest_over_rsi70
logger = logging.getLogger('main.hvlc')
def hvlc_report(sdic):
# Create Hvlc_repo... | pd.Series(x) | pandas.Series |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,... | pd.Timestamp(tmp[0]) | pandas.Timestamp |
import logging
import pandas as pd
from configchecker import ConfigChecker
import whalealert.settings as settings
log = logging.getLogger(__name__)
class Writer():
""" Puslishing Whale Alert API status and database results"""
def __init__(self, status, database):
self.__status = status
self._... | pd.DataFrame(current_transactions) | pandas.DataFrame |
import json
from typing import Tuple, Union
import pandas as pd
import numpy as np
import re
import os
from tableone import TableOne
from collections import defaultdict
from io import StringIO
from .gene_patterns import *
import plotly.express as px
import pypeta
from pypeta import Peta
from pypeta import filter_descr... | pd.concat(sample_id_series) | pandas.concat |
# Get data
import io
import numpy as np
import pandas as pd
import requests
from coronavirus.utils import fill_dates
def get_data():
country = get_hopkins()
pop_country = get_pop_country()
country = pd.merge(country, pop_country, how="left", on="country_name")
state = get_tracking()
pop_state ... | pd.read_csv(data) | pandas.read_csv |
"""Module providing functions to plot data collected during sleep studies."""
import datetime
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Union
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.ticker as mticks
import pandas as pd
import seaborn as sns
from fau... | pd.to_datetime(sleep_endpoints["bed_interval_end"]) | pandas.to_datetime |
# divid by osm edge points and nodes
import pymongo
import json
import pandas as pd
import geopandas as gpd
import hashids
from hashids import Hashids
from shapely.geometry import LineString, Polygon, Point
import shapely
import osmnx
import geojson
import geopy.distance
myclient = pymongo.MongoClient("mongodb://192.1... | pd.DataFrame(district_docs) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 22 14:50:25 2021
@author: <NAME>
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from ke... | pd.concat((trainYparta,trainYpartb),axis=0) | pandas.concat |
# imports
from sklearn.cluster import KMeans
import pandas as pd
import plotly.express as px
from numpy import random
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
from flask import Flask, render_template, request, redirect, url_for, send_file
import base64
from io impor... | pd.read_excel(f) | pandas.read_excel |
from os.path import abspath, dirname, join
import h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from utils import make_dir, numpy_ewma_vectorized_v2, plot_postprocess, print_init, label_converter, series_indexer, \
color4la... | pd.Series(mean_feat_std, index=self.params_df.index) | pandas.Series |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
from pandas.api.types import is_scalar
from pandas.compat import to_str, string_types, numpy as numpy_compat, cPickle as pkl
import pandas.core.common as com
from pandas.core.dtypes.common import ... | pandas.DataFrame() | pandas.DataFrame |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, ... | concat([state, data]) | pandas.concat |
import numpy as np
from csv import reader
from sklearn.model_selection import train_test_split
import pandas as pd
with open('glass_data.csv') as f:
raw_data = f.read()
####PREPROCESS OF THE DATASET######
def data_preprocess(raw_data):
# Load a CSV file
dataset = list()
#with filename as file:
csv... | pd.concat([pd_data, labels], axis=1) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import itertools
import json
import operator
import os
from pathlib import Path
from pprint import pprint
import re
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D
import numpy as np
import pandas as pd
import s... | pd.read_csv(f) | pandas.read_csv |
from dataclasses import dataclass, field
from operator import itemgetter
from typing import Set, Dict, List, Optional, Tuple
from urllib.parse import urlparse
import copy
import orjson
import pandas as pd
from kgdata.wikidata.models import WDEntity
@dataclass
class Link:
href: str
start: int
end: int
... | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import matplotlib as mpl
import numpy as np
from sklearn import metrics
import itertools
import warnings
from dateutil.relativedelta import relativedelta
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.statespace.sarimax im... | pd.set_option('display.max_columns',None) | pandas.set_option |
"""
econ_platform_core - Glue code for a unified work environment.
The center-piece of the package is *fetch()* a function which dynamically loads a pandas time series (Series object)
from any supported provider or database interface. The fetch command will determine whether the series exists in the
local database, or... | pandas.DataFrame({'series_dates': ser.index, 'series_values': ser.values}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 23:51:08 2020
@author: Pavan
"""
import pandas as pd
pd.set_option('mode.chained_assignment', None)
import numpy as np
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
import ... | pd.DataFrame(long_strangle) | pandas.DataFrame |
__author__ = 'saeedamen' # <NAME>
#
# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENS... | pd.Series(index=key_strikes_names, data=key_strikes) | pandas.Series |
import warnings
warnings.simplefilter(action = 'ignore', category = UserWarning)
# Front matter
import os
import glob
import re
import pandas as pd
import numpy as np
import scipy.constants as constants
import sympy as sp
from sympy import Matrix, Symbol
from sympy.utilities.lambdify import lambdify
import matplotlib
... | pd.DataFrame() | pandas.DataFrame |
from collections import deque
from datetime import datetime
import operator
import numpy as np
import pytest
import pytz
import pandas as pd
import pandas._testing as tm
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# ... | pd.Series(tdi) | pandas.Series |
#/*##########################################################################
# Copyright (C) 2020-2021 The University of Lorraine - France
#
# This file is part of the LIBS-ANN toolkit developed at the GeoRessources
# Laboratory of the University of Lorraine, France.
#
# Permission is hereby granted, free of charge, t... | pd.DataFrame(data=Minls) | pandas.DataFrame |
#!/usr/bin/python3
"""
/**
******************************************************************************
* @file dominant_attribute.py
* @author <NAME>
* $Rev: 1 $
* $Date: Sat Nov 17 15:12:04 CST 2018 $
* @brief Functions related to Dominant Attribute Algorithm
*************************************************... | pd.DataFrame() | pandas.DataFrame |
import json
import datetime
import numpy as np
import pandas as pd
from pandas import json_normalize
import sqlalchemy as sq
import requests
from oanda.oanda import Account # oanda_v20_platform.
import os.path
import logging
from utils.fileops import get_abs_path
# TODO add updated to the database and have a chec... | pd.to_numeric(df.high) | pandas.to_numeric |
import logging
import math
import warnings
import numpy as np
import pandas as pd
import pytest
import scipy.stats
from dask import array as da, dataframe as dd
from distributed.utils_test import ( # noqa: F401
captured_logger,
cluster,
gen_cluster,
loop,
)
from sklearn.linear_model import SGDClassifi... | pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) | pandas.DataFrame |
import pandas as pd
from bokeh.embed import components
from bokeh.models import HoverTool
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.palettes import Plasma256
from bokeh.plotting import figure, ColumnDataSource
from app import db
from app.decorators import data_quality
from app.main.data_qua... | pd.merge(df, file_df, left_on='Barcode_lower', right_on="Barcode_low", how='left') | pandas.merge |
import os
import pkg_resources
import rasterio
import simplejson
import numpy as np
import pandas as pd
def get_population_from_raster(raster_file, indices_list) -> float:
"""Get the population sum of all valid grid cells within a state.
:param raster_file: Full path with file name and extension... | pd.DataFrame({'FID': indices_list, 'n': arr}) | pandas.DataFrame |
import numpy as np
import pandas as pd
import geomath as gm
import kalman
import math
def load_day(day):
header = ['timestamp', 'line_id', 'direction', 'jrny_patt_id', 'time_frame', 'journey_id', 'operator',
'congestion', 'lon', 'lat', 'delay', 'block_id', 'vehicle_id', 'stop_id', 'at_stop']
typ... | pd.DataFrame(data=result, columns=out_columns, index=df.index.values) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
This module contains all the remote tests. The data for these
tests is requested to ESA NEOCC portal.
* Project: NEOCC portal Python interface
* Property: European Space Agency (ESA)
* Developed by: Elecnor Deimos
* Author: <NAME>
* Date: 02-11-2021
© Copyright [European Space Agency][2021... | ptypes.is_int64_dtype(new_list['non-grav param.']) | pandas.api.types.is_int64_dtype |
import numpy as np
import scipy.io as sio
import datetime as dt
import io
import requests
import pandas as pd
def _get_timestamp(line):
""" Get a datetime and epoch value from a standard DAT file line """
timestamp = dt.datetime.strptime(' '.join(line.strip().split(' ')[1:3]), '%Y/%m/%d %H:%M:%S.%f')
epoch... | pd.DataFrame(sbe3_list, columns=['timestamp','epoch','dive_number','counts_0','counts_1']) | pandas.DataFrame |
""" Implementation of ``SDAFile`` for working with SDA files.
The SDA format was designed to be universal to facilitate data sharing across
multiple languages. It supports reading and updating all record types, except
*function* records. It support writing *numeric*, *logical*, *cell*, and
*structure* records.
"""
f... | DataFrame(summary, columns=cols) | pandas.DataFrame |
# Cluster EB Project
# Marin-French: GCs
# Salaris, Piskunov: OCs
"""Parameters we want:
name, RA, Dec, distance from galactic center, distance from Earth, size (ideally a half-mass radius),
number of stars, age, reddening (Av or E(B-V)), metallicity, central density, central velocity dispersion,
concentration p... | pd.read_fwf(path + "GC_data/mwgc2.txt", widths = [12,7,4,5,6,6,7,7,7,6,6,6,5,6], header = 0, \
names = ['ID','[Fe/H]', 'wt','E(B-V)', 'V_HB','(m-M)V', 'V_t', 'M_V,t','U-B','B-V','V-R', 'V-I', 'spt','ellip']) | pandas.read_fwf |
import tempfile
import unittest
import numpy as np
import pandas as pd
from airflow import DAG
from datetime import datetime
from mock import MagicMock, patch
import dd.api.workflow.dataset
from dd import DB
from dd.api.workflow.actions import Action
from dd.api.workflow.sql import SQLOperator
dd.api.workflow.datase... | pd.testing.assert_frame_equal(odf2, expected_result2) | pandas.testing.assert_frame_equal |
import os
import pandas as pd
from torch.nn import MSELoss
from easydict import EasyDict as edict
from readability_transformers import ReadabilityTransformer
from readability_transformers.readers import PairwiseDataReader, PredictionDataReader
from readability_transformers.dataset import CommonLitDataset
from readabili... | pd.DataFrame(submission) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LIC... | pd.Series([0.5, 0.5, 0.5]) | pandas.Series |
import multiprocess as mp
import pandas as pd
from itertools import combinations
import cooler
from bioframe import parse_regions
from .. import expected
import click
from . import cli
from . import util
# might be relevant to us ...
# https://stackoverflow.com/questions/46577535/how-can-i-run-a-dask-distributed-loca... | pd.DataFrame(regions2) | pandas.DataFrame |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.nonparametric.smoothers_lowess import lowess as smlowess
from statsmodels.sandbox.regression.predstd import wls_prediction_std... | pd.rolling_std(y, window) | pandas.rolling_std |
# -*- coding: utf-8 -*-
"""
2021
@author: <NAME>
"""
## Import libraries
import json
import os
import pandas as pd
import requests
from bs4 import BeautifulSoup
import sys
## Define where output files have to be saved
working_dir_input = input("\n\nInput here the path to the directory where the output files have t... | pd.DataFrame(data_tuples, columns=['DataCite_clients_final', ('total_depo_' + PublicationYear), 'count_datasets', 'count_software'], dtype = float) | pandas.DataFrame |
import numpy as np, os, itertools
import pandas as pd
from rpy2 import robjects
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
import rpy2.robjects.pandas2ri
from rpy2.robjects.packages import importr
from selection.adjusted_MLE.cv_MLE import (sim_xy,
selInf... | pd.DataFrame(data=relative_risk, columns=['sel-MLE', 'ind-est', 'rand-LASSO','rel-rand-LASSO', 'rel-LASSO', 'LASSO']) | pandas.DataFrame |
from lib.timecards import Timecards
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
import re
def remove_gsa_gov(email):
return re.sub('@gsa.gov', '', email).lower()
def remove_non_billable_projects(project_name):
if ("TTS Acq" in project_name) or ("PIF" in project_name) or ("Federalist"... | pd.Series({"user":user.user.values[0], "category": "Partial", "unit":unit[0], "hours": billed_hours, "delta": 12 - billed_hours, "ooo": ooo, "understaffed": ((ooo+billed_hours)/40 < .2)}) | pandas.Series |
import sys
from pathlib import Path
from collections import namedtuple
import datetime as dt
import numpy as np
import re
import os
import multiprocessing
from functools import partial
from itertools import repeat
import pandas as pd
from .EC_DataLoader.CreateCV import create_CVs
from .HER.HER_analyze_scans import HE... | pd.concat([gr for n, gr in ovv_exp_grp if "HER" in n]) | pandas.concat |
import os
import io
import sys
import gzip
import re
import sqlite3
import dbm
from glob import glob
from datetime import datetime, date, timedelta
from pprint import pprint
from math import nan, inf, pi as π, e
import math
from random import seed, choice, randint, sample
from contextlib import contextmanager
from coll... | pd.DataFrame(rows) | pandas.DataFrame |
# This script is part of pyroglancer (https://github.com/SridharJagannathan/pyroglancer).
# Copyright (C) 2020 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either vers... | pd.DataFrame(synapsepointscollec_df, columns=['id', 'pre_syn_df', 'post_syn_df']) | pandas.DataFrame |
"""
Download COVID-19 data from NY Times
"""
import numpy as np
import pandas as pd
from database import DataBase
from wrangler import (
US_MAP_TABLE,
STATE_MAP_TABLE
)
from sql import (
COUNTIES_VIEW,
DROP_COUNTIES_VIEW,
STATES_VIEW,
DROP_STATES_VIEW,
US_MAP_PIVOT_VIEW,
DROP_US_MA... | pd.read_csv(URL_STATES, dtype={'fips': 'str'}) | pandas.read_csv |
__author__ = "<NAME>"
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import lightgbm as lgb
df = | pd.read_csv("../data/localData/newCasesWithClass_Interpolated.csv") | pandas.read_csv |
from collections import defaultdict
from datetime import datetime
import os
import pandas as pd
from tqdm import tqdm
import time
import pickle
import argparse
import csv
import utils
import numpy as np
def read_demo(DATA_DIR, patient_list=None):
"""
df_demo['SEX'].value_counts():
F 33498
... | pd.notna(pdates['1st_ADRD_date']) | pandas.notna |
from datetime import datetime
import numpy as np
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index
from pandas.tseries.tools import parse_time_string
import pandas.tseries.frequencies as _freq_mod
import pandas.core.common as com
import pandas.core... | _gfc(self.freq) | pandas.tseries.frequencies.get_freq_code |
import pandas as pd
import numpy as np
# load dataset
def wrangle_data_fun():
path_to_data = "model/airline-safety.csv"
data = | pd.read_csv(path_to_data) | pandas.read_csv |
# fbs_allocation.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
Functions to allocate data using additional data sources
"""
import numpy as np
import pandas as pd
import flowsa
from flowsa.common import fba_activity_fields, fbs_activity_fields, \
fba_mapped_wsec_default_grouping_fields, fba_wsec_default_g... | pd.concat(activity_list, ignore_index=True) | pandas.concat |
import ast
import os
from datetime import timedelta
import pandas as pd
import requests
from dateutil.parser import parse
from tqdm import tqdm
from src import BASEDATE, DATADIR
def load_stations_metadata() -> pd.DataFrame:
"""Load the stations metadata."""
stations_api = "https://rata.digitraffic.fi/api/v1... | pd.to_timedelta(df["trainDuration"]) | pandas.to_timedelta |
"""
SparseArray data structure
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231
import numpy as np
import warnings
import pandas as pd
from pandas.core.base import PandasObject
from pandas import compat
from pandas.compat import range
from pandas.compat.numpy import function as nv
from pandas... | na_value_for_dtype(data.dtype) | pandas.core.dtypes.missing.na_value_for_dtype |
import sys
import argparse
import torch
import csv
import pandas as pd
from torchtext.data.functional import generate_sp_model
import params
from rcnn import RCNN
from train import *
from dataset import *
##data path
train_df_path = params.train_df
test_df_path = params.test_df
val_df_path = params.val_df
def train_... | pd.read_csv(df_path) | pandas.read_csv |
import argparse
import pandas as pd
from collections import Counter
import numpy as np
import datetime
def run():
parser = argparse.ArgumentParser()
# input files
parser.add_argument('--metadata')
parser.add_argument('--delim', default='\t')
parser.add_argument('--dateCol', default=1, type=int)
parser.add_argu... | pd.to_datetime(dat[args.dateCol]) | pandas.to_datetime |
import numpy as np
import pandas as pd
from glob import glob
from scipy import stats
from scipy.stats import pearsonr
from sklearn.preprocessing import OneHotEncoder
def compute_corrs(X, Y):
""" Compute Pearson correlation between each column in X
and each column in Y. """
corrs = np.zeros((X.shape[1], Y.... | pd.read_csv(f, sep='\t', index_col=0) | pandas.read_csv |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
import argparse
def save(d1, fname):
d1.to_csv(fname, index=False)
def colC(d1):
if "C" in d1.columns : return
names = d1["Model Name"].values
nv = []
for s in names:
if "Lasso" in s:
v = float(s... | pd.read_csv(f1) | pandas.read_csv |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use ... | pd.isnull(calculated_values["timestamp"]) | pandas.isnull |
import pandas as pd
import os.path as osp
import inspect
from torch_geometric.data import Data
from sklearn import preprocessing
import torch
import random
import numpy as np
import pdb
from utils.utils import get_known_mask, mask_edge
def create_node(df, mode):
if mode == 0: # onehot feature node, all 1 sample ... | pd.DataFrame(df_np[:, :-1]) | pandas.DataFrame |
"""
Some utility functions the different instrumentation backends
"""
import itertools
from functools import partial
import numpy
from pandas import DataFrame
from ..inspections.inspection_input import InspectionInputRow
def build_annotation_df_from_iters(inspections, annotation_iterators):
"""
Build the an... | DataFrame(annotation_iterators, columns=inspection_names) | pandas.DataFrame |
import numpy as np
import pandas as pd
import scanpy as sc
from termcolor import colored
import time
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import euclidean_distances
import umap
import phate
import seaborn as sns
from pyVIA.core import *
def cellrank_Human(ncomps=80, knn=30, v... | pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:5], columns=['Gene0', 'Gene1', 'Gene2', 'Gene3', 'Gene4']) | pandas.DataFrame |
import pandas as pd
import numpy as np
from textblob import TextBlob
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
# File Names
TRAIN_TIMESERIES = 'input_csv... | pd.DataFrame(y_sub, columns=['close'], index=data_sub.index) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 15 17:14:55 2021
@author: sergiomarconi
"""
import numpy as np
import pandas as pd
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.preproce... | pd.read_csv("/blue/ewhite/s.marconi/Chapter3/neonVegWrangleR/elevation_sp_2.csv") | pandas.read_csv |
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
fro... | pd.Timestamp('2015-01-09') | pandas.Timestamp |
"""
Tools for hydrological regionalization.
"""
import logging
from pathlib import Path
import numpy as np
import pandas as pd
import statsmodels.api as sm
import xarray as xr
import ravenpy.models as models
from . import coords
LOGGER = logging.getLogger("PYWPS")
regionalisation_data_dir = Path(__file__).parent.... | pd.read_csv(f, index_col="ID") | pandas.read_csv |
#!/usr/bin/env python
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import precision_recall_fscore_support, mean_squared_error
from collections import Counter
import math
import xgboost as xgb... | pd.DataFrame(columns=summaries[0].columns, index=[indx]) | pandas.DataFrame |
from flask import Flask, render_template, request, jsonify, url_for
import atexit
import os
import json
import folium
from botocore.client import Config
import ibm_boto3
import pandas as pd
import ast
from collections import namedtuple
import numpy as np
class ProvinceMap:
def __init__(self, province_mapping):
... | pd.read_csv('province_coordinates.csv') | pandas.read_csv |
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search, Q
from elasticsearch.helpers import scan
from itertools import islice
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import pandas as pd
import plotly.express as px
from styles import style_sdl, style... | pd.DataFrame(var['freqs']) | pandas.DataFrame |
#!/usr/bin/env python
import datetime as dt
import glob
import logging
import matplotlib.pyplot as plt
import pandas as pd
import pdb
import numpy as np
import hwtmode.statisticplot
import scipy.ndimage.filters
from sklearn.calibration import calibration_curve
from sklearn import metrics
from tensorflow import is_tens... | pd.read_parquet(f) | pandas.read_parquet |
#!/usr/bin/env python
# coding=utf-8
# vim: set filetype=python:
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import posixpath
import sys
import math
import datetime
import string
from functools import wraps
import traceback
import xlrd3 as xl... | pd.concat(matches) | pandas.concat |