| prompt | completion | api |
|---|---|---|
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.utils import check_random_state
from scipy.linalg import block_diag
import matplotlib.pylab as plt
import matplotlib
from Machine_Learning_for_Asset_Managers import ch... | pd.Series(silh_coef_optimal, index=dist_matrix.index) | pandas.Series |
import pytest
import numpy as np
import pandas as pd
from datetime import datetime
from pandas.util import testing as tm
from pandas import DataFrame, MultiIndex, compat, Series, bdate_range, Index
def test_apply_issues():
# GH 5788
s = """2011.05.16,00:00,1.40893
2011.05.16,01:00,1.40760
2011.05.16,02:0... | tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
'''
Scripts for loading various experimental datasets.
Created on Jul 6, 2017
@author: <NAME>
'''
import os
import pandas as pd
import numpy as np
from evaluation.experiment import data_root_dir
all_root_dir = data_root_dir#os.path.expanduser('~/data/bayesian_sequence_combination')
data_root_dir = os.path.join(all... | pd.read_csv(savepath + '/task1_val_doc_start.csv', skip_blank_lines=False, header=None) | pandas.read_csv |
import sys
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from Bio import SeqIO
from six import StringIO
from Bio.SeqUtils.ProtParam import ProteinAnalysis
from Bio.SeqUtils.ProtParam import ProtParamData
from modlamp.plot import helical_wheel
# Protparam scales:
# kd → Kyte & Dool... | pd.DataFrame() | pandas.DataFrame |
from __future__ import print_function
import base64
import csv
import json
import sys
from collections import Counter
from functools import wraps
import numpy as np
import pandas as pd
import zerorpc
from nestor import keyword as kex
def exception_handler(func):
@wraps(func)
def func_or_exception(*args, **k... | pd.DataFrame([]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
import random
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.filters.bk_filter import bkfilter
from statsmodels.tsa.filters import *
from statsmodels.tsa.filters.hp_filter import hpfilter
from statsmodels.tsa.statto... | pd.DataFrame(result) | pandas.DataFrame |
"""
Unit test of Inverse Transform
"""
import unittest
import pandas as pd
import numpy as np
import category_encoders as ce
import catboost as cb
import sklearn
import lightgbm
import xgboost
from shapash.utils.transform import inverse_transform, apply_preprocessing, get_col_mapping_ce
class TestInverseTransformCate... | pd.DataFrame(data=[0, 1, 1], columns=['y']) | pandas.DataFrame |
import numpy as np
import rasterio as rio
import geopandas as gpd
import pandas as pd
import random
#from osgeo import gdal, ogr, osr
from rasterio.mask import mask
from shapely.geometry import mapping, Polygon
from skimage.util import img_as_float
import os as os
os.chdir('E:/SLICUAV_manuscript_code/3_Landscape_mapp... | pd.DataFrame(feat_struct.featHeightInvar) | pandas.DataFrame |
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.c... | tm.ensure_clean() | pandas.util.testing.ensure_clean |
import os
from glob import glob
from pprint import pprint
import json
import numpy as np
import pandas as pd
import h5py
from scipy.optimize import fsolve
def generate_file_dict(sub_simu_path):
simu_path, sub_simu_name = os.path.split(sub_simu_path)
simu_name = os.path.split(simu_path)[1]
if os.path.isfile... | pd.read_hdf(filepath_h5, key='dataset_time_traces') | pandas.read_hdf |
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets imp... | tm.assert_numpy_array_equal(result.values, expected_values) | pandas.util.testing.assert_numpy_array_equal |
#!/usr/bin/env python
# coding: utf-8
# data analysis and wrangling
import pandas as pd
from scipy.stats import linregress
#declare variables
s = ("01", "02", "03", "04", "05", "06", "07", "09", "10", "11", "12", "13","14", "15", "16", "17","18", "20", "21", "22","23", "24","25", "26")
df = pd.DataFrame()
for ... | pd.read_table(corrpath + 'corr_task-hedonic.txt',sep='\t', header=None) | pandas.read_table |
import pandas as pd
import pytest
from pandas import Timestamp
from pandas_historical import (
make_value_change_events_df,
update_value_change_events_df,
get_historical_state,
)
def test_parameterized():
currencies_scraping = pd.DataFrame(
[
{
"date": "2022-02-21"... | Timestamp("2022-03-11 00:00:00") | pandas.Timestamp |
import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
_testing as tm,
)
from pandas.tests.io.pytables.common import ensure_clean_store
pytestmark = [pytest.mark.single, td.skip_array_manager_not_yet_implemented]
def test_stor... | tm.assert_series_equal(result, ser) | pandas._testing.assert_series_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Import standard library
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from pkg_resources import resource_filename
import datetime
import sys
# Import modules
import backtrader as bt
import backtrader.feeds as btf... | pd.DataFrame(self.order_history) | pandas.DataFrame |
# This script runs the RDD models for a paper on the impact of COVID-19 on academic publishing
# Importing required modules
import pandas as pd
import datetime
import numpy as np
import statsmodels.api as stats
from matplotlib import pyplot as plt
import gender_guesser.detector as gender
from ToTeX import r... | pd.get_dummies(df['Journal']) | pandas.get_dummies |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 24 16:43:33 2019
@author: jeremy_lehner
"""
import pandas as pd
import datetime
from selenium import webdriver
import time
from bs4 import BeautifulSoup
from os import path
def get_scrape_date():
"""
Gets the date on which data was scrape... | pd.Series(last_patch) | pandas.Series |
import pandas as pd
import argparse
import matplotlib.pyplot as plt
import os
from collections import Counter
from yellowbrick.text import FreqDistVisualizer
# import rake
import numpy as np
from ast import literal_eval
from nltk import ngrams
from sklearn.feature_extraction.text import CountVectorizer
def read_fil... | pd.read_csv(preprocessed_file, delimiter='\t') | pandas.read_csv |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_ran... | assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import numpy as np
import pandas as pd
import pickle
import mysql.connector
import configparser
config = configparser.ConfigParser()
config.read("configm.ini")
with open('model_match', 'rb') as f:
mp = pickle.load(f)
mydb = mysql.connector.connect(
host=config.get('db-connection','host'),
user=config.get('db-co... | pd.DataFrame(Pick,columns=['Pickup_location']) | pandas.DataFrame |
import unittest
import pandas as pd
from analysis.data import GeographicArea, features
from analysis.scaler import SpatialWaterVapourScaler
from analysis.search import GridSearchHDBSCAN, GridSearchDBSCAN
from analysis.aggregation import AggregateClusterStatistics
from sklearn.model_selection import ParameterGrid
impo... | pd.read_csv(out) | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isn... | Series(["foo", "foo", "foo"], dtype=np.object_) | pandas.Series |
# Copyright 2021-present Kensho Technologies, LLC.
from collections import Counter
import logging
from multiprocessing import Pool
import os
import pandas as pd
import re
import typing
from kwnlp_preprocessor import argconfig
from kwnlp_preprocessor import utils
logger = logging.getLogger(__name__)
def parse_file(... | pd.merge(df_in, df_out, on="page_id", how="outer") | pandas.merge |
#!/usr/bin/env python
import requests
import os
import string
import random
import json
import datetime
import pandas as pd
import numpy as np
import moment
from operator import itemgetter
class IdsrAppServer:
def __init__(self):
self.dataStore = "ugxzr_idsr_app"
self.period = "LAST_7_DAYS"
self.ALPHABET = '0... | pd.np.ceil(2*df['incubationDays']) | pandas.np.ceil |
# functions to analyze the results in python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from concise.utils.helper import merge_dicts
# make a report
def get_cv_accuracy(res):
"""
Extract the cv accuracy from the model
"""
ac_list = [(accuracy["train_acc_final"],
... | pd.DataFrame(perf_list) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# ## Observations and Insights
#
# In[4]:
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse... | pd.read_csv(study_results_path) | pandas.read_csv |
from .genometric_space import GenometricSpace
from .dataset.parser.parser import Parser
import pandas as pd
import warnings
import numpy as np
class MultiRefModel:
"""
GenometricSpace class to represent data that are mapped with multiple references
"""
def __init__(self):
"""
Con... | pd.MultiIndex.from_arrays(meta_index, names=meta_names) | pandas.MultiIndex.from_arrays |
# -*- coding: utf-8 -*-
# Run this app with `python app.py` and
# visit http://127.0.0.1:8050/ in your web browser.
import boto3
from dash.dependencies import Input, Output
from datetime import datetime
from glob import glob
from urllib.request import urlopen
import dash
import dash_core_components as dcc
import dash... | pd.DataFrame([['2016 Turnout',FL_turnout,PA_turnout,MI_turnout,NC_turnout]], columns = ['Category','Florida','Pennsylvania','Michigan','NCarolina']) | pandas.DataFrame |
import ast
import json
import os
import sys
import uuid
import lxml
import networkx as nx
import pandas as pd
import geopandas as gpd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from shapely.geometry import LineString, Polygon, Point
from genet.core import Network
from genet.input... | assert_frame_equal(n.change_log[cols_to_compare], correct_change_log_df[cols_to_compare], check_dtype=False) | pandas.testing.assert_frame_equal |
from argparse import ArgumentParser
import json
import scipy.io as sio
import sys
import os
import pandas as pd
import numpy as np
def parse_options():
parser = ArgumentParser()
#parser.add_argument("-a", "--all", required=False, default=False,
# action="store_true",
# ... | pd.DataFrame(data_list, columns=colRoi, dtype=np.float64) | pandas.DataFrame |
'''
Project: WGU Data Management/Analytics Undergraduate Capstone
<NAME>
August 2021
GDELTbase.py
Class for creating/maintaining data directory structure, bulk downloading of
GDELT files with column reduction, parsing/cleaning to JSON format, and export
of cleaned records to MongoDB.
Basic use should ... | pd.StringDtype() | pandas.StringDtype |
#!/usr/bin/env python
# coding: utf-8
from bs4 import BeautifulSoup
from tqdm import tqdm
import numpy as np
import yfinance as yf
import random
import json
import requests
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay
import time
i... | USFederalHolidayCalendar() | pandas.tseries.holiday.USFederalHolidayCalendar |
#!/usr/bin/python
import os
import sys
import json
import itertools
import datetime
import numpy as np
import pandas as pd
from windpowerlib.wind_turbine import WindTurbine
from windpowerlib.wind_farm import WindFarm
from windpowerlib.turbine_cluster_modelchain import TurbineClusterModelChain
def load_data(path, fil... | pd.MultiIndex.from_product(columns) | pandas.MultiIndex.from_product |
# -*- coding: utf-8 -*-
import requests as req
from bs4 import BeautifulSoup as bs
import lxml
from pathlib import Path
# csv writer
import pandas as pd
import time
from .config import BASE_DIR, BASE_URL
# import functions from common.py
from .common import (initialize, get_chrome_driver, makedirs,
... | pd.DataFrame(infod, columns=column_names) | pandas.DataFrame |
import pandas as pd
import numpy as np
from .content import test_questions_analisys as qa
from .content import tests_analisys as ta
from .output import output as out
def Execute(cursor, courseName):
#and questions NOT LIKE '__' means that we needn't questions like {}
#and attempts < 4 means that we use only t... | pd.DataFrame(data=data, columns=columns_names) | pandas.DataFrame |
import os
import json
import sys
import argparse
from pathlib import Path
import pandas as pd
from tqdm import tqdm
DESCRIPTION = """
Build a csv file containing necessary information of a COCO dataset that is
compatible with this package.
"""
def get_bbox(bbox):
"""Get bbox of type (xmin, ymin, xmax, ymax) fr... | pd.DataFrame.from_records(ann["annotations"]) | pandas.DataFrame.from_records |
#Analyze statistics
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import alphapept.io
import os
import alphapept.io
import seaborn as sns
from tqdm.notebook import tqdm as tqdm
import warnings
def prepare_files(path1, path2):
df1 = | pd.read_hdf(path1, 'protein_fdr') | pandas.read_hdf |
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix, classification_report
# Wrapping sklearn's confusion matrix
def confusion_error_matrix(y_row, y_col, target_names=None, normalize=False):
"""
Wrapper confusion_matrix of sklearn
Parameters
y_row & y_col: if y_row... | pd.DataFrame(conf_mat, columns=target_names, index=target_names) | pandas.DataFrame |
from datetime import date
import unittest
import dolphindb as ddb
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from setup import HOST, PORT, WORK_DIR, DATA_DIR
from numpy.testing import assert_array_equal, assert_array_almost_equal
import dolphindb.settings as keys
impor... | pd.merge(pd_left, pd_right, on=['symbol', 'time']) | pandas.merge |
import os
import requests
from typing import List
import pandas as pd
URL = 'http://64.111.127.166/origin-destination/'
FILENAME = 'date-hour-soo-dest-{}.csv.gz'
ALL_FILE = 'od_count_all_time.feather'
DATA_DIR = './data/'
ALL_FILE_PATH = os.path.join(DATA_DIR, ALL_FILE)
def download_files():
dataframes = []
... | pd.DatetimeIndex(df['Date']) | pandas.DatetimeIndex |
from __future__ import annotations
import numpy as np
from typing import List, Union, Tuple, Optional, Callable, Any, TYPE_CHECKING
import lmfit as lm
import pandas as pd
from dataclasses import dataclass
import logging
from ...hdf_util import NotFoundInHdfError, with_hdf_read, with_hdf_write, DatDataclassTemplate
fro... | pd.Series(z, dtype=np.float32) | pandas.Series |
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from io import StringIO
import os
from pathlib import Path
import warnings
import numpy as np
import pytest
from pandas.errors import (
EmptyDataError,
Parse... | DataFrame({"a": [1, 4]}) | pandas.DataFrame |
import scipy.interpolate as sci
import geopandas as gpd
import shapely as shp
import random as random
import math
import arrow
import pandas as pd
import functools
import emeval.metrics.dist_calculations as emd
import emeval.input.spec_details as eisd
random.seed(1)
####
# BEGIN: Building blocks of the final impleme... | pd.isnull(loc_row.geometry_a) | pandas.isnull |
# FIT DATA TO A CURVE
# <NAME> - MIT Licence
# inspired by @dimgrr. Based on
# https://towardsdatascience.com/basic-curve-fitting-of-scientific-data-with-python-9592244a2509?gi=9c7c4ade0880
# https://github.com/venkatesannaveen/python-science-tutorial/blob/master/curve-fitting/curve-fitting-tutorial.ipynb
# https://... | Timestamp(firstday) | pandas.Timestamp |
import re
from datetime import datetime, timedelta
import numpy as np
import pandas.compat as compat
import pandas as pd
from pandas.compat import u, StringIO
from pandas.core.base import FrozenList, FrozenNDArray, DatetimeIndexOpsMixin
from pandas.util.testing import assertRaisesRegexp, assert_isinstance
from pandas i... | pd.Period('2011-01-01', freq='D') | pandas.Period |
#!/usr/bin/env python3
import sys
import numpy as np
import pandas as pd
import os, shutil, zipfile
from numpy import array
import csv
from pandas import DataFrame
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from scipy.stats import entropy
import scipy as sc
from zipfile import ZipFile
im... | pd.concat([Heart_rate_test, BP2_test, Temp2_test, RR2_test, DBP2_test, O2Sat2_test,HospAdmTime_test,EtCO22,BaseExcess2,Creatinine2,Platelets2,gender2,WBC2,HCO32,Glucose2,Fibrinogen2], axis=1) | pandas.concat |
from typing import Optional
import json
import sys
from pathlib import Path
import pandas as pd
import typer
from loguru import logger
from streamlit import cli as stcli
from litreading.config import DEFAULT_MODEL_SCALER
from litreading.grader import Grader
from litreading.trainer import ModelTrainer
from litreading... | pd.Series(grades) | pandas.Series |
import json
import pandas as pd
import time
from pycoingecko import CoinGeckoAPI
import requests
cg = CoinGeckoAPI()
class CoinPrice:
def __init__(self):
self.tsym = "cad"
self.IDList = cg.get_coins_list()
time.sleep(1.0)
self.priceList = {}
self.timeResolution = 'D' #... | pd.to_datetime(dateString) | pandas.to_datetime |
import pandas as pd
import csv
import json
import io
from rltk.io.reader import *
arr = [{'1': 'A', '2': 'B'}, {'1': 'a', '2': 'b'}]
def test_array_reader():
for idx, obj in enumerate(ArrayReader(arr)):
assert obj == arr[idx]
def test_dataframe_reader():
df = | pd.DataFrame(arr) | pandas.DataFrame |
import multiprocessing
import numpy as np
import pandas as pd
import re
from pathlib import Path
from os import cpu_count
from tables.exceptions import HDF5ExtError
from src.patches import PatchSchema
from src.preset2fxp import *
FXP_CHUNK = 'chunk'
FXP_PARAMS = 'params'
DB_KEY = 'patches'
TAGS_KEY = 'tags'
PATCH_FILE... | pd.Categorical(meta_df[col], categories=pos) | pandas.Categorical |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, Timedelt... | DatetimeIndex(['20130102', pd.NaT, '20130105']) | pandas.DatetimeIndex |
# -*- coding: utf-8 -*-
"""
Created by <NAME> July 2021
This script reads the csv annotations from NIPS4Bplus and the species list from
NIPS4B to generate list of train and test files and dictionary label files
The dictionary files list random train and test sets for three selections of
classes: "All Classes"... | pd.read_csv(j['csv'], header=None) | pandas.read_csv |
import sys
sys.path.insert(0, '/Users/david/galvanize/super_liga_xg')
from combined_player import player_minutes_value
from scraping_tools.html_scraper import db
# from html_scraper import db
from mongo_to_db import create_master_df
from model_prep import create_rf_prep, create_xG_df, create_summed_xG_df
import pickle
... | pd.merge(xgb_contributions, final_df, on=['player_id']) | pandas.merge |
import pandas as pd
import json
def get_se_as_df(filename):
with open(filename) as f:
data = json.loads(f.read())
for record in data:
for key, value in record.items():
if type(value)==dict:
# extract only kWh
kWh = value['energy_kWh']
... | pd.to_datetime(df['created_on']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Tests computational time of different null methods and plots outputs
"""
from dataclasses import asdict, make_dataclass
import time
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import threadpoolctl
from brainsmash im... | pd.read_csv(DATADIR / fn) | pandas.read_csv |
from typing import Union, Optional
import pytest
import scanpy as sc
import cellrank.external as cre
from anndata import AnnData
from cellrank.tl.kernels import ConnectivityKernel
from cellrank.external.kernels._utils import MarkerGenes
from cellrank.external.kernels._wot_kernel import LastTimePoint
import numpy as ... | is_categorical_dtype(adata_large.obs[key]) | pandas.core.dtypes.common.is_categorical_dtype |
import time
import pandas as pd
from nltk import collections
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import MultinomialNB
from ... | pd.read_csv('../dataset.csv', delimiter=';') | pandas.read_csv |
import numpy as np
import glob
import pandas as pd
from datetime import datetime
indir = '/glade/work/lgaudet/research/data/'
timeFormat = '%Y-%m-%d %H:%M:%S UTC'
parseTime = lambda x: datetime.strptime(x, timeFormat)
df = pd.concat([pd.read_csv(f,parse_dates=['time'],date_parser=parseTime) for f in sorted(glob.glob... | pd.to_datetime(df['date']) | pandas.to_datetime |
from os.path import abspath, dirname, join, isfile, normpath, relpath
from pandas.testing import assert_frame_equal
from numpy.testing import assert_allclose
from scipy.interpolate import interp1d
import matplotlib.pylab as plt
from datetime import datetime
import mhkit.wave as wave
from io import StringIO
import panda... | assert_frame_equal(eta0, eta1) | pandas.testing.assert_frame_equal |
from cmath import nan
from sqlite3 import DatabaseError
import pandas as pd
import numpy as np
import json
def load_from_csv(path):
dt = pd.read_csv(path, sep=';', dtype={'matricule': object})
return dt.set_index('matricule')
def fix_matricule(matricule):
if matricule.startswith('195'):
return '19... | pd.Series(names) | pandas.Series |
#!/usr/bin/env python
"""
Parsing GO Accession from a table file produced by InterProScan and mapping to GOSlim.
(c) <NAME> 2018 / MIT Licence
kinomoto[AT]sakura[DOT]idv[DOT]tw
"""
from __future__ import print_function
from os import path
import sys
import pandas as pd
from goatools.obo_parser import GODag
from goatoo... | pd.DataFrame(columns=output_hd) | pandas.DataFrame |
"""
Functions to clean up neighborhood data
and feed into interactive charts
"""
import numpy as np
import pandas as pd
from datetime import date, timedelta
S3_FILE_PATH = "s3://public-health-dashboard/jhu_covid19/"
NEIGHBORHOOD_URL = f"{S3_FILE_PATH}la-county-neighborhood-time-series.parquet"
CROSSWALK_URL = f"{S3... | pd.read_parquet(CROSSWALK_URL) | pandas.read_parquet |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
... | concat((df1, df2), ignore_index=True) | pandas.concat |
#!/usr/bin/env python3
import sys, os, time
sys.dont_write_bytecode = True
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from datetime import datetime, timedelta
import tensorflow as tf
import numpy as np
import pandas as pd
import gym
import retro
import retro_contest.local
from policy import Policy
from actor import Ac... | pd.concat(dfs, axis=1) | pandas.concat |
from hetdesrun.component.registration import register
from hetdesrun.datatypes import DataType
import pandas as pd
from scipy import integrate
# ***** DO NOT EDIT LINES BELOW *****
# These lines may be overwritten if input/output changes.
@register(
inputs={"data": DataType.Series, "speed": DataType.Any},
out... | pd.Series(speed * time_norm, index=data_sort.index) | pandas.Series |
"""Unit tests for the reading functionality in dframeio.parquet"""
# pylint: disable=redefined-outer-name
from pathlib import Path
import pandas as pd
import pandera as pa
import pandera.typing
import pytest
from pandas.testing import assert_frame_equal
import dframeio
class SampleDataSchema(pa.SchemaModel):
""... | pd.DataFrame() | pandas.DataFrame |
import pandas as pd # version 1.0.1
# in pandas 1.1.4 dates for INTESA and BMG doesn't work after merge in "final"
from datetime import datetime
# TODO find repetitions and replace them with functions
# for example Santander and CITI files import and adjustment
# or date and amount formatting
pd.options.display.float... | pd.to_datetime(citidep['data']) | pandas.to_datetime |
import pandas as pd
import os
import sys
import numpy as np
import argparse
import librosa
import soundfile as sf
def shift_pitch(data, sampling_rate, pitch_factor):
# negative pitch factor makes the voice sound lower
# positive pitch factor makes the voice sound higher
return librosa.effects.pitch_shift(... | pd.DataFrame(new_list) | pandas.DataFrame |
#!/usr/bin/env python3
import argparse
import os
import sys
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
import pandas as pd
from brainsmash.workbench.geo import volume
from brainsmash.mapgen.eval import sampled_fit
from brainsmash.mapgen.sampled impor... | pd.DataFrame(index=unique) | pandas.DataFrame |
import numpy as np
import pandas as pd
import os
# http://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29
# Load .csv file
path = 'german/german_final.csv'
data = pd.read_csv(path, header=None)
print(data)
# One-hot-encoding of categorical attributes
# https://stackoverflow.com/questions/37292872/h... | pd.concat([data_normalized, label], 1) | pandas.concat |
# IPython log file
get_ipython().run_line_magic('logstart', '')
get_ipython().run_line_magic('logstart', '')
get_ipython().run_line_magic('logstart', '')
import pandas as pd
import pandas as pd
from pandas import Series,DataFrame
obj = Series(['c','a','d','a','a','b','b','c','c'])
obj
uniques = obj.unique()
uniques = ... | pd.value_counts(obj.values,sort=False) | pandas.value_counts |
# authors_name = '<NAME>'
# project_title = 'Multi Sensor-based Human Activity Recognition using OpenCV and Sensor Fusion'
# email = '<EMAIL>'
import numpy as np
import os
import pandas as pd
import itertools
import logging
import sklearn.pipeline
from sklearn.metrics import accuracy_score
from sklearn.metrics import... | pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features) | pandas.DataFrame |
import traceback
import numpy as np
from skimage import exposure
import cv2
import tifffile
import os
from glob2 import glob
import pandas as pd
import mat4py
import datetime
import json
import matplotlib.pyplot as plt
import hashlib
# from napari_akseg._utils_imagej import read_imagej_file
from skimage import data
fr... | pd.DataFrame(files) | pandas.DataFrame |
import glob
import os
import pandas as pd
from sklearn.preprocessing import StandardScaler
from main.src.python.config import data_path
from main.src.python.config import config_path
from main.src.python.download.index_file import IndexFile
class ParallelReader:
def __init__(self, start, end, read=True, reduce=F... | pd.concat(frames) | pandas.concat |
from imblearn import under_sampling
from qiime2.plugin import (Str, Int)
import biom
from q2_feature_engineering._tada.logger import LOG
from qiime2 import NumericMetadataColumn
import numpy as np
import pandas as pd
import qiime2
import tempfile
import shutil
dispatcher = {'RandomUnderSampler': under_sampling.RandomU... | pd.DataFrame(index=dummy_samples, data=y_resampled) | pandas.DataFrame |
#! /usr/bin/env python
# coding=utf-8
import os
import pandas as pd
import urllib
import xml.etree.ElementTree as ET
import io
import itertools as IT
# Copyright © 2016 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
class Scraper:
"""
Scraper for parlament.ch
scraper.get(table_name):... | pd.concat(data_frames, ignore_index=True) | pandas.concat |
import pandas as pd
import sparse
import numpy as np
class AnnotationData:
"""
Contains all the segmentation and assignment data
WARNING: self.assignments['Clusternames'] will contain neurite ids (as strings) rather than names
"""
# Todo: if we can preserve segments instead of merging them when two... | pd.DataFrame({"Time": [], "Segment": [], "x": [], "y": [], "z": []}, dtype=int) | pandas.DataFrame |
import csv
import os
import sys
import re
import shutil
from urllib.request import urlopen
os.system(f"{sys.executable} -m pip install -U pytd==1.0.0")
def convert_directory_to_csv(directory, polarity, out_file_path):
with open(out_file_path, "a") as csvfile:
writer = csv.writer(csvfile)
for fil... | pd.concat([pos_df, neg_df]) | pandas.concat |
# -*- coding: utf-8 -*-
import argparse
import pandas as pd
from zvt import init_log, zvt_env
from zvt.api.quote import get_stock_factor_schema
from zvt.contract import IntervalLevel
from zvt.contract.api import df_to_db
from zvt.contract.recorder import FixedCycleDataRecorder
from zvt.recorders.joinquant.common import... | pd.bdate_range(end=end_timestamp, periods=size) | pandas.bdate_range |
import operator
import os
from collections import defaultdict
from typing import Any, Dict, List
import pandas as pd
from tqdm import tqdm
class Dataset(object):
"""
Object for a data source that exists in the form of a list:
[
(source, target, timestamp),
(source, target, tim... | pd.to_datetime(i[2]) | pandas.to_datetime |
import pandas as pd
import yfinance as yf
import config
import os
from datetime import datetime
from dateutil.relativedelta import relativedelta
from get_data_11_26 import get_data
from trading import get_trading_records
from visualization import plot_monthly_heatmap, plot_yearly_diff_comparison, plot_trading_behavior... | pd.read_excel(config.saving_path_trading_records+file_name) | pandas.read_excel |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import baostock as bs
import pandas as pd
# 登陆系统
lg = bs.login()
# 显示登陆返回信息
print('login respond error_code:' + lg.error_code)
print('login respond error_msg:' + lg.error_msg)
# 获取指数(综合指数、规模指数、一级行业指数、二级行业指数、策略指数、成长指数、价值指数、主题指数)K线数据
# 综合指数,例如:sh.000001 上证指数... | pd.DataFrame(data_list, columns=rs.fields) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/24 15:02
Desc: 东方财富网-数据中心-新股数据-打新收益率
东方财富网-数据中心-新股数据-打新收益率
http://data.eastmoney.com/xg/xg/dxsyl.html
东方财富网-数据中心-新股数据-新股申购与中签查询
http://data.eastmoney.com/xg/xg/default_2.html
"""
import pandas as pd
import requests
from tqdm import tqdm
from akshare.utils i... | c(big_df['每中一签获利']) | pandas.to_numeric |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, ... | DataFrame(mat, index=[0, 1], columns=[0], dtype=float) | pandas.DataFrame |
import json
import copy
import click
import itertools
from collections import ChainMap
import logging
import pandas as pd
from twarc import ensure_flattened
log = logging.getLogger("twarc")
DEFAULT_TWEET_COLUMNS = """id
conversation_id
referenced_tweets.replied_to.id
referenced_tweets.retweeted.id
referenced_tweets.... | pd.DataFrame(columns=self.columns) | pandas.DataFrame |
# author: <NAME>, <NAME>, <NAME>, <NAME>
# date: 2020-06-02
"""
This script cleans the census dataset for a given year and saves them to
the file_path provided. This script takes the census year and the csv file
containing the census data as arguments.
Usage: src/02_clean_wrangle/05_clean_census.py --census_file=<cen... | pd.read_csv('data/processed/nhs/Citizenship.csv', index_col=0) | pandas.read_csv |
import re
import csv
from collections import Counter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
pattern = re.compile(r"(\d+) (.+), (.+), CA (\d+), USA")
class Employee(object):
def __init__(self,segments):
# address
matched = pattern.match(segme... | pd.Series(streets_counter) | pandas.Series |
import pandas as pd
import requests
from bs4 import BeautifulSoup, Comment
import json
import re
from datetime import datetime
import numpy as np
comm = re.compile("<!--|-->")
class Team: #change team player object
def __init__(self, team, year, player=None):
self.year = year
self.team = team
... | pd.DataFrame(data=table_starting) | pandas.DataFrame |
import logging
import shutil
import time
import pandas as pd
import requests
from requests_futures.sessions import FuturesSession
class Namara:
def __init__(self, api_key, debug=False, host='https://api.namara.io', api_version='v0'):
self.api_key = api_key
self.debug = debug
self.host = h... | pd.concat(list_of_chunks) | pandas.concat |
import logging
from os.path import splitext
import pandas as pd
import json
from six import string_types
from traits.api import Dict, Instance, Str
from .base_report_element import BaseReportElement
logger = logging.getLogger(__name__)
class PlotReportElement(BaseReportElement):
"""
"""
#: Type of elem... | pd.DataFrame(data_info["values"]) | pandas.DataFrame |
# Copyright (c) 2017, Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by app... | pandas.DataFrame(mem_util) | pandas.DataFrame |
# demographics_etl.py
#######
# This class provides capabilities to extract, transform,
# and load data from student, staff, and school geographic
# data files that it downloads from the web.
######
import pandas as pd
import numpy as np
import os
import datetime
import urllib
import shutil
import logging
... | pd.read_table(self.school_geography_file, sep=',', header=0, index_col=False) | pandas.read_table |
#!/usr/bin/python3
"""D-Cube Plotting."""
# Example:
#
# ./dcube.py --suite="rpludp" --x="SF" --y="reliability" --start=1 --end=5 --title="test" --out=home/mike/test
import pandas as pd
import baddplotter as bplot
import matplotlib.pyplot as plt # general plotting
import seaborn as sns
import numpy as np
import argp... | pd.set_option('display.width', 1000) | pandas.set_option |
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import pandas as pd
import string
from scipy.sparse import hstack
from scipy import sparse
def load_data(filepath):
df = pd.read_csv(filepath)
return(df)
def vectorize_data(train_df, test_df):
vectorizer = CountVectorizer(max_df=... | pd.concat([train_df[f], test_df[f]], axis=0) | pandas.concat |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_deco... | tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
import pandas as pd
import numpy as np
import pycountry_convert as pc
import pycountry
import os
from iso3166 import countries
PATH_AS_RELATIONSHIPS = '../Datasets/AS-relationships/20210701.as-rel2.txt'
NODE2VEC_EMBEDDINGS = '../Check_for_improvements/Embeddings/Node2Vec_embeddings.emb'
DEEPWALK_EMBEDDINGS_128 = '../... | pd.read_csv(BGP2VEC_64, sep=',') | pandas.read_csv |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import spotipy
import spotipy.util as util
import os
import sys
import requests
from dotenv import load_dotenv, find_dotenv
from spotipy.client import SpotifyException
from spotipy.oauth2 import SpotifyOAuth
from sklearn.prepro... | pd.DataFrame(standard_features, index=df.index, columns=df.columns[2:]) | pandas.DataFrame |
"""
Created on Mon Feb 22 15:52:51 2021
@author: <NAME>
"""
import pandas as pd
import numpy as np
import os
import pickle
import calendar
import time
import warnings
from pyproj import Transformer
import networkx as nx
import matplotlib as mpl
import matplotlib.pyplot as plt
from requests import get
import datafram... | pd.read_csv('./data/' + file) | pandas.read_csv |
#!/usr/bin/env python
"""
@author: cdeline
bifacial_radiance.py - module to develop radiance bifacial scenes, including gendaylit and gencumulativesky
7/5/2016 - test script based on G173_journal_height
5/1/2017 - standalone module
Pre-requisites:
This software is written for Python >3.6 leveraging many Anaconda... | pd.DatetimeIndex(self.datetime) | pandas.DatetimeIndex |
import gc
import numpy as np
from pandas import (
DatetimeIndex,
Float64Index,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
date_range,
)
from .pandas_vb_common import tm
class SetOperations:
params = (
["datetime", "date_string", "int", "strings"],
["i... | RangeIndex(0, 100) | pandas.RangeIndex |