| prompt | completion | api |
|---|---|---|
"""Functions for plotting sipper data."""
from collections import defaultdict
import datetime
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
from sipper import SipperError
#---dates and s... | pd.Timestamp(2200,1,1,0,0,0) | pandas.Timestamp |
#----------------- Libraries -------------------#
import os
import sys
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from Preprocessing import Preprocessing
def kfold_decompose(data, kfold_n):
"""
Thi... | pd.DataFrame(data_label_true[i]['test']) | pandas.DataFrame |
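The truncated `kfold_decompose` above presumably builds train/test splits with `KFold` before wrapping the test labels in `pd.DataFrame`. A minimal sketch of that split pattern, with made-up data:

```python
import numpy as np
from sklearn.model_selection import KFold

data = np.arange(10)  # stand-in for the real dataset
kf = KFold(n_splits=5, shuffle=True, random_state=0)
for train_idx, test_idx in kf.split(data):
    print(train_idx, test_idx)  # index arrays for each fold
```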
import pandas as pd
from fuzzywuzzy import fuzz
import csv
import argparse
from timeit import default_timer as timer
def get_arguments():
parser = argparse.ArgumentParser(description='csv file identifying duplicates between new and old comments')
parser.add_argument('--new_comments_csv', '-i1', type=st... | pd.read_csv(args.new_comments_csv) | pandas.read_csv |
import os
import glob
import pathlib
import re
import base64
import pandas as pd
from datetime import datetime, timedelta
# https://www.pythonanywhere.com/forums/topic/29390/ for measuring the RAM usage on pythonanywhere
class defichainAnalyticsModelClass:
def __init__(self):
workDir = os.path.ab... | pd.read_csv(filePath, index_col=0) | pandas.read_csv |
import pandas as pd
import os
import requests as req
import sys
import re
import dask.dataframe as dd
from lxml import etree
import io
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s|%(name)s|%(levelname)s|%(message)s',
datefmt='%m-%d %H:%M',
... | pd.to_datetime(srs_header[i]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 11:41:44 2018
@author: MichaelEK
"""
import types
import pandas as pd
import numpy as np
import json
from pdsf import sflake as sf
from utils import split_months
def process_allo(param, permit_use):
"""
Function to process the consented allocation from the in... | pd.merge(wa6, waps, on='Wap') | pandas.merge |
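The completion joins allocation rows to WAP records on a shared key. A minimal sketch of `pd.merge` on a common column; the frames and values here are hypothetical:

```python
import pandas as pd

wa6 = pd.DataFrame({"Wap": ["A1", "B2"], "allo_m3": [100.0, 250.0]})
waps = pd.DataFrame({"Wap": ["A1", "B2"], "lat": [-43.5, -43.6]})

merged = pd.merge(wa6, waps, on="Wap")  # inner join by default
print(merged)
```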
import random
import spotipy
import requests
import pandas as pd
from sklearn import metrics
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
class Recommend:
'''
Arguments -
client_id - unique client ID
client... | pd.DataFrame() | pandas.DataFrame |
from numpy import nan
from pandas import DataFrame, Timestamp
from pandas.testing import assert_frame_equal
from shapely.geometry import Point
from pymove import MoveDataFrame, conversions
from pymove.utils.constants import (
DATETIME,
DIST_TO_PREV,
GEOMETRY,
LATITUDE,
LONGITUDE,
SPEED_TO_PREV,... | assert_frame_equal(new_move_df, expected) | pandas.testing.assert_frame_equal |
""" Fred View """
__docformat__ = "numpy"
import argparse
from typing import List
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import matplotlib.pyplot as plt
from fredapi import Fred
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
valid_date,
plot... | pd.DataFrame(data, columns=[f"{ns_parser.series_id}"]) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module contains classes that model the behavior of equities (stocks)
and stock market indices. Many methods that give useful insights about the stocks'
and indices' behavior are implemented, ranging from fundamental and technical analysis
to time series ana... | pd.to_datetime(dates) | pandas.to_datetime |
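`pd.to_datetime` parses strings (or a Series of strings) into `datetime64[ns]` values, which is what time-series analysis code like the module above needs. A small self-contained sketch:

```python
import pandas as pd

dates = pd.Series(["2018-06-07", "2018-06-08"])
parsed = pd.to_datetime(dates)    # dtype becomes datetime64[ns]
print(parsed.dt.day_name())       # Thursday, Friday
```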
# coding: utf8
import torch
import pandas as pd
import numpy as np
from os import path
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import abc
from clinicadl.tools.inputs.filename_types import FILENAME_TYPE
import os
import nibabel as nib
import torch.nn.functional as F
from scipy i... | pd.read_csv(data_file, sep='\t') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 17:24:49 2020
@author: larabreitkreutz
"""
import pathlib
import os
import pandas as pd
import inspect
src_file_path = inspect.getfile(lambda: None)
#run add_datetime.py and load data here
# Creates an empty list
filelist = []
# Iterates over... | pd.read_csv(filename) | pandas.read_csv |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_stats_utils.ipynb (unless otherwise specified).
__all__ = ['cPreProcessing', 'cStationary', 'cErrorMetrics']
# Cell
import numpy as np
import pandas as pd
from scipy.stats import boxcox, pearsonr
from scipy.special import inv_boxcox
from pandas.tseries.frequencies im... | to_offset(timestep) | pandas.tseries.frequencies.to_offset |
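`to_offset` turns a frequency alias into a `DateOffset`, which the stats utilities above presumably use to step a series by `timestep`. A sketch; the `'15T'` alias is illustrative:

```python
import pandas as pd
from pandas.tseries.frequencies import to_offset

offset = to_offset("15T")                    # 15-minute offset
print(pd.Timestamp("2021-01-01") + offset)   # 2021-01-01 00:15:00
```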
import pandas as pd
import numpy as np
# Analytics
# import timeit
# Load data locally
df_orig = pd.read_excel(r'result_data_x.xlsx', names=['index', 'type', 'date', 'code', \
'filter_one', 'filter_two', 'filter_three', 'filter_four', 'recommendation', 'easiness', 'overall', 'question_one', \
'rec_sc', 'eas_s... | pd.DataFrame(columns=['filter_four', 'recommendation', 'easiness']) | pandas.DataFrame |
import pandas as pd
import numpy as np
from ..auth.auth import read_credential_file, load_db_info
import os
import json
"""
auth = {}
auth_dict = {}
env_dict = {}
if os.path.exists(os.path.expanduser('~/.fastteradata')):
auth = json.load(open(os.path.expanduser('~/.fastteradata')))
auth_dict = auth["auth_dic... | pd.read_csv(d_file, names=clist, sep="|", dtype=dtype_dict, na_values=["?","","~","!","null"]) | pandas.read_csv |
# fix_processing_gaps.py
# short script to fix missing tiles for each variable
# to be run post completion of processing with process_tiles.py
# <NAME> <EMAIL> 11 May 2021
# NB: uses scandir for speed, on OPALS Shell an install might be required with
# python -m pip install scandir --user
# Dependencies
import os
imp... | pandas.concat(missing_tiles_df_list) | pandas.concat |
import numpy as np
import pandas as pd
import multiprocessing
import time
from sklearn.metrics import pairwise_distances
import scanpy as sc
from sklearn.metrics.pairwise import pairwise_kernels
import json
from random import sample
import random
from . import iONMF
import sys
import re
import umap
from datetime import... | pd.DataFrame([ref_pair, query_pair]) | pandas.DataFrame |
"""Helper methods."""
import copy
import glob
import errno
import os.path
import time
import calendar
import numpy
import pandas
import matplotlib.colors
from matplotlib import pyplot
import keras
import tensorflow.keras as tf_keras
import tensorflow.keras.layers as layers
import tensorflow.python.keras.backend as K
f... | pandas.DataFrame() | pandas.DataFrame |
import os
from matplotlib import pyplot as plt
from pandas import DataFrame
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import OneHotEncoder
import category_encoders as ce
import numpy as np
from app import db
from app.base.db_models.ModelEncodedColumns import ... | pd.get_dummies(original_dataframe[[feature_to_encode]]) | pandas.get_dummies |
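`pd.get_dummies` one-hot encodes a categorical column, as the completion does for `feature_to_encode`. A minimal sketch with a hypothetical `color` feature:

```python
import pandas as pd

original_dataframe = pd.DataFrame({"color": ["red", "green", "red"]})
dummies = pd.get_dummies(original_dataframe[["color"]])
print(dummies)  # 0/1 indicator columns: color_green, color_red
```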
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("align_axis", [0, 1, "index", "columns"])
def test_compare_axis(align_axis):
# GH#30429
s1 = pd.Series(["a", "b", "c"])
s2 = pd.Series(["x", "b", "z"])
result = s1.compare(s2, align_axis=align_... | tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
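`Series.compare` (pandas >= 1.1) returns only the positions where two Series differ; the test above then checks the result with `assert_frame_equal`. A runnable sketch of the default `align_axis=1` case:

```python
import pandas as pd

s1 = pd.Series(["a", "b", "c"])
s2 = pd.Series(["x", "b", "z"])

result = s1.compare(s2)  # rows 0 and 2 differ; columns 'self' and 'other'
print(result)
```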
"""
This script contains code used clean the raw data and is used in '1. descriptive.ipynb'
"""
#Import libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
def ordered_dict_values(dictionary):
"""
A function to obtain unique values from a dictionary
Parameters
... | pd.read_csv('raw_data/nstemi.csv') | pandas.read_csv |
import sys
import numpy as np
import pandas as pd
from pvlib import modelchain, pvsystem
from pvlib.modelchain import ModelChain
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.location import Location
from pvlib._deprecation import pvlibDeprecationWarning
from pandas.util... | assert_series_equal(ac, expected) | pandas.util.testing.assert_series_equal |
# summarize class balance from the har dataset
from numpy import vstack
from pandas import read_csv
from pandas import DataFrame
# load a single file as a numpy array
def load_file(filepath):
dataframe = read_csv(filepath, header=None, delim_whitespace=True)
return dataframe.values
# summarize the balance of classe... | DataFrame(data) | pandas.DataFrame |
import argparse
from ast import parse
from os import P_ALL, error, path
import sys
import math
from numpy.core.fromnumeric import repeat
from numpy.core.numeric import full
import pandas as pd
from pandas import plotting as pdplot
import numpy as np
from pandas.core.frame import DataFrame
from statsmodels.tsa.statesp... | pd.set_option("display.max_columns", 999) | pandas.set_option |
from scipy import sparse
import pandas as pd
import joblib
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
def load_features(filepath):
matrix = sparse.load_npz(filepath)
return matrix
def load_train_label(filepath):
... | pd.DataFrame({'accuracy': model_acc}, index=class_names) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import sqlalchemy as sql
import math
from base_classes import Data, Portfolio, Strategy, Backtest
###############################################################################
class Data_Selected(Data):
# Get Data Set wi... | pd.DataFrame(portfolio_balance) | pandas.DataFrame |
"""
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from __future__ import annotations
import operator
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Literal,
Union,
cast,
final,
)
from warnings import warn
import nump... | is_array_like(arr) | pandas.core.dtypes.common.is_array_like |
import glob
import os
import hashlib
import gc
import numpy as np
import pandas as pd
from PIL import Image
import skimage.color as skcolor
from skimage.transform import resize
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from collections import OrderedDict
from... | pd.DataFrame({'filenames': filenames, 'cls': cls}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
'''
Module with preprocessing methods to prepare a data from utterances
to an excel table with features for classification and labels
'''
__author__ = "<NAME>"
'''import os
import glob
import re
import numpy as np
import nltk
from collections import Counter
from nltk.corpus import stopwords''... | pd.DataFrame(X,columns=labels) | pandas.DataFrame |
import pandas as pd
def get_rolling_mean(df: pd.DataFrame, column_name: str, window: int):
return df[column_name].rolling(window=window).mean()
def get_move_value(df: pd.DataFrame, column_name: str, down: int, up: int, equal: int):
df2 = | pd.DataFrame(index=df.index) | pandas.DataFrame |
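The `get_rolling_mean` helper above is complete; a hypothetical usage sketch showing that the first `window - 1` values come back as NaN:

```python
import pandas as pd

df = pd.DataFrame({"close": [10, 11, 12, 13, 14]})
print(df["close"].rolling(window=3).mean())  # NaN, NaN, 11.0, 12.0, 13.0
```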
# -*- coding: utf-8 -*-
#
# License: This module is released under the terms of the LICENSE file
# contained within this applications INSTALL directory
"""
Defines the ForecastModel class, which encapsulates model functions used in
forecast model fitting, as well as their number of parameter... | pd.to_datetime(dfo.outl_end) | pandas.to_datetime |
import unittest
import os
import shutil
import numpy as np
import pandas as pd
from aistac import ConnectorContract
from ds_discovery import Wrangle, SyntheticBuilder
from ds_discovery.intent.wrangle_intent import WrangleIntentModel
from aistac.properties.property_manager import PropertyManager
class WrangleIntentCo... | pd.DataFrame(columns=['dates'], data=['2019/01/30', '2019/02/12', '2019/03/07', '2019/03/07']) | pandas.DataFrame |
# *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from functools import wraps
import pandas a... | pd.DataFrame(data) | pandas.DataFrame |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, ... | Timestamp('2000') | pandas.Timestamp |
import pandas as pd
import matplotlib.pyplot as plt
# import seaborn as sns
# sns.set(rc={'figure.figsize':(11, 4)})
dataDirectory = '../data/'
graphsDirectory = 'graphs/'
def visDay(dfs,sensors,day):
plt.clf()
fig, axs = plt.subplots(len(dfs),sharex=True,sharey=True,gridspec_kw={'hspace': 0.5},figsize=(20, 10... | pd.to_datetime(df['time']) | pandas.to_datetime |
# PyLS-PM Library
# Author: <NAME>
# Creation: November 2016
# Description: Library based on <NAME>'s simplePLS,
# <NAME>'s plspm and <NAME>'s matrixpls made in R
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats
from .qpLRlib4 import otimiza, plotaIC
import scipy.linalg
from col... | pd.DataFrame.dot(S.T, S) | pandas.DataFrame.dot |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
import string
from collections import OrderedDict
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.uuid... | pd.Series([2], dtype=np.int32) | pandas.Series |
#scikit learn ensemble workflow for binary probability
import time; start_time = time.time()
import numpy as np
import pandas as pd
from sklearn import ensemble
import xgboost as xgb
from sklearn.metrics import log_loss, make_scorer
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation impo... | pd.isnull(x) | pandas.isnull |
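`pd.isnull` works element-wise on arrays/Series and on scalars, which is why it shows up inside per-value workflows like the grid-search pipeline above. A short sketch:

```python
import numpy as np
import pandas as pd

x = pd.Series([1.0, np.nan, 3.0])
print(pd.isnull(x).tolist())   # [False, True, False]
print(pd.isnull(np.nan))       # True (scalar form)
```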
import os
import requests
import pandas as pd
from random import randint
from django.db.models import Q
from .models import Account
api_key = os.environ.get('IEX_API_KEYS')
TEST_OR_PROD = 'cloud'
def make_position_request(tickers):
data = []
for x in tickers:
response = requests.get("https://{}.iexapi... | pd.DataFrame(data) | pandas.DataFrame |
import ast
import importlib
import re
from inspect import isclass
from mimetypes import add_type, guess_type
import numpy as np
import pandas as pd
import woodwork as ww
from woodwork.pandas_backport import guess_datetime_format
# Dictionary mapping formats/content types to the appropriate pandas read function
type_... | pd.isnull(latitude) | pandas.isnull |
from hashlib import sha256
import time
import random
import uuid
import pandas as pd
import logging
import os
import gnupg
from tempfile import TemporaryDirectory
from datetime import datetime
CSV_SEPARATOR = ";"
PAN_UNENROLLED_PREFIX = "pan_unknown_"
SECONDS_IN_DAY = 86400
MAX_DAYS_BACK = 3
TRANSACTION_FILE_EXTENSI... | pd.DataFrame(hpans, columns=["hashed_pan"]) | pandas.DataFrame |
import re
import sys
import numpy as np
import pytest
from pandas.compat import PYPY
from pandas import Categorical, Index, NaT, Series, date_range
import pandas._testing as tm
from pandas.api.types import is_scalar
class TestCategoricalAnalytics:
@pytest.mark.parametrize("aggregation", ["min", "... | Index(exp) | pandas.Index |
"""
This module does some post-processing of the stats results
and writes out the results to file
ResultsWriter uis subclassed for each data type.
Currently just the *_write* method is overridden in subclasses
which take into account the differences in output between voxel based data (3D volume) and organ volume res... | pd.DataFrame.from_dict({'p': pvals, 'q': qvals}) | pandas.DataFrame.from_dict |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or a... | pd.testing.assert_frame_equal(result, expected) | pandas.testing.assert_frame_equal |
"""
Test indicators.py functions for common indicators to be extracted from an OHLC dataset
Author: <NAME>
"""
import unittest
import indicators
import pandas as pd
class TestIndicators(unittest.TestCase):
def test_checkGreenCandle(self):
candleGreen = {"Open": 1.2, "Close": 1.5}
candleRed = {"Open... | pd.DataFrame(candleSet) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 13 12:31:33 2017
@author: Astrid
"""
import os
import pandas as pd
import numpy as np
from collections import Counter
import re
import multiprocessing
def getFileList(dir_name, ext=''):
file_dir_list = list()
file_list = list()
for file in os.listdir(dir_name... | pd.MultiIndex.from_tuples(tuples) | pandas.MultiIndex.from_tuples |
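`pd.MultiIndex.from_tuples` builds a hierarchical index from (level, level) pairs, as the completion above does. A sketch with hypothetical file/run levels:

```python
import pandas as pd

tuples = [("file1", "run1"), ("file1", "run2"), ("file2", "run1")]
idx = pd.MultiIndex.from_tuples(tuples, names=["file", "run"])
s = pd.Series([0.1, 0.2, 0.3], index=idx)
print(s.loc["file1"])  # selects both runs of file1
```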
# author: <NAME>, <NAME>, <NAME>, <NAME>
# date: 2020-06-12
'''This script reads ministries' comments data from the interim directory and the predicted
labels of question 1 from the interim directory, joins both databases, and saves the result in the specified directory.
There are 2 parameters, Input and Output Path, where you want to write thi... | pd.concat([ministries_2015, pred_2015], axis=1) | pandas.concat |
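`pd.concat(..., axis=1)` pastes frames side by side, aligning on the index, which is how the script joins comments to predicted labels. A minimal sketch with hypothetical frames:

```python
import pandas as pd

ministries_2015 = pd.DataFrame({"comment": ["ok", "slow"]})
pred_2015 = pd.DataFrame({"label": [1, 0]})

joined = pd.concat([ministries_2015, pred_2015], axis=1)
print(joined)
```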
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pand... | StringIO(self.data1) | pandas.compat.StringIO |
import pandas as pd
import re
from collections import OrderedDict
#
# This file includes functions used in the training procedure. The functions are simple and self-explanatory.
# Please see the README, which describes the sequence of steps.
#
def helper_sentence_to_tokens(snt):
step1 = []
for token in snt.split(' '):... | pd.read_csv(input_file, encoding='utf-8') | pandas.read_csv |
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import io
import os
import math
import copy
import pickle
import zipfile
from textwrap import wrap
from pathlib import Path
from itertools import zip_longest
from collections import defaultdict
from urllib.error import URLError
from urllib.request import urlo... | pd.read_csv(filename) | pandas.read_csv |
import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import ... | pd.cut(df[0], grps) | pandas.cut |
# -*- coding: utf-8 -*-
""" bokeh_warnings_graphs.py
Usage:
bokeh_warnings_graphs.py <project_code> [options]
Arguments:
project_code unique project code consisting of 'projectnumber_projectModelPart'
like 456_11 , 416_T99 or 123_N
Options:
-h... | pd.DataFrame({iso_time_stamp: warning_ids}) | pandas.DataFrame |
import pandas as pd
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error
import matplotlib.pyplot as plt
import os
count = 0
reg = SGDRegressor()
predict_for = "NANOUSD.csv"
batch_size = "30T"
stop = pd.to_datetime("2020-08-01", format="%Y-%m-%d")
for pair... | pd.DataFrame(index=predict_df.index) | pandas.DataFrame |
import sys
import numpy.random
import pandas as pd
import numpy as np
from numpy.random import normal
from pandarallel import pandarallel
pandarallel.initialize(nb_workers=8, progress_bar=True)
def create_cross_table(pandas_df):
cross_table = | pd.crosstab(pandas_df.iloc[:, 2], pandas_df.iloc[:, 1], margins=True, margins_name='Total_Reports') | pandas.crosstab |
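`pd.crosstab` builds a frequency table from two columns, and `margins=True` appends row/column totals under the given `margins_name`. A sketch with hypothetical report data:

```python
import pandas as pd

df = pd.DataFrame({"drug": ["A", "A", "B"],
                   "reaction": ["rash", "nausea", "rash"]})
table = pd.crosstab(df["reaction"], df["drug"],
                    margins=True, margins_name="Total_Reports")
print(table)
```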
import librosa
import numpy as np
import pandas as pd
from os import listdir
from os.path import isfile, join
from audioread import NoBackendError
def extract_features(path, label, emotionId, startid):
"""
    Extract features from the audio files under `path`, using the librosa library
    :param path: file path
    :param label: emotion type
    :param startid: starting sequence number
... | pd.Series() | pandas.Series |
import pandas as pd
import sys
# To edit for dev
if sys.platform == 'linux':
path_data = "/n/groups/patel/uk_biobank/project_52887_41230/ukb41230.csv"
path_dictionary = "/n/groups/patel/samuel/HMS-Aging/Data_Dictionary_Showcase.csv"
path_features = "/n/groups/patel/samuel/data_final/page3_featureImp/FeatureImp/"
... | pd.isna(sample['Ethnicity_1']) | pandas.isna |
'''
Created on Feb. 9, 2021
@author: cefect
'''
#===============================================================================
# imports
#===============================================================================
import os, datetime
start = datetime.datetime.now()
import pandas as pd
import numpy as np
from pa... | pd.Series(sd) | pandas.Series |
from sklearn import linear_model
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
node1 = | pd.read_csv("../Data/Node1.csv", index_col="AbsT") | pandas.read_csv |
# Copyright 2020 The GenoML Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable ... | pd.merge(merged, df, on=col_id, how="inner") | pandas.merge |
#%%
import numpy as np
import pandas as pd
import altair as alt
import anthro.io
# Generate a plot for global atmospheric SF6 concentration from NOAA GML data
data = | pd.read_csv('../processed/monthly_global_sf6_data_processed.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""Supports OMNI Combined, Definitive, IMF and Plasma Data, and Energetic
Proton Fluxes, Time-Shifted to the Nose of the Earth's Bow Shock, plus Solar
and Magnetic Indices. Downloads data from the NASA Coordinated Data Analysis
Web (CDAWeb). Supports both 5 and 1 minute files.
Properties
------... | pds.DateOffset(months=1) | pandas.DateOffset |
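`pd.DateOffset(months=1)` shifts a timestamp by a calendar month rather than a fixed number of days, which matters when stepping through monthly data files as the OMNI loader above appears to. A sketch:

```python
import pandas as pd

ts = pd.Timestamp("2020-01-31")
print(ts + pd.DateOffset(months=1))  # 2020-02-29: clipped to a valid calendar day
```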
from pandas_datareader import data as pdr
import time
import yfinance as yf
import json
import sys
import pandas as pd
import numpy as np
yf.pdr_override() # <== that's all it takes :-)
# download dataframe
# data = pdr.get_data_yahoo("2317.TW", start="2019-01-01", end="2020-03-18")
data = pdr.get_data_yahoo((sys.a... | pd.DataFrame(0, index=data.index, columns=title) | pandas.DataFrame |
# standard libraries
import os
# third-party libraries
import pandas as pd
# local imports
from .. import count_data
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class TestCsvToDf:
"""
Tests converting a csv with various headers into a processible DataFrame
"""
def test_timestamp(self):
... | pd.to_datetime(test_df['session_end']) | pandas.to_datetime |
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestS... | Timestamp("2010-01-01") | pandas.Timestamp |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.linear_model import LinearRegression
import pandas as pd
import rolldecayestimators.filters
import rolldecayestimators.measure as measure
from sk... | pd.isnull(self.scale_factor) | pandas.isnull |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys, pickle, os
import pymc3 as pm
import nipymc
from nipymc import *
import pandas as pd
from theano import shared
# 1st argument = which region to analyze
region = str(sys.argv[1])
# global variables...
SAMPLES = 3000
BURN = 1000
# get ... | pd.get_dummies(X[cfe], drop_first=True) | pandas.get_dummies |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinea... | pd.concat([close,close_delta,close_min,temp],axis = 1,join = 'inner') | pandas.concat |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not u... | pd.series.var(series) | pandas.series.var |
import timeit
import tensorflow as tf
import pandas as pd
from tqdm import tqdm
class DataProcessing():
def __init__(self, in_path, out_path):
if in_path == False:
self.out_path = out_path
elif out_path == False:
self.in_path = in_path
elif in_path == False and out_path == False:
pass
else:
self.i... | pd.DataFrame(data_list,columns=["txt_id","label"]) | pandas.DataFrame |
from math import floor, ceil
import numpy as np
import matplotlib.pyplot as plt
import datetime
import folium
import random
import seaborn as sns
import pandas as pd
import plotly.express as px
import geopandas as gpd
# import movingpandas as mpd
# from statistics import mean
from shapely.geometry import Polygon, Mult... | pd.DataFrame(grid) | pandas.DataFrame |
import pandas as pd
import argparse
import gspread
from gspread_dataframe import get_as_dataframe
"""This module is use to access,preporcess & summarize data
from google sheet. Three files (raw_data,clean_data &
summarized_data) are saved to the following directory:
./cow_disease_detection/data/
Example
-------
... | pd.to_datetime(df["date"] + " " + df["time"]) | pandas.to_datetime |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from Bio.SeqUtils.ProtParam import ProteinAnalysis
import numpy as np
import os
from datetime import datetime
def create_sequence_properties_dataframe(sequences):
print("---- Creating properties for the all data. This may take a few mins de... | pd.Series(flattened) | pandas.Series |
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
Da... | Series(data) | pandas.Series |
import pandas as pd, numpy as np, matplotlib.pyplot as plt
import glob, pywt, pyclustering
from mpl_toolkits.basemap import Basemap
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from pyclustering.cluster.elbow import elbow
import xarray as xr
class HyCluster:
def __init__(
... | pd.Series(kmeans.labels_, index=self.traj.index) | pandas.Series |
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
_testing as tm,
bdat... | tm.makeDataFrame() | pandas._testing.makeDataFrame |
import os
import numpy
import pandas as pd
import scipy.stats as st
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs')
def summary_cost(int_details,ctrl_m,ctrl_f,trt_m,trt_f, text):
int_dwc = 1 / (1 + discount_rate) ** numpy.array(range(time_horizon))
int_c = numpy.array([[prog_cost] * time... | pd.read_csv(file_name_m) | pandas.read_csv |
#######################################################################################################
# AUTHOR : <EMAIL>
# AIM : Script to create a cleaned parallel dataset from an uncleaned parallel dataset.
# The input dataset must have only two columns and c1 must be in English,
# c2 language co... | pd.DataFrame() | pandas.DataFrame |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def postgres_url() -> str:
conn = os.environ["POSTGRES_URL"]
return conn
@pytest.fixture(scope="module") # type: ignore
def postgres_url_tls() ... | pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64") | pandas.Series |
import logging as logger
import re
import regex
import unicodedata
from abc import abstractmethod
from collections import defaultdict
import pandas as pd
import nltk
# noinspection PyPackageRequirements
from iso639 import languages
from langdetect import detect, DetectorFactory
from nltk.corpus import stopwords
# noin... | pd.DataFrame() | pandas.DataFrame |
# Import libraries
import glob
import pandas as pd
import numpy as np
import pickle
import requests
import json
import fiona
import contextily as ctx
import matplotlib.pyplot as plt
import seaborn as sns
import geopandas as gpd
from shapely.geometry import Point, LineString, MultiPoint, Polygon
start_year,end_year = 2... | pd.unique(SiteCrashes_target[col_main]) | pandas.unique |
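`pd.unique` returns distinct values in order of first appearance (unlike `np.unique`, which sorts). A sketch:

```python
import pandas as pd

s = pd.Series([2, 1, 3, 3, 2])
print(pd.unique(s))  # [2 1 3], first-appearance order preserved
```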
"""Module to test bowline.utils."""
import pandas as pd
import pytest
from bowline.utils import detect_series_type
@pytest.mark.parametrize(
"input_series, expected",
[
( | pd.Series([0, 1, 1, 0]) | pandas.Series |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIn... | assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import pytesseract
import platform
import pandas as pd
class TextDetect():
def __init__(self, path_cmd):
if platform.system() == 'Windows':
pytesseract.pytesseract.tesseract_cmd = path_cmd
def get_data(self, image, join=True):
"""
:param image: image for text detection
... | pd.DataFrame(data) | pandas.DataFrame |
import xgboost as xgb
import pandas as pd
import math
def predict_xgb(df_in):
df = df_in.copy()
cols_input = ['Mz', 'Sk', 'Ku', 'Sigma']
dinput = xgb.DMatrix(df[cols_input])
bst = xgb.Booster()
bst.load_model('model/xgb_2.model')
ypred = bst.predict(dinput)
df['code_ng_pred']... | pd.Series(ypred, index=df.index, dtype='int') | pandas.Series |
# -*- coding: utf-8 -*-
import os
import pandas as pd
##### DEPRECATED? ######
# !!! STILL VERY TIME INEFFICIENT. WORKS FOR NOW BUT NEEDS REWORK LATER ON !!!
def transform_to_longitudinal(df, feats, pat_col, time_col, save_folder):
"""
    Transforms a long format (each visit of a patient stored in one row) dataf... | pd.DataFrame() | pandas.DataFrame |
import queue
import logging
import datetime
import pandas as pd
from koapy.grpc import KiwoomOpenApiService_pb2
from koapy.grpc.KiwoomOpenApiServiceClientSideDynamicCallable import KiwoomOpenApiServiceClientSideDynamicCallable
from koapy.grpc.KiwoomOpenApiServiceClientSideSignalConnector import KiwoomOpenApiServiceCl... | pd.Series(single_output) | pandas.Series |
import MDAnalysis
import MDAnalysis.analysis.hbonds
import pandas as pd
import numpy as np
import os
from collections import defaultdict
import networkx as nx
import matplotlib.pyplot as plt
import sys
import logging
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger()
#logger.ad... | pd.DataFrame({'donor_residue': donor, 'acceptor_residue': accept, 'wat_num': wat_num}) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
def plot_results_for_probability_changes():
df1 = pd.read_csv("base.csv")
df2 = pd.read_csv("base_pc_100_pm_80.csv")
df3 = pd.read_csv("base_pc_80_pm_5.csv")
df_iterations = pd.DataFrame({
"90%% crossover, 40%% mutação": df1["iterations"],
... | pd.read_csv("pmx_insert_pc_100_pm_80_pop_200.csv") | pandas.read_csv |
# Import packages
import pandas as pd
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix
import warnings
warnings.filterwarnings('ignore')
def decode(encode_list):
final_re = []
... | pd.read_csv(path + 'test_4_feature_select.csv') | pandas.read_csv |
# coding: utf-8
# # Generating OncoPrint Data Files
#
# The script will process all variant files and output files in an ingestible format for the R OncoPrint function.
#
# It will output oncoprint data for both replicate files and the merged variant callsets.
# In[1]:
import os
import pandas as pd
# In[2]:
... | pd.read_table(file) | pandas.read_table |
from bokeh.charts import save, output_file, BoxPlot
from bokeh.layouts import column, gridplot
from bokeh.palettes import all_palettes
from bokeh.plotting import figure
from bokeh.models.widgets import Panel, Tabs, Div
from bokeh.models.widgets import DataTable, TableColumn
from bokeh.models import ColumnDataSource
i... | pd.DataFrame({"group": groups, "count": counts}) | pandas.DataFrame |
import random
import unittest
import numpy as np
import pandas as pd
from numpy import testing as nptest
from examples.project_ENGIE import Project_Engie
from operational_analysis.methods import plant_analysis
def reset_prng():
np.random.seed(42)
random.seed(42)
class TestPandasPrufPlantAnalysis(unittest.... | pd.to_datetime(["2014-06-01", "2014-12-01", "2015-10-01"]) | pandas.to_datetime |
import os
import shutil
import filecmp
from unittest import TestCase
import pandas as pd
from pylearn.varselect import count_xvars, rank_xvars, extract_xvar_combos, remove_high_corvar
class TestVariableSelect(TestCase):
def setUp(self):
self.output = './tests/output'
if not os.path.exists(self.... | pd.read_csv('./tests/data/rlearn/VARSELECT.csv') | pandas.read_csv |
# --------------
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# Load the dataset and create a column `year` which stores the year in which the match was played
data_ipl= | pd.read_csv(path) | pandas.read_csv |
import matplotlib
import matplotlib.pylab as plt
import os
from matplotlib.pyplot import legend, title
from numpy.core.defchararray import array
from numpy.lib.shape_base import column_stack
import seaborn as sns
import pandas as pd
import itertools
import numpy as np
def plot_graph(data, plot_name, figsize, legend... | pd.set_option('display.max_colwidth', None) | pandas.set_option |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 10 17:52:18 2018
@author: sudhir
"""
# =============================================================================
# Import packages
# =============================================================================
import pandas as pd
import numpy a... | pd.DataFrame(temp_ga) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2021/10/24 16:12
describe: Tushare data cache; caches data with pickle as a temporary cache. Cache once, use many times, with no incremental updates.
"""
import os.path
import shutil
import pandas as pd
from .ts import *
from ..utils import io
class TsDataCache:
"""Tushare 数据缓存"""
def __init__(self, data... | pd.to_datetime(edt) | pandas.to_datetime |
# Evolutionary optimizer for hyperparameters and architecture. Project at https://github.com/pgfeldman/optevolver
import concurrent.futures
import copy
import datetime
import getpass
import os
import random
import re
import threading
from enum import Enum
from typing import Dict, List, Tuple, Callable
import matplotli... | pd.Series(boot) | pandas.Series |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################... | pd.Timestamp('2012-01-01', tz=tz) | pandas.Timestamp |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from collections import defaultdict
from logging import Logger
from typing import Any, Dict, List, Optio... | pd.merge(metric_vals, metadata, on=key_col) | pandas.merge |