| prompt | completion | api |
|---|---|---|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime
from sktime.transformations.panel.rocket import MiniRocket as MiniRKT
from sktime.classification.shapelet_based import MrSEQLClassifier
from convst.utils import load_sktime_arff_file_resample_id, return_all_dataset_names, U... | pd.Series(0, index=df.index) | pandas.Series |
from os import listdir
from os.path import isfile, join
import Orange
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from parameters import order, alphas, regression_measures, datasets, rank_dir, output_dir, graphics_dir, result_dir
from regression_algorithms import regression_list
results_di... | pd.to_numeric(df_mean['RANK_BORDERLINE1'], downcast="float") | pandas.to_numeric |
# -*- coding: utf-8 -*-
from lxml import objectify
import pandas as pd
from pandas import DataFrame
from datetime import datetime
import sys
from logging import getLogger
import logging.config
def main(args):
log = getLogger()
logging.config.fileConfig("config/logging.conf")
log.debug('Parse開始')
pa... | pd.Grouper(freq='D') | pandas.Grouper |
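The completion above groups a datetime-indexed frame by calendar day. A minimal sketch of that pattern, using made-up minute-level data rather than the snippet's own parsed XML:

```python
import pandas as pd
import numpy as np

# Hypothetical minute-level data spanning three days
idx = pd.date_range("2024-01-01", periods=3 * 24 * 60, freq="min")
df = pd.DataFrame({"value": np.arange(len(idx))}, index=idx)

# Group by calendar day on the DatetimeIndex and take the daily sum
daily = df.groupby(pd.Grouper(freq="D"))["value"].sum()
print(daily)
```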
from collections import defaultdict
from sklearn import preprocessing
import signal
import influxdb_client
from influxdb_client import InfluxDBClient
from datetime import datetime
from sklearn.preprocessing import KBinsDiscretizer
import argparse
import ntopng_constants as ntopng_c
import numpy as np
import pandas as p... | pd.read_pickle(dname / 'timeseries.pkl') | pandas.read_pickle |
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
d... | bdate_range("2012-01-01", periods=300) | pandas.bdate_range |
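The completion calls pandas.bdate_range, which generates business days (Monday through Friday) only. A small, self-contained illustration with an arbitrary start date:

```python
import pandas as pd

# 300 business days starting from 2012-01-01; weekends are skipped
idx = pd.bdate_range("2012-01-01", periods=300)
print(idx[:5])      # begins on Monday 2012-01-02, since 2012-01-01 is a Sunday
print(idx.freqstr)  # 'B'
```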
''' EVENT DETECTION (FIXATIONS & SACCADES)'''
import os
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from helper import *
# Categorize fixations and saccades based on their order:
for i,k in itertools.product(sub_id, img_id):
file='... | pd.read_csv(BEHAVIORAL_FILE) | pandas.read_csv |
import pandas as pd
import numpy as np
from pathlib import Path
from scipy.spatial import distance
from math import factorial, atan2, degrees, acos, sqrt, pi
from lizardanalysis.utils import auxiliaryfunctions
#TODO: check why files only contain species names but no measurements!!
analyze_again = True
# utility funct... | pd.DataFrame(columns=morph_csv_columns) | pandas.DataFrame |
import pandas as pd
import ast
import sys
import os.path
from pandas.core.algorithms import isin
sys.path.insert(1,
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import dateutil.parser as parser
from utils.mysql_utils import separator
from utils.io import read_json
from utils.scr... | pd.isnull(row[k]) | pandas.isnull |
import pandas as pd
import os
import re
#add one column with the name of the df aka condition
print(snakemake.input["whippet_mapping_dc"])
def retrieve_mapping_stats(list_of_paths, key1, key2, key3):
mapping_summary={}
sample_name_list=[]
Mapped_Percent_list=[]
Multimap_Percent_list=[]
N... | pd.DataFrame.from_records(mapping_summary) | pandas.DataFrame.from_records |
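The completion builds a frame of per-sample mapping statistics with pandas.DataFrame.from_records. A minimal sketch with invented sample names and percentages standing in for the Whippet summaries:

```python
import pandas as pd

# Hypothetical per-sample mapping statistics, one record per sample
records = [
    {"sample": "S1", "mapped_pct": 87.2, "multimap_pct": 4.1},
    {"sample": "S2", "mapped_pct": 91.5, "multimap_pct": 3.3},
]
stats = pd.DataFrame.from_records(records)
print(stats)
```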
from datetime import datetime
import time
import pandas as pd
import typer
import subprocess
import numpy as np
import random
import os.path
import noise_mechanism as nm
DEFAULT_DATA_DIRECTORY = "./tests/data"
DEFAULT_NOISE_DATA_DIRECTORY = "./data/mob-dp"
DEFAULT_FIPS = "36" # New York
DEFAULT_ITERATIONS = 1000
D... | pd.Series(m) | pandas.Series |
from flask import Flask, render_template, request, redirect, make_response, url_for
app_onc = Flask(__name__)
import astrodbkit
from astrodbkit import astrodb
from SEDkit import sed
from SEDkit import utilities as u
import os
import sys
import re
from io import StringIO
from bokeh.plotting import figure
from bokeh.emb... | pd.to_numeric(data['ra']) | pandas.to_numeric |
import collections.abc
from pathlib import Path
import pandas as pd
import xml.etree.ElementTree as ET
from io import BytesIO
from typing import List, Union, Dict, Iterator
from pandas import DataFrame
from .types import UploadException, UploadedFile
from .config import column_names
import logging
logger = logging.... | pd.DataFrame(oc3_df) | pandas.DataFrame |
# Created by MeaningCloud Support Team
# Copyright 2020 MeaningCloud LLC
# Date: 23/02/2020
import sys
import os
import meaningcloud
import pandas as pd
# @param license_key - Your license key (found in the subscription section in https://www.meaningcloud.com/developer/)
license_key = '<<<your license key>>... | pd.Series([polarity, entities, concepts, iab2]) | pandas.Series |
#__________________________________________________________________________________________________________________________________________________________
"""Working Code, Do Not Change"""
#_____________________________________________________________________________________________________________________________... | pd.to_datetime('01/01/1900 00:00:00') | pandas.to_datetime |
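The completion parses a timestamp string with pandas.to_datetime. A short, standalone example of the scalar and vectorized forms (the dates are arbitrary):

```python
import pandas as pd

# Parsing a single string returns a pandas.Timestamp
epoch = pd.to_datetime("01/01/1900 00:00:00")
print(epoch)  # 1900-01-01 00:00:00

# The same call vectorizes over a list and returns a DatetimeIndex
times = pd.to_datetime(["01/02/1900 12:00:00", "03/01/1900 06:30:00"])
print(times)
```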
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under... | pd.concat([pd1, pd2], axis=1) | pandas.concat |
'''
permit.py
---------
This file collects raw building permit data, and summarizes the information
for each census tract. It is called by make_zone_facts.py
The resulting dataset from this file looks like:
zone_type | zone | construction_permits | total_permits
----------|---------|----------------------|-----... | pd.concat(data) | pandas.concat |
import numpy as np
import pandas as pd
attribute_dict = {}
a_file = open("attribute_names.txt")
for line in a_file:
key, value = line.strip('\n').split(":")
key = key.strip()
attribute_dict[key] = value
df_german_data = | pd.read_csv('german_data.csv', index_col=0) | pandas.read_csv |
from packaging.version import Version
from scprep.plot.histogram import _symlog_bins
from scprep.plot.jitter import _JitterParams
from scprep.plot.scatter import _ScatterParams
from tools import data
from tools import utils
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as... | pd.Series(self.X_pca[:, 2], name="z") | pandas.Series |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.c... | tm.assert_isinstance(arr, Index) | pandas.util.testing.assert_isinstance |
import os
import sys
# -----------------------------------------------------------------------------
from datetime import datetime
import dateutil.parser
this_folder = os.path.dirname(os.path.abspath(__file__))
root_folder = os.path.dirname(os.path.dirname(this_folder))
sys.path.append(root_folder + '/python')
sys.p... | pd.to_datetime(df['Date']) | pandas.to_datetime |
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
# read Excel
df = | pd.read_excel('xacts.xlsx', sheetname='All Transactions') | pandas.read_excel |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 23 11:06:22 2021
@author: madeline
"""
'''
This script converts VCF files that have been annotated by snpEFF into GVF files, including the functional annotation.
Note that the strain is obtained by parsing the file name, expected to contain the sub... | pd.merge(clades, merged_df, on=['mutation'], how='right') | pandas.merge |
import os
import pandas as pd
from utilies import warn_by_qq, data_load, hyper_tuner
def main():
item_dict = {'PH': 0, 'DO': 1, 'CODMN': 2, 'BOD': 3, 'AN': 4, 'TP': 5, 'CODCR': 6}
item_name_list = ['PH', 'DO', 'CODMN', 'BOD', 'AN', 'TP', 'CODCR']
data_path = "ziya.csv"
log_path = "alog"
... | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
"""Compares informal collaboration by cohort of researchers and publication
year of papers.
"""
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from _205_compute_centralities import p_to_stars
OUTPUT_FOLDER = "... | pd.read_csv(PAPER_FILE, usecols=columns, encoding="utf8") | pandas.read_csv |
from __future__ import print_function
# from builtins import str
# from builtins import object
import pandas as pd
from openpyxl import load_workbook
import numpy as np
import os
from .data_utils import make_dir
class XlsxRecorder(object):
"""
xlsx recorder for results
including two recorder: one for curre... | pd.ExcelWriter(self.writer_path, engine='openpyxl') | pandas.ExcelWriter |
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
Da... | pd.Categorical([1, 2, 3]) | pandas.Categorical |
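The completion constructs a pandas.Categorical. A brief illustration of the codes/categories split and the resulting 'category' dtype, using toy values:

```python
import pandas as pd

cat = pd.Categorical([1, 2, 3, 1], categories=[1, 2, 3])
print(cat.codes)       # integer codes: [0 1 2 0]
print(cat.categories)  # Index([1, 2, 3])

# Wrapping in a Series yields the memory-efficient 'category' dtype
s = pd.Series(cat)
print(s.dtype)         # category
```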
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import torch
from pytorch_forecasting.metrics import QuantileLoss
from pytorch_forecasting import TemporalFusionTransformer, TimeSeriesDataSet
from pytorch_forecasting.data import GroupNormalizer
from config import load_config
from load_data import Loa... | pd.concat([errors_data_name, df_errors], axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
# @author: Elie
#%% ==========================================================
# Import libraries set library params
# ============================================================
import pandas as pd
import numpy as np
import os
pd.options.mode.chained_assignment = None #Pandas warning... | pd.merge(sample_labels, sigs, how='left', on='sample') | pandas.merge |
from load_dataset import load_dataset
from load_dataset import split_data
from load_dataset import accuracy_metric
import numpy as np
import pandas as pd
import pandasql as ps
if __name__ == "__main__":
Y_full = pd.read_csv('emittance_labels.csv')
X_full = | pd.read_csv('unit_cell_data_16.csv') | pandas.read_csv |
from sklearn.cluster import MeanShift, estimate_bandwidth
import pandas as pd
import glob
from pathlib import Path
from spatiotemporal.util import sampling
def load_data_nrel(path, resampling=None):
## some resampling options: 'H' - hourly, '15min' - 15 minutes, 'M' - montlhy
## more options at:
## http:/... | pd.DataFrame(index=raw_df.index) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/3/21 0021
# @Author : justin.郑 <EMAIL>
# @File : index_baidu.py
# @Desc : 获取百度指数
import json
import urllib.parse
import pandas as pd
import requests
def decrypt(t: str, e: str) -> str:
"""
解密函数
:param t:
:type t:
:param e:
... | pd.to_datetime(temp_df_7["date"]) | pandas.to_datetime |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Open P... | pd.DataFrame(data=all_results) | pandas.DataFrame |
"""
Code for the dynamical system component of the Baselining work.
@author: <NAME>
@date Jan 6, 2016
"""
import numpy as np
import pandas as pd
from scipy.signal import cont2discrete
from patsy import dmatrices
from gurobipy import quicksum, GRB, LinExpr
class DynamicalSystem(object):
"""
Abstract base... | pd.Series(_beta, index=self._index) | pandas.Series |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Created on Fri Aug 9 14:01:22 2019
@author: cherrabi
"""
from P2N_Lib import GenereListeFichiers # import
from P2N_Config import LoadConfig #
import os # importation de la bibliothèque os qui sert à
from textblob import TextBlob # importation de... | pd.read_csv(ResultTemplateFlask + '/DataFormat/resultatParserV2.csv') | pandas.read_csv |
# pylint: disable=E1101
from datetime import datetime
import os
import warnings
import nose
import numpy as np
from pandas.core.frame import DataFrame, Series
from pandas.io.parsers import read_csv
from pandas.io.stata import read_stata, StataReader
import pandas.util.testing as tm
from pandas.util.misc import is_li... | tm.assert_frame_equal(parsed_13, expected) | pandas.util.testing.assert_frame_equal |
import json
#import requests
import pandas as pd
import numpy as np
import os
from tqdm import tqdm
import uuid
import subprocess
from datetime import datetime
from bs4 import BeautifulSoup as bs
import re
import pysam
import mysecrets
import glob
import tarfile
from flask import Flask, request, redirect, url_for, jso... | pd.DataFrame(SS_std_snp_list, columns=[SNP+'.tmp']) | pandas.DataFrame |
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_tspecscores.ipynb (unless otherwise specified).
__all__ = ['log_it', 'tsi', 'spm', 'zscore', 'tau', 'ts_func', 'calc_ts']
# Cell
import pandas as pd
import numpy as np
# Cell
def log_it(data: pd.DataFrame) -> pd.DataFrame:
df = data.copy()
return np.log(1 + df)... | pd.DataFrame(zs, index=df.index, columns=df.columns) | pandas.DataFrame |
import math
import pandas as pd
from model.Enumeration import Level
class BondsDao(object):
def __init__(self):
pass
def my_filter(self, df):
# c_col = df.loc[:, 'G']
std1, mean1 = df.describe().loc[['std', 'mean'], '估价收益久期']
std2, mean2 = df.describe().loc[['std', 'mean'], ... | pd.read_csv('data/interest_bonds_quarter_data2.csv') | pandas.read_csv |
import numpy as np
import pytest
from pandas.compat import IS64
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127>
@pytest.mark.filterwarnings("ignore:invalid value encountered in si... | pd.Series(data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype="float64") | pandas.Series |
import concurrent.futures
import logging
import os
import pathlib
from pathlib import Path
from typing import Dict, List
import numpy as np
import pandas as pd
import pytz
import xarray as xr
from src.constants import ROOT_DIR
from src.data.utils import Location
from src.logger import get_logger
logger = get_logger(... | pd.Timedelta(value=12, unit="h") | pandas.Timedelta |
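The completion builds a 12-hour pandas.Timedelta. A tiny example of how such an offset combines with a Timestamp (the date is arbitrary):

```python
import pandas as pd

half_day = pd.Timedelta(value=12, unit="h")
ts = pd.Timestamp("2024-06-01 00:00")

print(ts + half_day)             # 2024-06-01 12:00:00
print(half_day.total_seconds())  # 43200.0
```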
# env: py3
# Author: <NAME>
import pandas as pd
import datetime
import urllib
from urllib.request import urlopen
def AirNow():
baseURL = "http://www.airnowapi.org/aq/forecast/"
api_key = '###YOUR_API_KEY###'
#date = '2018-08-04'
# get the current date as input
now = datetime.datetime.now()
date = ... | pd.concat(dfs) | pandas.concat |
import pandas as pd
import time
import urllib.request, json
from bs4 import BeautifulSoup
import nltk
from nltk.corpus import stopwords
import datetime
import calendar
import csv
import pandas as pd
import os
path = os.environ["heuristik_data_path"]
path = os.path.abspath(path) + '/'
nltk.download("punkt",path)
nltk... | pd.read_csv(data_paths[0]) | pandas.read_csv |
from trading.indicators.indicators import (
bollinger,
directional_movement,
macd,
mma,
mme,
parabolic_sar,
rsi,
stochastic
)
import json
import pandas as pd
import math
import pytest
import random
@pytest.mark.parametrize("nb, values, mma_column", [
(1, [0.2], [0... | pd.DataFrame(values, columns=["high"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
from scipy.stats import hmean
import cirpy
import datetime
from matplotlib import pyplot as plt
import seaborn as sns
from data_loader import GraphCancerMolecules
sns.set()
sns.set_context('talk')
def read_in_cpdb():
cpdb_lit = pd.read_csv('../data/cpdb.lit.tab.txt', sep=... | pd.read_csv('../data/cpdb_name.tsv', sep='\t') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Library with PME calculation functions, provides utility for Private Equity analysis.
@author: <NAME> (<EMAIL>)
"""
import pandas as pd
import numpy as np
import scipy.optimize
from datetime import date
#Helper functions
def nearest(series, lookup, debug = False):
... | pd.to_datetime(dates_index) | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# src: https://towardsdatascience.com/hands-on-predict-customer-churn-5c2a42806266
# In[2]:
# Churn quantifies the number of customers who have
# unsubscribed or canceled their service contract.
# Steps
# 1. Use Case / Business Case
# Only by understanding the fin... | pd.set_option('display.width', 1000) | pandas.set_option |
import pandas as pd
import numpy as np
from random import randint
import os.path
import click
from itertools import product
from sklearn.metrics import (
precision_score,
recall_score,
confusion_matrix,
accuracy_score,
)
from .preprocessing import (
feature_extraction,
group_feature_extraction,
... | pd.DataFrame.from_dict(tbl, columns=col_names, orient='index') | pandas.DataFrame.from_dict |
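The completion turns a results dict into a frame with orient='index' and explicit column names. A minimal sketch with invented metric names standing in for `tbl` and `col_names`:

```python
import pandas as pd

# Hypothetical per-model metrics keyed by model name
tbl = {
    "model_a": [0.91, 0.88],
    "model_b": [0.87, 0.93],
}
# orient='index' makes each dict key a row; 'columns' names the value columns
metrics = pd.DataFrame.from_dict(tbl, orient="index", columns=["precision", "recall"])
print(metrics)
```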
import matplotlib.pyplot as pyplot
from SQL import querys as sql
from Diagram import hex_converting
import seaborn as sb
import numpy as np
import pandas as ps
import sqlite3
import datetime
import config
save_plots = 'plots/'
__databaseFile = config.CONFIG['database_file_name']
sb.set(style="dark", color_codes=True)
... | ps.DataFrame(d16) | pandas.DataFrame |
"""
Match two sets of proteins based on BLAST results,
using the maximum weight bipartite matching method.
Parameters:
1. set1 proteins fasta
2. set2 proteins fasta
3. set1 vs. set2 blast6 result
(must use -outfmt "6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore qlen slen")
4.... | pd.read_csv(blast6, sep='\t', names=blast6_headers) | pandas.read_csv |
import numpy as np
from sklearn import preprocessing, cross_validation, neighbors
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
from sklearn import preprocessing, cross_validation, neighbors, svm
from sklearn.preprocessing import StandardScaler
n_com... | pd.Series(y_test) | pandas.Series |
from pandas import to_datetime
from pandas.io.json import json_normalize
from requests import get
def chart(
apiToken="demo",
apiVersion="v0",
host="api.fugle.tw",
output="dataframe",
symbolId="2884",
):
outputs = ["dataframe", "raw"]
if output not in outputs:
raise ValueError('out... | json_normalize(json) | pandas.io.json.json_normalize |
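The completion flattens nested JSON with json_normalize; since pandas 1.0 the same function is exposed directly as pd.json_normalize. A small sketch with a hypothetical payload shaped like a chart API response:

```python
import pandas as pd

# Hypothetical nested records as returned by a chart API
payload = [
    {"symbol": "2884", "quote": {"open": 25.1, "close": 25.6}},
    {"symbol": "2330", "quote": {"open": 598.0, "close": 601.0}},
]
# Nested dicts are flattened into dotted column names
df = pd.json_normalize(payload)
print(df.columns.tolist())  # ['symbol', 'quote.open', 'quote.close']
```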
'''
Clase que contiene los métodos que permiten "limpiar" la información extraida por el servicio de web scrapper
(Es implementada directamente por la calse analyzer)
'''
import pandas as pd
import re
from pathlib import Path
import numpy as np
import unidecode
class Csvcleaner:
@staticmethod
def FilterDataOp... | pd.isnull(dfAux.at[idxVersion, 'A_favor']) | pandas.isnull |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica... | pd.DataFrame(self.numpy_input) | pandas.DataFrame |
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import os
import math
#import utm
import shapefile as shp
import seaborn as sns
from collections import OrderedDict
import geopandas as gpd
from geopy.distance import distance
import argparse
# PRIMARY DATA SOURCE
# https:/... | pd.DataFrame(columns=fields, data=records) | pandas.DataFrame |
import os, sys
import numpy as np
from pyhdf.SD import SD, SDC
from scipy import ndimage
import glob
import pandas as pd
import xarray as xr
from joblib import Parallel, delayed
'''
# Basic parameters
lat_0 = 60
lon_0 = -180
res_x = 0.01 # 0.02 for the 2km grid
res_y = 0.01 # 0.02 for th... | pd.read_pickle(cache_file) | pandas.read_pickle |
"""DataFrame loaders from different sources for the AccountStatements init."""
import pandas as pd
import openpyxl as excel
def _prepare_df(transactions_df):
"""Cast the string columns into the right type
Parameters
----------
transactions_df : DataFrame
The DataFrame where doing the casting
Returns
--------... | pd.to_numeric(importo_series) | pandas.to_numeric |
#!/usr/bin/env python
# coding: utf-8
# # Machine Learning Engineer Nano Degree - Capstone Project
# ## Student: <NAME>
# ## January 08, 2017
#
# ## Overview
#
# This project started as a work project that I performed for my professional career. The original project was used to identify false/positive readings from ... | pd.read_csv(input_file) | pandas.read_csv |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, ... | DataFrame(data_timedelta64) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from .._utils import color_digits, color_background
from ..data import Data, DataSamples
#from ..woe import WOE
import pandas as pd
#import math as m
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.col... | pd.DataFrame() | pandas.DataFrame |
"""
database.py
Routines for managing a spectral line database.
TODO - set up routines for a persistent database
"""
import os
import warnings
try:
import tables
from tables import IsDescription, open_file
from tables import StringCol, Int64Col, Float64Col
except ImportError:
warnings.wa... | pd.DataFrame(matches) | pandas.DataFrame |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
import pandas as pd
import argparse
import glob
from scipy.stats import ttest_ind
# %%
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'problem',
help='MILP instance type ... | pd.read_csv(targetfile2) | pandas.read_csv |
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, so... | pd.merge(analyticalDF,ocrDF,on='ppn') | pandas.merge |
import pull_mdsplus as pull
import pandas as pd
import numpy as np
import meas_locations as geo
import MDSplus as mds
import itertools
from scipy import interpolate
def load_gfile_mds(shot, time, tree="EFIT01", exact=False, connection=None, tunnel=True):
"""
This is scavenged from th... | pd.Series() | pandas.Series |
# cvworkflow/kkcalcfunctions.py
import kkcalc
from kkcalc import data
from kkcalc import kk
import numpy as np
import pandas as pd
import matplotlib
from matplotlib.pyplot import *
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def kkcalc_convert(file_path, *, chemical_formula, density, min_ev, max_ev, ... | pd.DataFrame(delta1_df, columns=[delta_label1]) | pandas.DataFrame |
from __future__ import print_function
import collections
import json
import logging
import os
import pickle
import sys
import numpy as np
import pandas as pd
import keras
from itertools import cycle, islice
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, Max... | pd.concat([df_fp, df_fp2]) | pandas.concat |
"""
Data structure for 1-dimensional cross-sectional and time series data
"""
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
import itertools
import operator
import sys
import warnings
from numpy import nan, ndarray
import numpy as np
from pandas.core.common import (isnull, notnull, _ensure... | _ensure_index(index) | pandas.core.common._ensure_index |
#!/usr/bin/env python
#----------------------------------------------------------------------#
'''
A module to analyze token trends on the BSC blockchain.
This is very much a work in progress.
'''
#----------------------------------------------------------------------#
# System Module Imports
import os
import sys
impor... | pd.Timestamp(trade['timeInterval']['minute']) | pandas.Timestamp |
#!/usr/bin/env python
'''
Tools for generating SOWFA MMC inputs
'''
__author__ = "<NAME>"
__date__ = "May 16, 2019"
import numpy as np
import pandas as pd
import os
import gzip as gz
boundaryDataHeader = """/*--------------------------------*- C++ -*----------------------------------*\\
========= ... | pd.isna(self.df[fieldname]) | pandas.isna |
#!/usr/bin/python3
import sys
import json
import pandas as pd
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
def autentica(client_id, client_secret):
client_credentials_manager = SpotifyClientCredentials(client_id, client_secret)
return spotipy.Spotify(client_credentials_manager = clien... | pd.DataFrame(features) | pandas.DataFrame |
"""
Given a software, find similar software using source code
Currently based on software name that exist in the dataset
TODO: find similar software using source code that is not
in the existing pool
"""
from LASCAD.LDA.Clustering import Clustering
import pandas as pd
import numpy as np
from scipy.sp... | pd.DataFrame(self.projectsMap) | pandas.DataFrame |
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
import pickle
import korbinian
import sys
from multiprocessing import Pool
##########parameters#############
list_number = 2
data_dir = r"/Volumes/Musik/Databases"
data_dir = r"D:\Databases"
repeat_randomisation = False
seq_len = 2000
ma... | pd.Series.from_csv(List_rand_TM, sep="\t") | pandas.Series.from_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 17:03:06 2019
@author: Administrator
"""
import pdblp
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import seaborn as sns
plt.style.use('seaborn')
#con = pdblp.BCon(debug=True, port=8194, timeout=5000)
con = pdblp.BCon(debug... | pd.Grouper(freq='W') | pandas.Grouper |
"""
Abstract Base Class for PfLine.
"""
from __future__ import annotations
# from . import single, multi #<-- moved to end of file
from ..ndframelike import NDFrameLike
from ..mixins import PfLineText, PfLinePlot, OtherOutput
from ...prices.utils import duration_bpo
from ...prices import convert
from ...tools import... | pd.MultiIndex.from_product([vals.columns, ["w"]]) | pandas.MultiIndex.from_product |
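The completion builds a two-level column index as the Cartesian product of existing columns and the single label "w". A minimal sketch with invented column names standing in for `vals`:

```python
import pandas as pd
import numpy as np

# Hypothetical per-period values with a single quantity, "w"
vals = pd.DataFrame(np.ones((3, 2)), columns=["peak", "offpeak"])

# Product of the existing columns with the one-element list ["w"]
vals.columns = pd.MultiIndex.from_product([vals.columns, ["w"]])
print(vals.columns.tolist())  # [('peak', 'w'), ('offpeak', 'w')]
```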
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import requests
import json
import pandas as pd
url = "https://glyconnect.expasy.org/api/glycosylations"
# In[2]:
## send the correct params to query the api
params = {'taxonomy':'Severe acute respiratory syndrome coronavirus 2 (2019-nCoV)', 'protein': 'Recombinant ... | pd.DataFrame(my_response['results'][r]['protein']['uniprots'],index=[r]) | pandas.DataFrame |
#-*- coding: utf-8 -*-PART II
#使用K-Means算法聚类消费行为特征数据
"""
Created on Fri Dec 20 20:39:11 2019
@author: winhl
"""
import pandas as pd
inputfile = 'C:/Users/winhl/Downloads/kongtiao/喂丝间.xlsx'
#data=pd.DataFrame(columns=('时间','1#','2#','3#'))
df_tmp = []
for i in range(0,7,2):
temp1 = pd.read_excel(inpu... | pd.DataFrame(df_tmp) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import tensorflow_decision_forests as tfdf
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import pandas as pd
import gradio as gr
import urllib
input_path = "https://archive.ics.uci.edu/ml/machine-learning-databases/census-income-mld/census-income"
inpu... | pd.read_csv(f"{BASE_PATH}.data.gz", header=None, names=CSV_HEADER) | pandas.read_csv |
# -*- coding: utf-8 -*-
# import pytest
import pandas as pd
import pandas.testing as tm
import xnd
from pandas.core.internals import ExtensionBlock
import numpy as np
import xndframes as xf
TEST_ARRAY = ["Test", "string", None]
def test_constructors():
v1 = xf.XndframesArray(TEST_ARRAY)
assert isinstance(v1... | pd.DataFrame({"A": v}) | pandas.DataFrame |
import json
import io
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import plotly.express as px
from dash.dependencies import Output, Input, State
from date... | pd.DataFrame(mean_data - 2 * std_data, columns=['num']) | pandas.DataFrame |
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
data.head(10)
#Code starts here
# --------------
#C... | pd.DataFrame(data) | pandas.DataFrame |
import os
import pickle
import random
from datetime import datetime
import nltk
import numpy
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from app import db, create_app
import numpy as np
from random import randint
from matplotlib import pyplot as plt
from sklearn imp... | pd.read_csv(csv_file_location) | pandas.read_csv |
import re
from pathlib import Path
import json
import logging
import os
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from configparser import ConfigParser
from typing import List, Dict, Any
from datetime import datetime, timedelta
import dateutil
import requests
import pandas as pd
impor... | pd.DataFrame(objs) | pandas.DataFrame |
import os
import datetime
import numpy as np
import pandas as pd
pd.set_option('mode.chained_assignment', None)
from sortasurvey import observing
def make_data_products(survey):
"""
After target selection process is complete, information is saved to several csvs.
All information is stored as attributes ... | pd.DataFrame.from_dict(survey.track[survey.n], orient='index') | pandas.DataFrame.from_dict |
import pandas as pd
import datetime
def main():
base_path = 'data/train/'
for year in range(2015, 2022):
for month in range(1, 13):
print(year, month)
if len(str(month)) == 1:
month_str = '0' + str(month)
else:
month_str = str(month)
... | pd.read_csv(final_path) | pandas.read_csv |
from __future__ import print_function
import os
import pandas as pd
import xgboost as xgb
import time
import shutil
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import numpy as np
from sklearn.utils import shuffle
def archive_results(filename,results,algo,script):
"""
... | pd.read_csv('../features/surgical_procedure_type_code_counts_test.csv.gz') | pandas.read_csv |
import pytest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from finmarketpy.economics.techindicator import TechParams, TechIndicator
tech_params = TechParams(fillna=True, atr_period=14, sma_period=3,
green_n=4, green_count=9, red_n=2, red_count=13... | assert_frame_equal(signal_df, expected_signal_df) | pandas.testing.assert_frame_equal |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy
def calculate(mylist):
return | pd.DataFrame(mylist[1:],columns=mylist[0]) | pandas.DataFrame |
import unittest
import pandas as pd
import pytest
import riptable as rt
# N.B. TL;DR We have to import the actual implementation module to override the module global
# variable "tm.N" and "tm.K".
# In pandas 1.0 they move the code from pandas/util/testing.py to pandas/_testing.py.
# The "import ... | pd.DataFrame(data, columns=['date', 'variable', 'value']) | pandas.DataFrame |
'''
This file is used to extract features for gait classification. Machine learning model parameters are included.
Users will have to provide their own data and ground truths to train the model. Input data is raw accelerometer data
from wearable sensor on wrist location.
'''
import pandas as pd
from signal_preprocessi... | pd.DataFrame() | pandas.DataFrame |
from nose.tools import *
from os.path import abspath, dirname, join
import numpy as np
import pandas as pd
from scipy.stats import norm, lognorm
import wntr
testdir = dirname(abspath(str(__file__)))
datadir = join(testdir,'..','..','tests','networks_for_testing')
packdir = join(testdir,'..','..','..')
FC1 = wntr.scen... | pd.Series({'1': 0, '2': 1, '3': 2}) | pandas.Series |
"""Data visualization functions"""
from fastapi import APIRouter, HTTPException, Depends
from pydantic import BaseModel
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
fro... | pd.to_datetime(rental_melt['ds']) | pandas.to_datetime |
from matplotlib import pyplot as plt
import pickle
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, \
classification_report
from sklearn.utils import shuffle
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
import seaborn as ... | pd.DataFrame(report) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 4 05:32:10 2021
@author: <NAME>
The following script analyzes in vivo field data for cannula infusion experiments.
To utilize this script, simply update the filepath with the folder to be analyzed
and the savepath with the folder to save the post-processed data to, t... | pd.DataFrame(slow_theta) | pandas.DataFrame |
from src.evaluation.gnn_evaluation_module import eval_gnn
from src.models.gat_models import MonoGAT#, BiGAT, TriGAT
from src.models.rgcn_models import MonoRGCN, RGCN2
from src.models.appnp_model import MonoAPPNPModel
from src.models.multi_layered_model import MonoModel#, BiModel, TriModel
from torch_geometric.nn import... | pd.DataFrame() | pandas.DataFrame |
import IPython
import base64
import cv2
import json
import numpy as np
import pandas as pd
import pravega.grpc_gateway as pravega
from matplotlib import pyplot as plt
import time
def ignore_non_events(read_events):
for read_event in read_events:
if len(read_event.event) > 0:
yield read_event
... | pd.DataFrame(index_list) | pandas.DataFrame |
# coding=utf-8
import pandas as pd
from mock import MagicMock
from sparkmagic.livyclientlib.exceptions import BadUserDataException
from nose.tools import assert_raises, assert_equals
from sparkmagic.livyclientlib.command import Command
import sparkmagic.utils.constants as constants
from sparkmagic.livyclientlib.sendpa... | pd.DataFrame({"A": [1], "B": [2]}) | pandas.DataFrame |
"""
Tests compressed data parsing functionality for all
of the parsers defined in parsers.py
"""
import os
from pathlib import Path
import zipfile
import pytest
from pandas import DataFrame
import pandas._testing as tm
@pytest.fixture(params=[True, False])
def buffer(request):
return request.p... | tm.ensure_clean() | pandas._testing.ensure_clean |
# coding: utf-8
# In[1]:
#first commit -Richie
import pandas as pd
import numpy as np
# In[2]:
data_message = | pd.read_csv('../../data/raw_data/AAPL_05222012_0930_1300_message.tar.gz',compression='gzip') | pandas.read_csv |
import pandas as pd
import path_utils
from Evolve import Evolve, replot_evo_dict_from_dir
import traceback as tb
import os, json, shutil
import numpy as np
import matplotlib.pyplot as plt
import itertools
from copy import deepcopy
import pprint as pp
from tabulate import tabulate
import seaborn as sns
import shutil
imp... | pd.concat(all_row_dfs) | pandas.concat |
# -*- coding: utf-8 -*-
"""
:author: <NAME>
:url: https: // github.com / LiJinfen
"""
from bleach import clean, linkify
from flask import flash
from markdown import markdown
import json
import os
import collections as ct
import pickle
from textstat.textstat import textstat
from nltk.tokenize import sent_tokeniz... | pd.ExcelWriter(filepath + filename,engine='xlsxwriter') | pandas.ExcelWriter |
import json
import pandas as pd
import plotly
import plotly.graph_objs as go
from flask import Flask, render_template, request
app = Flask(__name__)
data = pd.read_csv("items.csv")
data=data.drop([0,1,3,17,18],axis=0)
data=data.sort_values(by=['product_price'])
new_data=data[0:5]
DVDs=new_data.iloc[:,0]
Prices=new_... | pd.DataFrame({'x': x, 'y': y}) | pandas.DataFrame |