| prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
|---|---|---|
import itertools
import logging
import math
from copy import deepcopy
import numpy as np
import pandas as pd
from scipy.ndimage.filters import uniform_filter1d
import basty.utils.misc as misc
np.seterr(all="ignore")
class SpatioTemporal:
def __init__(self, fps, stft_cfg={}):
self.stft_cfg = deepcopy(st... | pd.concat(df_snap_list, axis=1) | pandas.concat |
import numpy as np
import pandas as pd
from pandas_datareader import data
import matplotlib.pyplot as plt
def load_data(ticker, start_date, end_date, output_file):
"""
a data loading function, using the Yahoo Finance API
"""
try:
df = | pd.read_pickle(output_file) | pandas.read_pickle |
import requests
import zipfile
import io
import pandas as pd
from datetime import datetime, timedelta
| pd.set_option('display.width', None) | pandas.set_option |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 1 12:55:16 2017
@author: rdk10
"""
import os
import pandas as pd
import sitchensis.Functions as f
import tkinter as tk
from tkinter.filedialog import askopenfilename
import pdb
############# Functions below #############################################
... | pd.read_excel(fullFileName, parse_dates = False , sheet_name = brKey, converters={'name':str,'O/E':str, 'L/D':str,'origin':str,'base ref':str, 'top ref':str,'midsegment ref':str}) | pandas.read_excel |
import pyqtgraph as pg
from .__function__ import Function as _F
from scipy import signal
import pandas as pd
import copy
import numpy as np
class Function(_F):
def calc(self, srcSeries, f_Hz, **kwargs):
f_Hz = float(f_Hz)
fs = 1.0/srcSeries.attrs["_ts"]
order = int(kwargs.get("order",2))
... | pd.Series(newvals, index=srcSeries.index) | pandas.Series |
import numpy as np
import os
import pandas as pd
import PySimpleGUI as sg
import csv
class Demonstrativo_cartao:
def __init__(self, nome, validade, lista_devedores, lista_compras, valores=None):
self.nome = nome
self.validade = validade
self.lista_devedores = lista_devedores
self.li... | pd.read_csv(arquivo) | pandas.read_csv |
import pandas as pd
import xlrd
import sys
from ross.materials import Material
class DataNotFoundError(Exception):
"""
An exception indicating that the data could not be found in the file.
"""
pass
def read_table_file(file, element, sheet_name=0, n=0, sheet_type="Model"):
"""Instantiate one or m... | pd.isna(row[header_key_word]) | pandas.isna |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 13:52:36 2020
@author: diego
"""
import os
import sqlite3
import numpy as np
import pandas as pd
import plots as _plots
import update_prices
import update_companies_info
pd.set_option("display.width", 400)
pd.set_option("display.max_columns", 10)
pd.options.mode.chai... | pd.DateOffset(months=12) | pandas.DateOffset |
import pandas as pd
import sqlite3
import numpy as np
def hampel_filter(df, col, k, threshold=1):
df['rolling_median'] = df[col].rolling(k).median()
df['rolling_std'] = df[col].rolling(k).std()
df['num_sigma'] = abs(df[col]-df['rolling_median'])/df['rolling_std']
df[col] = np.where(df['num_si... | pd.merge(rain_sum_036, rain_sum_039, left_index=True, right_index=True) | pandas.merge |
import pandas as pd
import sqlite3
class Co2:
# ind_name -> 산업명
def ind_name(self,ind):
con = sqlite3.connect('./sorting.db')
df = pd.read_sql_query('select * from sorting',con)
df2 = df[['sort','industry']]
df3 = df2[df2['industry'] == ind]
result = df3['sort'].tol... | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
stocks = | pd.Series([20.1, 100.0, 66.5], index=['tx', 'tobao', 'apple']) | pandas.Series |
# pylint: disable=bad-continuation
"""
Defines the Targetted Maximum likelihood Estimation (TMLE) model class
"""
from pprint import pprint
import numpy as np
import pandas as pd
from pandas.api.types import is_float_dtype, is_numeric_dtype
from scipy.interpolate import interp1d
from scipy.stats import norm
from sklea... | is_float_dtype(self.y_data) | pandas.api.types.is_float_dtype |
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
impo... | tm.ensure_clean() | pandas._testing.ensure_clean |
#!/usr/bin/env python
# encoding: utf-8
import os.path
import pandas as pd
DATA_FILE_DIR = "input/raw/studies_by_council/"
OUTPUT = "input/generated/"
def read_data():
"""Create a summary dataframe based on concatenation of multiple Excel files.
Goes through all the xlsx files in the appropriate dir, conve... | pd.concat([dataframe, temp_df]) | pandas.concat |
import numpy as np
import scipy
import pandas as pd
import astropy.units as u
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy.units import Quantity
from astroquery.mast import Catalogs
from astroquery.gaia import Gaia
import requests
import re
from stellar.isoclassify import classify, pipeline
imp... | pd.isnull(exofop_dat[['logg','mass']]) | pandas.isnull |
import argparse
from multiprocessing import Process, Queue
import time
import logging
log = logging.getLogger(__name__)
import cooler
import numpy as np
import pandas as pd
from hicmatrix import HiCMatrix
from hicmatrix.lib import MatrixFileHandler
from schicexplorer._version import __version__
from schicexplorer.ut... | pd.DataFrame({'bin1_id': instances, 'bin2_id': features, 'count': data}) | pandas.DataFrame |
"""
Computational Cancer Analysis Library
Authors:
Huwate (Kwat) Yeerna (Medetgul-Ernar)
<EMAIL>
Computational Cancer Analysis Laboratory, UCSD Cancer Center
<NAME>
<EMAIL>
Computational Cancer Analysis Laboratory, UCSD Cancer Center
"""
from os.path import isfile
from matplo... | DataFrame(row_annotation) | pandas.DataFrame |
from typing import Tuple, List, Dict, Any
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, Imputer, FunctionTransformer
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin
from sklearn.ensemble import RandomForestRegresso... | pd.read_csv('data/train.csv', parse_dates=['timestamp']) | pandas.read_csv |
import os
import pandas as pd
from sklearn.preprocessing import LabelEncoder,OneHotEncoder,MinMaxScaler
from config import *
import numpy as np
import json
class Dataset:
def __init__(self,data_path):
self.data_path = data_path
self.load_dataset()
def load_dataset(self)... | pd.read_csv(self.data_path) | pandas.read_csv |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# %%
DATA_ROOT = '../../data/raw'
# %% [markdown]
# ## LOADING DATA
# %%
print('Loading raw datasets...', flush=True)
GIT_COMMITS_PATH = f"{DATA_ROOT}/GIT... | pd.read_csv(JIRA_ISSUES) | pandas.read_csv |
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--datafile', type=str, default='heteroaryl_suzuki.onerx')
parser.add_argument('--output', default='heteroaryl_suzuki.csv')
args= parser.parse_args()
import pandas as pd
fname=args.datafile
sidx = 5
bidx = 6
tidx = 7
lidx = 9
yidx = 11
aidx = 12
sm... | pd.DataFrame(data) | pandas.DataFrame |
#%%
import os
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
print(os.getcwd())
except:
pass
#%%
import sys
sys.path.append("/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/")
import pandas as pd
import numpy as np
import connectome_tools.process_matrix as promat... | pd.to_numeric(matrix.columns) | pandas.to_numeric |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isn... | tm.assert_numpy_array_equal(result, expected) | pandas.util.testing.assert_numpy_array_equal |
import re
from unittest.mock import Mock, call, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers.categorical import (
CategoricalFuzzyTransformer, CategoricalTransformer, LabelEncodingTransformer,
OneHotEncodingTransformer)
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class ... | pd.Series([1, 2, 3, 4]) | pandas.Series |
import pandas as pd
import numpy as np
from rdtools import energy_from_power
import pytest
# Tests for resampling at same frequency
def test_energy_from_power_calculation():
power_times = pd.date_range('2018-04-01 12:00', '2018-04-01 13:00', freq='15T')
result_times = power_times[1:]
power_series = pd.Ser... | pd.date_range('2018-04-01 12:00', '2018-04-01 13:30', freq='30T') | pandas.date_range |
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from itertools import product
from sklearn.model_selection import TimeSeriesSplit
import vectorbt as vbt
from vectorbt.generic import nb
seed = 42
day_dt = np.timedelta64(86400000000000)
df = pd.DataFrame({
... | pd.Series(['2018-01-02', '2018-01-02'], dtype='datetime64[ns]', index=['g1', 'g2']) | pandas.Series |
import numpy as np
from keras.models import Model
from keras.models import load_model, model_from_json
from os.path import join
import config.settings as cnst
import plots.plots as plots
from predict.predict import predict_byte, predict_byte_by_section
from predict.predict_args import DefaultPredictArguments, Predict a... | pd.DataFrame(qdata) | pandas.DataFrame |
import gc
import itertools
import multiprocessing
import time
from collections import Counter
import numpy as np
import pandas as pd
def create_customer_feature_set(train):
customer_feats = pd.DataFrame()
customer_feats['customer_id'] = train.customer_id
customer_feats['customer_max_ratio'] = train.cus... | pd.concat([df_train, lag_features]) | pandas.concat |
__all__ = [
"eval_df",
"ev_df",
"eval_nominal",
"ev_nominal",
"eval_grad_fd",
"ev_grad_fd",
"eval_conservative",
"ev_conservative",
]
import itertools
import grama as gr
from grama import add_pipe, pipe
from numpy import ones, eye, tile, atleast_2d
from pandas import DataFrame, concat
f... | DataFrame(data=quantiles, columns=model.var_rand) | pandas.DataFrame |
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
from evalml.pipelines.components import LabelEncoder
def test_label_encoder_init():
encoder = LabelEncoder()
assert encoder.parameters == {"positive_... | pd.Series([1, 1, 1, 1]) | pandas.Series |
#!/bin/python3
""" Provides analysis about the R-Mappings """
__author__ = 'Loraine'
__version__ = '1.0'
import pandas as pd
from config import path
from scipy.stats import ttest_ind
class FirstExperiment(object):
def __init__(self,filename):
self.df = | pd.read_csv(path + filename) | pandas.read_csv |
#!/usr/bin/python3.7
import pandas as pd
import numpy as np
import os, sys
# Read in files.
critical_0to1000 = | pd.read_csv("Critical.0-1000.txt", skipinitialspace=True, names=['seqnum', 'mutant_position', 'original_class', 'mutant_class', 'original_score', 'mutant_score', 'delta_score'], header=0, delim_whitespace=True) | pandas.read_csv |
import pandas as pd
import numpy as np
from pathlib import Path
import time
from radon.raw import *
from radon.cli import *
from radon.complexity import *
from radon.metrics import *
import logging
import tqdm
import itertools
from func_timeout import func_timeout, FunctionTimedOut
import signal
import logging
im... | pd.DataFrame.from_dict(temp) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas.compat import range
import pandas as pd
import pandas.util.testing as tm
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons(object):
def test_df_boolean_comparison_error(self):
... | pd.Timedelta(days=0) | pandas.Timedelta |
#%%
import json
from itertools import chain, cycle
import numpy as np
import pandas as pd
# from pandas import json_normalize
from pandas.io.json import json_normalize
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('notebook')
import sys
import os
# %%
## Connect to Postgres database with dat... | pd.DataFrame(test_outputs) | pandas.DataFrame |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.c... | pd.Series([], name="abc", dtype="float64") | pandas.Series |
import re
import fnmatch
import os, sys, time
import pickle, uuid
from platform import uname
import pandas as pd
import numpy as np
import datetime
from math import sqrt
from datetime import datetime
import missingno as msno
import statsmodels.api as sm
from statsmodels.tsa.seasonal import seasonal_decompose
from stat... | pd.Series(adfTest[0:4], index=['ADF Test Statistic','P-Value','Lags Used','Observations Used']) | pandas.Series |
import sys
sys.path.append("../")
sys.path.append("AIF360/")
import warnings
from sklearn.model_selection import train_test_split
from aif360.datasets import StandardDataset
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import L... | pd.to_datetime(df['c_jail_in']) | pandas.to_datetime |
def getMetroStatus():
import http.client, urllib.request, urllib.parse, urllib.error, base64, time
headers = {
# Request headers
'api_key': '6b700f7ea9db408e9745c207da7ca827',}
params = urllib.parse.urlencode({})
try:
conn = http.client.HTTPSConnection('api.wmata.com')
conn.request("GET", "/StationPredi... | pd.read_sql(query,engine) | pandas.read_sql |
from flask import Flask, render_template, request, redirect, url_for, session
import pandas as pd
import pymysql
import os
import io
#from werkzeug.utils import secure_filename
from pulp import *
import numpy as np
import pymysql
import pymysql.cursors
from pandas.io import sql
#from sqlalchemy import create... | pd.DataFrame(pred) | pandas.DataFrame |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " +... | pd.Series([], dtype='float') | pandas.Series |
import os
import toml
import time
import datetime
import pandas as pd
from bayescache.meters import (
AverageMeter, EpochMeter, LossMeter, PatienceMeter, TimeMeter
)
class OptimizationHistory:
"""Records of the optimization"""
def __init__(self, savepath=None, experiment_name=None,
dev... | pd.DataFrame({'loss': self.train_loss[0]}) | pandas.DataFrame |
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_... | tm.assert_index_equal(result, orig) | pandas.util.testing.assert_index_equal |
import pandas as pd
import numpy as np
import json
from tqdm import tqdm
from scipy.optimize import minimize
from utils import get_next_gw, time_decay
from ranked_probability_score import ranked_probability_score, match_outcome
class Bradley_Terry:
""" Model game outcomes using logistic distribution """
de... | pd.DataFrame() | pandas.DataFrame |
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to i... | pandas.DataFrame({'A': [1,2,3,4], 'B': [5,6,7,8]}) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (... | DataFrame(0.0, index=[0], columns=cols) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 3 22:53:56 2021
@author: afo
"""
import pandas as pd
import lyricsgenius
import json
import os
from os.path import isfile, join
from os import listdir
import re
from rym import rymscraper
# Function to get all the json file names in 3 subdirecto... | pd.to_numeric(df['Year']) | pandas.to_numeric |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
import pytz
from freezegun import freeze_time
from pandas import Timestamp
from pandas._tes... | pd.to_numeric([pd.NA, 273.65], errors="coerce") | pandas.to_numeric |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from collections import OrderedDict
def latex_matrix_string(mean, title,
row_labels, col_labels,
best_bold_row=True, best_bold_column=False):
"""
Latex Matrix String ... | pd.Series(data=p, index=t) | pandas.Series |
import pytd
import os
import logging
import pandas as pd
import time
from dotenv import load_dotenv
from pathlib import Path
class TreasureData:
# Methods to integrate with TreasureData.
def __init__(self) -> None:
# Read environment variables
load_dotenv()
env_path = Path('..')/'.env'... | pd.DataFrame(columns=tableColumns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import time
import requests
from datetime import datetime
from logging import getLogger
from typing import Optional
from typing import Dict
from typing import Iterable
from funcy import compose
from funcy import partial
from pandas import DataFrame
from pandas import to_datetime
from pandas imp... | DataFrame(r, index=ts) | pandas.DataFrame |
from datetime import datetime
import os
import re
import numpy as np
import pandas as pd
from fetcher.extras.common import atoi, MaRawData, zipContextManager
from fetcher.utils import Fields, extract_arcgis_attributes, extract_attributes
NULL_DATE = datetime(2020, 1, 1)
DATE = Fields.DATE.name
TS = Fields.TIMESTAMP.... | pd.to_datetime(df.index) | pandas.to_datetime |
# -*- coding: utf-8 -*-
import json
import os
import pandas as pd
import sklearn.datasets
def data(dataset="bio_eventrelated_100hz"):
"""Download example datasets.
Download and load available `example datasets <https://github.com/neuropsychology/NeuroKit/tree/master/data#datasets>`_.
Note that an intern... | pd.read_json(dataset, orient="index") | pandas.read_json |
from itertools import combinations
import pandas as pd
import numpy as np
import scipy.stats as stats
import random
from diffex import constants
IUPHAR_Channels_names = constants.IUPHAR_Channels_names
def clean_dataframe(df):
""" Return a cleaned dataframe with NaN rows removed and duplicate
fold ch... | pd.merge(df_1, df_2, on='Gene name') | pandas.merge |
import docx
from docx.shared import Pt
from docx.enum.text import WD_ALIGN_PARAGRAPH, WD_BREAK
from docx.shared import Cm
import os
import math
import pandas as pd
import numpy as np
import re
from datetime import date
import streamlit as st
import json
import glob
from PIL import Image
import smtplib
import docx2pdf
... | pd.ExcelFile(uploaded_file_1) | pandas.ExcelFile |
import pandas as pd
from sklearn import linear_model
import statsmodels.api as sm
import numpy as np
from scipy import stats
# df_2018 = pd.read_csv("/mnt/nadavrap-students/STS/data/2018_2019.csv")
# df_2016 = pd.read_csv("/mnt/nadavrap-students/STS/data/2016_2017.csv")
# df_2014 = pd.read_csv("/mnt/nadavrap-students... | pd.merge(d5, df_comp, left_on=['surgid', 'surgyear'], right_on=['surgid', 'surgyear'], how='outer') | pandas.merge |
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
Da... | Series([arg], dtype="datetime64[ns, CET]") | pandas.Series |
import os
import sys
import glob
import click
import pandas as pd
from .printing import splash_screen
from .prompt import main_menu_prompt
csv_sheets = []
xlsx_sheets = []
def get_csv_df(files):
for file in files:
csv_sheets.append(str(file))
yield pd.read_csv(file)
def get_xlsx_df(files):
... | pd.read_excel(file) | pandas.read_excel |
#%% [markdown]
# # Lung Vasculature Analysis
# This notebook (.ipynb) is a working project for analyzing lung vasculature. It inculdes three parts:
# 1. converts skeleton analytical output (.xml) into .csv file.
# 2. calulates the length and average thickness of each segment.
# 3. makes two types of plots:
# ... | pd.read_csv(ippath) | pandas.read_csv |
# -*- coding: utf-8 -*-
# Copyright StateOfTheArt.quant.
#
# * Commercial Usage: please contact <EMAIL>
# * Non-Commercial Usage:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
... | pd.DataFrame(tensor_np) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_bool_dtype, is_categorical, is_categorical_dtype,
is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_datetimetz, is_dtype_equal, is_interval_dtype,
... | pd.Index(['a', 'b']) | pandas.Index |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
import _pickle as cPickle
import argparse
from copy import deepcopy
import japanize_matplotlib
import lightgbm as lgb
import matplotlib.pyplot as plt
import pickle
from sklearn.metrics import mean_squared_error
import time
from tqdm import ... | pd.read_csv(f'{code_path}/../input/sample_submission.csv') | pandas.read_csv |
""" test get/set & misc """
from datetime import timedelta
import re
import numpy as np
import pytest
from pandas import (
DataFrame,
IndexSlice,
MultiIndex,
Series,
Timedelta,
Timestamp,
date_range,
period_range,
timedelta_range,
)
import pandas._testing as tm
def test_basic_ind... | tm.assert_series_equal(s, expected) | pandas._testing.assert_series_equal |
# coding: utf-8
# # Guild Wars 2 Achievement System Analysis
# Guild Wars 2 is a Massively multiplayer online role-playing game created by ArenaNet, which tends to cater to a more casual players and focuses more on cooperative play with some single player campaign.
#
# This project analyzes the game achievement sys... | pd.read_csv("clean Data/titles.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
import os
import sys
import tensorflow as tf
import json
import joblib
import time
from tensorflow import keras
from keras import optimizers
from datetime import datetime,timedelta
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime
pd.set_option('display.... | pd.concat([result_acc, accRate_df]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 12 16:01:32 2018
@author: Adam
"""
import os
import glob
from numbers import Number
import numpy as np
import pandas as pd
def sub_dire(base, dire, fname=None):
""" Build path to a base/dire. Create if does not exist."""
if base is None:
raise ValueErro... | pd.DataFrame(df2) | pandas.DataFrame |
from __future__ import annotations
from datetime import (
datetime,
time,
timedelta,
tzinfo,
)
from typing import (
TYPE_CHECKING,
Literal,
overload,
)
import warnings
import numpy as np
from pandas._libs import (
lib,
tslib,
)
from pandas._libs.arrays import NDArrayBacked
from pa... | is_datetime64tz_dtype(data.dtype) | pandas.core.dtypes.common.is_datetime64tz_dtype |
import pytesseract
from pytesseract import Output
import cv2
import jiwer
import numpy as np
import pandas as pd
import base64
class Class_Pytesseract_OCR:
def __init__(self, hyperparams,model_parameters,return_formats):
#---------dataset_infos
self.X = None
self.y_target = None
... | pd.DataFrame({'source_text': self.X,'predicted_ocr_text': self.y_pred,'BBOXES_COORDS(X, Y, W, H)':all_bboxes_list}) | pandas.DataFrame |
# @name: create_fake_data.py
# @summary: Creates a series of fake patients and "data" simulating CViSB data
# @description: For prototyping and testing out CViSB data management website, creating a series of fake patients and data files.
# @sources:
# @depends:
# @author: <NAME>
# @email: <EMAIL>
... | pd.merge(expt_files, ex_pars, on='expt_id', how='outer') | pandas.merge |
import os
import re
import unicodedata
from twitter import OAuth, Twitter
import numpy as np
import pandas as pd
import arrow
from . import templates, plots
from loonathetrends.utils import get_video_title_lookup, get_video_ismv_lookup
auth = OAuth(
os.environ["TWITTER_ACCESSTOKEN"],
os.environ["TWITTER_ACCESS... | pd.Timedelta("3d") | pandas.Timedelta |
import pandas as pd
def extract_feature_values(data):
""" Given a params dict, return the values for feeding into a model"""
# Replace these features with the features for your model. They need to
# correspond with the `name` attributes of the <input> tags
EXPECTED_FEATURES = [
"u... | pd.DataFrame(values, columns=EXPECTED_FEATURES) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# @Author: gunjianpan
# @Date: 2019-03-04 19:03:49
# @Last Modified by: gunjianpan
# @Last Modified time: 2019-03-28 10:26:48
import lightgbm as lgb
import numpy as np
import pandas as pd
import warnings
import threading
import time
from datetime import datetime
from numba import jit
from ... | pd.DataFrame(pre, columns=wait_columns) | pandas.DataFrame |
import json
import pandas as pd
pd.set_option('display.max_rows', 30)
pd.set_option('display.max_columns', 50)
pd.set_option('display.width', 1200)
import matplotlib.pyplot as plt
import seaborn as sns # used for plot interactive graph.
import warnings
warnings.filterwarnings('ignore')
def load_tmdb_movies(path):
... | pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) | pandas.concat |
"""This script is designed to perform statistics of demographic information
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import pearsonr,spearmanr,kendalltau
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools... | pd.merge(scale_data, headmotion_COBRE, left_on='ID', right_on='Subject ID', how='inner') | pandas.merge |
import numpy as np
import pandas as pd
import time
from pathlib import Path
from experiments.evaluation import calculate_metrics
from causal_estimators.ipw_estimator import IPWEstimator
from causal_estimators.standardization_estimator import \
StandardizationEstimator, StratifiedStandardizationEstimator
from exper... | pd.concat([results, causal_metrics], axis=1) | pandas.concat |
import json
import pandas as pd
from .RelationshipHelper import RelationshipHelper
from .TwinHelper import TwinHelper
from .QueryHelper import QueryHelper
class DeployHelper:
def __init__(self, host_name, token_path=None, token=None):
self.__rh = RelationshipHelper(
host_name=host_name, token_... | pd.isna(rtarget) | pandas.isna |
from tqdm import tqdm
import numpy as np
from scipy import sparse
import os
import gensim.models
import pandas as pd
import src.utils as utils
from sklearn.ensemble import RandomForestRegressor
from src.features.w2v import reduce_dimensions, plot_with_plotly
import torch
import torchvision
import torchvision.transf... | pd.DataFrame(cm, index=['benign', 'malware'], columns=['benign', 'malware']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
'''
This code generates Fig. S5
The probability that cooling associated with anthropogenic aerosols has resulted in economic benefits at the country-level.
by <NAME> (<EMAIL>)
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basem... | pd.isna(ishp_ctry['prob_damg']) | pandas.isna |
import os
import numpy as np
import pandas as pd
from numpy import abs
from numpy import log
from numpy import sign
from scipy.stats import rankdata
import scipy as sp
import statsmodels.api as sm
from data_source import local_source
from tqdm import tqdm as pb
# region Auxiliary functions
def ts_sum(df, window=10):
... | pd.concat([result_unaveraged_industry,result_unaveraged_piece],axis=0) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# # Desafio 5
#
# Neste desafio, vamos praticar sobre redução de dimensionalidade com PCA e seleção de variáveis com RFE. Utilizaremos o _data set_ [Fifa 2019](https://www.kaggle.com/karangadiya/fifa19), contendo originalmente 89 variáveis de mais de 18 mil jogadores do _game_ FI... | pd.concat([mis_val, mis_val_percent], axis=1) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Importing dependencies
import os
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
pd.set_option('display.max_columns', None)
# In[2]:
# Path to source JSON
businessJson=os.path.join('sourceData', 'business.json')
# In[3]:
# Path... | pd.to_timedelta(hours[column]) | pandas.to_timedelta |
# -*- coding: utf-8 -*-
"""
Creates textual features from an intput paragraph
"""
# Load Packages
import textstat
from sklearn.preprocessing import label_binarize
from sklearn.decomposition import PCA
import numpy as np
import pandas as pd
import pkg_resources
import ast
import spacy
#from collections import Counter
f... | pd.DataFrame(columns=['var','value', 'cat']) | pandas.DataFrame |
# coding:utf-8
import os
from pathlib import Path
import sys
import argparse
import pdb
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from tqdm import tqdm
import pickle
import time
from datetime import datetime, timedelta
from sklearn.metrics import confu... | pd.read_csv(f, index_col=1) | pandas.read_csv |
import numpy as np
import pandas as pd
import pytest
from ber_public.deap import dim
@pytest.fixture
def building_fabric():
floor_uvalue = pd.Series([0.14])
roof_uvalue = pd.Series([0.11])
wall_uvalue = pd.Series([0.13])
window_uvalue = pd.Series([0.87])
door_uvalue = | pd.Series([1.5]) | pandas.Series |
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
begge_kjonn_5 = pd.read_csv("begge_kjonn_5.csv")
gutter_5 = pd.read_csv("gutter_5.csv")
jenter_5 = pd.read_csv("jenter_5.csv")
jenter_gutter_5 = pd.concat([gutter_5, jenter_5]).reset_index(drop=True)
begge_kjonn_8 = pd.r... | pd.concat([gutter_9, jenter_9]) | pandas.concat |
import os
import glob
import numpy as np
import pandas as pd
import lightgbm as lgb
from tqdm import tqdm_notebook as tqdm
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from constants import DATA_DIR
def csv_concaten... | pd.concat(df_list) | pandas.concat |
import pandas as pd
import pytest
from .. import sqftproforma as sqpf
from .. import developer
@pytest.fixture
def simple_dev_inputs():
return pd.DataFrame(
{'residential': [40, 40, 40],
'office': [15, 18, 15],
'retail': [12, 10, 10],
'industrial': [12, 12, 12],
'land_... | pd.Series([650, 650, 650], index=['a', 'b', 'c']) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# # IBM HR Employee Attrition & Performance.
# ## [Please star/upvote in case you find it helpful.]
# In[ ]:
from IPython.display import Image
Image("../../../input/pavansubhasht_ibm-hr-analytics-attrition-dataset/imagesibm/image-logo.png")
# ## CONTENTS ::->
# [ **1 ) Expl... | pd.crosstab(columns=[df.Attrition],index=[df.EnvironmentSatisfaction],margins=True,normalize='index') | pandas.crosstab |
# -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import De... | is_datetime64_dtype(ts) | pandas.core.dtypes.common.is_datetime64_dtype |
from unittest import TestCase
import pandas as pd
from cbcvalidator.main import Validate, ValueOutOfRange, BadConfigurationError
class TestValidate(TestCase):
def test_validate(self):
v = Validate(verbose=True)
data = {'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': ['abcdefg', 'abcdefghij... | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
def find_ms(df):
subset_index = df[['BMI', 'Systolic', 'Diastolic',
'Triglyceride', 'HDL-C', 'Glucose',
'Total Cholesterol', 'Gender']].dropna().index
df = df.ix[subset_index]
df_bmi_lo = df.loc[df['BMI']<25.0,:]
df_bmi_hi = df.loc[df[... | pd.Series() | pandas.Series |
import numpy as np
import pytest
from pandas.compat import range, u, zip
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas.core.common as com
from pandas.core.indexing import IndexingError
from pandas.util import testing as tm
@pytest.fixture
def frame_random_data_integer_mul... | tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
# =============================================================================
# ALGORITMO MACHINE LEARNING PARA GASOLINERAS EN ESPAÑA
# =============================================================================
"""
Proceso:
Input:
- /home/tfm/Documentos/TFM/Datasets/Gasolineras/Gasolineras_de_... | pd.DataFrame({"longitud":lons, "latitud":lats}) | pandas.DataFrame |
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime,... | Timestamp('2011-08-01 10:00', tz='US/Eastern') | pandas.Timestamp |
import os
import jsonlines
import numpy as np
import pandas as pd
import requests
import tagme
import ujson
from tqdm import tqdm
from bootleg.symbols.entity_profile import EntityProfile
pd.options.display.max_colwidth = 500
def load_train_data(train_file, title_map, entity_profile=None):
"""Loads a jsonl file... | pd.DataFrame(lines) | pandas.DataFrame |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not u... | pandas.Series([""], index=self.index[:1]) | pandas.Series |
"""
pip install mysql-connector-python
pip install pandas
pip install numpy
"""
# libs
from db_works import db_connect, db_tables
import pandas as pd
import numpy as np
import pandas_ta as pta # https://mrjbq7.github.io/ta-lib/
import talib as ta # install from whl file < https://www.lfd.uci.edu/~gohlke/pythonlibs/#... | pd.DataFrame.to_json(df4) | pandas.DataFrame.to_json |
import importlib
import inspect
import os
import warnings
from unittest.mock import patch
import cloudpickle
import numpy as np
import pandas as pd
import pytest
from skopt.space import Categorical
from evalml.exceptions import (
ComponentNotYetFittedError,
EnsembleMissingPipelinesError,
MethodPropertyNot... | pd.testing.assert_index_equal(y.index, y_original_index) | pandas.testing.assert_index_equal |
"""
This module contains methods that will be applied inside of the apply call on the freshly convertet
Spark DF that is now as list of dicts.
These methods are meant to be applied in the internal calls of the metadata extraction.
They expect dictionaries which represent the metadata field extracted from Spark NLP ann... | pd.Series({}) | pandas.Series |
"""
Compute the statistical impact of features given a trained estimator
"""
from scipy.stats.mstats import mquantiles
import numpy
import pandas
def averaged_impact(impact, normalize=True):
"""
Computes the averaged impact across all quantiles for each feature
:param impact: Array-like object of shape [... | pandas.Series(y - y_star) | pandas.Series |
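
The table above pairs a truncated Python code prompt with a single ground-truth pandas call (`completion`) and its fully qualified name (`api`). Below is a minimal sketch of how rows like these could be loaded and inspected; the CSV filename and export format are assumptions for illustration, not something specified by the dataset.

```python
# A minimal sketch, not part of the dataset itself: it assumes the rows above
# were exported to a CSV file named "pandas_api_completions.csv" (hypothetical)
# with the three columns shown in the table header.
import pandas as pd

df = pd.read_csv("pandas_api_completions.csv")

# Most frequent target APIs among the completions
# (e.g. pandas.DataFrame, pandas.Series, pandas.concat, ...).
print(df["api"].value_counts().head(10))

# Inspect a single record: the truncated code prompt and its ground-truth call.
first = df.iloc[0]
print(first["prompt"][:200])
print(first["completion"])
```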