| prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars) |
|---|---|---|
import pandas as pd
import numpy as np
import math
import os
import geopandas as gpd
import folium
import requests
import json
import datetime
from datetime import date, timedelta
from abc import ABC, abstractmethod
from pathlib import Path
from CovidFoliumMap import CovidFoliumMap, ensure_path_exists, download_JSON_fi... | pd.json_normalize(res['data']) | pandas.json_normalize |
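A minimal runnable sketch of the `pd.json_normalize` call this row completes; the `res` payload is invented for illustration:

```python
import pandas as pd

# Hypothetical API response shaped like the snippet's `res` variable
res = {
    "data": [
        {"state": "Berlin", "cases": {"total": 10, "new": 2}},
        {"state": "Bayern", "cases": {"total": 25, "new": 5}},
    ]
}

# json_normalize flattens nested dicts into dotted column names
df = pd.json_normalize(res["data"])
print(df.columns.tolist())  # ['state', 'cases.total', 'cases.new']
```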
from creator.ingest_runs.genomic_data_loader import (
GenomicDataLoader,
GEN_FILE,
GEN_FILES,
SEQ_EXP,
SEQ_EXPS,
SEQ_EXP_GEN_FILE,
SEQ_EXP_GEN_FILES,
BIO_GEN_FILE,
)
from creator.studies.models import Study
from tests.integration.fixtures import test_study_generator # noqa F401
from kf... | pd.DataFrame(gf_data) | pandas.DataFrame |
# Copyright 2019 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agre... | pd.to_datetime(df['time'], utc=True) | pandas.to_datetime |
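A small sketch of the `pd.to_datetime(..., utc=True)` pattern this row completes, assuming string timestamps with offsets:

```python
import pandas as pd

df = pd.DataFrame({"time": ["2021-01-01 12:00:00+01:00", "2021-01-01 14:30:00+01:00"]})
# utc=True converts tz-aware inputs to UTC and localizes naive ones as UTC
df["time"] = pd.to_datetime(df["time"], utc=True)
print(df["time"].dt.tz)  # UTC
```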
# --------------
import pandas as pd
from collections import Counter
# Load dataset
data = pd.read_csv(path)
print(data.isnull().sum())
print('Statistical Description : \n', data.describe())
# --------------
import seaborn as sns
from matplotlib import pyplot as plt
sns.set_style(style='darkgrid')
# Store the lab... | pd.DataFrame(duration_df) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 11 18:19:29 2019
@author: Administrator
"""
import pdblp
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn')
#con = pdblp.BCon(debug=True, port=8194, timeout=5000)
con = pdblp.BCon(debug=... | pd.concat([prices_open_w, prices_high_w, prices_low_w, prices_close_w],axis=1) | pandas.concat |
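`pd.concat(..., axis=1)` aligns the inputs on their index and places them side by side as columns. A minimal sketch with invented weekly price series:

```python
import pandas as pd

idx = pd.date_range("2021-01-08", periods=3, freq="W-FRI")
prices_open_w = pd.Series([100.0, 101.5, 99.8], index=idx, name="open")
prices_close_w = pd.Series([101.2, 100.9, 100.4], index=idx, name="close")

# axis=1 joins on the shared DatetimeIndex, producing one column per input
weekly = pd.concat([prices_open_w, prices_close_w], axis=1)
print(weekly)
```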
#### Setup ####
import numpy as np
import pandas as pd
from scipy import stats
from itertools import repeat
from collections import Counter
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
import matplotlib.pyplot as plt
from sklearn.linear_model import Lasso, LinearRe... | pd.DataFrame(ks) | pandas.DataFrame |
from datetime import datetime
from datetime import timedelta
import pandas as pd
from typing import Mapping
import os, sys
dirname = os.path.dirname(__file__)
sys.path.append(dirname)
from constant import GOOGLE_CALENDER_COLS, GOOGLE_CALENDER_FUNCS, GOOGLE_CALENDER_MAPS, EVENT_DAYS, EVENT_COLS
class CoupleEvent(obj... | pd.DataFrame(current_year_event_collection) | pandas.DataFrame |
# License: Apache-2.0
from gators.encoders.target_encoder import TargetEncoder
from pandas.testing import assert_frame_equal
import pytest
import numpy as np
import pandas as pd
import databricks.koalas as ks
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture
def data():
X = pd.Da... | assert_frame_equal(X_new, X_expected) | pandas.testing.assert_frame_equal |
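`pandas.testing.assert_frame_equal` is the canonical way to compare DataFrames in tests; it raises an `AssertionError` that pinpoints the first mismatch. A minimal sketch:

```python
import pandas as pd
from pandas.testing import assert_frame_equal

X_new = pd.DataFrame({"a": [1, 2], "b": [0.1, 0.2]})
X_expected = pd.DataFrame({"a": [1, 2], "b": [0.1, 0.2]})

# Passes silently when equal; raises with a detailed diff otherwise.
# check_dtype=False relaxes the comparison when only dtypes differ.
assert_frame_equal(X_new, X_expected, check_dtype=False)
```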
__all__ = [
'get_calc_rule_ids',
'get_grouped_fm_profile_by_level_and_term_group',
'get_grouped_fm_terms_by_level_and_term_group',
'get_il_input_items',
'get_policytc_ids',
'write_il_input_files',
'write_fm_policytc_file',
'write_fm_profile_file',
'write_fm_programme_file',
'writ... | pd.concat([il_inputs_df, level_df], sort=True, ignore_index=True) | pandas.concat |
"""
Several references:
A good, comic tutorial to learn Markov Chain:
https://hackernoon.com/from-what-is-a-markov-model-to-here-is-how-markov-models-work-1ac5f4629b71
Tutorial (example code for using networkx graphviz with pandas dataframe):
http://www.blackarbs.com/blog/introduction-hidden-markov-models-python-netw... | pd.Series(pi, index=states, name='states') | pandas.Series |
import sys
import pandas as pd
import os
import numpy as np
import random
from math import ceil
from igraph import Graph
from signet.cluster import Cluster
from scipy import sparse as sp
from scipy import io
import networkx as nx
from sklearn import metrics
import seaborn as sns
import time
import graphC
wd = os.get... | pd.DataFrame(BNC_none) | pandas.DataFrame |
import pandas as pd
import ast
import sys
import os.path
from pandas.core.algorithms import isin
sys.path.insert(1,
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import dateutil.parser as parser
from utils.mysql_utils import separator
from utils.io import read_json
from utils.scr... | pd.read_csv(f"{data_dir}/all_commerces_with_categories.csv", index_col=0) | pandas.read_csv |
import sys
import os
import pandas as pd
import streamlit as st
from datetime import datetime
from streamlit import cli as stcli
from optimization import Optmizer
from portfolio import Portfolio_Analyzer
class Dashboard():
def start():
st.title("Portfolio Analysis")
df = pd.DataFrame({
... | pd.to_datetime(assets_data['Date']) | pandas.to_datetime |
import unittest
import tempfile
import numpy as np
import pandas as pd
from supervised.preprocessing.preprocessing_exclude_missing import (
PreprocessingExcludeMissingValues,
)
class PreprocessingExcludeMissingValuesTest(unittest.TestCase):
def test_transform(self):
d_test = {
"col1": [1, ... | pd.DataFrame(data=d_test) | pandas.DataFrame |
''' Toro 1996 method for randomizing shear wave velocity
DESCRIPTION:
Toro Method is a first order auto-regressive model used to randomize shear wave
velocity. Note that the functions here are QUITE simplified, because the
interlayer correlation coefficient is assumed constant with depth. Maybe one day
I'll code eve... | pd.DataFrame({}, columns=['mid_depth', 'prev_vs', 'next_vs']) | pandas.DataFrame |
import pandas as pd
import datetime
import os
from textblob import TextBlob
stockIndex = pd.read_excel("./BSIFinal.xlsx")
stockIndexDF = | pd.DataFrame(stockIndex) | pandas.DataFrame |
__author__ = 'saeedamen' # <NAME> / <EMAIL>
#
# Copyright 2015 Thalesians Ltd. - http//www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/li... | pandas.rolling_std(data_frame, tech_params.bb_period) | pandas.rolling_std |
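Note: this row's completion uses `pandas.rolling_std`, which was deprecated in pandas 0.18 and removed in later releases; modern code uses the `.rolling(...).std()` method instead. A sketch of the replacement, with invented data and window:

```python
import pandas as pd

data_frame = pd.DataFrame({"close": [10.0, 10.5, 10.2, 10.8, 11.0, 10.7]})
bb_period = 3  # hypothetical Bollinger-band window

# Modern equivalent of pd.rolling_std(data_frame, bb_period)
rolling_std = data_frame.rolling(window=bb_period).std()
print(rolling_std)  # first bb_period-1 rows are NaN
```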
# Copyright © 2019 <NAME>
"""
Test for the ``preprocess._aggregate_columns._difference`` module.
"""
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
import unittest
# Tests for:
from ...clean_variables import VariableCleaner
class PreprocessConstantDifferenceTests(unittest.TestCase)... | assert_frame_equal(_expected, _vc.frame) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/env python3
"""
Author: <NAME>
Date: 04/05/2020
Function: Calls to an external C++ program
Description:
============
This calls out to an external C++ program with some data entered purely for inout/output testing
The return of this external program is a csv style stream
There is code commented out... | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import plotly.express
from IMLearn.learners import UnivariateGaussian, MultivariateGaussian
import numpy as np
import plotly.graph_objects as go
import plotly.io as pio
pio.templates.default = "simple_white"
def test_univariate_gaussian():
# Question 1 - Draw samples and print fitted model
... | pd.DataFrame(res, columns=['f1', 'f3', 'Log Likelihood']) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import geopandas
from mapillary_image_classification.data.osm import define_categories
def split_data(df: geopandas.GeoDataFrame, num_parts: int = 4):
"""
Split a dataframe into num_parts chunks.
This can be used to produce multiple dataset files and downl... | pd.concat([df_smaller, df_sample]) | pandas.concat |
import pandas as pd
import numpy as np
from scipy.stats import bernoulli
from scipy.stats import uniform
def assign_bags(strategy='random_n_size', random_seed=None, **kwargs):
# Arguments:
# X: feature matrix, each feature vector should be represented as a row vector in the matrix
# num_bags: number of bag... | pd.Categorical(X[strategy_col]) | pandas.Categorical |
import re
import numpy as np
import pandas as pd
from nltk import WordNetLemmatizer
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from src.embeddings import load_vocab, load_embeddings
def find_all_num(data):
all_ch_c = len(data)
i = 0
all_nums = set()
while i <... | pd.read_csv("data/wikihow.csv") | pandas.read_csv |
import codecademylib3_seaborn
from bs4 import BeautifulSoup
import requests
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
print("some")
webpage_response = requests.get("https://s3.amazonaws.com/codecademy-content/courses/beautifulsoup/cacao/index.html")
webpage = webpage_response.content
soup=... | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
import functools
from io import BytesIO
import pickle
import gzip
from pathlib import Path
from functools import cached_property
from dataclasses import dataclass
from PIL import Image
import json
from pandas._libs.tslibs import Timedelta
import torch
from collections import Counter
import functools
import random
from... | pd.Series(group["distance"], name=f"beacon:{beacon_id}") | pandas.Series |
import unittest
from datetime import datetime, timezone
from parameterized import parameterized
import pandas as pd
if __package__:
from ..ohlc import OHLC
else:
from aiokraken.model.ohlc import OHLC
"""
Test module.
This is intended for extensive testing, using parameterized, hypothesis or similar generatio... | ptypes.is_datetime64_ns_dtype(ohlc.dataframe.index.dtype) | pandas.api.types.is_datetime64_ns_dtype |
import pickle
import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
from scipy import stats
from datetime import datetime
from sklearn import preprocessing
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score, confu... | pd.DataFrame(swimming_data) | pandas.DataFrame |
import pandas as pd
import pytest
from bach import DataFrame
from bach.series.series_multi_level import SeriesNumericInterval
@pytest.fixture()
def interval_data_pdf() -> pd.DataFrame:
pdf = pd.DataFrame(
{
'lower': [0., 0., 3., 5., 1., 2., 3., 4., 5.],
'upper': [1., 1., 4., 6., 2... | pd.Interval(left=5., right=6., closed='right') | pandas.Interval |
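`pd.Interval` builds a single interval scalar; `closed` controls which endpoints are included, as in the fixture above. A sketch:

```python
import pandas as pd

iv = pd.Interval(left=5.0, right=6.0, closed="right")  # (5, 6]
print(5.0 in iv)  # False: left endpoint excluded when closed='right'
print(6.0 in iv)  # True
```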
import time
import numpy as np
from loguru import logger
import psycopg2.extras as extras
import os
import pandas as pd
import functools
logger.remove(0)
logger.add("sampling.log", level="DEBUG", enqueue=True, mode="w")
def timeit(f_py=None, to_log=None):
assert callable(f_py) or f_py is None
def _decorator... | pd.read_csv(fname, parse_dates=["timestamp_utc"]) | pandas.read_csv |
# ©<NAME>, @brianruizy
# Created: 03-15-2020
import datetime
import platform
import pandas as pd
# Datasets scraped can be found in the following URL's:
# https://github.com/CSSEGISandData/COVID-19
# https://github.com/owid/covid-19-data/tree/master/public/data
# Different styles in zero-padding in date depend on o... | pd.date_range(end=yesterday, periods=8, freq='7D') | pandas.date_range |
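`pd.date_range` with `end=` and `periods=` counts backwards from the end date; `freq='7D'` steps in weekly increments, which is how the snippet lines up eight week-spaced reference dates. A sketch with a fixed end date for reproducibility:

```python
import pandas as pd

yesterday = pd.Timestamp("2020-03-14")  # hypothetical fixed date
# Eight dates, 7 days apart, ending at `yesterday`
dates = pd.date_range(end=yesterday, periods=8, freq="7D")
print(dates[0], dates[-1])  # 2020-01-25 ... 2020-03-14
```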
"""
Download, transform and simulate various datasets.
"""
# Author: <NAME> <<EMAIL>>
# License: MIT
from os.path import join
from urllib.parse import urljoin
from string import ascii_lowercase
from sqlite3 import connect
from rich.progress import track
import numpy as np
import pandas as pd
from .base import Datas... | pd.read_csv(FETCH_URLS["baseball"], na_values="?") | pandas.read_csv |
import operator
from shutil import get_terminal_size
from typing import Dict, Hashable, List, Type, Union, cast
from warnings import warn
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, hashtable as htable
from pandas._typing import ArrayLike, Dtype, Ordered, Scal... | concat_categorical(to_concat) | pandas.core.dtypes.concat.concat_categorical |
'''
main.py
----------
<NAME>
June 6, 2018
Given a company's landing page on Glassdoor and an output filename, scrape the
following information about each employee review:
Review date
Employee position
Employee location
Employee status (current/former)
Review title
Number of helpful votes
Pros text
Cons text
Advice t... | pd.DataFrame([], columns=SCHEMA) | pandas.DataFrame |
import mlrose
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from sklearn.metrics import accuracy_score
from alg_runner import sim_annealing_runner, rhc_runner, ga_runner,... | pd.DataFrame.from_dict(best_fit_dict, orient='index') | pandas.DataFrame.from_dict |
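`DataFrame.from_dict` with `orient='index'` turns each dict key into a row label rather than a column, which suits per-algorithm result dicts like `best_fit_dict` above. A sketch with invented results:

```python
import pandas as pd

best_fit_dict = {  # hypothetical per-algorithm scores
    "simulated_annealing": {"fitness": 0.91, "iterations": 500},
    "genetic_algorithm": {"fitness": 0.88, "iterations": 300},
}
# Rows are algorithm names; columns are fitness and iterations
df = pd.DataFrame.from_dict(best_fit_dict, orient="index")
print(df)
```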
#!/usr/bin/env python3.7
# coding: utf-8
# In[1]:
import sys
import rstr
import string
import random
import pandas as pd
from numpy.random import default_rng
import numpy as np
import time
#####INPUT PARAMETERS #####
## pattern
## stream_length
## num_sub_streams
## window_size
## num_matches
## strict
############... | pd.DataFrame(data,columns = ['pos','stream_id', 'event']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""reVX PLEXOS unit test module
"""
from click.testing import CliRunner
import numpy as np
import json
import os
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
import shutil
import tempfile
import traceback
from rex import Resource
from rex.utilities.loggers imp... | pd.read_csv(REEDS_1) | pandas.read_csv |
import json
import os
import pandas as pd
import scraper
class full_version:
def __init__(self):
self.data={}
self.name=""
self.email=""
self.user_data = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__))),
"json",
"user_data.json"
)
self.user_list = os.path.joi... | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
@pytest.mark.functions
def test_truncate_datetime_dataframe_invalid_datepart():
"""Checks if a ValueError is appropriately raised when datepart is
not a valid enumeration.
"""
... | pd.DataFrame({"dt": [x], "foo": [np.nan]}, copy=False) | pandas.DataFrame |
import statistics
import json
import csv
from pathlib import Path
from promise.utils import deprecated
from scipy import stats
from .core import should_process, rename_exp
from .core import get_test_fitness
from .core import sort_algorithms
from .core import rename_alg
from .plotting import plot_twinx
import stac
... | pd.Series(rnks) | pandas.Series |
# -*-coding:utf-8 -*-
'''
@File : preprocess.py
@Author : <NAME>
@Date : 2020/9/9
@Desc :
'''
import pandas as pd
import json
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_BASE = BASE_DIR + "/data/"
# print(DATA_BASE)
def data_prepro... | pd.read_csv("../data/" + corpus_file_name + ".csv") | pandas.read_csv |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import pandas as pd
import numpy as np
from qlib.contrib.report.data.base import FeaAnalyser
from qlib.contrib.report.utils import sub_fig_generator
from qlib.utils.paral import datetime_groupby_apply
from qlib.contrib.eva.alpha import pred_autoco... | pd.DataFrame(self._inf_cnt) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.n... | tm.assert_panel_equal(shifted1, shifted2) | pandas.util.testing.assert_panel_equal |
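Note: this row's completion calls `tm.assert_panel_equal`; `Panel` was removed in pandas 1.0 and the `pandas.util.testing` alias was removed in 2.0, so the snippet only runs on old versions. The modern pattern compares MultiIndex DataFrames via `pandas.testing` — a sketch:

```python
import pandas as pd
from pandas.testing import assert_frame_equal  # modern import path

# A MultiIndex DataFrame is the recommended replacement for Panel
idx = pd.MultiIndex.from_product([["item1", "item2"], [0, 1]])
a = pd.DataFrame({"x": [1, 2, 3, 4]}, index=idx)
assert_frame_equal(a, a.copy())
```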
from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.cast import find_common_type, is_dtype_equal
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameCombineFirst:
def test_combine_first_mixed(self):
... | find_common_type([frame.dtypes["b"], other.dtypes["b"]]) | pandas.core.dtypes.cast.find_common_type |
"""
1. Universal base class for luigi targets.
2. Target for saving pandas.DataFrame to CSV file.
3. Target for saving numpy.array to CSV file.
Example:
```
target = DataFrameCSVTarget('path/to/file.csv')
with target.open('w') as stream:
stream.write({'lol': 1, 'lal': 2})
with target.open('r') as stream:
data... | pandas.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 14:37:03 2019
@author: ppradeep
"""
import os
clear = lambda: os.system('cls')
clear()
## Import packages
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import pickle
# Classifiers
from sklearn.ensemble im... | pd.concat([Y_reg, fingerprints], axis=1) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 21 14:08:43 2019
to produce X and y use combine_pos_neg_from_nc_file or
prepare_X_y_for_holdout_test
@author: ziskin
"""
from PW_paths import savefig_path
from PW_paths import work_yuval
from pathlib import Path
cwd = Path().cwd()
hydro_path = work_... | pd.timedelta_range(end=0, periods=points, freq=freq) | pandas.timedelta_range |
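`pd.timedelta_range(end=0, ...)` builds a ladder of negative offsets ending at zero, useful for labeling trailing windows relative to an event, as in the snippet above. A sketch with invented resolution:

```python
import pandas as pd

points, freq = 5, "6h"  # hypothetical number of points and step
offsets = pd.timedelta_range(end=0, periods=points, freq=freq)
print(offsets)
# TimedeltaIndex(['-1 days +00:00:00', ..., '0 days 00:00:00'])
```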
# -*- coding: utf-8 -*-
# imports
import string
import logging, os, sys
import math
import re
import pandas as pd
from collections import Counter
from db.models import session, engine
from db.controller import Storage
from nltk.tokenize import sent_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import wo... | pd.read_csv(csv_file) | pandas.read_csv |
import pandas as pd
import evaluation
import pytest
def test_labels() -> None:
labels = pd.DataFrame.from_dict({'label': ['high', 'medium', 'low'], 'url': ['a', 'b', 'c']})
predictions = pd.DataFrame.from_dict({'prediction': ['high', 'low', 'low'], 'url': ['a', 'b', 'c']})
result = evaluation.calc_error_m... | pd.DataFrame.from_dict({'label': ['high', 'low', 'low'], 'url': ['a', 'b', 'c']}) | pandas.DataFrame.from_dict |
"""Backtester"""
from copy import deepcopy
import unittest
import pandas as pd
import pytest
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.preprocessing import StandardScaler
from soam.constants import (
ANOMALY_PLOT,
DS_COL,
FIG_SIZE,
MONTHLY_TIME_GRANULARITY,
P... | pd.Timestamp('2023-01-01 00:00:00') | pandas.Timestamp |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from functools import reduce
import pickle
import os
import pymssql
from virgo import market
startDate_default = '20060101'
endDate_default = (datetime.now() + timedelta(days=-1)).strftime('%Y%m%d')
# endDate_defau... | pd.pivot_table(data500, values='flag_end', index='OutDate', columns='SecuCode') | pandas.pivot_table |
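`pd.pivot_table` reshapes long data into a matrix: `index` values become rows, `columns` values become columns, and `values` fills the cells (aggregating duplicates, mean by default). A sketch mimicking the date-by-security layout above, with invented data:

```python
import pandas as pd

data500 = pd.DataFrame({
    "OutDate": ["2020-01-01", "2020-01-01", "2020-01-02"],
    "SecuCode": ["000001", "000002", "000001"],
    "flag_end": [1, 0, 1],
})
wide = pd.pivot_table(data500, values="flag_end", index="OutDate", columns="SecuCode")
print(wide)  # NaN where a (date, security) pair is missing
```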
# Module: Preprocess
# Author: <NAME> <<EMAIL>>
# License: MIT
import pandas as pd
import numpy as np
import ipywidgets as wg
from IPython.display import display
from ipywidgets import Layout
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin, clone
from sklearn.impute._base import _BaseImputer
... | pd.DataFrame(data_pca) | pandas.DataFrame |
import json
import logging
import os
import pathlib
import sys
from collections import OrderedDict
from datetime import datetime
import click
import humanfriendly
import pandas
__version__ = '1.1.5'
logger = logging.getLogger()
@click.group()
@click.option('--debug', is_flag=True)
@click.pass_context
def cli(ctx,... | pandas.read_excel(source_path, sheet_name=None) | pandas.read_excel |
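`pandas.read_excel` with `sheet_name=None` reads every worksheet and returns a dict mapping sheet name to DataFrame. A sketch (the path is hypothetical, and an engine such as openpyxl must be installed):

```python
import pandas as pd

source_path = "workbook.xlsx"  # hypothetical file
sheets = pd.read_excel(source_path, sheet_name=None)  # {sheet_name: DataFrame}
for name, frame in sheets.items():
    print(name, frame.shape)
```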
import pandas as pd # Data tables
import numpy as np # Arrays
from math import sqrt, atan, log, exp, sin, cos, tan
from scipy.integrate import odeint
from scipy.optimize import *
pi = np.pi
month = 7
# "!Boundary layers"
h_r = 5 # [W/m^2-K]
h_c = 7 # [W/m^2-K]
# h_in=h_r + h_c
h_in... | pd.DataFrame(h_sol, columns=['hour_per']) | pandas.DataFrame |
import datetime
import numpy as np
from numpy import nan
import pandas as pd
import pytz
import pytest
from pytz.exceptions import UnknownTimeZoneError
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pvlib.location import Location
from test_solarposition import expected_solpos
from conf... | assert_frame_equal(expected, clearsky) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/env python
# Filename: analyze_dataAug_results
"""
introduction:
authors: <NAME>
email:<EMAIL>
add time: 29 March, 2021
"""
import os, sys
code_dir = os.path.expanduser('~/codes/PycharmProjects/Landuse_DL')
sys.path.insert(0, code_dir)
import basic_src.io_function as io_function
import pandas as pd
def ... | pd.DataFrame(save_dict) | pandas.DataFrame |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
data = pd.read_csv('data.csv')
df = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
from scipy.stats.mstats import gmean
import sys
labels = pd.read_csv("labels.txt",sep= ' ',header=None)
df1 = | pd.read_csv("fmow_imagenet1k-resnext-101-cnn-only-all_8_simplecut_test.txt",header=None) | pandas.read_csv |
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""
@author: <NAME>
"""
from tqdm import tqdm, trange
import pandas as pd
import io
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import re
import argparse
from pytorch_transformers import BertTokenizer
from other_func import writ... | pd.read_csv(args.original_data, header=None) | pandas.read_csv |
# -*- coding: utf-8 -*-
import re
import demjson
import pandas as pd
from spider.setting import col_names
class JsonParse:
'''
Parse the web page information
'''
def __init__(self, htmlCode):
self.htmlCode = htmlCode
self.json = demjson.decode(htmlCode)
pass
def parseTool(self, content):
... | pd.DataFrame(info, columns=col_names) | pandas.DataFrame |
import pandas as pd
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, recall_score, precision_score, classification_report, confusion_matrix
import numpy as np
from sklearn.model_selection import StratifiedKFold
# This script conducts a hype... | pd.DataFrame(clf.cv_results_["mean_test_score"], columns=["Accuracy"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import operator as op
import seaborn as sns
# http://data8.org/datascience/_modules/datascience/tables.html
#####################
# Frame Manipulation
def relabel(df, OriginalName, NewName):
return df.rename(index=str, columns={OriginalN... | pd.DataFrame() | pandas.DataFrame |
"""
It is observed that if the last trade is profitable, the next trade is more likely to be a loss.
So why not create a ghost trader on the same strategy, and trade only when the ghost trader's last trade was a loss.
Elements: two moving averages; rsi; donchain channel
conditions: 1. long if short MA > long MA, rsi lower than overbought 7... | pd.to_datetime(bm_ret.index) | pandas.to_datetime |
"""LogToDataFrame: Converts a Zeek log to a Pandas DataFrame"""
# Third Party
import pandas as pd
# Local
from zat import zeek_log_reader
class LogToDataFrame(object):
"""LogToDataFrame: Converts a Zeek log to a Pandas DataFrame
Notes:
This class has recently been overhauled from a simple l... | pd.to_timedelta(self._df[name], unit='s') | pandas.to_timedelta |
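Zeek logs store durations as float seconds; `pd.to_timedelta` with `unit='s'` converts such a column to a proper timedelta dtype, as the completion above does. A sketch with invented values:

```python
import pandas as pd

df = pd.DataFrame({"duration": [0.5, 12.0, 3600.0]})  # seconds
df["duration"] = pd.to_timedelta(df["duration"], unit="s")
print(df["duration"].dtype)  # timedelta64[ns]
```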
from dateutil.parser import parse
import pandas as pd
import pandas as ExcelWriter
import numpy as np
import csv
twitter_raw_filename = '/Nike_tweets.csv'
# reading the twitter scrapped data file
tweets = pd.read_csv(twitter_raw_filename)
# setting the column of tweets dataframe
tweets.columns = ["Twitter_ID","Tweet_... | pd.to_datetime(tweets['Timestamp']) | pandas.to_datetime |
"""
A warehouse for constant values required to initialize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import importlib.resources
import pandas as pd
import ... | pd.BooleanDtype() | pandas.BooleanDtype |
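`pd.BooleanDtype()` is the nullable boolean extension dtype: unlike plain `bool` it can hold `pd.NA`, which matters when cleaning columns with missing flags. A sketch:

```python
import pandas as pd

s = pd.Series([True, False, None], dtype=pd.BooleanDtype())
print(s)        # True, False, <NA>
print(s.dtype)  # boolean (nullable), vs. object for a plain bool+None mix
```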
"""
Created on Wed Oct 9 14:10:17 2019
@author: <NAME>meters
Building the graph of Athens network by using osmnx package
"""
from pneumapackage.settings import *
import osmnx as ox
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.collections import LineCollection
import networkx a... | pd.concat([n1, n2], axis=0) | pandas.concat |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
from scipy.stats import pearsonr, linregress
from statsmodels.stats.multitest import multipletests
np.seterr(divide='ignore') # Hide Runtime warning regarding log(0) = -inf
import process_files
import La... | pd.read_csv("./Data83018/connectivity_contra.csv", index_col=0) | pandas.read_csv |
"""
Imputation
https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html
fill in missing values
1. Execute the code
(in Jupyter, split it into multiple cells)
2. Understand what is happening
QUESTION: What other imputation strategies exist (check out the "strategy" parameter in the... | pd.DataFrame(t, columns=cols.columns) | pandas.DataFrame |
import numpy as np
import pandas as pd
def auto_pate(method):
    """Automatically add parentheses."""
"""自动添加括号"""
method = str.strip(method)
if method[-1] != ')':
if '(' not in method:
method = method + '()'
else:
method = method + ')'
return method
def back_args_str(*args, **kwargs):
largs = [f... | pd.cut(notmiss.x, bin_values, precision=8, include_lowest=True) | pandas.cut |
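`pd.cut` bins a numeric column into intervals at the given edges; `include_lowest=True` keeps the left edge of the first bin, and `precision` controls label rounding, as in the binning helper above. A sketch with invented edges:

```python
import pandas as pd

x = pd.Series([1.0, 2.5, 7.0, 9.9])
bin_values = [1.0, 5.0, 10.0]  # hypothetical bin edges
bins = pd.cut(x, bin_values, precision=8, include_lowest=True)
print(bins.value_counts().sort_index())  # 2 values per bin
```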
# -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pan... | Series(result) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 30 14:50:32 2018
@author: <NAME>
"""
import pandas as pd
import numpy as np
from eotg import eotg
#%%
quotes = | pd.read_csv('quotes.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
# IMPORTANDO AS BIBLIOTECAS
"""
import pandas as pd
import gc #--> free memory
from datetime import date, datetime
from pytz import timezone
fuso_horario = timezone('America/Sao_Paulo')
data_e_hora_Manaus = datetime.today().astimezone(fuso_horario)
"""# 0. INPUTS DO USUÁRIO
## 0.1 Qual... | pd.concat([df_Consultor1,df_Consultor2,df_Consultor3,df_Consultor4]) | pandas.concat |
#!/bin/env python
#
# Script name: IDP_html_gen.py
#
# Description: Script to generate IDP page of QC html report.
#
## Author: <NAME>
import pandas as pd
import numpy as np
import sys
import os
from ast import literal_eval
def formatter(x):
try:
return "{:e}".format(float(x))
except:
retur... | pd.DataFrame(cat_data) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
# In[2]:
train = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Train-1542865627584.csv")
beneficiary = pd.read_csv("D:/ML/Dataset/MedicalInsurance/Train_Beneficiarydata-1542865627584.csv")
inpatient = pd.read_csv("D:/ML/Dataset/Medica... | pd.read_csv("D:/ML/Dataset/MedicalInsurance/Test_Beneficiarydata-1542969243754.csv") | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Analyses
@author: boyangzhao
"""
import os
import numpy as np
import pandas as pd
import re
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import logging
from ceres_infer.utils import *
import matplotlib... | pd.concat([df1, df2]) | pandas.concat |
#pylint disable=C0301
from struct import Struct, pack
from abc import abstractmethod
import inspect
from typing import List
import numpy as np
from numpy import zeros, searchsorted, allclose
from pyNastran.utils.numpy_utils import integer_types, float_types
from pyNastran.op2.result_objects.op2_objects import BaseEle... | pd.DataFrame(element_location) | pandas.DataFrame |
"""Time series feature generators as Scikit-Learn compatible transformers."""
from itertools import combinations
from typing import List, Optional
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from s... | pd.concat(poly, axis=1) | pandas.concat |
"""
This script save the direct/indirect effects for each neuron averaging across different groups depending
on negation type and correctness category.
Usage:
python compute_and_save_neuron_agg_effect.py $result_file_path $model_name $negation_test_set_file
"""
import os
import sys
import json
import pandas as pd... | pd.concat(va_dfs) | pandas.concat |
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomCon... | pd.testing.assert_frame_equal(called[0][0][0], table_data) | pandas.testing.assert_frame_equal |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pickle
import shutil
import sys
import tempfile
import numpy as np
from numpy import arange, nan
import pandas.testing as pdt
from pandas import DataFrame, MultiIndex, Series, to_datetime
# dependencies testing specific
import pytest
import recordlinka... | pdt.assert_frame_equal(result, expected) | pandas.testing.assert_frame_equal |
import operator
import warnings
import numpy as np
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, to_timedelta
import pandas._testing as tm
from pandas.core.algorithms import checked_add_with_arr
from .pandas_vb_common import numeric_dtypes
try:
import pandas.core.computation.e... | DataFrame(arr) | pandas.DataFrame |
"""
#--------------------------------
# Name:npmrds_data_conflation_cmp_batch.py
# Purpose: Get distance-weighted average speed from NPMRDS data for CMP deficient corridors,
# make chart images. If multiple years of input data provided, then charts
# showing year-year changes will be created.
# Autho... | pd.DataFrame([out_row_dict]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('classic')
import pandas as pd
import quandl as Quandl
import wbdata as wb
from scipy import stats
import runProcs
# get_ipython().run_line_magic('matplotlib', 'inline')
# # Preliminaries
#
# Import cou... | pd.DataFrame({}) | pandas.DataFrame |
#!/usr/bin/env python3
import requests
import json
import pandas as pd
import numpy as np
import os
import sys
import time
from datetime import datetime, date
from strava_logging import logger
from db_connection import connect, sql
from location_data import lookup_location
class Athlete:
def __init__(self, **kwa... | pd.DataFrame() | pandas.DataFrame |
"""Tests for _data_reading.py"""
import datetime
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
import primap2
import primap2.pm2io as pm2io
import primap2.pm2io._conversion
from primap2.pm2io._data_reading import additional_coordinate_metadata
from .utils import assert_ds_aligned_equa... | pd.testing.assert_frame_equal(df_result, df_expected, check_column_type=False) | pandas.testing.assert_frame_equal |
#Imports
from aiohttp import ClientSession
from itertools import chain
import pandas as pd
import asyncio
#Stock ticker and dates for data required
#IEX api key
start = '2019/04/01' #earliest date available on non-premium IEX accounts
end = '2020/11/27'
key = 'IEX API KEY' #enter api key from IEX
ticker = 'FB'
#Conve... | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import glob
import os
import numpy as np
import time
import fastparquet
import argparse
from multiprocessing import Pool
import multiprocessing as mp
from os.path import isfile
parser = argparse.ArgumentParser(description='Program to run google compounder for a particular file and setting')
parse... | pd.concat(words_list,ignore_index=True,sort=False) | pandas.concat |
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import ep_time_step, config_sim
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import config_sim, access_block
f... | pd.DataFrame(run1_raw_result) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 18:41:43 2018
@author: <NAME>
"""
# Libraries
import numpy as np
import pandas as pd
from sklearn import svm
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
def obtain_centroid(X_train, sc, n_clusters):
... | pd.DataFrame(centroid) | pandas.DataFrame |
import pandas as pd
from sklearn.preprocessing import PowerTransformer
def preprocess_columns(df):
"""
Assumptions:
- Remove variables with more than 50% missing values
- Replace missing values of numerical variables with per mean
- Remove categorical variables with more than 25 unique values
... | pd.read_csv('../data/student-por.csv', sep=';') | pandas.read_csv |
# -*- coding: utf-8 -*-
# import libraries
import pandas as pd
import statsmodels.api as sm
'''
Download monthly prices of Facebook and S&P 500 index from 2014 to 2017
CSV file downloaded from Yahoo File
start period: 02/11/2014
end period: 30/11/2014
period format: DD/MM/YEAR
'''
fb = pd.read_csv('FB.t... | pd.concat([fb['Close'], sp_500['Close']], axis=1) | pandas.concat |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
impo... | Timestamp("20130228 21:00:00") | pandas.Timestamp |
from ...utils import constants
import pandas as pd
import geopandas as gpd
import numpy as np
import shapely
import pytest
from contextlib import ExitStack
from sklearn.metrics import mean_absolute_error
from ...preprocessing import detection, clustering
from ...models.sts_epr import STS_epr
from ...core.trajectorydata... | pd.to_datetime('2020/01/10 08:00:00') | pandas.to_datetime |
import argparse
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import ticker
import seaborn as sns
plt.style.use(["bmh"])
sns.set_palette(sns.color_palette("Paired", 6))
def get_args():
parser = argparse.ArgumentParser("graph argument")
parser.add_argument("--dat... | pd.read_pickle(f"./data/{label}.pkl") | pandas.read_pickle |
"""Tests for Resource harvesting methods."""
from typing import Any, Dict, List
import numpy as np
import pandas as pd
import pytest
from pudl.metadata.classes import Package, Resource, RESOURCE_METADATA
from pudl.metadata.helpers import most_frequent
# ---- Helpers ---- #
def _assert_frame_equal(a: pd.DataFrame, ... | pd.testing.assert_frame_equal(a, b, **kwargs) | pandas.testing.assert_frame_equal |
"""
Pipeline Evaluation module
This module runs all the steps used and allows you to visualize them.
"""
import datetime
from typing import List, Tuple, Union
import pandas as pd
from sklearn.pipeline import Pipeline
from .evaluation import Evaluator
from .feature_reduction import FeatureReductor
from .labeling imp... | pd.Series(self.y_pred, index=self.X_test.index) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
# In[3]:
sub_1_p = pd.read_csv('./output/submission_1020.csv')
sub_2_p = pd.read_csv('./output/submission_1021.csv')
sub_3_p = pd.read_csv('./output/submission_12345.csv')
sub_4_p = pd.read_csv('./output/submission_1234.csv')
sub_5_p = | pd.read_csv('./output/submission_2017.csv') | pandas.read_csv |
import numpy as np
import pytest
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import Categorical, CategoricalIndex, DataFrame, Series, get_dummies
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray, SparseDtype
class TestGetDummies:
@pytest.f... | SparseArray([0, 1], dtype=dtype) | pandas.core.arrays.sparse.SparseArray |
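`SparseArray` stores mostly-constant data compactly, keeping only the non-fill values; `get_dummies` can emit it via `sparse=True`. A sketch using the public import path:

```python
from pandas.arrays import SparseArray

arr = SparseArray([0, 0, 1, 0, 1])  # fill_value defaults to 0 for ints
print(arr.density)  # fraction of stored (non-fill) values: 0.4
```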
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm as cm
import seaborn as sns
sns.set_style("whitegrid")
import sys
import os
from pathlib import Path
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection i... | pd.concat([tr_size_df, tr_sc_m_df,val_sc_m_df,tr_sc_std_df,val_sc_std_df], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 11:41:44 2018
@author: MichaelEK
"""
import pandas as pd
import numpy as np
from pdsf import sflake as sf
from utils import json_filters, geojson_convert, process_limit_data, assign_notes, get_json_from_api
def process_limits(param):
"""
"""
run_time_st... | pd.Timestamp.today() | pandas.Timestamp.today |