| prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
|---|---|---|
from . import webcat, geo, prices
import pandas as pd
import aljpy
from pathlib import Path
DECISIONS = Path('data/decisions.json')
CUTS = {
'park': 10,
'town': 10,
'propvalue': 10000,
'friends': 45,
'aerial': 30,
'central': 60}
@aljpy.autocache(disk=False, memory=True)
def map_layers():
... | pd.to_numeric(df['num_bedrooms']) | pandas.to_numeric |
from rest_framework import permissions, status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from rest_framework.views import APIView
from datetime import date, datetime, timedelta
from django.forms.models import model_to_dict
fro... | pd.DataFrame(web_activities_type) | pandas.DataFrame |
import cv2
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
from PIL import Image
from skimage.transform import resize
from random import shuffle
from random import randint
import math
import random
from io import BytesIO
import jpeg4py as jpeg
# Input... | pd.DataFrame(columns=['fname', 'camera']) | pandas.DataFrame |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, Timedelt... | to_timedelta([pd.NaT]) | pandas.to_timedelta |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 28 22:50:43 2018
@author: kennedy
"""
"""
Credit:
https://www.quantopian.com/posts/technical-analysis-indicators-without-talib-code
Bug Fix by Kennedy:
Works fine for library import.
returns only column of the indicator r... | pd.Series(TR_l) | pandas.Series |
import pandas as p
from pandas import DataFrame as df
import matplotlib.pyplot as pl
from sklearn.linear_model import LinearRegression
data=p.read_csv('cost_revenue_clean.csv')
x= | df(data,columns=['production_budget']) | pandas.DataFrame |
# %% imports and settings
from pandarallel import pandarallel
import datar.all as r
from datar import f
import plotnine as p9
import os
import numpy as np
import pandas as pd
import seaborn as sns
sns.set()
pd.set_option("max_colwidth", 250) # column最大宽度
pd.set_option("display.width", 250) # dataframe宽度
| pd.set_option("display.max_columns", None) | pandas.set_option |
# Module: Preprocess
# Author: <NAME> <<EMAIL>>
# License: MIT
import pandas as pd
import numpy as np
import ipywidgets as wg
from IPython.display import display
from ipywidgets import Layout
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin, clone
from sklearn.impute._base import _BaseImputer
... | pd.set_option("display.max_columns", 500) | pandas.set_option |
# -*- coding: utf-8 -*-
"""
Retrieve student emails with attachments (1:1)
===============================================
Retrieves files attached to emails from students, such as a project submission.
The program assumes there is only one per student and that all the emails have been
archived in ... | pandas.DataFrame(rows) | pandas.DataFrame |
# Dependencies
import warnings
warnings.filterwarnings("ignore")
warnings.simplefilter('ignore', UserWarning)
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import sys
import argparse
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
im... | pd.concat([dataframePositive, dataframeNegative]) | pandas.concat |
"""
A set of classes for aggregation of TERA data sources into common formats.
"""
from rdflib import Graph, Namespace, Literal, URIRef, BNode
from rdflib.namespace import RDF, OWL, RDFS
UNIT = Namespace('http://qudt.org/vocab/unit#')
import pandas as pd
import validators
import glob
import math
from tqdm import tqdm
... | pd.read_csv(path,sep=',',header=None,na_values = nan_values, dtype=str) | pandas.read_csv |
#!/usr/bin/env python3
# coding: utf-8
# In[3]:
import csv
import pandas as pd
from connected_component import connected_component_subgraphs as ccs
from strongly_connected_component import strongly_connected_components as scc
# In[4]:
'''
df = pd.read_csv("/root/.encrypted/.pythonSai/moreno_highschool/out.moreno... | pd.read_csv("/root/.encrypted/.pythonSai/my_parsed3.csv", sep=",", header=None, chunksize=2000, names=["userid","retweet_userid"]) | pandas.read_csv |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or a... | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import phik
import scipy.stats as sstats
import warnings
class CalidadDatos:
def __init__(self, _base, castNumero=False, diccionarioCast=None,
errores="ignore", formato_fecha=None):
""" Constructor por defecto d... | pd.DataFrame(dic_outliers) | pandas.DataFrame |
"""This provides a class for discretizing data in a convienant way that makes
sense for our spatially referenced data/models.
"""
__all__ = [
'Grid',
]
__displayname__ = 'Mesh Tools'
import numpy as np
import pandas as pd
import properties
import discretize
from .plots import display
from .fileio import GridFil... | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
#!/usr/bin/env python
# coding: utf-8
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Created on Jul 2021.
@author: <NAME>
This module processes the option 2
of the menuInicial
"""
import datetime
import pandas as pd
from getMJ import getMJ
class opcao():
def opcao2(placa, dataInicial, dataFin... | pd.json_normalize(dados) | pandas.json_normalize |
import os,sys
import numpy as np
import pandas as pd
import re
from intervaltree import Interval, IntervalTree
from functools import reduce
from typing import (
List,
Set,
Iterable,
)
from collections import OrderedDict
import viola
from viola.core.indexing import Indexer
from viola.core.bed import Bed
from... | pd.Index(ls_order) | pandas.Index |
# IMPORTATION STANDARD
import os
# IMPORTATION THIRDPARTY
import pandas as pd
import pytest
# IMPORTATION INTERNAL
from gamestonk_terminal.stocks.due_diligence import dd_controller
# pylint: disable=E1101
# pylint: disable=W0603
first_call = True
@pytest.mark.block_network
@pytest.mark.record_stdout
def test_menu... | pd.DataFrame() | pandas.DataFrame |
"""
Download, transform and simulate various binary datasets.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: MIT
from re import sub
from collections import Counter
from itertools import product
from urllib.parse import urljoin
from string import ascii_lowercase
from zipfile import ZipFile
from i... | pd.read_csv(FETCH_URLS["spambase"], header=None) | pandas.read_csv |
from __future__ import print_function
import collections
import os
import sys
import numpy as np
import pandas as pd
try:
from sklearn.impute import SimpleImputer as Imputer
except ImportError:
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsSca... | pd.DataFrame(mat, columns=df.columns) | pandas.DataFrame |
import pandas as pd
import sys
from datetime import datetime
from pytz import timezone, utc
def str_list(s_cd):
cds = []
if type(s_cd) == str:
cds = []
cds.append(s_cd)
else:
cds = s_cd
return cds
def today_yymmdd():
d = pd.Timestamp.today().date().strftime('%y%m%d')
... | pd.Timestamp.today() | pandas.Timestamp.today |
from datetime import datetime
import pandas as pd
import os
import re
from .transformers_map import transformers_map
def build_data_frame(backtest: dict, csv_path: str):
"""Creates a Pandas DataFame with the provided backtest. Used when providing a CSV as the datafile
Parameters
----------
backtest:... | pd.to_numeric(new_df.low) | pandas.to_numeric |
import warnings
warnings.filterwarnings("ignore")
import pickle
import json
import pandas as pd
import numpy as np
from pathlib import Path
from process_functions import adjust_names, aggregate_countries, moving_average, write_log
from pickle_functions import picklify, unpicklify
#####################################... | pd.read_csv(path_policy) | pandas.read_csv |
from baseq.utils.file_reader import read_file_by_lines
import pandas as pd
pd.set_option('precision', 3)
def fastq_basecontent_quality(sample, fastq_path, maxLines = 10000):
"""
Generate the basic quality stats of the fastq file
Return:
dataframe: A/T/C/G/quality;
base content figure in bas... | pd.DataFrame(content, columns=['A', 'T', 'C', 'G', 'quality']) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
## python 3.7.7, pandas 1.1.3, numpy 1.19.2
#
# In[1]:
import numpy as np
import pandas as pd
import os
import re
import sys
import argparse
import warnings
warnings.filterwarnings('ignore') ## want to avoid print warnings with pandas merges that can be ignored
parser = argparse.... | pd.merge(pos_count2, neg_count2, right_on = "tool", how = "outer",left_index=True, right_index=False) | pandas.merge |
import numpy as np
import pandas as pd
import pickle
from pathlib import Path
import covid19
from COVID19.model import AgeGroupEnum, EVENT_TYPES, TransmissionTypeEnum
from COVID19.model import Model, Parameters, ModelParameterException
import COVID19.simulation as simulation
from analysis_utils import ranker_I, check... | pd.read_csv(output_dir+"transmission_Run1.csv") | pandas.read_csv |
# Credit card fraud transaction data
# Undersampling - logistic regression - bagging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split
from sklearn import metrics
from sklearn.cross_validation... | pd.read_csv('./trainccard.csv') | pandas.read_csv |
"""
Authors: <NAME> and <NAME>
"""
from bloomberg import BBG
import pandas as pd
from sklearn import preprocessing
import numpy as np
import matplotlib.pyplot as plt
bbg = BBG()
# Brazil FGV Consumer Confidence Index SA Sep 2005=100
# Original Date: '30-sep-2005'
start_date = | pd.to_datetime('01-jan-2010') | pandas.to_datetime |
"""
This script cleans the data
"""
import json
import lightgbm as lgb
import numpy as np
import pandas as pd
from scipy.signal import savgol_filter as sg
from sklearn.feature_selection import RFECV
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV
import shap
from auxiliary imp... | pd.read_csv(DATA_ISO_CONSUMPTION_PROCESSED_FILE) | pandas.read_csv |
#### Filename: Connection.py
#### Version: v1.0
#### Author: <NAME>
#### Date: March 4, 2019
#### Description: Connect to database and get atalaia dataframe.
import psycopg2
import sys
import os
import pandas as pd
import logging
from configparser import ConfigParser
from resqdb.CheckData import CheckData
import numpy... | pd.to_datetime(self.preprocessed_data['VISIT_TIME'], format='%H:%M:%S') | pandas.to_datetime |
from os import listdir
import os
from os.path import isfile, join
import csv
import matplotlib.pyplot as plt
from configparser import ConfigParser
import sweetviz
import pandas as pd
import numpy as np
from joblib import dump, load
from sklearn import metrics
from termcolor import colored
from sklearn.ensemble import R... | pd.DataFrame(errors_data,columns = ['Index', 'Error_Type','Value']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Naive_Bayes_Classifier.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1JZwLGwBxEjnbv_8UTqEmmbDgFy_7Te2r
<div class="alert alert-block alert-info" >
<h1>Naive Bayes Classifier </h1>
## Build a spam classifier using... | pd.read_csv('TrainDataset3.txt', delimiter='\t') | pandas.read_csv |
# 1584927559
import task_submit
# import task_submit_optimus
import task_submit_raw
from task_submit_raw import VGGTask,RESTask,RETask,DENTask,XCETask
import random
import kubernetes
import influxdb
import kubernetes
import signal
from TimeoutException import TimeoutError,Myhandler
import yaml
import requests
from mult... | pd.value_counts(pod_status2) | pandas.value_counts |
#Merges two CSV files and saves the final result
import pandas as pd
import sys
df1 = | pd.read_csv(sys.argv[1]) | pandas.read_csv |
import json
from datetime import datetime
import pandas as pd
from autogluon import TabularPrediction as task
data_path = "./data/plasma/plasma"
label_column = "RETPLASMA"
fold1 = pd.read_csv(data_path + "-fold1.csv")
fold2 = pd.read_csv(data_path + "-fold2.csv")
fold3 = | pd.read_csv(data_path + "-fold3.csv") | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 08:48:39 2020
@author: cclark2
"""
import numpy as np
import math
from scipy.interpolate import interp2d
from scipy.optimize import fsolve
import pandas as pd
import os
import struct
import multiprocessing #import Pool
from itertools import repe... | pd.read_csv(file, delim_whitespace=True, header = [0,1], skiprows=6, error_bad_lines=False) | pandas.read_csv |
# Libraries
import pandas as pd
from alpha_vantage.timeseries import TimeSeries
from time import sleep
def fetch_stock_data(stocks):
"""
Fetches stock data (per min) for last 14 days.
INPUT: List of stocks
OUTPUT: CSV files generated in data folder for all the stocks
"""
cnt=0
for stock in ... | pd.read_csv("../data/Historical_Data/"+stock+".csv",index_col=0) | pandas.read_csv |
import numpy as np
import mxnet as mx
import pdb
np.seterr(divide='ignore', invalid='ignore')
## for saving
import pandas as pd
import os
def COR(label, pred):
label_demeaned = label - label.mean(0)
label_sumsquares = np.sum(np.square(label_demeaned), 0)
pred_demeaned = pred - pred.mean(0)
pred_sum... | pd.DataFrame(label) | pandas.DataFrame |
#!/data7/cschoi/anaconda3/bin/python
# to fine newly discoverd sne from http://www.rochesterastronomy.org/snimages/
import requests
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
from html_table_parser import parser_functions as parser
import astropy.io.ascii as asci... | pd.DataFrame(html_table[1:], columns=html_table[0]) | pandas.DataFrame |
import sys
import os
from flask import Flask, escape, request, send_from_directory, redirect, url_for
import flask
import json
from flask_cors import CORS
import copy
import pandas as pd
import time
sys.path.append(os.path.abspath('../falx'))
from falx.interface import FalxInterface
from falx.utils import vis_util... | pd.api.types.infer_dtype(values, skipna=False) | pandas.api.types.infer_dtype |
import pandas as pd
import numpy as np
import warnings
import sklearn.metrics as mt
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_tes... | pd.get_dummies(data[feature], prefix=feature) | pandas.get_dummies |
import pyspark
from pyspark.sql import SQLContext
import pandas as pd
import csv
import os
def load_states():
# read US states
f = open('states.txt', 'r')
states = set()
for line in f.readlines():
l = line.strip('\n')
if l != '':
states.add(l)
return states
def vali... | pd.read_csv(user_train_fname) | pandas.read_csv |
import collections
import ixmp
import itertools
import warnings
import pandas as pd
import numpy as np
from ixmp.utils import pd_read, pd_write
from message_ix.utils import isscalar, logger
def _init_scenario(s, commit=False):
"""Initialize a MESSAGEix Scenario object with default values"""
inits = (
... | pd.Series(df) | pandas.Series |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from utils import binary_sampler
def data_loader(data_name, miss_rate, target_column=None):
"""Loads datasets and introduce missingness.
Args:
- data_name: letter, spam, or mnist
- miss_rate: the probabili... | pd.read_csv(file_name, delimiter=',') | pandas.read_csv |
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal, assert_series_equal
from zenml.preprocessing import (add_prefix, add_suffix, strip_whitespace, string_to_float,
remove_string, replace_string_with_nan, replace_nan_with_string,
... | pd.DataFrame({'probs': ['0.3', '0.8', 2]}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 21 14:08:43 2019
to produce X and y use combine_pos_neg_from_nc_file or
prepare_X_y_for_holdout_test
@author: ziskin
"""
from PW_paths import savefig_path
from PW_paths import work_yuval
from pathlib import Path
cwd = Path().cwd()
hydro_path = work_... | pd.to_numeric(row[1], errors='ignore') | pandas.to_numeric |
import copy
from datetime import datetime
import warnings
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Series, isna, notna
import pandas._testing as tm
from pandas.core.window.common i... | tm.assert_frame_equal(df2_result, df2_expected) | pandas._testing.assert_frame_equal |
import re
import numpy as np
import pandas as pd
import random as rd
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
# Print options
np.set_printoptions(precision=4, threshold=10000, linewidth=160, edgeitems=9... | pd.get_dummies(df_titanic_data['Fare_bin']) | pandas.get_dummies |
"""
this is compilation of useful functions that might be helpful to analyse BEAM-related data
"""
import matplotlib.pyplot as plt
import numpy as np
import time
import urllib
import pandas as pd
import re
import statistics
from urllib.error import HTTPError
from urllib import request
def get_output_path_from_s3_u... | pd.read_csv(events_file_path, low_memory=False, chunksize=100000) | pandas.read_csv |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import os
import six
import json
import shutil
import sqlite3
import pandas as pd
import gramex.cache
from io import BytesIO
from lxml import etree
from nose.tools import eq_, ok_
from gramex import conf
from gramex.http import BAD_REQUEST, FOUN... | afe(actual, expected, check_like=True) | pandas.util.testing.assert_frame_equal |
import matplotlib.pyplot as plt
# from sklearn import metrics
from sklearn.metrics import roc_curve, auc, confusion_matrix
from sklearn import preprocessing
import tensorflow as tf
import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
def compute_rates(y_test, y_pred, pos_label=1):... | pd.read_csv("fraud_acc.csv") | pandas.read_csv |
from datetime import datetime
from collections import Counter
from functools import partial
import pandas as pd
import mongoengine
import xlrd
import os
import re
def create_regex(s: str, initials: bool = True) -> str:
"""
Given a string representation of either a channel or marker, generate a standard
re... | pd.ExcelWriter(file_name, engine='xlsxwriter') | pandas.ExcelWriter |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime, timedelta
from scipy.special import gamma,gammainc,gammaincc
from scipy.stats import norm
from scipy.optimize import minimize,root_scalar
import networkx as nx
from operator import itemgetter
ep... | pd.to_numeric(train) | pandas.to_numeric |
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_... | pandas.DataFrame(data) | pandas.DataFrame |
import numpy as np
import pandas as pd
from lifelines import KaplanMeierFitter, NelsonAalenFitter
from lifelines import KaplanMeierFitter
from lifelines.plotting import add_at_risk_counts
def plot_kaplanmeier(outcomes, groups=None, plot_counts=False, **kwargs):
"""Plot a Kaplan-Meier Survival Estimator stratifie... | pd.isna(group) | pandas.isna |
import pandas as pd
import pandas_datareader as pdr
##### Fetch KOSPI data from Naver Finance #####
kospi_total_url = 'https://finance.naver.com/sise/sise_index_day.nhn?code=KOSPI'
# Define a DataFrame named df to hold the daily data
kospi_total_df = | pd.DataFrame() | pandas.DataFrame |
import time
import pandas as pd
import numpy as np
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
cities=('chicago','new york city','washington')
months=('january','february','march','april','may','june', 'all')
days=('sunday',... | pd.to_datetime(df['Start Time']) | pandas.to_datetime |
# Copyright (C) 2022 National Center for Atmospheric Research and National Oceanic and Atmospheric Administration
# SPDX-License-Identifier: Apache-2.0
#
""" This is the overall control file. It will drive the entire analysis package"""
import monetio as mio
import monet as m
import os
import xarray as xr
import pand... | pd.Timestamp(self.control_dict['analysis']['start_time']) | pandas.Timestamp |
import numpy as np
import pandas as pd
from scipy import sparse
import scanpy as sc
from sklearn.linear_model import LinearRegression
from scIB.utils import checkAdata, checkBatch
def pcr_comparison(
adata_pre,
adata_post,
covariate,
embed=None,
n_comps=50,
scale=True,... | pd.api.types.is_numeric_dtype(covariate) | pandas.api.types.is_numeric_dtype |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
from pathlib import Path
import numpy as np
import pandas as pd
train = pd.read_csv("corpus/imdb/labeledTrainData.tsv", header=0,
delimiter="\t", quoting=3)
test = pd.read_csv("corpus/imdb/testData.tsv", header=0,
delimiter="\t",... | pd.DataFrame(data={"id": test["id"], "sentiment": y_pred}) | pandas.DataFrame |
import pandas as pd
import shutil
import os
import time
import re
import datetime
from functools import partial
def append_csvs_to_csv(csv_filepath_list, outpath=None):
"""
Appends csvs into a single csv. Is memory efficient by only keeping the current
processed file in memory. However still keeps track of... | pd.read_csv(inpath) | pandas.read_csv |
from pdpbox.info_plots import target_plot
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from pandas.testing import assert_series_equal
def test_binary(titanic_data, titanic_target):
fig, axes, summary_df = target_plot(df=titanic_data,
... | assert_frame_equal(expected, summary_df.loc[[0, 4, 8], :], check_like=True) | pandas.testing.assert_frame_equal |
"""
Zonal Statistics
Vector-Raster Analysis
Modified by <NAME> from 2013 <NAME> and AsgerPetersen:
usage: generate_twi_per_basin.py [-h] [--output flag [--buffer distance] [--nodata value] [-f FORMAT]
catchments twi_raster slope_raster outputfolder_twi
positional arguments:
namest ... | pd.DataFrame(columns=['TravelTimeHour'], data=sorted_array) | pandas.DataFrame |
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from _imports import *
os.system('cls')
remove_duplicates = ask_for_user_preference('Czy usunąć duplikaty projektów wygenerowanych przez algorytmy?')
verify_designs = ask_for_user_preference('Czy symulacyjnie zweryf... | pd.concat([random_diffusions_df,agent_diffusions_rnd_df,agent_diffusions_bst_df,algenet_diffusions_df]) | pandas.concat |
from calendar import monthrange
from datetime import datetime
import pandas as pd
from flask import Blueprint, jsonify, abort, g
from gatekeeping.api.budget import get_budget
from gatekeeping.api.position import get_positions
from gatekeeping.api.function import get_functions, get_function
from gatekeeping.api.user im... | pd.Series(hc + total_proposed_increase) | pandas.Series |
# -*- coding: utf-8 -*-
#
# License: This module is released under the terms of the LICENSE file
# contained within this applications INSTALL directory
"""
Utility functions for model generation
"""
# -- Coding Conventions
# http://www.python.org/dev/peps/pep-0008/ - Use the Python s... | pd.Timedelta(366, unit='d') | pandas.Timedelta |
import numpy as np
import pandas as pd
from .base_test_class import DartsBaseTestClass
from ..models.kalman_filter import KalmanFilter
from ..models.filtering_model import MovingAverage
from ..timeseries import TimeSeries
from ..utils import timeseries_generation as tg
class KalmanFilterTestCase(DartsBaseTestClass):... | pd.DataFrame(data=testing_signal_with_noise, columns=['signal']) | pandas.DataFrame |
# coding: utf-8
# In[1]:
#IMPORT REQUISTITE LIBRARIES
from datadownloader.MeetupClients import MeetUpClients
import json
import pandas as pd
from datadownloader.Utils.Logging import LoggingUtil
from datetime import datetime
import multiprocessing as mp
from functools import partial
import numpy as np
import sys
... | pd.concat([group_event_counts[group_event_counts['EventCount']!=-1],event_count_failed_rep]) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# # Analysis of Cryptocurrency Investments
#
# In this analysis report, I will perform exploratory data analysis and build machine learning models to predict market prices in future 30 days for the above 7 cryptocurrencies.
# [1. Prepare Data Set](#1)
# - [Load Python Packages]... | pd.DataFrame(forecasted_BTC, columns=['daily_avg'], index=new_date) | pandas.DataFrame |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or a... | pd.RangeIndex(actual_max, actual_min, step) | pandas.RangeIndex |
"""
Created on Sat Sep 18 23:11:22 2021
@author: datakind
"""
import logging
import os
import sys
import typing as T
from functools import reduce
from pathlib import Path
import pandas as pd
import requests
from matplotlib import collections
from matplotlib import pyplot as plt
from analysis.acs_correlation import c... | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import pandas_datareader.data as web
import datetime as dt
import requests
import io
import zipfile
from kungfu.series import FinancialSeries
from kungfu.frame import FinancialDataFrame
def download_factor_data(freq='D'):
'''
Downloads factor data from Kenneth French's... | pd.read_excel(url, sheet_name='Monthly') | pandas.read_excel |
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data as Data # contains the DataLoader needed for minibatching
from torch.autograd import Variable
from sklearn.utils import shuffle
from... | pd.concat([true_train_dataset, negative_samples]) | pandas.concat |
import torch
from pathlib import Path
import librosa
import numpy as np
from torch.utils.data import Dataset, DataLoader
import json
import pandas as pd
import os
import math
from PIL import Image
import warnings
from helpers.audio_utils import *
from dataloaders.imbalanced_dataset_sampler import ImbalancedDatasetSampl... | pd.read_csv(xeno_csv) | pandas.read_csv |
import requests
import re
from bs4 import BeautifulSoup
import pandas as pd
import sys
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import QUrl
from PyQt4.QtWebKit import QWebPage
import bs4 as bs
import urllib.request
import os
import datetime
#################################################... | pd.DataFrame(records, columns = ['COUNTRY', 'COMPANY', 'MODEL', 'USP', 'DISPLAY', 'CAMERA', 'MEMORY', 'BATTERY', 'THICKNESS', 'PROCESSOR', 'EXTRAS/ LINKS']) | pandas.DataFrame |
import pandas as pd
import results
from phrasegeo import Matcher, MatcherPipeline
from time import time
# load up the db
db_name = 'GNAF_VIC'
DB = f"postgresql:///{db_name}"
db = results.db(DB)
# set up the matchers
matcher1 = Matcher(db, how='standard')
matcher2 = Matcher(db, how='slow')
matcher3 = Matcher(db, how... | pd.read_csv('phrasegeo/datasets/nab_atm_vic.csv') | pandas.read_csv |
"""
Target Problem:
---------------
* To train a model to predict the brain connectivity for the next time point given the brain connectivity at current time point.
Proposed Solution (Machine Learning Pipeline):
----------------------------------------------
* K-NN
Input to Proposed Solution:
------------------... | pd.DataFrame(predictions) | pandas.DataFrame |
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import spacepy.plot as splot
import datetime as dt
import matplotlib.dates as mdates
import pandas as pd
import statsmodels.api as sm
from scipy.interpolate import interp1d
from scipy import array
import numpy as np
import analysis as ala
import g... | pd.DataFrame() | pandas.DataFrame |
# Multiscale sampling (MSS) with VASP and LAMMPS
# <NAME>
# Getman Research Group
# Mar 5, 2020
import sys,os
import pandas as pd
import solvent
class ReadInput(object):
def __init__(self, poscar_file, mss_input):
self.readPOSCAR(poscar_file)
self.readMSSinput(mss_input)
self.groupAtom()
def readPOSCAR(se... | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
concat,
date_range,
)
import pandas._testing as tm
class TestEmptyConcat:
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
... | DataFrame(columns=["a", "b", "c"]) | pandas.DataFrame |
from ctypes import sizeof
import traceback
from matplotlib.pyplot import axis
import pandas as pd
import numpy as np
from datetime import datetime
from time import sleep
from tqdm import tqdm
import random
import warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from skl... | pd.DataFrame([[class_models[c], reg_models[r]]]*3) | pandas.DataFrame |
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import json
import os
import sys
import subprocess
from configparser import ConfigParser
from tqdm import tqdm
from nltk import sent_tokenize
from sklearn.metrics import accuracy_score, classification_report
from sklearn.feature_extraction.text import Tfidf... | pd.DataFrame.from_records(train) | pandas.DataFrame.from_records |
import json
import logging
import datetime
from pathlib import Path
import branca.colormap as cm
import fiona
import folium
import geopandas as gpd
import numpy as np
import pandas as pd
import rasterio
from folium import plugins
from rasterstats import zonal_stats
from shapely import geometry as sgeom
... | pd.DataFrame(collection) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
#import os
import numpy as np
import pandas as pd
from unittest import TestCase
from exatomic import gaussian
from exatomic.base import resource
from exatomic.gaussian import Output... | pd.DataFrame(self.mam2.basis_set) | pandas.DataFrame |
# coding:utf-8
import os
from pathlib import Path
import sys
import argparse
import pdb
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from tqdm import tqdm
import pickle
import time
from datetime import datetime, timedelta
from sklearn.metrics import confu... | pd.concat([null_imp_df, imp_df]) | pandas.concat |
# Import required modules
import requests
import pandas as pd
import json
import subprocess
from tqdm import tqdm
import re
# Set pandas to show full rows and columns
pd.set_option('display.max_rows', None)
| pd.set_option('display.max_columns', None) | pandas.set_option |
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ im... | pd.Timedelta('4 days 00:00:00') | pandas.Timedelta |
# Standard libraries
import sys
from typing import Optional
import threading
# Third party libraries
import discord
from discord.ext import commands
import pandas as pd
# Local dependencies
from util.vars import config
from util.db import get_db
def get_guild(bot: commands.Bot) -> discord.Guild:
"""
Returns... | pd.DataFrame() | pandas.DataFrame |
# TO DO
# 1. Fair probability
# 2. Hedge opportunities
# 3. Datapane map
# 4. Change since prior poll
# Import modules
import json
import requests
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
pd.set_option('display.max_rows', None) #print all rows without truncatin... | pd.read_csv('https://projects.fivethirtyeight.com/2020-general-data/presidential_state_toplines_2020.csv') | pandas.read_csv |
from daily_clifile_editor import compute_breakpoint
import pandas as pd
import subprocess
import numpy as np
import matplotlib.pyplot as plt
# Jun 11 2015
precip = [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,... | pd.read_pickle("exercise.pickle") | pandas.read_pickle |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class DataProcessor:
def __init__(self, data_path):
self.orig_data = pd.read_csv(data_path)
self.data = self.orig_data
self.scaled_features = {}
self.train_features = None
self.train_targets = None
... | pd.concat([self.data, dummies], axis=1) | pandas.concat |
import multiprocessing as mp
import numpy as np
import pandas as pd
def _get_ids(vol, bl, co):
"""Fetch block and extract IDs.
Parameters
----------
vol : CloudVolume
Volume to query.
bl : list-like
Coordinates defining the block:
left,... | pd.DataFrame(x) | pandas.DataFrame |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_io.ipynb (unless otherwise specified).
__all__ = ['dicom_dataframe', 'get_plane', 'is_axial', 'is_sagittal', 'is_coronal', 'is_fat_suppressed', 'load_mat',
'load_h5']
# Cell
from fastscript import call_parse, Param, bool_arg
from scipy import ndimage
impo... | pd.DataFrame() | pandas.DataFrame |
import shutil
import sys
from argparse import ArgumentParser
from collections import Counter
from pathlib import Path
from zipfile import ZipFile
import numpy as np
import pandas as pd
import requests
from src.config import CONTEXT_SIZE, COVERAGE, DATA_DIR, TEXT8_URL, VOCAB_SIZE
from src.utils.logger import get_logge... | pd.concat(dfs_agg, sort=False) | pandas.concat |
import copy
import os
import warnings
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path
from tqdm import tqdm
from scipy import stats
from typing import Tuple, Dict, Union
from scipy.spatial.distance import cdist
from sklearn.model_selection import KFo... | pd.DataFrame({'real': r2r + f2r, 'fake': r2f + f2f}, index=index) | pandas.DataFrame |
import pytest
from pymanda import ChoiceData, DiscreteChoice
import pandas as pd
import numpy as np
@pytest.fixture
def psa_data():
'''create data for psa analysis'''
#create corporations series
corps = ['x' for x in range(50)]
corps += ['y' for x in range(25)]
corps += ['z' for x in range(25)]
... | pd.concat([df_miss, psa_data]) | pandas.concat |
# coding: utf-8
import pandas as pd
from collections import defaultdict
def main(args):
clustering = | pd.read_table(args.clustering_file, sep=',', names=['contig_id', 'cluster_id'], index_col=0) | pandas.read_table |
# this will be the main program for inspecting TESS light curves for stellar rotation
# Import relevant modules
#%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#import matplotlib.cm as cm
#import matplotlib
import matplotlib.gridspec as gridspec
#from astropy.visualization im... | pd.read_csv(file[0]) | pandas.read_csv |
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():... | pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.) | pandas.DataFrame |
import numpy as np
import pandas as pd
import random
from rpy2.robjects.packages import importr
utils = importr('utils')
prodlim = importr('prodlim')
survival = importr('survival')
#KMsurv = importr('KMsurv')
#cvAUC = importr('pROC')
#utils.install_packages('pseudo')
#utils.install_packages('prodlim')
#utils... | pd.DataFrame(df,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) | pandas.DataFrame |