| prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
|---|---|---|
## 1. Introduction ##
import pandas as pd
happiness2015 = pd.read_csv("World_Happiness_2015.csv")
happiness2016 = pd.read_csv("World_Happiness_2016.csv")
happiness2017= pd.read_csv("World_Happiness_2017.csv")
happiness2015['Year'] = 2015
happiness2016['Year'] = 2016
happiness2017['Year'] = 2017
## 2. Combining Da... | pd.concat([happiness2015, happiness2016, happiness2017]) | pandas.concat |
import wf_rdbms.utils
import pandas as pd
import logging
logger = logging.getLogger(__name__)
TYPES = {
'integer': {
'pandas_dtype': 'Int64',
'to_pandas_series': lambda x: pd.Series(x, dtype='Int64'),
'to_python_list': lambda x: wf_rdbms.utils.series_to_list(pd.Series(x, dtype='Int64'))
... | pd.Series(x, dtype='string') | pandas.Series |
import pandas as pd
def create_dataframe():
"""Create a sample Pandas dataframe used by the test functions.
Returns
-------
df : pandas.DataFrame
Pandas dataframe containing sample data.
"""
dti = pd.date_range("2018-01-01", periods=9, freq="H")
d = {'user': ... | pd.date_range("2018-01-01", periods=9, freq="H") | pandas.date_range |
# Modified from: https://www.kaggle.com/gauravs90/keras-bert-toxic-model-bert-fine-tuning-with-keras
from keras.callbacks import ModelCheckpoint
import keras as keras
from keras.layers import Input, concatenate
from keras_bert import load_trained_model_from_checkpoint, load_vocabulary
from keras_bert import Tokenize... | pd.Series(y_test) | pandas.Series |
"""
Utilities for examining ABS NOM unit record
"""
import pickle
from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from IPython.display import display_html, display
from matplotlib.patches import Patch
from chris_utilities import adjust_c... | pd.read_excel(file_path) | pandas.read_excel |
import os
from mtsv.scripts.mtsv_analyze import *
import pytest
import datetime
import tables
import pandas as pd
@pytest.fixture(scope="function")
def existing_empty_datastore(tmpdir_factory):
fn = tmpdir_factory.mktemp("datastores").join("empty_datastore.h5")
store = pd.HDFStore(fn)
store.close()
ret... | pd.DataFrame([1,2,3,4]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
from IPython.core.display import HTML
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
import plotly.offline as py
import plotly.graph_objs as go
import plotly.express as px
class... | pd.Timedelta('6 hours') | pandas.Timedelta |
import collections
import fnmatch
import os
from typing import Union
import tarfile
import pandas as pd
import numpy as np
from pandas.core.dtypes.common import is_string_dtype, is_numeric_dtype
from hydrodataset.data.data_base import DataSourceBase
from hydrodataset.data.stat import cal_fdc
from hydrodataset.utils im... | pd.read_csv(attr_all_file, sep=",") | pandas.read_csv |
import hashlib
import json
import os
import random
import threading
import sys
import time
import uuid
import functools
import pandas as pd
import numpy as np
from fate_test._config import Config
from fate_test._io import echo, LOGGER
def import_fate():
from fate_arch import storage
from fate_flow.utils impo... | pd.DataFrame(columns=head_1) | pandas.DataFrame |
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymo... | Timestamp('2008-10-23 05:53:05') | pandas.Timestamp |
"""Instantiate a Dash app."""
import datetime
import numpy as np
import pandas as pd
import dash
import dash_table
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
from .layout import html_layout
import sqlite3
import plotly.express as px
app_colors = {... | pd.to_datetime(twdf['timestamp']) | pandas.to_datetime |
"""
Module to work with result data
"""
import glob
import collections
import logging
import os
import shutil
from typing import Dict
import pandas as pd
import numpy as np
import tensorflow as tf
import settings
clinical = None
# Get the mixed results
def all_results(path, select_type, predictions=False, elem_fol... | pd.merge(clinical_info, results_df, left_on='id', right_on='pA') | pandas.merge |
import os
import time
import csv
import torch
import torch.nn as nn
from mvcnn import Model
from args import get_parser
import torch.nn.functional as F
from dataset import MultiViewDataSet, preprocess
from torch.utils.data import DataLoader
# from helpers.logger import Logger
# import util
import numpy as np
from pathl... | pd.DataFrame(columns=['f1 micro']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import scipy as sp
import matplotlib.pyplot as plt
import seaborn as sns
from collections import OrderedDict
import pickle
from pystan import StanModel
import plot_coefficients as pc
import copy
import os
sns.set_context('notebook')
script_dir = os.path.dirname(os.path.abspath(_... | pd.DataFrame(o_dict) | pandas.DataFrame |
import pandas as pd
import geopandas
from geopandas import GeoDataFrame
from pandas import Series
from shapely.geometry import Point, MultiPoint, LineString, MultiLineString, Polygon
import itertools
import numpy as np
import math
import matplotlib.pyplot as plt
import time
import datetime
def ReadData(bestand):
... | pd.DataFrame(columns = ['gull pair', 'start', 'end','gull','geometry']) | pandas.DataFrame |
import MSfingerprinter.decoder as decoder
import MSfingerprinter.preprocessing as preprocessing
import MSfingerprinter.pysax as SAX
import MSfingerprinter.periodicityfinder as periodicityfinder
import MSfingerprinter.maxsubpatterntree as maxsubpatterntree
import MSfingerprinter.datacube as datacube
import MSfingerprint... | pd.DataFrame(normalizedMSarray) | pandas.DataFrame |
import copy
import re
from textwrap import dedent
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
)
import pandas._testing as tm
jinja2 = pytest.importorskip("jinja2")
from pandas.io.formats.style import ( # isort:skip
Styler,
)
from pandas.io.formats.sty... | Styler(df, uuid_len=len_, cell_ids=False) | pandas.io.formats.style.Styler |
import os
import pandas as pd
from numpy import testing as npt
import pandas.util.testing as pdt
import ixmp
import pytest
from ixmp.default_path_constants import CONFIG_PATH
from testing_utils import (
test_mp,
test_mp_props,
test_mp_use_default_dbprops_file,
test_mp_use_db_config_path,
)
test_args ... | pd.DataFrame.from_dict(df) | pandas.DataFrame.from_dict |
# --------------
# import the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# Code starts here
df= | pd.read_json(path,lines=True) | pandas.read_json |
#!/usr/bin/env python
# coding: utf-8
# <div class="alert alert-block alert-info">
# <b><h1>ENGR 1330 Computational Thinking with Data Science </h1></b>
# </div>
#
# Copyright © 2021 <NAME> and <NAME>
#
# Last GitHub Commit Date: 4 Nov 2021
#
# # 26: Linear Regression
# - Purpose
# - Homebrew (using covar... | pd.DataFrame({'X':sample_weight, 'Y':sample_length}) | pandas.DataFrame |
import numpy as np
def rk4(x,ii,f,h):
for j in ii[1:]:
m1 = f(j,x[-1])
m2 = f(j+h/2,x[-1] + h/2*m1)
m3 =f(j+h/2,x[-1] + h/2*m2)
m4 = f(j+h,x[-1] + h*m3)
avg_slope = ( m1 +2*(m2 +m3) + m4 )/6
x.append(x[-1] + avg_slope*h)
def rk2(x,ii,f,h):
for j in ii[1:]:
... | pd.DataFrame(y) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull)
from pandas.compat import lrange
from pandas import compat
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
... | pd.Series(vals) | pandas.Series |
import os
import shutil
from attrdict import AttrDict
import numpy as np
import pandas as pd
from scipy.stats import gmean
from deepsense import neptune
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold
from . import pipeline_config as cfg
from .pip... | pd.read_csv(params.test_filepath, nrows=nrows) | pandas.read_csv |
import pandas as pd
from tqdm import tqdm
import requests
import numpy as np
import sys
from typing import *
from time import sleep
class _UniProtClient:
def __init__(self, base_url):
self._base_url = base_url
@staticmethod
def _query(query_string) -> str:
for i in range(10):
... | pd.concat([valid_mappings, invalid_mapping]) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
import datetime as dt
from scipy import stats
import pymannkendall as mk
from Modules import Read
from Modules.Utils import Listador, FindOutlier, FindOutlierMAD, Cycles
from Modules.Graphs import GraphSerieOutliers, Graph... | pd.DataFrame([], columns=['outlier_inf','outlier_sup']) | pandas.DataFrame |
# %% Imports
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from geneticalgorithm import... | pd.read_csv(datasetURL) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # Teaching your models to play fair
#
# In this notebook you will use `fairlearn` and the Fairness dashboard to generate predictors for the Census dataset. This dataset is a classification problem - given a range of data about 32,000 individuals, predict whether their annual in... | pd.DataFrame(X_scaled, columns=X.columns) | pandas.DataFrame |
from hydroDL import kPath, utils
from hydroDL.app import waterQuality, wqRela
from hydroDL.data import gageII, usgs, gridMET
from hydroDL.master import basins
from hydroDL.post import axplot, figplot
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import time
from astropy.timeseries imp... | pd.DataFrame({'date': ctR}) | pandas.DataFrame |
import json
from datetime import datetime, timedelta
import requests
import pandas as pd
import numpy as np
from scipy.spatial.distance import euclidean, cityblock, cosine
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
class Task(object):
def __init__(self, data):
self.df =... | pd.isna(value) | pandas.isna |
import os
import glob
import pandas as pd
game_files=glob.glob(os.path.join(os.getcwd(),'games','*.EVE'))
game_files.sort()
game_frames=[]
for game_file in game_files:
game_frame=pd.read_csv(game_file,names=['type','multi2','multi3','multi4','multi5','multi6','event'])
game_frames.append(game_frame)
games= ... | pd.concat([games,identifiers], axis=1, sort=False) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 01 11:05:46 2019
@author: Neal
"""
import requests
from bs4 import BeautifulSoup
import pandas as pd
fake_header = { "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36",
... | pd.DataFrame(columns=['Stock', 'Date','Title'], data=results) | pandas.DataFrame |
import os
import pandas as pd
import csv
from sklearn.model_selection import train_test_split
import numpy as np
import random
import tensorflow as tf
import torch
#directory of tasks dataset
os.chdir("original_data")
#destination path to create tsv files, dipends on data cutting
path_0 = "mttransformer/... | pd.read_csv(tsv_SENTIPOLC2016_test, delimiter=',') | pandas.read_csv |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not u... | pandas.DataFrame(data) | pandas.DataFrame |
import logging
from datetime import datetime, timedelta
from logging import handlers
import numpy as np
from pandas import Series, to_datetime, Timedelta, Timestamp, date_range
from pandas.tseries.frequencies import to_offset
from scipy import interpolate
logger = logging.getLogger(__name__)
def frequency_is_suppor... | Series(np.NaN, index=index) | pandas.Series |
import os
import pandas as pd
def loadData(folder_path: str, date: str, start_time: str='9:30',
end_time: str='16:00') -> pd.DataFrame:
"""Function to load complete price data for a given asset, from a given
folder. This function loads all '*.csv' files from a given directory
corresponding to... | pd.concat([data, candidate_data], axis=1) | pandas.concat |
import logging
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
import sentry_sdk
from solarforecastarbiter import utils
def _make_aggobs(obsid, ef=pd.Timestamp('20191001T1100Z'),
eu=None, oda=None):
return {
'observation_id': obsid,
'effective... | pd.Timestamp('20191004T0700Z') | pandas.Timestamp |
# -*- coding: utf-8 -*-
from datetime import timedelta
from distutils.version import LooseVersion
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Int64Index, Series, Timedelta, TimedeltaIndex, Timestamp,
date_range, timedelta_range
)
f... | tm.assert_series_equal(actual, sn) | pandas.util.testing.assert_series_equal |
"""Tools to calculate mass balance and convert appropriately from volume."""
from __future__ import annotations
import json
import os
import pathlib
import warnings
from typing import Any, Callable
import geopandas as gpd
import numpy as np
import pandas as pd
import rasterio as rio
import shapely
from tqdm import tq... | pd.concat(result_data) | pandas.concat |
# Calculate cutting points for each case in the database, based on 2D scores.
# Generates computed/cuts.npy
import math
import json
import pandas as pd
import numpy as np
from sklearn.svm import SVC
import matplotlib.pyplot as plt
from tqdm import tqdm
def calculate(scores=None, votes=None, scdb=None, format="npy")... | pd.read_csv("data/votes.csv") | pandas.read_csv |
"""
This script collects data from the Futurology and Science subreddits for NLP.
"""
############# DEPENDENCIES ##############
import requests
import pandas as pd
import time
import datetime
#########################################
# Save start time so time elapsed can be printed later.
t0 = time.time()
# Genera... | pd.DataFrame(sci_posts) | pandas.DataFrame |
import pandas as pd
WINNING_PTS = 3
DRAWING_PTS = 1
LOOSING_PTS = 0
def single_match_points(match_row: pd.Series):
'''
Compute match points for a single match row.
:param match_row: Row representing a sigle match between two teams.
:return:
'''
if match_row["FTHG"] > match_row["FTAG"]:
... | pd.DataFrame(team_matches_points_rows, index=indexes) | pandas.DataFrame |
import getpass, os, time, subprocess, math, pickle, queue, threading, argparse, time, backoff, shutil, gc
import numpy as np
import pandas as pd
import pyvo as vo
import traceback as tb
from timewise.general import main_logger, DATA_DIR_KEY, data_dir, bigdata_dir, backoff_hndlr
from timewise.wise_data_by_visit import ... | pd.DataFrame(columns=['chunk_number']) | pandas.DataFrame |
from collections import Counter
from itertools import product
import os
import pickle
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.preprocessing import OneHotEncoder
import sys
sys.path.insert(0, '../myfunctions')
from prediction import fit_prediction_model
#from causal_inference import inf... | pd.DataFrame(data=res2, columns=col_names) | pandas.DataFrame |
import pandas as pd
import numpy as np
# 用于订单数据,用户数据,菜单数据的一系列操作
class Operate:
def __init__(self):
pass
#查询用户是否存在
def find_user(self,user):
data=pd.read_csv('用户.csv')
if user in np.array(data['username']).tolist():
return True
else:
return False
... | pd.read_csv('临时订单.csv') | pandas.read_csv |
"""
STAT 656 HW-10
@author:<NAME>
@heavy_lifting_by: Dr. <NAME>
@date: 2020-07-29
"""
import pandas as pd
# Classes provided from AdvancedAnalytics ver 1.25
from AdvancedAnalytics.Text import text_analysis
from AdvancedAnalytics.Text import sentiment_analysis
from sklearn.feature_extraction.text imp... | pd.get_option('max_colwidth') | pandas.get_option |
#!/usr/bin/env python
import os,sys
import pandas as pd
import argparse
daismdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,daismdir)
import daism.modules.simulation as simulation
import daism.modules.training as training
import daism.modules.prediction as prediction
#-----------... | pd.read_csv(inputArgs.feature,sep='\t') | pandas.read_csv |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.c... | tm.makeStringIndex(100) | pandas.util.testing.makeStringIndex |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to i... | pd.Series(bins) | pandas.Series |
import pandas as pd
from sklearn.base import TransformerMixin
class Pruner(TransformerMixin):
"""Prune identifier columns, columns with numerous tokens (>100) and columns
with low information."""
def __init__(self):
self.pruned_columns = ['subjuct_id', 'row_id', 'hadm_id', 'cgid', 'itemid', 'icus... | pd.DataFrame(X) | pandas.DataFrame |
import numpy as np
import pandas as pd
import sidekick as sk
from scipy.integrate import cumtrapz
from sidekick import placeholder as _
from .hospitalization_with_delay import HospitalizationWithDelay
from ..utils import param_property, sliced
from ..mixins.info import Event
class HospitalizationWithOverflow(Hospita... | pd.Series(deaths, index=self.times) | pandas.Series |
import os, re
import pandas as pd
_mag = None
def load(path='data/mag/', **kwargs):
global _mag
if 'target' in kwargs.keys() and kwargs['target']:
_mag = kwargs['target']
else:
_mag = {}
if 'file_regex' not in kwargs.keys():
kwargs['file_regex'] = re.compil... | pd.DataFrame() | pandas.DataFrame |
"""
Tests for the blaze interface to the pipeline api.
"""
from __future__ import division
from collections import OrderedDict
from datetime import timedelta
from unittest import TestCase
import warnings
import blaze as bz
from datashape import dshape, var, Record
from nose_parameterized import parameterized
import n... | pd.DataFrame(columns=self.df.columns) | pandas.DataFrame |
from django.shortcuts import render
from plotly.offline import plot
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
import os
def home(chart):
return render(chart, "index.html")
def engage(chart):
directory = os.getcwd() + "/simulation/engage_sector.xlsx"
data = pd.rea... | pd.read_excel(directory) | pandas.read_excel |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIn... | DataFrame({'A': [1, '2', 3.]}) | pandas.DataFrame |
#!/usr/bin/env python3
import tempfile
import unittest
import warnings
import webbrowser
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as nptest
import pandas as pd
import pandas.testing as pdtest
from sklearn.model_selection import GridSearchCV
from sklearn.utils i... | pd.DataFrame(edmdcv.cv_results_) | pandas.DataFrame |
from copy import (
copy,
deepcopy,
)
import numpy as np
import pytest
from pandas.core.dtypes.common import is_scalar
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
# ----------------------------------------------------------------------
# Generic types test cases
def const... | tm.assert_equal(result, o) | pandas._testing.assert_equal |
from unittest.mock import Mock, patch
import pandas as pd
import pytest
from faker import Faker
from faker.config import DEFAULT_LOCALE
from rdt.transformers.numerical import NumericalTransformer
from sdv.constraints.base import Constraint
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.error... | pd.Series([True, False, True]) | pandas.Series |
import pymongo
from finviz.screener import Screener
import logging
#client = pymongo.MongoClient("mongodb://xin:<EMAIL>/myFirstDatabase?ssl=true&authSource=admin")
#db = client.test
#https://github.com/peerchemist/finta
#https://medium.com/automation-generation/algorithmically-detecting-and-trading-technical-chart-pat... | pd.read_csv(local_csv, index_col="timestamp") | pandas.read_csv |
from flask import g
from libs.extensions import Extension
import pandas as pd
from settings.gdxf.extensions.wxzb.base_sql_map import get_base_sql_map
from utils.qh_processor import get_qh_level
DEBUG = False
def get_sql_map(flag):
if flag:
base_sql_map = {
"1-1-全": "1_1",
}
else... | pd.DataFrame([None], columns=["wxzb"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import locale
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, StringIO, u
from pandas.... | ujson.encode(escaped_input) | pandas._libs.json.encode |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed ... | pd.Series([sortedcol, location],index=['mutations', 'locations']) | pandas.Series |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from code import visualize
# Iteration 13
def split_3(df, test_size=0.1, oversampling_ratio=1):
print('\nSplit - Train&Dev Size = ', 1-test_size, ' , Test Size = ', test_size, '.', sep='')
vcs_one = df['city_id'].valu... | pd.DataFrame(y_train_dev) | pandas.DataFrame |
import pytest
import warnings
import json
import numpy as np
import pandas as pd
from .. import infer_vegalite_type, sanitize_dataframe
def test_infer_vegalite_type():
def _check(arr, typ):
assert infer_vegalite_type(arr) == typ
_check(np.arange(5, dtype=float), "quantitative")
_check(np.arange... | pd.array(["a", "b", "c", "d"], dtype="string") | pandas.array |
"""
The `frame` module allows working with StarTable tables as pandas dataframes.
This is implemented by providing both `Table` and `TableDataFrame` interfaces to the same object.
## Idea
The central idea is that as much as possible of the table information is stored as a pandas
dataframe, and that the remaining inf... | pd.DataFrame(self) | pandas.DataFrame |
import os
import json
import pandas as pd
import numpy as np
from segmenter.collectors.BaseCollector import BaseCollector
import glob
class MetricCollector(BaseCollector):
results = | pd.DataFrame() | pandas.DataFrame |
# import pandas, numpy, and matplotlib
import pandas as pd
from feature_engine.encoding import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from ... | pd.set_option('display.width', 75) | pandas.set_option |
#!/usr/bin/env python
from scipy import sparse
from sklearn.datasets import dump_svmlight_file
from sklearn.preprocessing import LabelEncoder
import argparse
import logging
import numpy as np
import os
import pandas as pd
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
le... | pd.concat([trn, tst], ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
""" Overplot
Series of functions designed to help with charting in Plotly
"""
import pandas as pd
import plotly.express as px
import plotly.graph_objs as go
import plotly.io as pio
# %% PLOTLY EXPRESS STANLIB TEMPLATE
# Approximatation of STANLIB colour theme
COLOUR_MAP = {0:'purple',
... | pd.concat([z, i]) | pandas.concat |
from netCDF4 import Dataset
import pandas as pd
import numpy as np
ncep_path = '/SubX/forecast/tas2m/daily/full/NCEP-CFSv2/' # the path where the raw data from NCEP-CFSv2 is saved
gmao_path = '/SubX/forecast/tas2m/daily/full/GMAO-GEOS_V2p1/'
for model in ['NCEP', 'GMAO']:
if model == 'NCEP':
path = ncep_... | pd.date_range('2017-07-25', '2020-09-28') | pandas.date_range |
# -*- coding: utf-8 -*-
"""
Tools to collect Twitter data from specific accounts.
Part of the module is based on Twitter Scraper library:
https://github.com/bpb27/twitter_scraping
Author: <NAME> <<EMAIL>>
Part of https://github.com/crazyfrogspb/RedditScore project
Copyright (c) 2018 <NAME>. All rights rese... | pd.concat(all_members) | pandas.concat |
import itertools
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
datasets = [
'BPI11/f1/',
'BPI11/f2/',
'BPI11/f3/',
'BPI11/f4/',
'BPI15/f1/',
'BPI15/f2/',
'BPI15/f3/',
'Drift1/f1/',
'Drift2/f1/'
]
split_sizes = [
'0-40_80-100',
'0-60_80-100',
... | pd.to_timedelta(table['evaluation_elapsed_time']) | pandas.to_timedelta |
import math
import re
import pandas as pd
from pandas.core.dtypes.inference import is_hashable
from .transformation import Transformation
from ..exceptions import IndexFilterException
class FilterMissing(Transformation):
filter = True
title = "Filter rows with missing values in {field}"
key = "Filter mis... | pd.isnull(row[self.field]) | pandas.isnull |
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs import iNaT
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
IntervalIndex,
NaT,
Series,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesMissingDa... | Categorical(["a", "b", "c", np.nan]) | pandas.Categorical |
from ieeg.auth import Session
from numpy.lib.function_base import select
import pandas as pd
import pickle
from pull_patient_localization import pull_patient_localization
from numbers import Number
import numpy as np
def get_iEEG_data(username, password, iEEG_filename, start_time_usec, stop_time_usec, select_electrode... | pd.DataFrame(data, columns=channel_names) | pandas.DataFrame |
import config
import os
import pandas as pd
import numpy as np
from datetime import date, datetime, time, timedelta
import re
import warnings
warnings.filterwarnings('ignore')
CONF_CASES_THRESHOLD = 25000
# src_confirmed = config.confirmed_cases_global_online
# src_recovered = config.recovered_cases_global_online
# s... | pd.read_csv(src_confirmed) | pandas.read_csv |
import pandas as pd
import numpy as np
"""
this file is crucial
data5 is the file that goes to R to create the forecast.
"""
data = pd.read_csv('Lokad_Orders.csv')
data['Quantity'] = data['Quantity'].astype('int')
data['Date'] =pd.to_datetime(data.Date) - pd.to_timedelta(7,unit = 'd')
data =data.groupby(['Id', | pd.Grouper(key='Date', freq='W-MON') | pandas.Grouper |
from nilearn import surface
import argparse
from braincoder.decoders import GaussianReceptiveFieldModel
from braincoder.utils import get_rsq
from bids import BIDSLayout
import pandas as pd
import os
import os.path as op
import numpy as np
import nibabel as nb
from nipype.interfaces.freesurfer.utils import SurfaceTransf... | pd.concat((r2_df, r2), axis=0) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pickle
import shutil
import sys
import tempfile
import numpy as np
from numpy import arange, nan
import pandas.testing as pdt
from pandas import DataFrame, MultiIndex, Series, to_datetime
# dependencies testing specific
import pytest
import recordlinka... | pdt.assert_series_equal(result, expected) | pandas.testing.assert_series_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Core classes and functions
============================
Entities & accounts
--------------------
The system is built upon the concept of *accounts* and
*transactions*.
An *account* is wehere the money goes to (or comes from) and
can be a person, company or a gener... | pd.DataFrame.from_records(self.data) | pandas.DataFrame.from_records |
import numpy as np
import json
import os
import collections
import pandas as pd
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
import subprocess
import game_py
def count_last_positions(dir):
pos = set()
total_pos = []
for f in os.listdir(dir):
game = json.load(open(dir + f))
for p in game... | pd.read_csv(file) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 15 16:41:37 2018
@author: krzysztof
This module contains utilities useful when performing data analysis and drug sensitivity prediction with
Genomics of Drug Sensitivity in Cancer (GDSC) database.
Main utilities are Drug classes and Experiment c... | pd.read_csv(coding_variants) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 2 09:20:37 2021
Compiles SNODAS data into SQLite DB
@author: buriona,tclarkin
"""
import sys
from pathlib import Path
import pandas as pd
import sqlalchemy as sql
import sqlite3
import zipfile
from zipfile import ZipFile
# Load directories and defaults
this_dir = Path... | pd.unique(df['LOCAL_NAME']) | pandas.unique |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The fluorescence_extract() function performs the following:
1) Combines MEAN_INTENSITY values from the Spots Statistics TrackMate output files for a single neuron.
2) Subtracts the background, finds the maximal value for each timepoint
3) Calculates the ch... | pd.read_csv(data) | pandas.read_csv |
#!/usr/bin/env python3
"""
Trenco Modules for running scripts
"""
import sys
import re
import os
import glob
import subprocess
import json
import logging
import time
import shlex
import pandas as pd
import numpy as np
import seaborn as sb
import multiprocessing as mp
import matplotlib.pyplot as plt
plt.switch_backend... | pd.read_csv(gene, sep='\t', index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
@author: hkaneko
"""
import math
import sys
import numpy as np
import pandas as pd
import sample_functions
from sklearn import metrics, svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_predict, train_test_split, GridSearchCV... | pd.read_csv('unique_m.csv', index_col=-1) | pandas.read_csv |
from datetime import datetime, timedelta
import pandas as pd
class TuDataModel:
"""
Class Terminal Unit, never modify any variable direct. The idea is that all gets managed via functions
"""
def __init__(self, name):
"""
Constructor of the Tu as a holder for all the TG Tu Dat... | pd.set_option('display.max_colwidth', -1) | pandas.set_option |
# Copyright 2022 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
... | pd.to_datetime(ohlc.time, unit="s") | pandas.to_datetime |
'''
Preprocessing Tranformers Based on sci-kit's API
By <NAME>
Created on June 12, 2017
'''
import copy
import pandas as pd
import numpy as np
import transforms3d as t3d
import scipy.ndimage.filters as filters
from sklearn.base import BaseEstimator, TransformerMixin
from analysis.pymo.rotation_tools import Rotation,... | pd.Series(data=recx, index=new_df.index) | pandas.Series |
import pandas as pd
import helpers
import re
from datetime import datetime
from Bio import SeqIO
from .generate_viral_seq_dataset import get_viralseq_dataset
from .generate_viral_seq_datadownload import get_viralseq_downloads
DATADIR = "/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/"
alignment... | pd.DataFrame(columns=['sequenceID', 'uncurated']) | pandas.DataFrame |
"""
Script for plotting Figures 3, 5, 6
"""
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.lines import Line2D
import matplotlib.ticker as mtick
import seaborn as sns
from datetime import timedelta
i... | pd.read_csv('predictions/RF_preds.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 4 06:03:28 2019
@author: tanujsinghal
"""
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Lo... | pd.DataFrame(l) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[74]:
import pandas as pd
import numpy as np
from pathlib import Path
import os
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV
from sklearn.decomposition import PCA
... | pd.DataFrame(cv.cv_results_) | pandas.DataFrame |
import csv
import json
import pysam
import numpy as np
import pandas as pd
import random
from .bamutils import fetch_upto_next_ID
from . import gtfutils
DEFAULT_ANNOT=['?','?','?','?',int(0)]
def model(file_ambiv_gps, file_expression_model):
#load ambivalence gps
with open(file_ambiv_gps) as f:
jso... | pd.DataFrame(q,columns=['ambiv_gene','ambiv_gene_noprior','unique','ambiv_tx','ambiv_tx_noprior','ambiv_gene2','ambiv_gene_noprior2','unique2','ambiv_tx2','ambiv_tx_noprior2'], index=self.tx_LUT) | pandas.DataFrame |
import pandas as pd
import numpy as np
import requests
import time
import sqlite3
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from bs4 import BeautifulSoup
column_names = ["Team", "ABV", "Schedule"]
abbreviations = pd.read_csv("schedule_urls.csv", names=column_names)
team = abbreviations.ABV.t... | pd.set_option('display.max_colwidth', 40) | pandas.set_option |
# coding: utf-8
# # ------------- Logistics -------------
# In[1]:
from __future__ import division
import numpy
import os
import pandas
import sklearn
import sys
import sqlite3
import pickle
from operator import itemgetter
from collections import Counter
import itertools
import matplotlib
import matplotlib.pyplot a... | pandas.DataFrame(images_test, columns=variables_image) | pandas.DataFrame |
# Collection of preprocessing functions
from nltk.tokenize import word_tokenize
from transformers import CamembertTokenizer
from transformers import BertTokenizer
from tqdm import tqdm
import numpy as np
import pandas as pd
import re
import string
import unicodedata
import tensorflow as tf
import glob
i... | pd.DataFrame(file_names, columns=['filename']) | pandas.DataFrame |
# Copyright 2017-2021 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicabl... | pd.DatetimeIndex(["2018-05-01","2018-05-02","2018-05-03", "2018-05-04"]) | pandas.DatetimeIndex |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use ... | pd.to_datetime(value, format="%H:%M:%S.%f", exact=False) | pandas.to_datetime |
"""Testing calculate axis range"""
import pandas as pd
import pytest
from gov_uk_dashboards.axes import calc_axis_range
def test_given_positive_data_returns_min_range_zero():
"""Testing the axis range given values greater than zero returns zero for min range"""
df = | pd.DataFrame(data={"col1": [6, 8]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from datetime import timedelta
from distutils.version import LooseVersion
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Int64Index, Series, Timedelta, TimedeltaIndex, Timestamp,
date_range, timedelta_range
)
f... | tm.assert_index_equal(index, back) | pandas.util.testing.assert_index_equal |