| prompt | completion | api |
|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 18:11:05 2020
@author: charlie.henry
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 28 15:46:31 2019
@author: berkunis
"""
##############################################01_02_PythonLibraries####################################... | pd.read_csv("volumes-3.csv") | pandas.read_csv |
from ._learner import KilobotLearner
import os
import gc
import logging
from typing import Generator
from cluster_work import InvalidParameterArgument
from kb_learning.kernel import KilobotEnvKernel
from kb_learning.kernel import compute_median_bandwidth, compute_median_bandwidth_kilobots, angle_from_swarm_mean, \... | pd.MultiIndex.from_product([['S'], ['extra'], ['o_x', 'o_y', 'sin_o_t', 'cos_o_t']]) | pandas.MultiIndex.from_product |
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.c... | read_stata(fname, iterator=True) | pandas.io.stata.read_stata |
import numpy as np
import pandas as pd
import xarray as xr
class HyData:
def __init__(self, files, stations):
self.stations = stations
self.files = files
# print('Reading Data....')
def read(self):
ds = xr.Dataset()
for station in self.stations:
ds[station]... | pd.to_datetime(dates, format="%Y%m%d%H") | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 6 11:40:40 2020
@author: hendrick
"""
# =============================================================================
# # # # import packages
# =============================================================================
import numpy as np
import pandas as pd
import ma... | pd.read_csv(filef, sep='\t') | pandas.read_csv |
"""
SparseArray data structure
"""
from __future__ import division
import numbers
import operator
import re
from typing import Any, Callable, Union
import warnings
import numpy as np
from pandas._libs import index as libindex, lib
import pandas._libs.sparse as splib
from pandas._libs.sparse import BlockIndex, IntInd... | is_scalar(data) | pandas.core.dtypes.common.is_scalar |
import pandas as pd
import pytest
from pandera import Column, DataFrameSchema, Check
from pandera import dtypes
from pandera.errors import SchemaError
def test_numeric_dtypes():
for dtype in [
dtypes.Float,
dtypes.Float16,
dtypes.Float32,
dtypes.Float64]:
s... | pd.Series(["A", "B", "A", "B", "C"], dtype="object") | pandas.Series |
import scipy.io as sio
import matplotlib.pyplot as plt
import os
import numpy as np
import logging
import argparse
import pandas as pd
def find_normalized_errors(preds, y, ord):
diffs = preds - y
raw_errors = np.linalg.norm(diffs, ord=ord, axis=1)
raw_mean = raw_errors.mean()
norms = np.linalg.norm(y... | pd.read_table(args.results_df) | pandas.read_table |
#! /usr/bin/python3
# -*- coding: utf-8 -*-
import torch
import click
from tqdm import tqdm
import numpy as np
from sklearn.metrics import fbeta_score
import pandas as pd
from model.model import get_model
from util.util import make_output_dir
from config.config import load_config
from data.dataset import ImetDataset
im... | pd.DataFrame(valid_df_base) | pandas.DataFrame |
import sys
import unittest
import pandas as pd
from src.preprocessing import format_ocean_proximity
class FormattingTestCase(unittest.TestCase):
def setUp(self):
self.ref_df = | pd.read_csv("housing.csv") | pandas.read_csv |
import numpy as np
import pandas as pd
from tqdm import tqdm
import datetime
from scipy import stats
pd.plotting.register_matplotlib_converters() # addresses complaints about Timestamp instead of float for plotting x-values
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import m... | pd.DataFrame(all_predictions) | pandas.DataFrame |
#%%
import pymaid
from pymaid_creds import url, name, password, token
rm = pymaid.CatmaidInstance(url, token, name, password)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
from data_settings import data_date, pairs_path
from contools import Prom... | pd.DataFrame([x.iloc[:, 0] for x in fraction_types], index = fraction_types_names) | pandas.DataFrame |
#!/usr/bin/python
#
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The Astrobee platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in comp... | pd.read_csv(csv_file) | pandas.read_csv |
import os
import gc
import time
import datetime
import pickle as pk
from collections import OrderedDict
from concurrent.futures import ThreadPoolExecutor
import matplotlib.pyplot as plt
import numpy as np
import pandas_market_calendars as mcal
import pandas as pd
from tqdm import tqdm
import wrds
import wrds_utils as... | pd.to_datetime(df['from'], utc=True) | pandas.to_datetime |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index... | Timedelta('1 days') | pandas.Timedelta |
#!/usr/bin/python3
"""
Last update: March 2021
Author: <NAME>, PhD - The Scripps Research Institute, La Jolla (CA)
Contact info: <EMAIL>
GitHub project repository: https://github.com/ldascenzo/pytheas
***DESCRIPTION***
Preliminary work-in-progress step towards the Pytheas support of discovery mode. At the presen... | pd.DataFrame(mods) | pandas.DataFrame |
# Package
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from pandas import DataFrame
from plot_metric.functions import MultiClassClassification
import numpy as np
import matplotlib.pyplot as plt
# Load dataset
X, y = load_digi... | DataFrame(X) | pandas.DataFrame |
# coding: utf-8
# # Notebook to generate a dataframe that captures data reliability
# Perform a series of tests/questions on each row and score the result based on 0 (missing), 1 (ambiguous), 2 (present)
# - is the plot number recorded? If not, this makes it very difficult to identify the plot as unique vs others (2... | pd.isnull(x) | pandas.isnull |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
import torch
from torch.utils.data import Dataset
from transformers import DistilBertTokenizerFast,DistilBertForSequenceClas... | pd.read_csv(path_test) | pandas.read_csv |
import os
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random as rn
import warnings
from kneed import KneeLocator
class BuildDriftKnowledge():
"""
Description :
Class to build the pareto knowledge from hyper-parameters configurations evaluated on differents ... | pd.DataFrame(pareto_front) | pandas.DataFrame |
import matplotlib
matplotlib.use("TKagg")
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
#Simple Linear Regression
#y = ax + b
rng = np.random.RandomState(1)
x = 10 * rng.rand(50)
y = 2 * x - 5 + rng.randn(50)
plt.scatter(x, y);
plt.show()
plt.clf()
from sklearn.linear_model imp... | pd.Series(model.coef_, index=X.columns) | pandas.Series |
from django.shortcuts import render
from django.http import HttpResponse
from datetime import datetime
import psycopg2
import math
import pandas as pd
from openpyxl import Workbook
import csv
import random
def psql_pdc(query):
# PostgreSQL production credentials
connP_P = {
'host' : '10.150.1.74',
'p... | pd.DataFrame(anwr) | pandas.DataFrame |
import click
import os
from tqdm import tqdm
import pandas as pd
from PIL import Image
@click.command()
@click.option('--images', help='input directory')
@click.option('--output', help='output directory')
def main(images, output):
""" convert tiled images to abd format for building detection """
os.makedirs(... | pd.DataFrame() | pandas.DataFrame |
from . import pyheclib
import pandas as pd
import numpy as np
import os
import time
import warnings
# some static functions
def set_message_level(level):
"""
set the verbosity level of the HEC-DSS library
level ranges from "bort" only (level 0) to "internal" (level >10)
"""
pyheclib.hec_... | pd.tseries.offsets.Hour(n=2) | pandas.tseries.offsets.Hour |
# bchhun, {2020-03-22}
import csv
import natsort
import numpy as np
import os
import xmltodict
from xml.parsers.expat import ExpatError
import xml.etree.ElementTree as ET
import pandas as pd
import math
import array_analyzer.extract.constants as constants
"""
functions like "create_<extension>_dict" parse files of <e... | pd.read_excel(well_xlsx_path, sheet_name=None) | pandas.read_excel |
from summit.utils.dataset import DataSet
from summit.domain import *
from summit.experiment import Experiment
from summit import get_summit_config_path
from summit.utils import jsonify_dict, unjsonify_dict
import torch
import torch.nn.functional as F
from skorch import NeuralNetRegressor
from skorch.utils import to_de... | pd.DataFrame(X, columns=input_columns) | pandas.DataFrame |
import itertools
import numpy as np
import cantera as ct
import pandas as pd
import re
import pickle
from .. import simulation as sim
from ...cti_core import cti_processor as ctp
class shockTube(sim.Simulation):
def __init__(self,pressure:float,temperature:float,observables:list,
kineticSens:... | pd.concat(interpolated_against_original_time,axis=1) | pandas.concat |
from examples.residential_mg_with_pv_and_dewhs.modelling.micro_grid_models import (DewhModel, GridModel, PvModel,
ResDemandModel)
from models.agents import ControlledAgent
from typing import MutableMapping, AnyStr, Tuple as Tuple_T
fro... | pd.Timedelta(seconds=self.mld_info.ts) | pandas.Timedelta |
import numpy as np
import os
import pandas as pd
import pickle
import unittest
from predictor import Predictor
from scipy.signal import savgol_filter
class PredictorTests(unittest.TestCase):
def setUp(self) -> None:
self.predictor = Predictor()
def test_create_predictor(self):
"""
Te... | pd.DataFrame() | pandas.DataFrame |
import glob
import pandas as pd
from configparser import ConfigParser
import os
from simba.drop_bp_cords import *
def create_emty_df(shape_type):
if shape_type == 'rectangle':
col_list = ['Video', 'Shape_type', 'Name', 'Color name', 'Color BGR', 'Thickness', 'topLeftX',
'topLeftY', 'Bot... | pd.DataFrame(columns=col_list) | pandas.DataFrame |
import pandas as pd
from pkg_resources import resource_filename
from .data_simulator import SimulatedData
from .base import survival_stats
from .base import survival_df
from .base import survival_dmat
__ALL__ = [
"survival_stats",
"survival_df",
"survival_dmat",
"load_simulated_data",
"load_metabr... | pd.concat([train_X, train_y], axis=1) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 8 16:49:18 2020
@author: kwanale1
"""
import pandas as pd
from shapely.geometry import MultiPolygon, Polygon, Point
#voting_shp = gpd.read_file("VOTING_SUBDIVISION_2014_WGS84.shp")
#wards_shp = gpd.read_file("icitw_wgs84.shp")
#print(wards_shp)
#sh... | pd.Series(A_ref) | pandas.Series |
# -*- coding: utf-8 -*-
# @author: Elie
#%% ==========================================================
# Import libraries set library params
# ============================================================
import pandas as pd
import numpy as np
import os
pd.options.mode.chained_assignment = None #Pandas warnings off
#pl... | pd.read_csv(indel_counts_path, sep='\t', low_memory=False) | pandas.read_csv |
"""Materials discovery using Earth Mover's Distance, DensMAP embeddings, and HDBSCAN*.
Create distance matrix, apply densMAP, and create clusters via HDBSCAN* to search for
interesting materials. For example, materials with high-target/low-density (density
proxy) or high-target surrounded by materials with low targets... | pd.DataFrame({"y_true": y_true, "y_pred": y_pred}) | pandas.DataFrame |
import root_pandas
import basf2_mva
import b2luigi
import pandas as pd
from sklearn.model_selection import train_test_split
def split_sample(
ntuple_file,
train_size,
test_size,
random_seed=42):
"""Split rootfile and return dataframes. Select 0th candidate."""
df = root_pandas.... | pd.DataFrame(empty) | pandas.DataFrame |
from typing import Any, Dict
import pandas as pd
from urllib.parse import urlparse, parse_qsl, urlunparse, urlencode
def convert_file_to_list(filelocation):
urls = None
if filelocation.endswith('.csv'):
df = pd.read_csv(filelocation)
urls = df['products'].to_list()
elif filelocation.endsw... | pd.DataFrame(data, index=[0]) | pandas.DataFrame |
import random, os, copy, torch, torch.nn as nn, numpy as np, pandas as pd
from sklearn.utils import resample
from collections import defaultdict, Counter
import matplotlib.pyplot as plt
def upsampling(data, target_col_name):
np.random.seed(10)
data_copy = copy.deepcopy(data)
classes_up = np.unique(dat... | pd.Series(pos) | pandas.Series |
try:
from datetime import timedelta,datetime
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from datetime import datetime
import pandas as pd
print("All Dag modules are ok ......")
except Exception as e:
print("Error {} ".format(e))
def first_functi... | pd.DataFrame(data=data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 20 10:54:48 2019
@author: raahul46
"""
####DEPENDENCIES####
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import Stan... | pd.read_csv("test.csv") | pandas.read_csv |
import pickle
import numpy as np
import pandas as pd
## plot conf
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 7})
width = 8.5/2.54
height = width*(3/4)
###
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
plot_path = './'
## Load true fits
real_fit_male = | pd.read_csv(script_dir+'/plot_pickles/real_male_fit.csv') | pandas.read_csv |
"""Miscellaneous internal PyJanitor helper functions."""
import fnmatch
import functools
import os
import re
import socket
import sys
import warnings
from collections.abc import Callable as dispatch_callable
from itertools import chain, combinations
from typing import (
Callable,
Dict,
Iterable,
List,
... | pd.Series(value) | pandas.Series |
def enter_foodgroup():
import tqdm
import psycopg2
import pandas as pd
from cnf_xplor.api.models import FoodGroup
def fix_french_accents(df, key):
df[key] = [x.encode('utf-8').decode('utf-8') if x is not None else x for x in df[key].values]
return df
def reformat_bool(s):
... | pd.read_csv('UpdatedCNFData/CNFADM_NUTR_NAME.csv', delimiter = "\t", encoding='utf-16') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 17 11:56:35 2019
@author: hcamphausen
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.preprocessing import PolynomialFe... | pd.get_dummies(df['weather']) | pandas.get_dummies |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index... | tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0) | pandas.util.testing.assertRaisesRegexp |
# author: <NAME> and <NAME>
# date: 2020-06-24
"""
This script performs will train the best model
Usage: src/03_modelling/011_modeling.py \
--file_path1=<file_path1> --file_path2=<file_path2> --file_path3=<file_path3> \
--save_to1=<save_to1> --save_to2=<save_to2> --save_model=<save_model>
Options:
--file_path1=<file... | pd.concat([X_train, X_valid], ignore_index=True) | pandas.concat |
__all__ = [
'factorize_array',
'factorize_dataframe',
'factorize_ndarray',
'fast_zip_arrays',
'fast_zip_dataframe_columns',
'get_dataframe',
'get_dtypes_and_required_cols',
'get_ids',
'get_json',
'get_timestamp',
'get_utctimestamp',
'merge_dataframes',
'PANDAS_BASIC_D... | pd.DataFrame(data=new, index=df.index) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Plouto-Quants quantitative investment course case-study source code package
Copyright Plouto-Quants
For teaching purposes only; redistribution or use for profit is strictly prohibited, and violators will be held liable
©Plouto-Quants All Rights Reserved
Plouto-Quants teaching assistant WeChat: niuxiaomi3
"""
from pymongo import ASCENDING, DESCENDING
from database import DB_CONN
from datetime import datetime, timedelta
import tushare as ts
import numpy as np
import pandas as pd
def... | pd.Series() | pandas.Series |
'''
Created on Jun 25, 2015
@author: eze
'''
import logging
import os
import re
import traceback
from multiprocessing.synchronize import Lock
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.Polypeptide import CaPPBuilder
from SNDG import Str... | pd.read_csv(DNsPDBs + "2") | pandas.read_csv |
#!/usr/bin/env python
"""plotlib.py: module is dedicated to plottting."""
__author__ = "<NAME>."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "<NAME>."
__email__ = "<EMAIL>"
__status__ = "Research"
import matplotlib
matplotlib.use("Agg")
im... | pd.read_csv(fname, parse_dates=["dn"]) | pandas.read_csv |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from featuretools.primitives import (
Age,
EmailAddressToDomain,
IsFreeEmailDomain,
TimeSince,
URLToDomain,
URLToProtocol,
URLToTLD,
Week,
get_transform_primitives
)
def test_time_since():
time... | pd.Series(['', '<EMAIL>', '<EMAIL>']) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) University of St Andrews 2020-2021
# (c) University of Strathclyde 2020-2021
# Author:
# <NAME>
#
# Contact
# <EMAIL>
#
# <NAME>,
# Biomolecular Sciences Building,
# University of St Andrews,
# North Haugh Campus,
# St Andrews,
# KY16 9ST
# Scotland,
# UK
#
# The MIT... | pd.to_datetime(start_time) | pandas.to_datetime |
###############################################################################
# #
# pre-processing and dataset construction #
# July 6 2020... | pd.DataFrame(columns=['Record_ID','Name','Relation']) | pandas.DataFrame |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
# ***Important: still, need to install "crypto-news-api" package from PyPI first!
# first, activate cryptoalgowheel conda environment
# then: /anaconda3/envs/cryptoalgowheel/bin/pip install crypto-news-api
# %%
from crypto_new... | pd.DataFrame(latest_news_coin) | pandas.DataFrame |
# -*- coding: utf-8 -*-
#Classes and functions of XAS experiments
import datetime, numpy as np, operator, pandas as pd, matplotlib.pyplot as plt
from scipy import interpolate
# Parent Classes
class _DataFile():
"""Parent class for all XAS data files"""
filename = ""
shortname = ""
dataframe... | pd.concat([scan.dataframe for scan in self._MDAlist]) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Importing necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#get_ipython().run_line_magic('matplotlib', 'inline')
import warnings
warnings.filterwarnings('ignore')
# In[2]:
#Installing pmdarima package
#get_ipython().system... | pd.read_csv("Champagne Sales.csv") | pandas.read_csv |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_deco... | Term("columns=A", encoding="ascii") | pandas.io.pytables.Term |
import os
import sys
import torch
import pickle
import argparse
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn as skl
import tensorflow as tf
from scipy.stats import gamma
from callbacks import RegressionCallback
from regression_data import generate_toy_data
from ... | pd.DataFrame(columns=self.cols_eval) | pandas.DataFrame |
# Import the libraries
import numpy as np
import pandas as pd
# Read the data
dades = pd.read_csv('data.csv', sep=";")
# Add the number-of-agreements attribute
dades['N_acords'] = 1
# Transform the quantitative variables for agreements involving more than one country
atribut = ['','','Loc2ISO','Loc3ISO','Loc4ISO','Loc5ISO','Loc... | pd.notna(dades['Loc9ISO']) | pandas.notna |
import json
import os
import pandas as pd
from sqlalchemy import create_engine
from newstrends import utils
_ROOT_DIR = os.path.abspath(__file__ + "/../../../../../")
_ENGINE = None
def get_engine():
global _ENGINE
if _ENGINE is None:
db_info_path = os.path.join(_ROOT_DIR, 'data/db_info.json')
... | pd.DataFrame(fetched, columns=['link']) | pandas.DataFrame |
import pandas as pd
import path_utils
from Evolve import Evolve, replot_evo_dict_from_dir
import traceback as tb
import os, json, shutil
import numpy as np
import matplotlib.pyplot as plt
import itertools
from copy import deepcopy
import pprint as pp
from tabulate import tabulate
import seaborn as sns
import shutil
imp... | pd.DataFrame(row_dict, index=[index]) | pandas.DataFrame |
import sys, os
import numpy as np
import pandas as pd
import scipy.io as sio
# met mast functions and utilities
sys.path.append('../')
import met_funcs as MET
# import vis as vis
import utils as utils
import pickle as pkl
import time
# paths (must mount volume smb://nrel.gov/shared/wind/WindWeb/MetData/135mData/)
to... | pd.DataFrame() | pandas.DataFrame |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not us... | pd.Categorical([1, 2, 3, 1, 2, 3]) | pandas.Categorical |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIn... | DataFrame(index=[0, 1, 2], dtype=object) | pandas.DataFrame |
import argparse
import os
import re
from pathlib import Path
import pandas as pd
import json
import matplotlib.pyplot as plt
def lastmatch_file(log, match_str):
lastmatch = None
for line in log:
if match_str in line:
lastmatch = line
return lastmatch
def load_dict(line):
json_s... | pd.read_pickle(dirname) | pandas.read_pickle |
"""Performance visualization class"""
import os
from dataclasses import dataclass, field
from typing import Dict, List
import pandas as pd
import seaborn as sns
import scikit_posthocs as sp
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot
import matplotlib.pylab as plt
from tqdm import... | pd.DataFrame(columns=self.cv_methods) | pandas.DataFrame |
import pandas as pd
import pytest
import helpers.unit_test as hut
import im.common.data.types as icdtyp
import im.kibot.data.load as vkdloa
class TestKibotS3DataLoader(hut.TestCase):
def setUp(self) -> None:
super().setUp()
self._s3_data_loader = vkdloa.KibotS3DataLoader()
@pytest.mark.slow
... | pd.to_datetime("1990-12-28 00:00:00") | pandas.to_datetime |
from os import listdir
from os.path import isfile, join
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input, Concate... | pd.DataFrame(results, columns=['Model', 'Condition', 'AUC']) | pandas.DataFrame |
from django.http import (
HttpResponse,
JsonResponse
)
from django.http.response import (
HttpResponseRedirect,
HttpResponseForbidden,
Http404
)
from django.shortcuts import (
get_object_or_404,
render_to_response,
render,
redirect
)
from django.core.files.storage import File... | pandas.DataFrame(columns=["report_id","report_text"]) | pandas.DataFrame |
import pytest
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas import Series
from pandas.util.testing import assert_frame_equal
from pandas.util.testing import assert_series_equal
from wreckognize.sensitive_dataframe import SensitiveFrame
from wreckognize.sensitive_dataframe import Sens... | DataFrame([2335, 2340], columns=['birth_year']) | pandas.DataFrame |
from datetime import (
datetime,
time,
)
import numpy as np
import pytest
from pandas._libs.tslibs import timezones
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
class TestBetweenTime:
@td.skip_if_has_locale
... | date_range("1/1/2000", periods=100, freq="10min") | pandas.date_range |
import numpy as np
import pandas as pd
import boto3
from io import BytesIO
import librosa
from botocore.exceptions import ClientError
def call_s3(s3_client, bucket_name, fname, folder='audio_train/'):
"""Call S3 instance to retrieve data from .wav file(or other format).
Assumes file is in folder name path"""
... | pd.concat(vectors, axis=1, sort=True) | pandas.concat |
# -*- coding: utf-8 -*-
"""
This is the main class for the abacra model
"""
# enable for python2 execution
# from __future__ import print_function, division, absolute_import
import matplotlib.pylab as plt
import networkx as nx
import numpy as np
import pandas as pd
import time
import os
import pickle
import abacra.ne... | pd.set_option("display.max_columns",200) | pandas.set_option |
__all__ = [
"str_c",
"str_count",
"str_detect",
"str_extract",
"str_locate",
"str_replace",
"str_replace_all",
"str_sub",
"str_split",
"str_which",
"str_to_lower",
"str_to_upper",
"str_to_snake",
]
import re
from grama import make_symbolic, pipe, valid_dist, param_d... | Series(args[0]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Originally created on Tue Feb 17 2015
This script has been repurposed to provide summary counts from class files. May 2020
This script will grab the biovolume feature data from extracted feature files
for all images in an automated class file.
Can bin data by category or leave each image s... | pd.DataFrame(index=roinums) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY... | Index(recons.values) | pandas.Index |
##### file path
### input
# data_set keys and lebels
path_df_part_1_uic_label = "df_part_1_uic_label.csv"
path_df_part_2_uic_label = "df_part_2_uic_label.csv"
path_df_part_3_uic = "df_part_3_uic.csv"
# data_set features
path_df_part_1_U = "df_part_1_U.csv"
path_df_part_1_I = "df_part_1_I.csv"
path_df_part_1_... | pd.merge(train_data_df_part_1, df_part_1_UC, how='left', on=['user_id', 'item_category']) | pandas.merge |
import csv
import pandas
def readCSV():
with open("weather_data.csv") as file:
data = csv.reader(file)
temperatures = []
for row in data:
temperature = row[1]
if temperature != 'temp':
temperatures.append(int(temperature))
else:
... | pandas.DataFrame(dict) | pandas.DataFrame |
import pandas as pd
counties = ['Antrim','Armagh','Carlow','Cavan','Clare','Cork','Derry','Donegal','Down','Dublin','Fermanagh','Galway',
'Kerry','Kildare','Kilkenny','Laois','Leitrim','Limerick','Longford','Louth','Mayo','Meath','Monaghan',
'Offaly','Roscommon','Sligo','Tipperary','Tyrone','Wa... | pd.DataFrame(ireland) | pandas.DataFrame |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.c... | pd.Series([4.0, 8.0, 3.0], index=[1, 2, 3]) | pandas.Series |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
... | Categorical(["a", "b"], categories=["a", "b", "c"]) | pandas.Categorical |
#LC Jan/Feb 2022
# This module is part of the extensions to the project. Its purpose is to create a map of stations and relative water levels.
# This module is WIP
# Suggestions LC:
# Maybe try to implement importing directly to a pandas dataframe and stations?
# Maybe try to implement using a 'bubble map' with slide... | pd.DataFrame(data=d) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script is used for measuring some coefficients of the molecules."""
import numpy as np
import pandas as pd
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem, Crippen, Descriptors as desc, Lipinski, MolSurf
from sklearn.cluster import KMeans
from s... | pd.DataFrame() | pandas.DataFrame |
import argparse
import numpy as np
import pandas as pd
import sys
import datetime as dt
from dateutil.parser import parse
from Kernel import Kernel
from util import util
from util.order import LimitOrder
from util.oracle.SparseMeanRevertingOracle import SparseMeanRevertingOracle
from util.oracle.ExternalFileOracle imp... | pd.to_timedelta('11:30:00') | pandas.to_timedelta |
"""
Prepare training and testing datasets as CSV dictionaries 2.0
Created on 04/26/2019; modified on 11/06/2019
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
import re
# get all full paths of images
def image_ids_in(root_dir, ignore=['.DS_Store', 'dict.csv', 'all.csv']... | pd.concat([train_tiles, tile_ids]) | pandas.concat |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, Timedelt... | pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx') | pandas.TimedeltaIndex |
import numpy as np
import pandas as pd
import pytest
from pydrograph.baseflow import IHmethod
@pytest.fixture(scope='function')
def test_data(test_data_path):
data = pd.read_csv(test_data_path / 'UV_04087088_Discharge_20071001_ab.csv')
data.index = pd.to_datetime(data.date)
return data
@pytest.mark.para... | pd.Series() | pandas.Series |
import poseconnect.utils
import poseconnect.defaults
import smc_kalman
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tqdm
from uuid import uuid4
import logging
import time
import itertools
import functools
import copy
logger = logging.getLogger(__name__)
def track_poses_3d(
poses_3... | pd.to_datetime(self.latest_timestamp) | pandas.to_datetime |
import pandas as pd
# Tools for machine learning
import pickle
import time
import xgboost as xgb
from sklearn.model_selection import train_test_split
matches = | pd.read_csv('data/seasons_merged.csv') | pandas.read_csv |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import get_meta
@pytest.fixture(scope="module") # type: ignore
def postgres_url() -> str:
conn = os.environ["POSTGRES_URL"]
return conn
def test_get_meta(postgres_url: str) -> None:
query = "SELECT * FROM... | pd.Series([], dtype="Int64") | pandas.Series |
import re
from collections import defaultdict
from typing import List, Dict, Optional, Callable, Tuple
import numpy as np
import pandas as pd
from tqdm import tqdm
from lexsubgen.datasets.nlu import NLUDatasetReader
from lexsubgen.subst_generator import SubstituteGenerator
from lexsubgen.utils.augmentation import (
... | pd.DataFrame(columns=dataset.columns) | pandas.DataFrame |
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import requests
import time
from datetime import datetime
import pandas as pd
from urllib import parse
from config import ENV_VARIABLE
from os.path import getsize
fold_path = ... | pd.concat([dfAll, df]) | pandas.concat |
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
# from dotenv import find_dotenv, load_dotenv
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import datetime
import yfinance as yf
from pandas_datareader import data as pdr
from flask import current_app
f... | pd.Series(df['log_ret_1d']) | pandas.Series |
import requests
import json
import pandas as pd
import datetime
import asyncio
import os
from sklearn.preprocessing import Normalizer
import logging
import sys
from dotenv import load_dotenv
from csv import writer
load_dotenv() # Load environment variables
def gen_daily_data(name, lat, lon, t_start, t_end, compone... | pd.read_csv(filepath_or_buffer=f"{os.environ['HOME']}/github_repos/Pollution-Autoencoders/data/other/city_lat_lon.csv") | pandas.read_csv |
import os
import shutil
import uuid
import pandas
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.PangenomeAPIClient import PanGenomeAPI
class PangenomeDownload:
def __init__(self, config):
self.cfg = config
self.scratch = config['scratch']
self.pga =... | pandas.ExcelWriter(files['path']) | pandas.ExcelWriter |
import nltk.data
import pandas as pd
import argparse
import os
def section_start(lines, section=' IMPRESSION'):
"""Finds line index that is the start of the section."""
for idx, line in enumerate(lines):
if line.startswith(section):
return idx
return -1
def generate_whole_report_impres... | pd.DataFrame(df_imp, columns=['dicom_id', 'study_id', 'subject_id', 'sentence_id', 'report']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
docstring goes here.
:copyright: Copyright 2014 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
import unittest
from itertools import chain
from neo.test.generate_datasets import fake_neo
impo... | assert_index_equal(value, level) | pandas.util.testing.assert_index_equal |
__author__ = 'lucabasa'
__version__ = '1.2.0'
__status__ = 'development'
import pandas as pd
import numpy as np
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.model_selection import StratifiedShuffleSplit, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.base impo... | pd.concat(fold_pdp, axis=0) | pandas.concat |
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import tree
import pydotplus
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn import metrics #Import scikit-learn metrics module for accuracy calculation
import time
from sklearn.neighbors import KNeighb... | pd.concat([dataframe, rename], axis=1, sort=False) | pandas.concat |
#!/usr/bin/env python
"""
This module will read sas7bdat files using pure Python (2.7+, 3+).
No SAS software required!
"""
from __future__ import division, absolute_import, print_function,\
unicode_literals
import atexit
import csv
import logging
import math
import os
import platform
import struct
import sys
from c... | pd.DataFrame(data[1:], columns=data[0]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# > Note: KNN is a memory-based model, which means it memorizes patterns rather than generalizing. It is a simple yet powerful technique and competes with SOTA models like BERT4Rec.
# In[1]:
import os
project_name = "reco-tut-itr"; branch = "main"; account = "sparsh-ai"
project_p... | pd.merge(df, df2, on='itemId') | pandas.merge |