| prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
|---|---|---|
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
import itertools
import subprocess
import io
import re
import numpy as np
import pandas as pd
from monty.io import zopen
from... | pd.DataFrame(descriptors) | pandas.DataFrame |
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets imp... | tm.assert_index_equal(result, expected) | pandas.util.testing.assert_index_equal |
import time
import argparse
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from pathlib import Path
import context
from mhealth.utils.plotter_helper import save_figure
from mhealth.utils.commons import create_progress_bar
# Used if com... | pd.concat(dfs, axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
"""Test the views for the scheduler pages."""
import json
import os
from django.conf import settings
from django.db import IntegrityError
import pandas as pd
from ontask import tests
from ontask.table import serializers
class TableTestSerializers(tests.OnTaskTestCase):
"""Test stat view... | pd.to_datetime(df['d5'], infer_datetime_format=True) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script saves bid and ask data for specified ETFs to files for each day
during market open hours.
It assumes the computer is at US East Coast Time.
@author: mark
"""
import os
import pandas as pd
import numpy as np
from itertools import product
import streaml... | pd.Timedelta(minutes=5) | pandas.Timedelta |
# Train fastText model
import argparse
import csv
import multiprocessing
import os
import sys
import time
csv.field_size_limit(sys.maxsize)
import fasttext
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve, auc, average_precision_score
VERBOSITY = 2
WORDNGRAMS = 1
MINN = 0
MAXN = 0
MAXTHRE... | pd.DataFrame(lbl_statistics) | pandas.DataFrame |
from os import abort
from requests import get
from bs4 import BeautifulSoup
from pandas import read_html, concat, DataFrame, read_csv
from .utils import url_daerah, total_page, _baseurl_
def get_daerah() -> list:
page = get(_baseurl_)
data = []
soup = BeautifulSoup(page.text, 'lxml')
table = soup.find_all('td'... | concat([tail2, data2]) | pandas.concat |
import os
import random
import sys
import pandas as pd
from ATM import welcome
from Validate import validateDetails2, validateLogin
filePath = r".\{}.csv".format("atm")
if not os.path.isfile(filePath) or os.path.getsize(filePath) == 0:
df = pd.DataFrame({"firstName": [], "lastName": [], "email": [], "... | pd.read_csv(filePath, dtype=str) | pandas.read_csv |
import os
from itertools import product
import altair as alt
import arviz as az
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from bayes_window import BayesRegression, LMERegression, BayesConditions
from bayes_window import models
from bayes_window import utils
from bayes_window.fitting import... | pd.DataFrame(rocs) | pandas.DataFrame |
import requests
import pandas as pd
import pickle
import datetime
import guithread
import numpy as np
import concurrent.futures
import time
from os import makedirs
from config import text_width, max_thread_count
class Acquisition(guithread.GUIThread):
def __init__(self, filename='default.csv', brain_region='All'... | pd.to_numeric(neurons_df["Bifurcation angle remote"], downcast="float") | pandas.to_numeric |
# Search the TSX web site to get a list of all listed companies
import os
import sys
import getopt
import datetime
# from numpy.lib.function_base import append
import pandas as pd
import sqlalchemy
import logging
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdr... | pd.read_sql("SELECT name FROM sqlite_master WHERE type='table'", engine1) | pandas.read_sql |
"""
core.py
Created by <NAME> at 31/08/2020, University of Milano-Bicocca.
(<EMAIL>)
All rights reserved.
This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library),
and is released under the "BSD Open Source License".
"""
from collections import namedtuple
import numpy as np
import pa... | pd.to_datetime(df.index.date) | pandas.to_datetime |
import re
import math
import pandas as pd
import numpy as np
import nltk
import heapq
import pickle
import datetime
from nltk.corpus import stopwords
from operator import itemgetter
# Loading the dictionary
with open('dictionary.pkl', 'rb') as f:
data = pickle.load(f)
# Loading the dictionary with term count
with... | pd.set_option('display.max_colwidth', -1) | pandas.set_option |
#!/usr/bin/env python
# coding: utf-8
# # Machine Learning analysis
#
# - This is a Python base notebook
#
# Kaggle's [Spotify Song Attributes](https://www.kaggle.com/geomack/spotifyclassification/home) dataset contains a number of features of songs from 2017 and a binary variable `target` that represents whether ... | pd.Series(data=out_col, index=mean_scores.index) | pandas.Series |
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2017-2019 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limita... | pd.concat([coverage, purity, precision, recall], axis=1) | pandas.concat |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Make dataset for the End-to-End model (CSJ corpus).
Note that feature extraction depends on transcripts.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, isfile
import sys
im... | pd.concat([df_kanji, df_i], axis=0) | pandas.concat |
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.exceptions import ConvergenceWarning
from glob import glob
from multiprocessing import Pool
import sys, getopt, warnings, traceback
import pandas as pd
import numpy as np
import re
warnings.filterwarn... | pd.read_csv(labels, index_col=0) | pandas.read_csv |
from ontobio.io import assocparser, gpadparser
from ontobio import ecomap
import click
import pandas as pd
import datetime
from ontobio.io import qc
from ontobio.io.assocparser import Report
from ontobio.model.association import GoAssociation
from ontobio.model import collections
from typing import List
import warnings... | pd.concat([counts_frame1, counts_frame2], axis=1) | pandas.concat |
# encoding: utf-8
# copyright: GeoDS Lab, University of Wisconsin-Madison
# authors: <NAME>, <NAME>, <NAME>
import requests
import os
import pandas as pd
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='Start month, start day, and output_folder are necessary')
parser.add_argument('--st... | pd.DataFrame(time_df, columns=["date"]) | pandas.DataFrame |
from abc import ABC, abstractproperty
from collections import namedtuple
import numpy as np
import pandas as pd
from loguru import logger
#from helpers import persist_model
@logger.catch
def persist_model(name,clf=None, method='load'):
'Pass in the file name, object to be saved or loaded'
import dill
... | pd.DataFrame.sparse.from_spmatrix(v) | pandas.DataFrame.sparse.from_spmatrix |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ... | TS('2015-01-04') | pandas.Timestamp |
import networkx as nx
import pandas as pd
def apply(df, prev, curr, prev_type, curr_type):
prev_nodes = set(df.dropna(subset=[prev], how="any")[prev].unique())
succ_nodes = set(df.dropna(subset=[curr], how="any")[curr].unique())
all_nodes = prev_nodes.union(succ_nodes)
edges = set()
df = df.dropna... | pd.DataFrame({"node": []}) | pandas.DataFrame |
import pymongo
from pymongo import MongoClient
from tkinter import *
import time;
import datetime
import random
from tkinter import messagebox
import numpy as np
import pandas as pd
from tkinter import simpledialog
#GLOBAL VALUES
d_c = []
x = pd.DataFrame()
y = pd.DataFrame()
X_train = pd.DataFrame()
X... | pd.DataFrame() | pandas.DataFrame |
import os
from collections import Counter
from os import listdir
from os.path import isfile, join
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.pyplot import figure
from matplotlib import style
style.use('ggplot')
import scipy
from matplotlib.ticker import M... | pd.DataFrame(columns=['PREPROC', 'MEDIA_RANK_F1']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import tables
from phildb.constants import MISSING_VALUE, METADATA_MISSING_VALUE
class TabDesc(tables.IsDescription):
time = tables.Int64Col(dflt=0, pos=0)
value = tables.Float64Col(dflt=np.nan, pos=1)
meta = tables.Int32Col(dflt=0, pos=2)
replacement_time = tabl... | pd.to_datetime(df["time"], unit="s") | pandas.to_datetime |
from rdkit import Chem
from .utils import *
from rdkit.Chem.MolStandardize import rdMolStandardize
import os
import pandas as pd
#==========================================================
# process SMILES of chemotypes
def normChemotypes(compounds,
getChemotypes=False,
... | pd.DataFrame(DuplicatedNormalizedIdxList) | pandas.DataFrame |
import pandas as pd
import json_parser
import trace_visualizer
import logging
import os.path
import plotly.graph_objects as go
def parse_k8s_kpis_as_dataframe(filename):
# Parses a KPI file consisting of several lines of raw KPIs as output by the following kubectl command
# kubectl get - -raw / apis / metrics... | pd.concat(packets_df_list) | pandas.concat |
import os
import math
import warnings
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import shutil as sh
from glob import glob
from PIL import Image
from copy import copy
from tqdm ... | pd.concat(indexListNoDups, axis=0, sort=False) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 29 11:29:34 2020
@author: Pavan
"""
import pandas as pd
pd.set_option('mode.chained_assignment', None)
import numpy as np
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
import ... | pd.read_excel('spy.xlsx', index_col=None) | pandas.read_excel |
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
import os
process_raw = pd.read_csv(os.getcwd() + '/tf_scripts/suggester/process.csv')
features = ['RoomCount', 'EdgeCount', 'SubStepsCount', 'FPcount']
process_data = | pd.concat([process_raw], axis=1) | pandas.concat |
import subprocess
from datetime import datetime
import pandas as pd
def sacct_jobs(account_query, d_from, d_to='', debugging=False,
write_txt='', sacct_file='', serialize_frame=''):
"""Ingest job record information from slurm via sacct and return DataFrame.
Parameters
-------
account_q... | pd.to_datetime(job_frame['end'], errors='coerce') | pandas.to_datetime |
# %% Notes
# ------------------------------------------------------------------->>>>>>>>>>
# Use this script for the final update of the ID names; consolidate from the senior labmate's list and complete the replacement
# os.chdir("/Users/zhaohuanan/NutstoreFiles/MyNutstore/Scientific_research/2021_DdCBE_topic/Manuscript/20220311_My_tables")
# ------------------------------------------------------------------->>... | pd.set_option("display.width", 250) | pandas.set_option |
import re
import os
import xml.etree.ElementTree as ET
import pandas as pd
import boto3
import csv
from urllib.parse import unquote_plus
s3_client = boto3.client('s3')
s3 = boto3.resource('s3')
from xml_2_data import mnfp_2_data
from xml_2_data import mnfp1_2_data
from xml_2_data import mnfp2_2_data
from nmfp_rename_... | pd.DataFrame(columns=collateral_columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, ... | DataFrame([[2, 1], [2, 1]], columns=['b', 'a']) | pandas.DataFrame |
"""
TRAIN CLASSIFIER
Disaster Resoponse Project
Udacity - Data Science Nanodegree
How to run this script (Example)
> python train_classifier.py ../data/DisasterResponse.db classifier.pkl
Arguments:
1) SQLite db path (containing pre-processed data)
2) pickle file name to save trained ML model
"""
# import libr... | pd.read_sql_table('messages_categories',engine) | pandas.read_sql_table |
import numpy as np
import pandas as pd
from sapextractor.algo.o2c import o2c_common
from sapextractor.utils import constants
from sapextractor.utils.change_tables import extract_change
from sapextractor.utils.filters import case_filter
from sapextractor.utils.graph_building import build_graph
from sapextractor.algo.o2... | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from os import path
from src.utils.logger import Logger
from src.utils.path import PATH_DATA_OUTPUT, PATH_FEATURES, PATH_DATA_PROCESSED, PATH_REPORTS
import src.utils.input_output as io
from sklearn.model_selection import train_test_split, KFold
from sklearn.metrics import mean_sq... | pd.Series([max_month_gain]) | pandas.Series |
# Source
# Portfolio optimization in finance is the technique of creating a portfolio of assets, for which your investment has the maximum return and minimum risk.
# https://pythoninvest.com/long-read/practical-portfolio-optimisation
# https://github.com/realmistic/PythonInvest-basic-fin-analysis
###################... | pd.Series(weights_min_volatility) | pandas.Series |
import numpy as np
import pandas as pd
from datetime import datetime
import pytest
import empyrical
import vectorbt as vbt
from vectorbt import settings
from tests.utils import isclose
day_dt = np.timedelta64(86400000000000)
ts = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [5, 4, 3, 2, 1],
'c': [1, 2, 3, ... | pd.Timestamp('2018-01-01 00:00:00') | pandas.Timestamp |
#!/usr/bin/env python
import collections
import numpy as np
import pandas as pd
from scipy.sparse import *
__author__ = "peiyong"
class Sample:
def __init__(self, feature=None, label=None):
self.feature = feature
self.label = label
def read_sparse(datafile):
labels = []
cols = []
... | pd.pandas.read_csv(datafile) | pandas.pandas.read_csv |
"""Module providing functions to plot data collected during sleep studies."""
import datetime
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Union
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.ticker as mticks
import pandas as pd
import seaborn as sns
from fau... | pd.to_datetime(date) | pandas.to_datetime |
import dask.dataframe as dd
import deimos
from functools import partial
import multiprocessing as mp
import numpy as np
import pandas as pd
def threshold(features, by='intensity', threshold=0):
'''
Thresholds input :obj:`~pandas.DataFrame` using `by` keyword, greater than
value passed to `threshold`.
... | pd.DataFrame(features, index=rindex, columns=cols) | pandas.DataFrame |
import pandas as pd
import numpy as np
from collections import defaultdict
from datetime import datetime, timedelta
def mta_end_of_week(d):
''' Calculates the end of the week for a given date to conform to MTA data publication on Saturday
d = date vaule
return: date
'''
return d -... | pd.DataFrame(columns=['C/A','UNIT','SCP','STATION','LINENAME','DIVISION','DATE','TIME','DESC','ENTRIES','EXITS']) | pandas.DataFrame |
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.cluster.hierarchy import linkage, dendrogram
from scipy.spatial import distance
from matplotlib import rcParams
from numpy.random import seed
seed(123)
from scipy.stats.mstats import spearmanr
from scipy.stats.msta... | pd.DataFrame.transpose(df) | pandas.DataFrame.transpose |
import tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from dataset_creation.params import Params
from dataset_creation.text_cleaning import TextCleaner
class DataPreparer:
@classmethod
def create_data_set(cls):
print('Started data set creati... | pd.DataFrame(dataset, columns=['hadm_id', 'input_text', 'output_text']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import requests
import logging
import pandas as pd
pd.set_option("max_colwidth", 4096)
from lxml import etree
import requests
from odoo import api, fields, models, SUPERUSER_ID, _
_logger = logging.getLogger(__name__)
class WecomServerApiError(models.Model):
_name = "... | pd.read_html(table, encoding="utf-8", header=0) | pandas.read_html |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from... | integer_array([1, 2], dtype="int8") | pandas.core.arrays.integer_array |
'''
This code will clean the OB datasets and combine all the cleaned data into one
Dataset name: O-27-Da Yan
semi-automate code, needs some hands work. LOL But God is so good to me.
1. 9 different buildings in this dataset, and each building has different rooms
3. each room has different window, door, ac, indoor, out... | pd.read_csv(door_name, usecols=[0, 1]) | pandas.read_csv |
import argparse
import os
import torch
import time
import numpy as np
import pandas as pd
import shutil
from data_utils import g_node_col, g_date_col, process_cdc_truth_from_csse, process_cdc_loc, get_all_cdc_label, read_cdc_forecast
from base_task import load_json_from
# exp_dir_template = '../Exp_us_{}_{}' # leve... | pd.concat(cdc_results, axis=0, ignore_index=True) | pandas.concat |
import itertools
from sklearn.model_selection import train_test_split
from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator
import matplotlib.pyplot as plt
from sklearn import metrics
import numpy as np
import pandas as pd
import re
PATTERN = re.compile(r"((?P<days1>[1-9]\d*)D(?P<amount1>[1... | pd.read_csv("../datasets/agoda_cancellation_train.csv") | pandas.read_csv |
#!/usr/bin/env python
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import precision_recall_fscore_support, mean_squared_error
from collections import Counter
import math
import xgboost as xgb... | pd.read_pickle(var_path) | pandas.read_pickle |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.nonparametric.smoothers_lowess import lowess as smlowess
from statsmodels.sandbox.regression.predstd import wls_prediction_std... | pd.Series(lower * std + y) | pandas.Series |
"""
Module with classes and methods to perform kriging of elements (and at some point exploit the potential field to
choose the directions of the variograms)
Tested on Ubuntu 16
Created on 1/5/2017
@author: <NAME>
"""
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
import pymc3 as pm
import ... | pn.DataFrame() | pandas.DataFrame |
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime,... | DatetimeIndex(arr) | pandas.DatetimeIndex |
from utils import *
import time, copy, os, glob, csv, ast
import pandas as pd
import numpy as np
from collections import defaultdict
from config import parameters
from PatternHandler import PatternHandler
from DependencyGraphHandler import DependencyGraphHandler
from SubsetHandler import SubsetHandler
from sklearn.feat... | pd.read_json(data_filepath) | pandas.read_json |
from node2vec import Node2Vec
import pandas as pd
import numpy as np
import networkx as nx
import pickle
import os
import argparse
from numpy import linalg as la
from sklearn.metrics.pairwise import cosine_similarity
from sklearn import model_selection as sk_ms
from sklearn.metrics import confusion_matrix
from sklearn.... | pd.read_csv('our_imdb/train/optimaize_values_Node2Vec_l2.csv') | pandas.read_csv |
import click
import logging
import os
import pandas as pd
from tqdm import tqdm
from rxnmapper import RXNMapper
logger = logging.getLogger(__name__)
@click.command()
@click.option(
'--file_path',
'-f',
help='Input file path to csv, tsv or json with "rxn" column'
)
@click.option('--output_path', '-o', hel... | pd.DataFrame(results) | pandas.DataFrame |
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="U... | Series(vals1) | pandas.Series |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
import os
import re
import string
from math import log
from pathlib import Path
from typing import List
from transformers impor... | pd.read_csv(csv_location) | pandas.read_csv |
import pandas as pd
import numpy as np
from datetime import date
"""
dataset split:
(date_received)
dateset3: 20160701~20160731 (113640),features3 from 20160315~20160630 (off_test)
dateset2: 20160515~20160615 (258446),features2 from 20160201~2... | pd.merge(other_feature1,t7,on=['user_id','coupon_id','date_received']) | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 10:47:30 2018
@author: SilverDoe
"""
#============ Selecting a column ==============================================
import pandas as pd
d = {'one' : pd.Series([1, 2, 3], index=['a', 'b', 'c']),
'two' : pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])}
df =... | pd.DataFrame(d) | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# --------------------------------------------... | pd.DataFrame.from_records(max_rhos, index='vars') | pandas.DataFrame.from_records |
# Link between theoretic network graph and trajectories
# Map-matching the trajectories to the underlying theoretical network
# Using Leuven Map-matching algorithm
# Start and End node of matched edge in dataframe of trajectories --> link between theoretical network and measured data
from pneumapackage.settings import ... | pd.concat([tr_first, tr_last]) | pandas.concat |
import json
import copy
import unittest
import tempfile
import numpy as np
import pandas as pd
import uuid
from supervised.preprocessing.preprocessing_missing import PreprocessingMissingValues
from supervised.preprocessing.preprocessing_categorical import PreprocessingCategorical
from supervised.preprocessing.preproce... | pd.DataFrame(data=d_test) | pandas.DataFrame |
"""
Testing framework for the `ArrayCableInstallation` class.
"""
__author__ = ["<NAME>", "<NAME>"]
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from copy import deepcopy
import pandas as pd
import pytest
from ORBIT import ProjectManager
fro... | pd.DataFrame(sim.env.actions) | pandas.DataFrame |
import itertools
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from solarforecastarbiter.reference_forecasts import forecast
def assert_none_or_series(out, expected):
assert len(out) == len(expected)
for o, e in zip(out, expected):
if e is None:
assert... | assert_series_equal(out, exp) | pandas.testing.assert_series_equal |
import pandas as pd
import tensorflow as tf
from pathlib import Path
from datetime import datetime
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.models import load_model
#enviroment settings
path = Path(__file__).parent.absolute()/'Deep Training'
name_data = 'none_'#''
metric = 'binary_accu... | pd.merge(targets, predictions, how='left', left_index=True, right_index=True, suffixes=('',' prediction')) | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 5 21:13:34 2016
@author: Marty
"""
from __future__ import absolute_import, print_function, division, unicode_literals
import unittest
from unittest import mock
import pandas as pd
from pandas.testing import assert_frame_equal
import numpy as np
from hydrofunctions impo... | pd.DataFrame(data=data, columns=cols) | pandas.DataFrame |
import os
import json
import tzlocal
import numpy as np
import pandas as pd
from Fetcher import Dataset
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
from statsmodels.tsa.holtwinters import ExponentialSmoothing, SimpleExpSmoothing
# glo... | pd.concat(final_df) | pandas.concat |
# -*- coding: utf-8 -*-
"""Precily.ipynb
Automatically generated by Colaboratory.
Author:: <NAME>
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf #version 2.0
import tensorflow_hub as hub
from math import *
import numpy as np
import pandas as pd
#tf.rand... | pd.DataFrame(STS_in_p) | pandas.DataFrame |
import pandas as pd
import numpy as np
file1 = '../data/F9.xlsx'
x1 = pd.ExcelFile(file1)
feature = x1.parse('Sheet1')
print(feature.shape)
feature = feature.drop(['\'DAY_OF_DISCHARGE\''], axis=1)
feature = feature.drop(['\'FOLLOW_UP_3WEEKS\''], axis=1)
feature = feature.drop(['\'FOLLOW_UP_8WEEKS\''], axis=1)
feature ... | pd.ExcelFile(file2) | pandas.ExcelFile |
import pandas as pd
import os, requests, logging
import sys
# from bs4 import BeautifulSoup as bs
from .utils import *
class EdgarBase(object):
def __init__(self, dir_edgar=None):
# self.dir_edgar =
# self.__dir_download = None
# self.__dir_data = None
self.__dir_output = None
... | pd.datetime.today() | pandas.datetime.today |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
... | pd.Timestamp("2013-03-26 00:00:00") | pandas.Timestamp |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from scipy import signal
import data_processing0 as dp
import datetime
import math
from scipy.spatial.distance import pdist, squareform
DATA_PATH = "sitaiqu/samples_image/"
import os
if not os.path.isdir(DATA_PATH):
... | pd.to_datetime(df['date']) | pandas.to_datetime |
"""
Testing that functions from rpy work as expected
"""
import pandas as pd
import numpy as np
import unittest
import nose
import pandas.util.testing as tm
try:
import pandas.rpy.common as com
from rpy2.robjects import r
import rpy2.robjects as robj
except ImportError:
raise nose.SkipTest('R not inst... | com.convert_robj(obj) | pandas.rpy.common.convert_robj |
# -*- coding: utf-8 and gbk -*-
"""
Created on Sat Oct 16 08:44:53 2021
@author: <NAME>
"""
# from __future__ import division
from tensorflow.keras.models import Sequential
# from nltk.book import *
import pandas as pd
import numpy as np
np.set_printoptions(threshold=np.inf)  # display numpy arrays in full
import matplotlib.mlab as ... | pd.to_datetime(df3['day']) | pandas.to_datetime |
from contextlib import ExitStack as does_not_raise # noqa: N813
import numpy as np
import pandas as pd
import pytest
from sid.msm import _flatten_index
from sid.msm import _harmonize_input
from sid.msm import _is_diagonal
from sid.msm import get_diag_weighting_matrix
from sid.msm import get_flat_moments
from sid.msm ... | pd.Series([1]) | pandas.Series |
# Utilities
import re
import pickle
import numpy as np
import pandas as pd
import tensorflow.keras.metrics as metrics
from gensim.models import KeyedVectors
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras import Sequential... | pd.read_csv('assets/contractions.csv', index_col='Contraction') | pandas.read_csv |
import pandas as pd
import numpy as np
import pickle
from scipy.sparse import *
from sklearn.model_selection import train_test_split
SEED = 5525
def update_index(df):
index_set = set()
for i in df.tolist():
index_set.update(set(i))
indices = list(index_set)
indices.sort()
r... | pd.DataFrame(emojis) | pandas.DataFrame |
import pandas as pd
import glob
## concatenate data frames into one
path = "HMXB_output/*"
all_param_files = glob.glob(path)
#df = pd.read_csv("./0_params.csv")
df = pd.DataFrame()
for pfile in all_param_files:
#if pfile == "0_params.csv": continue
pf = | pd.read_csv(pfile) | pandas.read_csv |
# (C) Copyright 2017- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernme... | pd.DataFrame(md_ref) | pandas.DataFrame |
from common_code.common import *
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from matplotlib import tight_layout
mpl.style.use('seaborn-poster')
sns.set_palette(sns.color_palette(['#43406b', '#d15a00', '#27f77d']))
# sns.pal... | pd.DataFrame(measures) | pandas.DataFrame |
import numpy as np
from numpy.fft import fft, ifft
# from: http://www.mirzatrokic.ca/FILES/codes/fracdiff.py
# small modification: wrapped 2**np.ceil(...) around int()
# https://github.com/SimonOuellette35/FractionalDiff/blob/master/question2.py
_default_thresh = 1e-4
def get_weights(d, size):
"""Expanding windo... | pd.DataFrame(x) | pandas.DataFrame |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index... | tm.assertRaises(TypeError) | pandas.util.testing.assertRaises |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import keras
from keras_self_attention import SeqSelfAttention
# In[2]:
import numpy as np
import pandas as pd
import re
from bs4 import BeautifulSoup
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from nltk.c... | pd.DataFrame({'text':text_word_count, 'Author':Author_word_count}) | pandas.DataFrame |
from bs4 import BeautifulSoup
from crossref.restful import Works
import datetime
from habanero import Crossref
import json
import lxml
import numpy as np
import os
import pandas as pd
import shutil
import random
import re
import requests
import time
from a0001_admin import clean_dataframe
from a0001_ad... | pd.read_csv(df_file) | pandas.read_csv |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import datetime
from scipy import interpolate
#feature+activity1+activity2+attributenumber+extractionMethod+freqforfft
feature_combinations=["31112","31111","11121","21121","31125", "11113","3112616","311263"] #used for generalizing initial work
... | pd.read_csv("Data/Cooking1/gyro-1533863975.csv") | pandas.read_csv |
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.axes as ax
# creates the data for the HOAc/Ni(110) IR
colnames = ['Wavenumber', 'Intensity']
# 15 s
f1 = pd.read_csv("Ni(110) 1e-9Torr15s 210K.0.dpt", '\t', header=None, names=colnames)
f1.set_index(colnames[0], inplace=True)
f2 = pd.read_csv("Ni(... | pd.read_csv("Ni(110) 1e-9Torr15s 452K.0.dpt", '\t', header=None, names=colnames) | pandas.read_csv |
from os.path import join
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import zscore
from sklearn.decomposition import PCA
import pandas as pd
from itertools import combinations
# Load helper function(s) for interacting with CTF dataset
from ctf_dataset.load import create_wr... | pd.DataFrame(lstm_stack_pca_long) | pandas.DataFrame |
"""Habitat risk assessment (HRA) model for InVEST."""
# -*- coding: UTF-8 -*-
import os
import logging
import pickle
import shutil
import tempfile
import numpy
from osgeo import gdal, ogr, osr
import pandas
import shapely.ops
import shapely.wkb
import taskgraph
import pygeoprocessing
from . import uti... | pandas.concat(region_df_list) | pandas.concat |
# Module: Regression
# Author: <NAME> <<EMAIL>>
# License: MIT
# Release: PyCaret 2.1
# Last modified : 17/08/2020
def setup(data,
target,
train_size = 0.7,
sampling = True,
sample_estimator = None,
categorical_features = None,
categorical_imputation = 'con... | pd.reset_option("display.max_columns") | pandas.reset_option |
import pandas as pd
from pandas.core.frame import DataFrame
pd.options.display.max_rows=None
pd.options.display.max_columns=None
Actores = 'actores'
NombreArchivo = f'Base_de_datos_{Actores}.ods'
df_rows = pd.read_excel(NombreArchivo) #, index_col=0
df_rows2 = pd.read_excel(NombreArchivo, skiprows=range(0,1))
rows = ... | DataFrame(rows) | pandas.core.frame.DataFrame |
import numpy as np
from typing import Tuple, List
import cv2
from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
from skimage.color import label2rgb
from skimage import img_as_ubyte
from skimage.measure import block_reduce
import pandas as pd
from .basic import saturation_rectified_intensity, fg_pts
... | pd.DataFrame(columns=[0, 1]) | pandas.DataFrame |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Real-time modeling | COVID-19 in Brazil
--------------------------------------------
Ideas and models developed by the trio:
. <NAME>
. <NAME>
. <NAME>
This model has the following characteristics:
a) We do NOT follow parametric models => There are no dur... | pd.Series(projetado) | pandas.Series |
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Apache License, version 2.0.
# If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0.
# SP... | pd.testing.assert_frame_equal(exp, o) | pandas.testing.assert_frame_equal |
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import joblib
import calendar
from datetime import datetime, timedelta
from collections import OrderedDict
from constants import *
plt.style.use('seaborn-whitegrid')
if not os.path.exists('tmp'):
os.mkdir('tmp')
class Covid:
... | pd.DataFrame() | pandas.DataFrame |
"""
Main script for the paper
A Comparison of Patient History- and EKG-based Cardiac Risk Scores
<NAME>, <NAME>, <NAME>
Proceedings of the AMIA Summit on Clinical Research Informatics (CRI), 2018
Runs various models, saves prediction outcomes.
"""
import feather, os, sys, pickle
from torch.autograd import Varia... | pd.isnull(encs['has_mace']) | pandas.isnull |
# -*- coding: utf-8 -*-
"""
This code allows us to run the configuration slices analysis for the TGM model
"""
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import random
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams.update({'font.size': 18})
pathRe... | pd.concat([tabVide, tableClustering], axis=0) | pandas.concat |
import operator
import warnings
import numpy as np
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, to_timedelta
import pandas._testing as tm
from pandas.core.algorithms import checked_add_with_arr
from .pandas_vb_common import numeric_dtypes
try:
import pandas.core.computation.e... | pd.offsets.MonthBegin() | pandas.offsets.MonthBegin |
# -*- coding: utf-8 -*-
"""Cross references from cbms2019.
.. seealso:: https://github.com/pantapps/cbms2019
"""
import pandas as pd
from pyobo.constants import (
PROVENANCE,
SOURCE_ID,
SOURCE_PREFIX,
TARGET_ID,
TARGET_PREFIX,
XREF_COLUMNS,
)
__all__ = [
"get_cbms2019_xrefs_df",
]
#: C... | pd.DataFrame(rows, columns=XREF_COLUMNS) | pandas.DataFrame |