| prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars) |
|---|---|---|
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or a... | pd.RangeIndex(0) | pandas.RangeIndex |
import typing
from typing import List
import numpy as np
import pandas as pd
from numpy import ndarray
from models.analysis import Analysis
import logging
from utils.a_weighting import A_weighting
from utils.audio_calcs import calc_db_from_frequency_dbs, magnitude_to_db
logger = logging.getLogger(__name__)
class... | pd.DataFrame() | pandas.DataFrame |
import requests
from bs4 import BeautifulSoup
import json
import pandas as pd
from selenium import webdriver
import time
import re
all_games_list = | pd.read_fwf('all_games_nbasite.txt', header=None) | pandas.read_fwf |
"""
Python source code to extract listing from mudah.my
"""
from functools import total_ordering
from mudah.config import General, Region, PropertyCategory, SupportedPropertyRegionArea, PropertyArea
import pandas as pd
import requests
import webbrowser as web
import urllib.parse as urlparse
from urllib.par... | pd.Series(links) | pandas.Series |
"""
Purpose: Data type transforms
Contributors:
<Include Your Name/Names>
Sponsor: DataDisca Pty Ltd. Australia
https://github.com/DataDisca
"""
import pandas as pd
import numpy as np
from abc import ABC, abstractmethod
from meta_data import DataTypes, DateTimeTransforms
from fractions import Fraction
import dateti... | pd.isnull(value) | pandas.isnull |
import vectorbt as vbt
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from vectorbt.generic import nb as generic_nb
from vectorbt.generic.enums import range_dt
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
ma... | pd.Series([0.4, 0.2], index=['g1', 'g2'], name='rate') | pandas.Series |
#!/usr/bin/env python
"""
Represent connectivity pattern using pandas DataFrame.
"""
from collections import OrderedDict
import itertools
import re
from future.utils import iteritems
from past.builtins import basestring
import networkx as nx
import numpy as np
import pandas as pd
from .plsel import Selector, Select... | pd.DataFrame(index=idx, columns=columns, dtype=object) | pandas.DataFrame |
import os
import pandas as pd
import matplotlib.pyplot as plt
import shap
import lightgbm as lgb
from sklearn.metrics import average_precision_score
from takaggle.training.model import Model
from takaggle.training.util import Util
# Custom metrics usable with LightGBM
# Usage example (to optimize on this function, the parameter metric: 'None' must be specified)
# self.mo... | pd.merge(importance_df_mean, importance_df_std, left_index=True, right_index=True, suffixes=['_mean', '_std']) | pandas.merge |
import numpy as np
import pandas as pd
from collections import OrderedDict
from pandas.api.types import is_numeric_dtype, is_object_dtype, is_categorical_dtype
from typing import List, Optional, Tuple, Callable
def inspect_df(df: pd.DataFrame) -> pd.DataFrame:
""" Show column types and null values in DataFrame d... | is_categorical_dtype(column) | pandas.api.types.is_categorical_dtype |
#!/usr/bin/env python
# encoding: utf-8
'''
\ \ / /__| | ___ _ _ __ / ___| | | | / \ |_ _|
\ V / _ \ |/ / | | | '_ \ | | | |_| | / _ \ | |
| | __/ <| |_| | | | | | |___| _ |/ ___ \ | |
|_|\___|_|\_\\__,_|_| |_| \____|_| |_/_/ \_\___
===... | pd.DataFrame.from_dict(data, orient='columns') | pandas.DataFrame.from_dict |
import os
import pandas as pd
from pandas.util.testing import assert_equal
from nlp_profiler.constants \
import HIGH_LEVEL_OPTION, GRANULAR_OPTION, GRAMMAR_CHECK_OPTION, \
SPELLING_CHECK_OPTION, EASE_OF_READING_CHECK_OPTION
from nlp_profiler.core import apply_text_profiling
from tests.common_functions import ... | pd.read_csv(csv_filename) | pandas.read_csv |
"""
ncaa_scraper
A module to scrape and parse college baseball statistics from stats.ncaa.org
Created by <NAME> in Spring 2022
"""
import pandas as pd
import time
import random
from bs4 import BeautifulSoup
import requests
import numpy as np
#lookup paths
_SCHOOL_ID_LU_PATH = 'collegebaseball/data/schools.parquet'... | pd.read_parquet(_SEASON_ID_LU_PATH) | pandas.read_parquet |
"""
SPDX-FileCopyrightText: 2019 oemof developer group <<EMAIL>>
SPDX-License-Identifier: MIT
"""
import pandas as pd
import numpy as np
from pandas.util.testing import assert_series_equal
from numpy.testing import assert_allclose
from windpowerlib.density import barometric, ideal_gas
class TestDensity:
def te... | pd.Series(data=[1.30305336, 1.29656645]) | pandas.Series |
from functools import reduce
import os
import pandas as pd
import numpy as np
import multiprocessing as mp
class MapReducer:
def __init__(self, df):
self.df = df
self.counter = 0
def mapper(self, group):
gp_name, lst = group
gp_df = pd.DataFrame([self.df.loc[x] for x in lst], ... | pd.DataFrame(results, columns=['author', 'text']) | pandas.DataFrame |
"""
Low pass filter implementation in python.
The low pass filter is defined by the recurrence relation:
y_(n+1) = y_n + alpha (x_n - y_n)
where x is the measured data and y is the filtered data. Alpha is a constant
dependent on the cutoff frequency, f, and is defined as:
alpha = 2 pi dt f
2 pi ... | pd.DataFrame() | pandas.DataFrame |
# ##### run this script for each project to produce the true link for that project #####
import pandas as pd
import numpy as np
dummy_commit = pd.read_parquet('path to read commit')
dummy_commit
# deleting all the null issue_ids
dummy_commit.reset_index(drop=True, inplace=True)
print(np.where(pd.isnull(dummy_commit... | pd.merge(left=selected_issue, right=selected_commit, how='left', left_on=['source', 'issue_id'], right_on=['source', 'issue_id']) | pandas.merge |
import numpy as np
import pandas as pd
from random import randint
from statistics import mode
from datetime import datetime
import backend.utils.finder as finder
from dateutil.relativedelta import relativedelta
def arrange_df(df, df_type, relevant_col_idx=None, items_to_delete=None, assembly_df=None, bom_trim=False):... | pd.to_timedelta(1, unit="d") | pandas.to_timedelta |
# Neural network for pop assignment
# Load packages
import tensorflow.keras as tf
from kerastuner.tuners import RandomSearch
from kerastuner import HyperModel
import numpy as np
import pandas as pd
import allel
import zarr
import h5py
from sklearn.model_selection import RepeatedStratifiedKFold, train_test_split
from s... | pd.DataFrame(ensemble_report) | pandas.DataFrame |
import os
import sqlite3
import pandas as pd
from pygbif import occurrences
from pygbif import species
from datetime import datetime
import geopandas as gpd
import shapely
import numpy as np
import fiona
from shapely.geometry import shape, Polygon, LinearRing, Point
from dwca.read import DwCAReader
import random
from s... | pd.read_sql(sql="SELECT * FROM filter_set", con=conn) | pandas.read_sql |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-04-15 22:20
# @Author : erwin
import pandas as pd
from common.util_function import *
import numpy as np
df = | pd.DataFrame({'col1': ['a'] * 2 + ['b'] * 3, 'col2': [1, 1, 2, 3, 3]}) | pandas.DataFrame |
import pandas as pd
import numpy as np
from .utils import store_data, stoi
# ------------------------------------------------------------------------
# Globals
cols = ['time', 'cases', 'deaths', 'hospitalized', 'icu', 'recovered']
# ------------------------------------------------------------------------
# Main point... | pd.notnull(dataframe_japan) | pandas.notnull |
# Question: Please concatenate this file with this one to a single text file.
# The content of the output file should look like below.
# http://www.pythonhow.com/data/sampledata.txt
# http://pythonhow.com/data/sampledata_x_2.txt
# Expected output:
# x,y
# 3,5
# 4,9
# 6,10
# 7,11
# 8,12
# 6,10
# 8,18
# 12,20
# 14,22
... | pandas.read_csv("sampledata_x_2.txt") | pandas.read_csv |
import datetime
import glob
import pathlib
import tempfile
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from mockito import ANY, unstub, when
from src.constants import ROOT_DIR
from src.data.forecast import CAMSProcessor
from src.data.observations import OpenAQDownloader
from src.data.tran... | pd.Timestamp("2021-08-24 06:00:00+0000", tz="UTC") | pandas.Timestamp |
import copy
import re
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from data.dataloader import JHULoader
from pytz import timezone
from utils.fitting.loss import Loss_Calculator
from utils.generic.config import read_config
"""
Helper functions for processing different reichlab submi... | pd.DataFrame(columns=columns) | pandas.DataFrame |
# Generate the February dataset for the boosting approach. This approach has some additional variables, including sums and averages of past values
import gc
gc.collect()
import pandas as pd
import seaborn as sns
import numpy as np
#%% Load the data. With the boosting dataset I did not do the tests of remov... | pd.merge(final, subtest4, left_index=True, right_index=True) | pandas.merge |
__author__ = 'brendan'
import main
import pandas as pd
import numpy as np
from datetime import datetime as dt
from matplotlib import pyplot as plt
import random
import itertools
import time
import dateutil
from datetime import timedelta
cols = ['BoP FA Net', 'BoP FA OI Net', 'BoP FA PI Net', 'CA % GDP']
raw_data = | pd.read_csv('raw_data/BoP_UK.csv', index_col=0, parse_dates=True) | pandas.read_csv |
import pandas as pd
import numpy as np
from pandas._testing import assert_frame_equal
from NEMPRO import planner, units
def test_start_off_with_initial_down_time_of_zero():
forward_data = pd.DataFrame({
'interval': [0, 1, 2],
'nsw-energy': [200, 200, 200]})
p = planner.DispatchPlanner(dispatc... | assert_frame_equal(expect_dispatch, dispatch) | pandas._testing.assert_frame_equal |
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import scipy as sc
import pickle
import os
from . import preprocess
from scipy.sparse import vstack, csr_matrix, csc_matrix, lil_matrix
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import no... | pd.read_csv('data/target_playlists.csv', delimiter='\t') | pandas.read_csv |
import ast
import time
import numpy as np
import pandas as pd
from copy import deepcopy
from typing import Any
from matplotlib import dates as mdates
from scipy import stats
from aistac.components.aistac_commons import DataAnalytics
from ds_discovery.components.transitioning import Transition
from ds_discovery.compone... | pd.DateOffset(**offset) | pandas.DateOffset |
#!/usr/bin/env python
# coding: utf-8
"""
Created on Mon November 10 14:13:20 2019
@author: <NAME>
takes the condition name as input (e.g. lik or int)
"""
def covariate (cond):
# data analysis and wrangling
import pandas as pd
import numpy as np
import os
from pathlib import Path
... | pd.DataFrame() | pandas.DataFrame |
# creating my first module:
# libraries
import pandas as pd
import numpy as np
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pandas import read_csv as csv
def Explore(file, column_names=None, title_line_number=100, head_line_number=20):
#df = pd.read_csv(file... | pd.DataFrame(X_train_SM, columns=X_train.columns) | pandas.DataFrame |
import pandas as pd
import app.data.score_calculator as sc
def get_marks(df, subjects, terms=[1, 2, 3, 4, 5, 6, 7, 8]):
"""
Returns a data frame with marks for given subjects and terms for given schools
Parameters
----------
subjects : list of subjects ["History","Sinhala","English"]
... | pd.Series(grades) | pandas.Series |
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import pickle
class SummaryEvaluationPlotter:
def __init__(self):
pass
def load_tvsum_result(self):
results = []
for method in ['Random', 'Human', 'Model']:
df = pd.read_pickle(f'... | pd.concat(results) | pandas.concat |
from typing import Any
import numpy as np
import pandas as pd
from resources.backend_scripts.feature_selection import FeatureSelection
from resources.backend_scripts.parameter_search import ParameterSearch
DataFrame = pd.DataFrame
NpArray = np.ndarray
class GlobalVariables:
_df: DataFrame = pd.DataFrame()
... | pd.DataFrame() | pandas.DataFrame |
"""
Visualise landmarks on images for a particular set/scale or whole dataset
The expected structure for dataset is as follows
* DATASET/<tissue>/scale-<number>pc/<image>
* DATASET/<tissue>/scale-<number>pc/<csv-file>
EXAMPLE
-------
>> python run_visualise_landmarks.py -l dataset -i dataset -o output
>> python han... | pd.read_csv(p_lnds) | pandas.read_csv |
"""
Module: LMR_proxy_preprocess.py
Purpose: Takes proxy data in their native format (e.g. .pckl file for PAGES2k or collection of
NCDC-templated .txt files) and generates Pandas DataFrames stored in pickle files
containing metadata and actual data from proxy records. The "pickled" DataFrames
... | pd.DataFrame({'Proxy ID':frame_data[:,0], siteID:frame_data[:,1]}) | pandas.DataFrame |
#!/usr/bin/env python3
import json
import math
import sys
import glob
import argparse
import os
from collections import namedtuple, defaultdict
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D
from matplotlib.ticker import MaxNLocator
impo... | pandas.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
'''Scans the APIs in Java classes and tallies the URL and request type of the APIs in all controller class files
'''
import os
import re
import pandas as pd
header = ["controller", "url", "request", "对应菜单", "状态", "技术", "测试"] # 表格头,除了前三项,后面均可改动
FILE_ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
output = os.path.join(FILE_ROOT_PATH, "公有云加解密.xlsx") # output folder
# source file... | pd.ExcelWriter(path, mode='a') | pandas.ExcelWriter |
import joblib
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import autorch
from autorch.function import sp2wt
class F(object):
def __init__(self,config):
# simulation data model
self.icg_model = joblib.load(config['icg_model_path'])
self.c620_model = joblib.load(config[... | pd.DataFrame(index=idx,columns=self.c660_col['case']) | pandas.DataFrame |
import os
import random
from itertools import product
from unittest import mock
import arff
import pytest
import numpy as np
import pandas as pd
import scipy.sparse
from oslo_concurrency import lockutils
import openml
from openml import OpenMLDataset
from openml.exceptions import OpenMLCacheException, OpenMLHashExce... | pd.DataFrame([[1], ['2'], [3.]]) | pandas.DataFrame |
#!/usr/bin/env python
from itertools import combinations
import random
import scanpy.api as sc
import matplotlib.pyplot as plt
import numpy as np
from granatum_sdk import Granatum
import pandas as pd
import seaborn as sns
def main():
gn = Granatum()
tb1 = gn.pandas_from_assay(gn.get_import('assay1'))
... | pd.concat([tb1 * fct1, tb2 * fct2], axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
import os
import sys
from typing import List, NamedTuple
from datetime import datetime
from google.cloud import aiplatform, storage
from google.cloud.aiplatform import gapic as aip
from kfp.v2 import compiler, dsl
from kfp.v2.dsl import component, pipeline, Input, Output, Model, Metrics, Datas... | pd.Timedelta(30, "d") | pandas.Timedelta |
# -*- coding: utf-8 -*-
# Arithmetc tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core import ops
from pandas.errors import NullFrequency... | Timedelta('5m4s') | pandas.Timedelta |
"""
****************************************
* @author: <NAME>
* Date: 5/22/21
****************************************
"""
import time
import tensorflow.keras as keras
import pandas as pd
from tqdm import tqdm
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense... | pd.get_dummies(temp_df, columns=['optimizer']) | pandas.get_dummies |
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
impor... | to_datetime(['1/1/2000', '1/2/2000', '1/3/2000']) | pandas.tseries.tools.to_datetime |
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal... | assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import pandas as pd
from surprise import KNNWithMeans, SVD, SVDpp, NMF
from surprise.prediction_algorithms.slope_one import SlopeOne
from settings.config import user_label, NMF_LABEL, \
SVDpp_LABEL, SVD_LABEL, SLOPE_LABEL, ITEMKNN_LABEL, USERKNN_LABEL, item_label, value_label, K_NEIGHBOR
from conversions.pandas_to... | pd.concat(evaluation_results_df) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Three classes' definition in here.
* a params class which stores and manipulate the parameters of our MRS fitting/simulation model
* a metabolite class which stores and can compute a MRS modeled signal for a single metabolite, based on the pyGAMMA library (for... | pd.concat([df_self, df_found], axis=0) | pandas.concat |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import a... | pd.DataFrame(series) | pandas.DataFrame |
#!/usr/bin/python
from keras.models import load_model
import pandas as pd
import numpy as np
# Read data
test = pd.read_csv('test.csv')
X_test = (test.ix[:,:].values).astype('float32')
# 28x28 pixels
X_test = X_test.reshape(X_test.shape[0], 28, 28,1)
# pre-processing: divide by max and substract mean
scale = 255
... | pd.DataFrame() | pandas.DataFrame |
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import ... | Timestamp("2011-01-01") | pandas.Timestamp |
"""
Helper functions for dfds_ds_toolbox.analysis.plotting.
"""
from typing import List, Union
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
def _get_equally_grouped_data(
input_data: pd.DataFrame,
feature: str,
target_col: str,
bins:... | pd.isnull(input_data[feature]) | pandas.isnull |
"""
Created on Thu Nov 7, 2019
@author: <NAME>
"""
import serial # `pyserial` package; NOT `serial` package
import warnings
import pandas as pd
import numpy as np
import time
import os
import sys
from datetime import datetime
try:
from serial.tools import list_ports
IMPORTED_LIST_PORTS = True
except ValueE... | pd.DataFrame(dV_y.T, index=freqs, columns=Vs) | pandas.DataFrame |
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import scipy.stats as ss
import os
#import matplotlib.pyplot as plt
import matplotlib
#matplotlib.get_backend()
from matplotlib import pyplot as plt
import seaborn as sns
#import matplotlib.pyplot as plt
#import matplotlib
#matplotlib.use('TkAgg')
#i... | pd.read_csv(depth_minus1_file) | pandas.read_csv |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may... | pd.DataFrame(dow) | pandas.DataFrame |
"""
Routines for casting.
"""
from contextlib import suppress
from datetime import date, datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
import numpy as np
from pandas._libs import lib, tslib, t... | isna(arr) | pandas.core.dtypes.missing.isna |
import sys
import numpy as np
from skimage import measure
sys.path.append("../")
def test_clustering_widget(make_napari_viewer):
import napari_clusters_plotter as ncp
viewer = make_napari_viewer(strict_qt=True)
widget_list = ncp.napari_experimental_provide_dock_widget()
n_wdgts = len(viewer.window... | pd.DataFrame(X) | pandas.DataFrame |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_deco... | read_hdf(hh, "df", where="l1=[2, 3, 4]") | pandas.io.pytables.read_hdf |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFacto... | pd.date_range(start_date, end_date) | pandas.date_range |
import os
import os.path as osp
import shutil
import json
from tqdm.auto import tqdm as tq
from itertools import repeat, product
import numpy as np
import pandas as pd
import torch
from torch_geometric.data import Data, InMemoryDataset, extract_zip
from torch_geometric.io import read_txt_array
import torch_geometric.t... | pd.Series(meshply.elements[0].data["WSS"]) | pandas.Series |
from datetime import datetime, timedelta
import operator
import pickle
import unittest
import numpy as np
from pandas.core.index import Index, Factor, MultiIndex, NULL_INDEX
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as tm
import pandas._tseries as tseries
class TestIndex(unittest... | Index(['a', 'b', 'c']) | pandas.core.index.Index |
# -*- coding: utf-8 -*-
import argparse
import json
from os import listdir
from os.path import join
import numpy as np
import pandas as pd
from src.utilities import mkdir_if_needed
def read_presentation_type(sequence):
"""
This function extracts the presentation_type variable from a sequence dictionary.
... | pd.read_csv(args.subject_summary, index_col=0) | pandas.read_csv |
import pandas as pd
import sqlite3
class Co2:
# ind_name -> industry name
def ind_name(self,ind):
con = sqlite3.connect('./sorting.db')
df = | pd.read_sql_query('select * from sorting',con) | pandas.read_sql_query |
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 13 21:37:34 2021
@author: <NAME>
"""
"""
Functions of Question 1
"""
"""
1.1 Get the list of animes
"""
# import modules
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import time
import os
import random
import dateti... | pd.Series(ans) | pandas.Series |
# -*- coding: utf-8 -*-
import geopandas as gpd
import multiprocessing as mp
import numpy as np
import os
import pandas as pd
import re
import seaborn as sns
import sys
import time
from tqdm import tqdm
from matplotlib import pyplot as plt
import warnings
from hs_process.utilities import defaults
from hs_process.util... | pd.isnull(cs['crop_e_m']) | pandas.isnull |
import os
import pytest
import pandas as pd
import numpy as np
from scripts.national_load import (
filter_outliers,
_interpolate_gaps,
_fill_29th_feb,
_countries_with_missing_data_in_model_year,
_get_index_of_missing_data,
_ignore_feb_29th,
clean_load_data
)
THIS_DIR = os.path.dirname(__f... | pd.DataFrame({"foo": foo, "bar": bar}) | pandas.DataFrame |
###############################################################################
# PCAAnomalyDetector
import numpy as np
import pandas
from nimbusml.datasets import get_dataset
from nimbusml.decomposition import PcaAnomalyDetector
from sklearn.model_selection import train_test_split
# use 'iris' data set to create test... | pandas.concat([X_test, not_iris], sort=False) | pandas.concat |
"""
A non-blending lightGBM model that incorporates portions and ideas from various public kernels.
"""
DEBUG = False
WHERE = 'kaggle'
FILENO = 4
NCHUNK = 32000000
OFFSET = 75000000
VAL_RUN = False
MISSING32 = 999999999
MISSING8 = 255
PUBLIC_CUTOFF = 4032690
if WHERE=='kaggle':
inpath = '../input/talkingdata-adtrack... | pd.read_csv(inpath+"test.csv", nrows=100000, parse_dates=['click_time'], dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'click_id']) | pandas.read_csv |
"""
Generate ensemble submission by majority vote.
Authors:
<NAME> and <NAME>
"""
import argparse
import glob
import pandas as pd
parser = argparse.ArgumentParser('Get args for ensemble script')
parser.add_argument('--split',
type=str,
default='dev',
... | pd.concat(data, axis=1) | pandas.concat |
import pandas as pd
import numpy as np
import os, csv
from collections import defaultdict
import logging
class CityInfo:
def __init__(self):
# Make dict
self.cities_data = {}
self.cities_data_ascii_names = {}
with open('worldcities.csv', encoding='utf-8') as csvDataFile:
... | pd.DataFrame.from_dict(db) | pandas.DataFrame.from_dict |
import sys
import os
import json
import argparse
import urllib.request
import multiprocessing
import pandas as pd
# download abstact text and NER annotation in pubtator format
def download_abs(X):
_id_s, tar_dir, url_prefix = X
file_path = tar_dir+_id_s
url_s = url_prefix+_id_s
# only retrive gene/dis... | pd.DataFrame(rst_rec, columns=['pmcid' if is_ft else 'pmid']) | pandas.DataFrame |
import pandas
import math
import csv
import random
import numpy
from sklearn import linear_model
from sklearn.model_selection import cross_val_score
# When a team has no Elo rating yet, assign it the base Elo rating
base_elo = 1600
team_elos = {}
team_stats = {}
x = []
y = []
folder = 'data'
# Initialize from each team's Miscellaneous, Opponent, and Team statistics CSV files
def initia... | pandas.read_csv('data/MiscellaneousStats.csv') | pandas.read_csv |
# AUTOGENERATED! DO NOT EDIT! File to edit: 07_location_history_parse.ipynb (unless otherwise specified).
__all__ = ['load_json_file', 'parse_activities', 'parse_json_file', 'parse_json_file_as_rows', 'parse_json_data',
'parse_data_point', 'parse_activity', 'filter_json_data', 'sort_json_data', 'rowify_json... | pd.Timedelta('8H') | pandas.Timedelta |
import pandas as pd
import sys
if len(sys.argv) != 3:
print("Usage: python3 overhead.py raw.csv transform.csv")
raw = pd.read_csv(sys.argv[1])
tran = pd.read_csv(sys.argv[2])
half = len(tran) // 2
# raw = raw[half:]
# tran = tran[half:]
merged = pd.merge(raw,tran, on=['Index', 'Index'])
merged["diff"] = (merged[... | pd.set_option('display.max_rows', None) | pandas.set_option |
"""
Plot the IQR of your janky light curves vs KC19 reported age.
"""
###########
# imports #
###########
import os, socket, requests
from glob import glob
import numpy as np, pandas as pd, matplotlib.pyplot as plt
from numpy import array as nparr
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.sta... | pd.DataFrame(varinfos) | pandas.DataFrame |
import itertools
import numpy
import os
import random
import re
import scipy.spatial.distance as ssd
import scipy.stats
from scipy.cluster.hierarchy import dendrogram, linkage
import pandas
from matplotlib import colors
from matplotlib import pyplot as plt
import vectors
from libs import tsne
rubensteinGoodenoughDat... | pandas.DataFrame.from_csv(metricsHistoryPath) | pandas.DataFrame.from_csv |
import numpy as np
import pandas as pd
import xarray as xr
import copy
import warnings
try:
from plotly import graph_objs as go
plotly_installed = True
except:
plotly_installed = False
# warnings.warn("PLOTLY not installed so interactive plots are not available. This may result in unexpected funtionali... | pd.DataFrame(binned_data_stats) | pandas.DataFrame |
from __future__ import absolute_import
import collections
import gzip
import logging
import os
import sys
import multiprocessing
import threading
import numpy as np
import pandas as pd
from itertools import cycle, islice
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, Min... | pd.read_csv(test_cell_path) | pandas.read_csv |
from datetime import timedelta
import operator
from typing import Any, Callable, List, Optional, Sequence, Union
import numpy as np
from pandas._libs.tslibs import (
NaT,
NaTType,
frequencies as libfrequencies,
iNaT,
period as libperiod,
)
from pandas._libs.tslibs.fields import isleapyear_arr
from... | Period._maybe_convert_freq(freq) | pandas._libs.tslibs.period.Period._maybe_convert_freq |
r"""Submodule frequentist_statistics.py includes the following functions: <br>
- **normal_check():** compare the distribution of numeric variables to a normal distribution using the
Kolmogrov-Smirnov test <br>
- **correlation_analysis():** Run correlations for numerical features and return output in different forma... | pd.DataFrame(columns=col_list, index=row_list) | pandas.DataFrame |
import os
import timeit
import pandas as pd
from numpy.random import uniform
import featherstore as fs
def time_it(func, number, *args, **kwargs):
MS = 1000
runtime = timeit.timeit('func(*args, **kwargs)',
globals={**globals(), **locals()},
number=number... | pd.DataFrame(data=data, index=index) | pandas.DataFrame |
import pandas as pd
#import matplotlib.pyplot as plt
import numpy as np
import datetime
from datetime import datetime
import glob
import os.path as path
one_up = path.abspath(path.join(__file__ ,".."))
two_up = path.abspath(path.join(__file__ ,"../.."))
three_up = path.abspath(path.join(__file__ ,"../../.."))
df = | pd.read_csv(two_up + '/dataset/20210717182858/submissions.csv') | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # Heart Disease Dataset
# ## **0. Before we begin**
# Please **comment** or **upvote** this kernel.
# ### Kernel goals:
#
# * Data exploration
# * Find important features for L1-regularized Logistic regression
# * Propose correct scoring metrics for this dataset
# * Fight o... | pd.DataFrame({'KNN': knn_scores, 'Logistic regression': lr_scores, 'SVC': svm_scores, 'AdaBoost': tree_scores, 'Neural network': nn_scores}) | pandas.DataFrame |
import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from yitian.datasource import *
from yitian.datasource import preprocess
class Test(unittest.TestCase):
# def test_standardize_date(self):
# data_pd = pd.DataFrame([
# ['01/01/2019', 11.11],... | pd.Timestamp('2019-03-03') | pandas.Timestamp |
import numpy as np
import pytest
from pandas import (
Categorical,
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
get_dummies,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
class TestGe... | tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import pickle
from glob import glob
import pandas as pd
from imdb import Cinemagoer
import numpy as np
import os
def extract_from_list_col(dataframe, col, max_items=4, normalize=True):
return dataframe[col].apply(
lambda x: extract_from_list(x, max_items=max_items, normalize=normalize)
)
def extract... | pd.read_csv("data/processed/filtered_id_list.csv") | pandas.read_csv |
# *****************************************************************************
# Copyright (c) 2019-2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions o... | pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) | pandas.DataFrame |
import os
import audiofile
import audiofile as af
import numpy as np
import pandas as pd
import pytest
import audinterface
import audformat
def signal_duration(signal, sampling_rate):
return signal.shape[1] / sampling_rate
def signal_max(signal, sampling_rate):
return np.max(signal)
SEGMENT = audinterfa... | pd.to_timedelta('1s') | pandas.to_timedelta |
import collections
from functools import lru_cache
import logging
import pandas as pd
import time
import numpy as np
from tqdm import tqdm
from holoclean.dataset import AuxTables, CellStatus
from .estimators import *
from .correlations import compute_norm_cond_entropy_corr
from holoclean.utils import NULL_REPR
clas... | pd.DataFrame(data=cells) | pandas.DataFrame |
import os
import requests
import pandas as pd
from random import randint
from django.db.models import Q
from .models import Account
api_key = os.environ.get('IEX_API_KEYS')
TEST_OR_PROD = 'cloud'
def make_position_request(tickers):
data = []
for x in tickers:
response = requests.get("https://{}.iexapi... | pd.DataFrame(data) | pandas.DataFrame |
from typing import List, Tuple, Iterable
from cobra import Model, Reaction, Metabolite
import re
import pandas as pd
import numpy as np
from ncmw.utils import pad_dict_list
def transport_reactions(model: Model) -> List[str]:
"""This function return a list of potential transport reactions, we define a
transp... | pd.DataFrame(df_dict) | pandas.DataFrame |
from numpy.random import default_rng
import numpy as np
import emcee
import pandas as pd
from tqdm.auto import tqdm
from sklearn.preprocessing import StandardScaler
import copy
from scipy.stats import norm, ortho_group
import random
import math
import scipy.stats as ss
"""
A collection of synthetic data generators, i... | pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns) | pandas.DataFrame |
#!/usr/bin/env python3
"""Tools to export data from MS2Analyte as flat files for viewing in Tableau"""
import os
import pickle
import pandas as pd
import sys
import csv
from ms2analyte.file_handling import file_load
def full_export(input_file, input_data, input_structure, input_type, **kwargs):
"""Export data ... | pd.concat([ms1_data, ms2_data]) | pandas.concat |
# coding: utf-8
# In[34]:
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
# In[35]:
import sklearn
# In[36]:
data = pd.read_csv('a.csv')
# In[37]:
data.head()
# In[38]:
cor = data.corr()
cor = abs(cor['mortality_rate'])
print(cor[cor > 0.3])
# In[39]:
dat... | pd.DataFrame(scaled, columns=data.columns) | pandas.DataFrame |
#!/usr/bin/env python3
# Copyright (C) <NAME> 2019
# Licensed under the 2-clause BSD licence
# Plots a coloured matrix of Android versions over time, showing the types of exploit possible per month per version
import numpy as np
import pandas
import matplotlib.pyplot as plt
from matplotlib import colors
from graph_... | pandas.DataFrame(grid, columns=dates, index=versions) | pandas.DataFrame |
import jsonlines
import pandas as pd
def write_output_to_file(output, path):
with jsonlines.open(path, mode="w") as writer:
for obj in output:
writer.write(obj)
def create_dfs_from_file(path, include_articles):
with jsonlines.open(path) as reader:
articles = []
entities ... | pd.DataFrame(articles) | pandas.DataFrame |
"""
Detection Recipe - 192.168.3.11
References:
(1) 'Asteroseismic detection predictions: TESS' by Chaplin (2015)
(2) 'On the use of empirical bolometric corrections for stars' by Torres (2010)
(3) 'The amplitude of solar oscillations using stellar techniques' by Kjeldson (2008)
(4) 'An absolutely calibrated Teff ... | pd.DataFrame(data={'B-V': bv, 'Vmag': vmag, 'g_mag_abs': g_mag_abs, 'Ai': 0}) | pandas.DataFrame |
import base64
import calendar
import json
import logging
import re
import sqlparse
import uuid
from collections import OrderedDict
from datetime import datetime
from io import BytesIO
from django.conf import settings
from django.db import models, DatabaseError, connection
from django.db.models import signals
from dja... | pd.DataFrame(columns=columns) | pandas.DataFrame |
import logging
from typing import List
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectKBest, chi2
from classes.Dataset import Dataset
class FeaturesSelector:
def __init__(self, dataset: Dataset):
self.__f... | pd.concat([columns, scores], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
"""Human Activity Recognition dataset example.
http://groupware.les.inf.puc-rio.br/har
<NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>. Wearable Computing: Accelerometers' Data Classification of Body Postures and Movements.
Proceedings of 21st Brazilian Symposium on Artificial Intelligence... | pd.Series(y) | pandas.Series |
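Each row above pairs a truncated source-code prompt with the pandas call that completes it and the fully qualified API name. Below is a minimal sketch of how such a table could be loaded and summarized with pandas itself, assuming the rows are exported as a JSONL file; the filename `pandas_api_completions.jsonl` and the field names `prompt`/`completion`/`api` are illustrative, not part of the dataset spec.

```python
import pandas as pd

# Hypothetical export of the rows above: one JSON object per line with
# "prompt", "completion", and "api" fields (filename is an assumption).
df = pd.read_json("pandas_api_completions.jsonl", lines=True)

# Count how often each fully qualified pandas API appears as a target.
api_counts = df["api"].value_counts()
print(api_counts.head(10))

# Inspect one prompt/completion pair for a given API.
sample = df.loc[df["api"] == "pandas.read_csv"].iloc[0]
print(sample["prompt"][-200:])  # tail of the truncated source context
print(sample["completion"])     # e.g. pd.read_csv(csv_filename)
```

Grouping on `api` this way makes it easy to check how skewed the label distribution is before training a completion model on these pairs.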