| prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars) |
|---|---|---|
import wf_core_data.utils
import requests
import pandas as pd
from collections import OrderedDict
# import pickle
# import json
import datetime
import time
import logging
import os
logger = logging.getLogger(__name__)
DEFAULT_DELAY = 0.25
DEFAULT_MAX_REQUESTS = 50
DEFAULT_WRITE_CHUNK_SIZE = 10
SCHOOLS_BASE_ID = 'app... | pd.to_datetime(hub_data_df['pull_datetime']) | pandas.to_datetime |
"""
Tasks for the serving pipeline
"""
from pathlib import Path
import pickle
import pandas as pd
from sklearn import datasets
def get(product, sample):
"""Get input data to make predictions
"""
Path(str(product)).parent.mkdir(parents=True, exist_ok=True)
d = datasets.load_iris()
df = | pd.DataFrame(d['data']) | pandas.DataFrame |
from copy import copy
from pandas import DataFrame, concat, notnull, Series
from typing import List, Optional
from survey.attributes import RespondentAttribute
class AttributeContainerMixin(object):
_attributes: List[RespondentAttribute]
@property
def data(self) -> DataFrame:
"""
Return... | notnull(row) | pandas.notnull |
import requests
import os
import pandas as pd
from flask import Flask, render_template, request, redirect
#from bokeh.plotting import figure
#from bokeh.embed import components
from spotipy.oauth2 import SpotifyClientCredentials
import spotipy
import dill
import spotipy.util as util
import spotipy.oauth2 as oauth2
imp... | pd.DataFrame(data=[['', input_text, '', '']], columns=['genre', 'lyrics', 'orig_index', 'track_id']) | pandas.DataFrame |
"""
Python module to do secondary preprocessing
Creates processed_train and processed_test .csv files
"""
import pandas as pd
import numpy as np
from datetime import datetime
from dateutil.parser import parse
import os
def feature_engineering(df):
"""
Function to calculate debt-to-income
"""
df['dti... | pd.get_dummies(df_new[cat_cols]) | pandas.get_dummies |
import pandas as pd
import numpy as np
import requests
from bs4 import BeautifulSoup
import re
import ast
import os
import sys
from urllib.request import urlopen
from datetime import datetime, timedelta, date
from traceback import format_exc
import json
import math
import urllib.error
from urllib.parse im... | pd.Series(df['세부정보']) | pandas.Series |
import tensorflow as tf
import pandas as pd
import pickle
def predict_model(crim, zn, indus, chas,
nox, rm, age, dis, rad,
tax, ptratio, black, lstat):
# Import variable
scaler_x = pickle.load(open('./saved_model/scaler_x.pickle', 'rb'))
scaler_y = pickle.load(open('./... | pd.DataFrame(data=data) | pandas.DataFrame |
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, log_loss, f1_score, confusion_matrix, classification_report, roc_curve, auc
import xgboost as xgb
from pandas import DataFrame, concat
from preprocess_helper_functions import *
from sklearn.model_selection... | DataFrame({'pred': p_test, 'truth': y_test}) | pandas.DataFrame |
import numpy as np
import pandas as pd
# Crime data is collected into two separate csv files. The first contains
# 40 years of data by state, and 10 years (in 10 xls files) by city
# data in this csv contains estimates in instances of no reporting
df = pd.read_csv(
"http://s3-us-gov-west-1.amazonaws.com/cg-d4b776... | pd.merge(masta, masta2, how='outer', on='city') | pandas.merge |
import scipy
import numpy
import pandas
import os
import isatools.isatab as isatab
import json
import inspect
import re
from ..enumerations import VariableType, DatasetLevel, SampleType, AssayRole
from ..utilities.generic import removeDuplicateColumns
from .._toolboxPath import toolboxPath
from datetime import datetime... | pandas.merge(self.limsFile,self.sampleMetadata, left_on='Assay data name Normalised', right_on='Sample Base Name Normalised', how='right', sort=False) | pandas.merge |
from datetime import date
from pprint import pprint
from typing import List, Any, Union
import pandas as pd
from pandas import DataFrame
import Common.Measures.TradingDateTimes.PyDateTimes as PyDays
import Common.Readers.TickerNameList as PyTickers
import Common.Readers.YahooTicker as PyTicker
from Common.TimeSeries im... | pd.DataFrame(new_dic_field) | pandas.DataFrame |
from collections import OrderedDict
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import Index, MultiIndex, date_range
import pandas.util.testing as tm
def test_constructor_singl... | tm.assert_index_equal(result, result2) | pandas.util.testing.assert_index_equal |
import pandas as pd
data = | pd.read_csv("2018_Central_Park_Squirrel_Census_-_Squirrel_Data.csv") | pandas.read_csv |
from myutils.utils import getConnection, cronlog
import pandas as pd
import numpy as np
import datetime
import requests
class TestRequest:
def __init__(self, url, method='GET', META=None, postdata=None):
self.method = method
u = url.split('?')
self.path_info = u[0]
self.META = META... | pd.DataFrame(idx, columns=['timestamp']) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# four representative days in each season
winter_day = '01-15'
spring_day = '04-15'
summer_day = '07-15'
fall_day = '10-15'
# define a function to plot household profile and battery storage level
def plot_4days(mode, tmy_code, utility, year, c_c... | pd.to_datetime(year + '-' + summer_day + ' ' + '23:00:00') | pandas.to_datetime |
from numpy import dtype
import pandas as pd
import logging
import json
from nestshredder.pyshred_core import _shred_recursive, pad_dict_list
from nestshredder.pyshred_util import check_arguments
def shred_json(path_or_buf,target_folder_path,object_name,batch_ref=None,orient=None,dtype=None,convert_axes=None,convert_da... | pd.DataFrame.from_dict(new_list) | pandas.DataFrame.from_dict |
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for moth... | pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce') | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# <b>Python Scraping of Book Information</b>
# In[1]:
get_ipython().system('pip install bs4')
# In[2]:
get_ipython().system('pip install splinter')
# In[3]:
get_ipython().system('pip install webdriver_manager')
# In[1]:
# Setup splinter
from splinter import Browser
... | pd.read_csv('greek-roman.csv') | pandas.read_csv |
"""
===================================================================================
Train distributed CV search with a logistic regression on the breast cancer dataset
===================================================================================
In this example we optimize hyperparameters (C) for a logistic ... | pd.DataFrame(model.cv_results_) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Folium interact with GeoJSON data
Examples: overlay another GeoJSON zipcode map to the original map
Author: <NAME>
"""
import pandas as pd
import folium
def show_zipcode_map(zipcode_path, data, col):
"""
Interact zipcode GeoJSON data with other data set (house price o... | pd.groupby(house_data, 'zipcode') | pandas.groupby |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# file: main.py
import re
import pandas as pd
from os.path import isfile
try:
from .remarkuple import helper as h
except:
from remarkuple import helper as h
from IPython.display import HTML
try:
from .isopsephy import greek_letters as letters
from .isopse... | pd.read_csv(csvProcessedFileName + ".csv") | pandas.read_csv |
import numpy as np
import pandas as pd
# If you import here, you can use it.
from sklearn.linear_model import LogisticRegression, HuberRegressor, LinearRegression,Ridge,Perceptron
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestRegressor, Rando... | pd.concat([data, number_of_nan]) | pandas.concat |
# Changing the actions in self.actions should automatically change the script to function with the new number of moves.
# Developed and improved by past CG4002 TAs and students: <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
import os
import sys
import time
import traceback
import random
import sock... | pd.DataFrame(columns=self.columns) | pandas.DataFrame |
import os
import gzip
import random
import pickle
import yaml
import pandas as pd
from base64 import b64encode
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sqlalchemy.orm import declarative_base, sessionmaker
from sqlalchemy import create_engine, Column, In... | pd.DataFrame(data, columns=['x', 'y']) | pandas.DataFrame |
import os
import sys
sys.path.append("../..")
import datetime
import pymongo
from pandas.io.json import json_normalize
import pandas as pd
class Test():
"""
This page is used to create a Graph in Sqlgraph which include three kinds of node ----Document, Event and Knowledge.
"""
def __init__(self, ... | pd.DataFrame(relation_list, columns=["Head_id", "Tail", "id", "relation_id", "type"]) | pandas.DataFrame |
"""General data-related utilities."""
import functools
import operator
import pandas as pd
def cartesian(ranges, names=None):
"""Generates a data frame that is a cartesian product of ranges."""
if names is None:
names = range(len(ranges))
if not ranges:
return pd.DataFrame()
if len(ran... | pd.DataFrame({names[0]: ranges[0]}) | pandas.DataFrame |
import numpy
import pyearth
import pandas as pd
from pyearth import Earth
pathToInputData = 'C:\\__DEMO1\\Memory.csv'
dateTimeFormat = '%d/%m/%Y %H:%M'
pathToOutputData = 'C:\\__DEMO1\\output.txt'
# Write array to file
def array_to_file(the_array, file_name):
the_file = open(file_name, 'w')
for ... | pd.to_datetime(data.index, format=dateTimeFormat) | pandas.to_datetime |
import pytest
from pigging.connectors import googleBigQueryConnector, googleSheetsConnector
import os
import warnings
import pandas as pd
### Credentials ###
CREDENTIALS_PATH = os.environ.get('CREDENTIALS_PATH')
### Google Big Query ###
SELECT_QUERY = os.environ.get('SELECT_QUERY')
PROEJCT_ID = os.environ.get('PROEJ... | pd.DataFrame(["test value"], columns=['Test col']) | pandas.DataFrame |
# Copyright (C) 2021 ServiceNow, Inc.
import pytest
import pandas as pd
import numpy as np
from nrcan_p2.data_processing.utils import (
produce_updown_df,
decide_lang
)
def test_produce_updown_df():
df = pd.DataFrame({
'text': ['a', "b", "c", "d", "e"],
'mycol': [0,1,2,3,4],
'o... | pd.testing.assert_frame_equal(output, expected) | pandas.testing.assert_frame_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 3 16:27:12 2017
@author: xinruyue
"""
import pandas as pd
import numpy as np
import xlrd
import pickle
import os
def get_country():
f = open('country.txt','r')
country = []
for line in f:
line = line.strip('\n')
countr... | pd.read_excel(file1, sheetname=sheet.name) | pandas.read_excel |
import sys, os
sys.path.insert(0, os.path.abspath('..'))
import re
from library.utils import StatisticResult, statistic_test
from collections import defaultdict
import pandas as pd
from typing import final
from library.RecRunner import NameType, RecRunner
from library.constants import METRICS_PRETTY, RECS_PRETTY, exper... | pd.concat(dfs,axis=1) | pandas.concat |
import multiprocessing
import pandas as pd
import numpy as np
from tqdm import tqdm
from gensim.models import Doc2Vec
from sklearn import utils
from gensim.models.doc2vec import TaggedDocument
import re
import nltk
from gensim.test.test_doc2vec import ConcatenatedDoc2Vec
nltk.download('punkt')
def tokenize_text(text)... | pd.DataFrame() | pandas.DataFrame |
"""Data Profiling
This script runs the routine of applying data profiling metrics
using the pydeequ library.
github: (https://github.com/awslabs/python-deequ)
This function receives configuration parameters,
process the analyses and saves the results in a BigQuery table.
A way to call this module would be:
gcloud ... | pd.DataFrame(d) | pandas.DataFrame |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from... | pd.Series(mixed) | pandas.Series |
import unittest
import tempfile
import json
import numpy as np
import pandas as pd
from supervised.preprocessing.label_encoder import LabelEncoder
class LabelEncoderTest(unittest.TestCase):
def test_fit(self):
# training data
d = {"col1": ["a", "a", "c"], "col2": ["w", "e", "d"]}
df = pd.... | pd.DataFrame(data=d) | pandas.DataFrame |
# -*- coding:utf-8 -*-
__author__ = 'boredbird'
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from datetime import datetime
from sklearn.svm import l1_min_c
from woe.eval import compute_ks
import pickle
import time
"""
Search for optimal hyp... | pd.read_csv(config_path) | pandas.read_csv |
from pathlib import Path
import sklearn
import numpy as np
import pandas as pd
from scipy.stats import pearsonr, spearmanr
def calc_preds(model, x, y, mltype):
""" Calc predictions. """
if mltype == 'cls':
def get_pred_fn(model):
if hasattr(model, 'predict_proba'):
return ... | pd.Series(y_pred, name='y_pred') | pandas.Series |
from __future__ import absolute_import, division, print_function
from pandas import DataFrame, Series
from numpy import zeros
from pennies.trading.assets import Swap, Annuity, IborLeg, FixedLeg, VanillaSwap
from pennies.market.market import RatesTermStructure
from pennies.market.curves import ConstantDiscountRateCurv... | pd.Timedelta(days=200) | pandas.Timedelta |
import pandas as pd
from expenses_report.config import config
from itertools import product
class DataProvider(object):
_transactions = list()
_columns = None
def __init__(self, transactions):
self._transactions = transactions
self._columns = list(config.import_mapping.keys()) + [config.... | pd.DataFrame(columns=columns) | pandas.DataFrame |
import datetime
import numpy as np
import pandas as pd
from poor_trader import chart
from poor_trader import utils
TRADE_DAYS_PER_YEAR = 244
def SQN(df_trades):
"""
System Quality Number = (Expectancy / Standard Deviation R) * sqrt(Number of Trades)
:param df_trades:
:return:
"""
try:
... | pd.DataFrame() | pandas.DataFrame |
from __future__ import division
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import locale
import os
import re
from shutil import rmtree
import string
import subprocess
import sys
import tempfile
import traceback
import warnings
import numpy as np
from numpy.random i... | Index(right.values) | pandas.Index |
import logging
import math
import pandas
import numpy
from statsmodels.formula.api import OLS
from statsmodels.tools import add_constant
from fls import FlexibleLeastSquare
_LOGGER = logging.getLogger('regression')
class RegressionModelFLS(object):
def __init__(self, securities, delta, with_constant_term=True)... | pandas.DataFrame({self.securities[0]: self._y_values}) | pandas.DataFrame |
import operator
from enum import Enum
from typing import Union, Any, Optional, Hashable
import numpy as np
import pandas as pd
import pandas_flavor as pf
from pandas.core.construction import extract_array
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_dtype,
is_dtype_equal,
... | extract_array(left_c, extract_numpy=True) | pandas.core.construction.extract_array |
import os
import shutil
from deepsense import neptune
import pandas as pd
import math
from .pipeline_config import DESIRED_CLASS_SUBSET, ID_COLUMN, SEED, SOLUTION_CONFIG
from .pipelines import PIPELINES
from .utils import competition_metric_evaluation, generate_list_chunks, get_img_ids_from_folder, \
init_logger,... | pd.read_csv(PARAMS.annotations_human_labels_filepath) | pandas.read_csv |
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
import tinkoff_data as td
import edhec_risk_kit as erk
import csv
#l=[]
#l=["TIPO", "TGLD", "TUSD", "TSPX", "TBIO", "TECH"]
l=["FXUS","FXRW","FXWO","FXKZ","FXCN","FXIT","FXDE","FXRL","FXRB","FXRU","FXGD","FXMM","FXTB"]
pddf = td.getTinkof... | pd.to_datetime(pddf.index) | pandas.to_datetime |
"""
Date: Nov 2018
Author: <NAME>
Retrieves sample counts to help select train, validation and testing subsets.
We have already created sync samples using script "create_sync_samples".
This script gets the numbers of samples for each datasets, speakers, and sessions.
These counts are used to select training, validat... | pd.DataFrame.to_csv(df_files, output_file_names, index=False) | pandas.DataFrame.to_csv |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/24 15:02
Desc: Eastmoney (东方财富网) - Data Center - New Stock Data - IPO subscription returns
Eastmoney - Data Center - New Stock Data - IPO subscription returns
http://data.eastmoney.com/xg/xg/dxsyl.html
Eastmoney - Data Center - New Stock Data - new stock subscription and allotment query
http://data.eastmoney.com/xg/xg/default_2.html
"""
import pandas as pd
import requests
from tqdm import tqdm
from akshare.utils i... | meric(big_df['发行价格']) | pandas.to_numeric |
import pandas as pd
import numpy as np
try:
from paraview.vtk.numpy_interface import dataset_adapter as dsa
from paraview.vtk.numpy_interface import algorithms as algs
from paraview import servermanager as sm
from paraview.simple import *
except:
pass
from vtk.util.numpy_support import vtk_to_numpy
... | pd.DataFrame() | pandas.DataFrame |
from __future__ import annotations
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import gamma, exponnorm
sns.set()
BASE_PATH = Path('..', 'data', 'experimental')
INPUT_PATHS = [
BASE_PATH / 'control.csv',
BASE_PATH / 't... | pd.read_csv(path, index_col=None) | pandas.read_csv |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from numpy import mean, var
from scipy import stats
from matplotlib import rc
from lifelines import KaplanMeierFitter
# python program to plot the OS difference between M2 HOXA9 low and M2 high HOXA9
def find_gene_... | pd.DataFrame(data=M2_low_tab) | pandas.DataFrame |
#
# Collective Knowledge ()
#
#
#
#
# Developer:
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
import os
import sys
import json
import re
import pandas as pd
import numpy as np
# Local s... | pd.MultiIndex.from_tuples([(x[0],x[1],x[2],x[3],x[4],x[5],x[6]+1) for x in df_prev.index]) | pandas.MultiIndex.from_tuples |
# -*- coding: utf-8 -*-
# Copyright © 2021 by <NAME>. All rights reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to u... | pd.read_parquet(input_path) | pandas.read_parquet |
"""
Original work Copyright 2017 <NAME>
Modified work Copyright 2018 IBM Corporation
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by a... | pd.DataFrame(index=self.dfP.index,columns=['Frequency']) | pandas.DataFrame |
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import random
import math
import pandas as pd
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from mlos.Optimizers.RegressionModels.Prediction import Prediction
from mlos.Optimizers.RegressionModels.LassoCro... | pd.DataFrame(poly_terms_x, columns=['1', 'x1', 'x2', 'x1**2', 'x1*x2', 'x2**2']) | pandas.DataFrame |
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.tslib as tslib
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period,
_np_version_under1p10, Index, Timedelta, offsets)
... | tm.assert_numpy_array_equal(result, expected) | pandas.util.testing.assert_numpy_array_equal |
import sys
import pandas as pd
from sqlalchemy import *
def load_data(messages_filepath, categories_filepath):
'''
load the data set from the csv file and convert it to pandas
dataframe and combine the two data frame
Argument :
messages_filepath - path of the csv file disaster_messages.csv
... | pd.read_csv('disaster_messages.csv') | pandas.read_csv |
import os.path
import logging
import pandas as pd
from common.constants import *
from common.base_parser import BaseParser
PATHWAY_FILE = 'pathway.tsv'
KO_FILE = 'ko.tsv'
GENE_FILE = 'gene.tsv'
GENOME_FILE = 'genome.tsv'
KO2PATHWAY_FILE = 'ko2pathway.tsv'
GENOME2PATHWAY_FILE = 'genome2pathway.tsv'
GENE2KO_FILE = 'ge... | pd.read_csv(infile, sep='\t', chunksize=3000, header=None, names=[PROP_ID, 'gene_id']) | pandas.read_csv |
import glob
import os
import pathlib
import tempfile
import warnings
import logging
from abc import ABC
from pathlib import Path
from shutil import copy
from tempfile import mkstemp
from typing import Union, Dict
from zipfile import ZipFile
import numpy as np
import pandas as pd
from flask import send_from_directory, ... | pd.to_datetime(timestamps,utc=False) | pandas.to_datetime |
from difflib import SequenceMatcher
import functools
from typing import Optional
import pandas
__doc__ = """Get specialty codes and consolidate data from different sources in basic_data."""
COLUMNS = ['first_name', 'last_name', 'city', 'postal_code', 'state', 'specialty_code']
GENERIC_OPHTHALMOLOGY_CODE = '207W00000X... | pandas.concat([out_df, address_data], 1) | pandas.concat |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
... | pd.Timestamp("2013-05-19 00:00:00") | pandas.Timestamp |
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.api.indexers import (
BaseIndexer,
FixedForwardWindowIndexer,
)
from pandas.core.window.indexers import (
ExpandingIndexer,... | FixedForwardWindowIndexer(window_size=window_size) | pandas.api.indexers.FixedForwardWindowIndexer |
import pandas as pd
import zipfile
import re
import collections
from lxml import etree
import pathlib
import utils
import random
docxFileName = "../resources/quicks/quick_section4.docx"
annFileName = "../resources/quicks/annotations.tsv"
### Issue a warning if either the docx Chronology or the annotations are not a... | pd.merge(df_test, parsedf, on=["MainId", "SubId", "MainStation", "SubStation", "SubStFormatted"]) | pandas.merge |
#Library of functions called by SimpleBuildingEngine
import pandas as pd
import numpy as np
def WALLS(Btest=None):
#Building height
h_building = 2.7#[m]
h_m_building = h_building / 2
h_cl = 2.7# heigth of a storey
#number of walls
n_walls = 7
A_fl = 48
#WALLS CHARACTERISTICS
#Orie... | pd.Series([0, 0, 0, 0, 0, 0, 0]) | pandas.Series |
import numpy as np
import hydra
from hydra.utils import get_original_cwd
from dataset.dataloader.labeledDS import LabeledDataModule
from dataset.dataloader.unlabeledDS import UnlabeledDataModule
import os
from utils.metrics import AllMetrics
import json
from sklearn.preprocessing import StandardScaler
import warnings
f... | pd.DataFrame(data=pred_target, columns=columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
df = pd.read_csv('./survey_results_public.csv')
schema = pd.read_csv('./survey_results_schema.csv')
##Categorical Vari... | pd.get_dummies(df[col], prefix=col, prefix_sep='_', drop_first=True, dummy_na=dummy_na) | pandas.get_dummies |
# -*- coding: utf-8 -*-
# Dirichlet Mixing Module v1.2
# Implemented by <NAME>, based on original MatLab code by <NAME>.
# Mathematics described in Rudge et al.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Mean composition of melts from all lithologies
def mean_comp_total(f,w,c):
retu... | pd.DataFrame(EmptyColumns, columns=columns) | pandas.DataFrame |
from tifffile import TiffFile
import numpy as np
import pandas as pd
import sys, hashlib, json
from scipy.ndimage.morphology import binary_dilation
from sklearn.neighbors import NearestNeighbors
from scipy.ndimage import gaussian_filter
from collections import OrderedDict
#from random import random
"""
A set of functio... | pd.DataFrame({'mod':[-1,0,1]}) | pandas.DataFrame |
import pandas as pd
import datetime as dt
from scipy.interpolate import interp1d
from trios.utils.sunposition import sunpos
from trios.config import *
class awr_data:
'''
Above-water radiometry
'''
def __init__(self, idpr=None, files=None, Edf=None, Lskyf=None, Ltf=None):
# ''' get file name... | pd.Timedelta("2 seconds") | pandas.Timedelta |
from collections import namedtuple
from datetime import datetime as dt
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from speculator.features.OBV import OBV
from speculator.features.RSI import RSI
from speculator.features.SMA import SMA
from speculator.features.SO import SO... | pd.DataFrame(data=data, dtype=np.float32) | pandas.DataFrame |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... | pd.DataFrame(c) | pandas.DataFrame |
"""
Train a neural network for regression task:
cv: 10
batch size: 8
initializer: He normal initializer
optimizer: AdamMax
learning rate: 0.0004
loss: RMSE
Calculate RMSE at once, Oct. 3, 2020 revised
"""
import argparse
import numpy as np
import pandas as pd
import scipy.stats as scistat
fro... | pd.concat(loss_df_list, axis=0) | pandas.concat |
import sys
import subprocess
import os
import pandas as pd
def get_repo_root():
"""Get the root directory of the repo."""
dir_in_repo = os.path.dirname(os.path.abspath('__file__'))
return subprocess.check_output('git rev-parse --show-toplevel'.split(),
cwd=dir_in_repo,
... | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Extract a COCO captions dataframe from the annotation files."""
from __future__ import print_function
import os
import sys
import argparse
import pandas as pd
def main(args):
"""Extract a COCO captions dataframe from the annotation files."""
# Load coco library
sys.path.append(a... | pd.concat([cocoDF, df], axis=0) | pandas.concat |
import pandas as pd
import numpy as np
import datetime
import sys
import time
import xgboost as xgb
from add_feture import *
FEATURE_EXTRACTION_SLOT = 10
LabelDay = datetime.datetime(2014,12,18,0,0,0)
Data = pd.read_csv("../../../../data/fresh_comp_offline/drop1112_sub_item.csv")
Data['daystime'] = Data['days'].map(lam... | pd.crosstab(beforefiveday.item_category,beforefiveday.behavior_type) | pandas.crosstab |
import pandas as pd
from iexfinance.base import _IEXBase
from iexfinance.utils import _handle_lists, no_pandas
from iexfinance.utils.exceptions import IEXSymbolError, IEXEndpointError
class StockReader(_IEXBase):
"""
Base class for obtaining data from the Stock endpoints of IEX.
"""
# Possible option... | pd.DataFrame(d) | pandas.DataFrame |
## 1. Introduction ##
import pandas as pd
hn = | pd.read_csv('hacker_news.csv') | pandas.read_csv |
import os
import csv
import numpy as np
import pandas as pd
import logging
from collections import deque
from datetime import date, datetime, timedelta, time
from typing import Dict, List, Iterator
from libs.utils.loggers import get_source_log_directory, get_area_log_directory, get_source_logging_interval
logger = l... | pd.to_datetime(df['Date'], format='%Y-%m-%d') | pandas.to_datetime |
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
parameters:
messages_filepath --> messages file location
categories_filepath --> categories file location
output:
merged messages, categ... | pd.read_csv(messages_filepath) | pandas.read_csv |
from autodesk.model import Model
from autodesk.sqlitedatastore import SqliteDataStore
from autodesk.states import UP, DOWN, ACTIVE, INACTIVE
from pandas import Timestamp, Timedelta
from pandas.testing import assert_frame_equal
from tests.stubdatastore import StubDataStore
import pandas as pd
import pytest
def make_sp... | Timestamp(2018, 1, 1, 0, 0, 0) | pandas.Timestamp |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
impo... | tm.box_expected(expected, box_with_array) | pandas._testing.box_expected |
from flask import render_template, request, redirect, url_for, session
from app import app
from model import *
from model.main import *
import json
import pandas as pd
import numpy as np
class DataStore():
model=None
model_month=None
sale_model=None
data = DataStore()
@app.route('/', methods=["GET"])
def... | pd.to_datetime(dff['date']) | pandas.to_datetime |
import numpy as np
import pandas as pd
import pytest
import scipy.stats as st
from ..analysis import GroupCorrelation
from ..analysis.exc import MinimumSizeError, NoDataError
from ..data import UnequalVectorLengthError, Vector
@pytest.fixture
def random_seed():
"""Generate a numpy random seed for repeatable test... | pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import random
import numpy as np
import pandas as pd
from pandas.compat import lrange
from pandas.api.types import CategoricalDtype
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range, NaT, IntervalIn... | tm.assert_index_equal(result, expected) | pandas.util.testing.assert_index_equal |
#system libraries
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import K... | pd.DataFrame({'Nombre': nombre, 'Nacionalidad': de, 'Idiomas': idiomas, 'Edad': edad, 'Cuerpo': cuerpo,'Detalles': detalles,'Etnia': etnia,'Pelo': pelo,'C_Ojos': ojos, 'Subcultura': subcultura,'R_Sociales': redes })
print(df) | pandas.DataFrame |
import numpy as np
import pandas as pd
class Stream:
def __init__(self, stream_id, side_a, side_b, plist, direct=None):
self.stream_id = stream_id
self.side_a = side_a
self.side_b = side_b
self.packets = plist
self.direct = direct
self._pkt_size_list = None
... | pd.Series([1]*num, index=self._pkt_time_list) | pandas.Series |
import datareader
import dataextractor
import bandreader
import numpy as np
from _bisect import bisect
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import pandas as pd
from scipy import stats
from sklearn import metrics
def full_signal_extract(path, ident):
"""Extract breathing and heartbe... | pd.DataFrame([[i, mae, mse, cor[0]]], columns=['ID', 'MAE', 'MSE', 'COR']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import simpy
from sim_utils.audit import Audit
from sim_utils.data import Data
from sim_utils.patient import Patient
import warnings
warnings.filterwarnings("ignore")
class Model(object):
def __init__(self, scenario):
"""
"""
self.env = simpy.En... | pd.DataFrame(self.audit.global_audit) | pandas.DataFrame |
"""Locator functions to interact with geographic data"""
import numpy as np
import pandas as pd
import flood_tool.geo as geo
__all__ = ['Tool']
def clean_postcodes(postcodes):
"""
Takes list or array of postcodes, and returns it in a cleaned numpy array
"""
postcode_df = pd.DataFrame({'Postcode':post... | pd.read_csv(self.values_file) | pandas.read_csv |
#!/usr/bin/env python
# Copyright (C) 2019 <NAME>
import crispy
import logging
import numpy as np
import pandas as pd
import pkg_resources
import seaborn as sns
from natsort import natsorted
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from scipy import stats
from crispy.BGExp import GExp
from ... | pd.read_csv(f"{RPATH}/bgexp/bgexp_{sample}.csv", index_col=0) | pandas.read_csv |
# Copyright (c) 2013, GreyCube Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint
import pandas
from operator import itemgetter
def execute(filters=None):
return get_columns(filters), get_data(filters... | pandas.DataFrame.from_records(data) | pandas.DataFrame.from_records |
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Serie... | tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
"""
"Stacking: LGB, XGB, Cat with and without imputation (old & new LGBs),tsne,logistic"
"""
import os
from timeit import default_timer as timer
from datetime import datetime
from functools import reduce
import pandas as pd
import src.common as common
import src.config.constants as constants
import src.munging as pr... | pd.read_csv(f"{constants.OOF_DIR}/{sub_1_oof_name}") | pandas.read_csv |
import datetime
import json
import numpy as np
import requests
import pandas as pd
import streamlit as st
from copy import deepcopy
from fake_useragent import UserAgent
import webbrowser
from footer_utils import image, link, layout, footer
service_input = st.selectbox('Select Service',["","CoWin Vaccine... | pd.read_csv("beds_final.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categor... | tm.assert_series_equal(res, exp) | pandas.util.testing.assert_series_equal |
# %% [markdown]
# # FOI-based hospital/ICU beds data analysis
import pandas
import altair
altair.data_transformers.disable_max_rows()
# %% [markdown]
# ## BHSCT FOI data
#
# * weekly totals, beds data is summed (i.e. bed days)
bhsct_beds = pandas.read_excel('../data/BHSCT/10-11330 Available_Occupied Beds & ED Atts 20... | pandas.to_datetime(shsct_ae['Arrival Date'], format='%Y-%m-%d') | pandas.to_datetime |
import numpy as np
import pandas as pd
import datetime
import random as r
def randate():
start_date = datetime.date(2020, 1, 1)
end_date = datetime.date(2021, 2, 1)
time_between_dates = end_date - start_date
days_between_dates = time_between_dates.days
random_number_of_days = r.randrange(... | pd.read_csv("donors.csv") | pandas.read_csv |
import logging
from urllib.request import urlopen
import zipfile
import os.path
import io
import pandas as pd
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def data_url():
return "file:///home/orest/PycharmProjects/hdx/hdx-ecb-reference-fx/eurofxref-hist.zip"
return... | pd.DataFrame([hxl],columns=df.columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Copyright © 2021 by <NAME>. All rights reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to u... | pd.Series(agg, name="aggregates") | pandas.Series |
import numpy as np
from scipy.stats import ranksums
import pandas as pd
import csv
file = pd.read_csv('merged-file.txt', header=None, skiprows=0, delim_whitespace=True)
file.columns = ['Freq_allel','dpsnp','sift','polyphen','mutas','muaccessor','fathmm','vest3','CADD','geneName']
df = file.drop_duplicates(keep=False... | pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-predicteddriver.tsv', header=None, skiprows=0, sep='\t') | pandas.read_csv |
import datetime
import time
import pandas as pd
import numpy as np
import tensorflow as tf
import random as rn
import os
import keras
from keras import Input
from keras.models import Sequential, Model
from keras.layers import concatenate
from keras.layers import Dense
from keras.layers import LSTM, Dropout
from keras.... | pd.DataFrame(train_time) | pandas.DataFrame |