| prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
|---|---|---|
import pytest
import unittest
from unittest import mock
from ops.tasks.anomalyDetection import anomalyService
from anomaly.models import Anomaly
from pandas import Timestamp
from decimal import Decimal
from mixer.backend.django import mixer
import pandas as pd
@pytest.mark.django_db(transaction=True)
def test_createAn... | Timestamp('2021-06-09 00:00:00+0000', tz='UTC') | pandas.Timestamp |
import pandas
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn import preprocessing
from setlist import setlist
import sys
import os
path=os.getcw... | pandas.DataFrame(train_sets_features[i],columns=feature_columns) | pandas.DataFrame |
import geopandas as gpd
import networkx as nx
import numpy as np
import pandas as pd
from quetzal.analysis import analysis
from quetzal.engine import engine, linearsolver_utils, nested_logit
from quetzal.io import export
from quetzal.model import model, summarymodel, transportmodel
from syspy.spatial import geometries,... | pd.concat([self.zone_to_road, self.zone_to_transit]) | pandas.concat |
try:
import pandas as pd
except ImportError:
pd = None
if pd:
import numpy as np
from . import Converter, Options
class PandasDataFrameConverter(Converter):
writes_types = pd.DataFrame
@classmethod
def base_reader(cls, options):
return (
super... | pd.Index(value[header - 1][:index] if header else [None] * index) | pandas.Index |
import time
import numpy as np
import pandas as pd
def add_new_category(x):
"""
Aimed at 'trafficSource.keyword' to tidy things up a little
"""
x = str(x).lower()
if x == 'nan':
return 'nan'
x = ''.join(x.split())
if r'provided' in x:
return 'not_provided'
if r'youtube... | pd.DatetimeIndex(merged_df['visitStartTime']) | pandas.DatetimeIndex |
from alphaVantageAPI.alphavantage import AlphaVantage
from unittest import TestCase
from unittest.mock import patch
from pandas import DataFrame, read_csv
from .utils import Path
from .utils import Constant as C
from .utils import load_json, _mock_response
## Python 3.7 + Pandas DeprecationWarning
# /alphaVantageAPI... | DataFrame(cls.csv_delisted) | pandas.DataFrame |
"""Unit tests for orbitpy.coveragecalculator.gridcoverage class.
``TestGridCoverage`` class:
* ``test_execute_0``: Test format of output access files.
* ``test_execute_1``: Roll Circular sensor tests
* ``test_execute_2``: Yaw Circular sensor tests
* ``test_execute_3``: Pitch Circular sensor tests
* ``test_execute_4``... | pd.read_csv(out_file_access, skiprows = [0,1,2,3]) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 11:13:38 2016
@author: adityanagarajan
"""
import pandas as pd
import os
import numpy as np
import time
import multiprocessing
def unwrap_self_f(arg, **kwarg):
"""Taken from
http://www.rueckstiess.net/research/snippets/show/ca1d7d90
"""
return pars... | pd.concat(frames, ignore_index=True) | pandas.concat |
"""
Created on Thursday Mar 26 2020
<NAME>
based on
https://www.kaggle.com/bardor/covid-19-growing-rate
https://github.com/CSSEGISandData/COVID-19
https://github.com/imdevskp
https://www.kaggle.com/yamqwe/covid-19-status-israel
https://www.kaggle.com/vanshjatana/machine-learning-on-coronavirus
https://www.lewuathe.com/... | pd.to_datetime(data['Date']) | pandas.to_datetime |
import cv2
import face_recognition
import json
import numpy as np
import pandas as pd
def myChangeFace(BASE_DIR,timestamp):
'''
Face-swapping module
@refer: https://blog.csdn.net/qq_41562735/article/details/104978448?spm=1001.2014.3001.5501
@param:
BASE_DIR: global path where the server stores files
timestamp: timestamp used to name the generated image
@ret... | pd.DataFrame(face_feature) | pandas.DataFrame |
import logging
import os
import re
import shutil
from datetime import datetime
from itertools import combinations
from random import randint
import numpy as np
import pandas as pd
import psutil
import pytest
from dask import dataframe as dd
from distributed.utils_test import cluster
from tqdm import tqdm
import featu... | pd.to_datetime("2014-01-01 03:00:00") | pandas.to_datetime |
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import overhang.tree as tree
import overhang.reaction_node as node
import logging
from overhang.dnastorage_utils.system.dnafile import *
import os
import sys
import shutil
import math
import numpy as np
import overhang.plot_utils.plot_utils as plt_... | pd.read_csv(root_prefix+'1_bit_ideal_to_no_opt_'+category+'.csv',index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 17:16:12 2019
@author: Meagatron
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import defaultdict
import math
import itertools
from dtw import dtw
import timeit
from helper_functions import normalize,alphabetize_ts,hammin... | pd.DataFrame() | pandas.DataFrame |
from numpy.core.fromnumeric import shape
import pytest
import pandas as pd
import datetime
from fast_trade.build_data_frame import (
build_data_frame,
detect_time_unit,
load_basic_df_from_csv,
apply_transformers_to_dataframe,
apply_charting_to_df,
prepare_df,
process_res_df,
)
def test_de... | pd.read_csv("./test/ohlcv_data.csv.txt", parse_dates=True) | pandas.read_csv |
import os
import pickle
import sys
from pathlib import Path
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from thoipapy.utils import convert_truelike_to_bool, convert_falselike_to_bool
import thoipapy
def fig_plot_BOcurve_mult_train_datasets(s):
"""Plot the BO-cu... | pd.read_csv(Train02_Test02_BoCurve_file, index_col=0) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from constants import *
import numpy as np
import pandas as pd
import utils
import time
from collections import deque, defaultdict
from scipy.spatial.distance import cosine
from scipy import stats
import math
seed = SEED
cur_stage = CUR_STAGE
mode = cur_mode... | pd.merge( feat,data1, how='left',on=['stage','user'] ) | pandas.merge |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 30 20:25:08 2019
@author: alexandradarmon
"""
### RUN TIME SERIES
import pandas as pd
from punctuation.recognition.training_testing_split import (
get_nn_indexes
)
from punctuation.feature_operations.distances import d_KL
from punctuation... | pd.read_csv('data/Alex_Shakespeare.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 20 14:54:41 2020
@author: aschauer
"""
import socket
import pandas as pd
from pathlib import Path
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Float
# store locally if on my machine (dat... | pd.read_pickle(file) | pandas.read_pickle |
"""
October 2020
Updated: August 2021
Software version: Python 3.7
This code retrieves the calculation of building material demand and embodied greenhouse gas emissions in 26 global regions between 2020-2060. For the original code & latest updates, see: https://github.com/oucxiaoyang/GloBUME
The building mater... | pd.DataFrame(rurpop_tail.values*pop_tail.values, columns = pop_tail.columns, index = pop_tail.index) | pandas.DataFrame |
"""
Generate figures for the DeepCytometer paper for v8 of the pipeline.
Environment: cytometer_tensorflow_v2.
We repeat the phenotyping from klf14_b6ntac_exp_0110_paper_figures_v8.py, but change the stratification of the data so
that we have Control (PATs + WT MATs) vs. Het MATs.
The comparisons we do are:
* Cont... | pd.read_pickle(dataframe_areas_filename) | pandas.read_pickle |
#%%
import numpy as np
import pandas as pd
import altair as alt
import anthro.io
# Generate a plot for fuel economy of all US light-duty vehicles
data = | pd.read_csv('../processed/tidy_automotive_trends.csv') | pandas.read_csv |
from __future__ import division
import json
import numpy as np
import pandas as pd
from scipy import stats
from visigoth.stimuli import Point, Points, PointCue, Pattern
from visigoth import (AcquireFixation, AcquireTarget,
flexible_values, limited_repeat_sequence)
def define_cmdline_params(sel... | pd.DataFrame([t for t, _ in self.trial_data]) | pandas.DataFrame |
import pandas as pd
import src.variables as var
pd.set_option('display.max_rows', 500)
| pd.set_option('display.max_columns', 500) | pandas.set_option |
from __future__ import print_function
from random_agent import random_agent
from policy_agent import policy_agent
import numpy as np
import pandas as pd
class Board(object):
def __init__(self):
self.tic = -1
self.tac = 1
self.board = np.zeros([3, 3])
def print_board(self):
pr... | pd.DataFrame(self.board) | pandas.DataFrame |
"""Tests for piece.py"""
from fractions import Fraction
import pandas as pd
import numpy as np
from harmonic_inference.data.data_types import KeyMode, PitchType
from harmonic_inference.data.piece import Note, Key, Chord, ScorePiece, get_reduction_mask
import harmonic_inference.utils.harmonic_constants as hc
import ha... | pd.Series(note_dict) | pandas.Series |
## Copyright 2015-2021 PyPSA Developers
## You can find the list of PyPSA Developers at
## https://pypsa.readthedocs.io/en/latest/developers.html
## PyPSA is released under the open source MIT License, see
## https://github.com/PyPSA/PyPSA/blob/master/LICENSE.txt
"""
Build optimisation problems from PyPSA networks ... | pd.Series('', rhs.index) | pandas.Series |
import os
import pandas as pd
DATA_CUISINE_PATH = "data/cuisine_data/"
DATA_RECIPES_PATH = "data/recipes_data/"
def import_data():
train = pd.read_json(os.path.join(DATA_CUISINE_PATH, 'train.json'))
test = pd.read_json(os.path.join(DATA_CUISINE_PATH, 'test.json'))
return pd.concat([train,test],axis=0)
de... | pd.read_json(data_path_ar, orient='index') | pandas.read_json |
import argparse
import glob
import math
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from numba import jit, prange
from sklearn import metrics
from utils import *
@jit(nopython=True, nogil=True, cache=True, parallel=True, fastmath=True)
def compute_tp_tn_fp_fn(y_true,... | pd.DataFrame([values], columns=model_params) | pandas.DataFrame |
#%%
import numpy as np
import pandas as pd
import anthro.io
import altair as alt
# Load thea data
data = | pd.read_csv('../processed/FAOSTAT_crop_primary_yields.csv') | pandas.read_csv |
"""
Code borrowed/reproduced from kjchalup's 'A fast conditional independence test'
Reference: <NAME> and <NAME>, 2017.
@author: roshanprakash
"""
import pandas as pd
from joblib import Parallel, delayed
import numpy as np
import time
from scipy.stats import ttest_1samp
from sklearn.preprocessing import StandardScale... | pd.DataFrame(data) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import re
import ipaddress
import codecs
import time
import pandas as pd
import urllib3
from urllib3 import util
from classifier4gyoithon.GyoiClassifier import DeepClassifier
from classifier4gyoithon.GyoiExploit import Metasploit
from classifier4gyoitho... | pd.Series([ip_addr]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Sat May 22 01:11:59 2021
@author: <NAME>, Department of Planning, DCEA, Aalborg University
<EMAIL>
"""
'''
Demonstrates the behavior of the module estimating the solar power received by a given PBR geometry.
Execute the block on the influence of azimuth to reproduce the figure... | pd.DataFrame(columns=['Upper','Lower','Average']) | pandas.DataFrame |
from sklearn.metrics import f1_score,recall_score,precision_score,confusion_matrix,accuracy_score
from pylab import *
import torch
import torch.nn as nn
import copy
import random
import pandas as pd
import numpy as np
from tqdm import trange
import pickle
import json
import sys
import time
import shap
from sklearn.mod... | pd.read_csv(PATH_COUNTS) | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import joblib
from utils import string2json
# from config import TIMESTEP
import argparse
import sys
plt.interactive(True)
pd.options.display.max_columns = 15
pic_prefix = 'pic/'
data_dict_resampled = joblib.load('data/data_dict_resample... | pd.to_datetime(gamedata_dict4player['time_game_start'], unit='s') | pandas.to_datetime |
import pandas as pd
import numpy as np
from dplypy.dplyframe import DplyFrame
from dplypy.pipeline import row_name_subset, slice_row, slice_column
def test_row_name_subset():
pandas_df = pd.DataFrame(
[[1, 2], [3, 4], [5, 6]], index=["idx1", 7, "idx3"], columns=["col1", "col2"]
)
df = DplyFrame(p... | pd.Index(["idx3", 7], name="indices") | pandas.Index |
"""
Script goal, to produce trends in netcdf files
This script can also be used in P03 if required
"""
#==============================================================================
__title__ = "Global Vegetation Trends"
__author__ = "<NAME>"
__version__ = "v1.0(28.03.2019)"
__email__ = "<EMAIL>"
#=====... | pd.DataFrame(obsMA) | pandas.DataFrame |
import logging, os, sys, yaml
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import pandas as pd
import numpy as np
from tqdm import tqdm
from Models import *
from Datasets import STD_Dataset
# Function to load YAML config file into a Python dict
def load_parameters(yaml_path):
with ... | pd.DataFrame(columns=csv_cols) | pandas.DataFrame |
from tqdm import tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import nltk
from nltk.tokenize import RegexpTokenizer
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk.ste... | pd.to_datetime(df['publish_date']) | pandas.to_datetime |
import pandas as pd
class CFBDataframe:
def __init__(self):
# list of dfs values[0] and empty init df values[1]
self.data_map = {"drives": [[], pd.DataFrame()], "games": [[], pd.DataFrame()], "lines": [[], pd.DataFrame()],
"player_game_stats": [[], pd.DataFrame()], "player... | pd.read_csv(file, encoding='ISO-8859-1') | pandas.read_csv |
# -*- coding: utf-8 -*-
import datetime
import logging
import os
from ast import literal_eval
import numpy as np
import pandas as pd
from fooltrader.consts import CHINA_STOCK_INDEX, USA_STOCK_INDEX
from fooltrader.contract import data_contract
from fooltrader.contract import files_contract
from fooltrader.contract.f... | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import os, sys, pickle, bcolz
from miki.data import dataGlovar
from miki.data.dataFunction import DataFunction
from miki.data.dataBcolz import DataBcolz
class Query(object):
def __init__(self):
self.__time1 = pd.to_datetime... | pd.DataFrame(columns=field_list) | pandas.DataFrame |
import urllib
from io import StringIO
from io import BytesIO
import csv
import numpy as np
from datetime import datetime
import matplotlib.pylab as plt
import pandas as pd
import scipy.signal as signal
datos=pd.read_csv('https://raw.githubusercontent.com/ComputoCienciasUniandes/FISI2029-201910/master/Seccion_1/Fouri... | pd.to_datetime(datos[0], format='%d/%m/%Y/ %H:%M:%S') | pandas.to_datetime |
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: data_explore.py
@time: 2019-05-06 17:22
"""
import pandas as pd
import math
import featuretools as ft
from feature_selector import FeatureSelector
from mayiutils.datasets.data_preprocessing import DataExplore as de
if __name__ == '__main__':
mode = 2
... | pd.read_csv('zy_all_featured_event.csv', parse_dates=['就诊结帐费用发生日期', '入院时间', '出院时间'], encoding='gbk') | pandas.read_csv |
"""Genera los reportes de los modulos."""
# Utilidades
import collections
import functools
import ssl
import sys
# matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
# Pandas
import pandas as pd
# Django
from django.http.response import Http404
from django.template.exceptions impor... | pd.DataFrame(user, columns=['date_joined']) | pandas.DataFrame |
"""
Utils for time series generation
--------------------------------
"""
import math
from typing import Union, Optional, Sequence
import numpy as np
import pandas as pd
import holidays
from darts import TimeSeries
from darts.logging import raise_if_not, get_logger, raise_log, raise_if
logger = get_logger(__name__... | pd.Index([column_name]) | pandas.Index |
"""
A collection of classes extending the functionality of Python's builtins.
email <EMAIL>
"""
import re
import typing
import string
import enum
import os
import sys
from glob import glob
from pathlib import Path
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %% ===============... | pd.DataFrame(self.__dict__, *args, **kwargs) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Calculation of inhomogeneity factor for a population of stacking sequence
@author: <NAME>
"""
import sys
sys.path.append(r'C:\LAYLA')
import numpy as np
import pandas as pd
from src.CLA.lampam_functions import calc_lampam
# Creation of a table of stacking sequences
ss = np.array([0, 45, ... | pd.ExcelWriter('Inhomogeneity factors.xlsx') | pandas.ExcelWriter |
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
import numpy as np
import os
from py2neo import Graph, Node, Relationship, NodeMatcher, RelationshipMatcher
# from neo4j import GraphDatabase
# import neo4j
import networkx as nx
import json
import datetime
import matplotlib.pyplot as plt
#... | pd.read_csv(file_path) | pandas.read_csv |
import numpy as np
import pandas as pd
from estimagic.parameters.block_trees import block_tree_to_matrix
from estimagic.parameters.block_trees import matrix_to_block_tree
from numpy.testing import assert_array_equal
from pybaum import tree_equal
def test_matrix_to_block_tree_array_and_scalar():
t = {"a": 1.0, "b"... | pd.DataFrame([[0, 1], [5, 6]], columns=["a", "b"], index=["a", "b"]) | pandas.DataFrame |
import pandas as pd
from scipy import sparse
from itertools import repeat
import pytest
import anndata as ad
from anndata.utils import import_function, make_index_unique
from anndata.tests.helpers import gen_typed_df
def test_make_index_unique():
index = pd.Index(["val", "val", "val-1", "val-1"])
with pytest... | pd.Index(["val", "val-2", "val-1", "val-1-1"]) | pandas.Index |
#%%
import os
import glob
import itertools
import re
import regex
import numpy as np
import pandas as pd
import skbio
import collections
import git
#%%
# Import this project's library
import rnaseq_barcode as rnaseq
# Find home directory for repo
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.w... | pd.DataFrame.from_records(seq_list, columns=names) | pandas.DataFrame.from_records |
# encoding=utf-8
'''
lb 0.2190 2 folds
'''
from nltk.corpus import stopwords
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.cross_valid... | pd.read_csv("../input/test.csv", parse_dates=["activation_date"]) | pandas.read_csv |
from typing import Any, Callable, Iterable, List
import toolz as fp
from toolz import curry
import pandas as pd
import numpy as np
from pandas.util import hash_pandas_object
from sklearn.metrics import roc_auc_score, r2_score, mean_squared_error, log_loss, precision_score, recall_score, \
fbeta_score, brier_score_... | pd.cut(test_data[prediction_column], bins=n_bins) | pandas.cut |
import pytest
from cellrank.tl._colors import _map_names_and_colors, _create_categorical_colors
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype
from matplotlib.colors import is_color_like
class TestColors:
def test_create_categorical_colors_too_many_colors(self):
... | pd.Series(["foo", "bar", "baz"], dtype="category") | pandas.Series |
import os
import pandas as pd
from autumn.projects.covid_19.mixing_optimisation.constants import OPTI_REGIONS, PHASE_2_START_TIME
from autumn.projects.covid_19.mixing_optimisation.mixing_opti import DURATIONS, MODES
from autumn.projects.covid_19.mixing_optimisation.utils import (
get_country_population_size,
... | pd.DataFrame(columns=column_names) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 9 20:13:44 2020
@author: Adam
"""
#%% Heatmap generator "Barcode"
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
imp... | pd.Series(newseq) | pandas.Series |
from cafcoding.tools import etl
from cafcoding.tools import meteo
from cafcoding.tools import log
from cafcoding import constants
from pandarallel import pandarallel
import pandas as pd
import srtm
import numpy as np
import logging
logger = logging.getLogger(constants.LOGGER_ID)
pandarallel.initialize()
ETL_VERSI... | pd.to_datetime(df.ts_date,format="%Y/%m/%d %H:%M:%S.%f") | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""Supports OMNI Combined, Definitive, IMF and Plasma Data, and Energetic
Proton Fluxes, Time-Shifted to the Nose of the Earth's Bow Shock, plus Solar
and Magnetic Indices. Downloads data from the NASA Coordinated Data Analysis
Web (CDAWeb). Supports both 5 and 1 minute files.
Properties
------... | pds.DateOffset(months=1) | pandas.DateOffset |
import pandas as pd
import requests as req
from io import StringIO
######################################### Items DF ########################################
def get_items():
'''
This function obtains the items data from the base url,
loops through items pages,
makes items df,
and writes the df... | pd.DataFrame(page_list) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
This module is EXPERIMENTAL, that means that tests are missing.
The reason is that the coastdat2 dataset is deprecated and will be replaced by
the OpenFred dataset from Helmholtz-Zentrum Geesthacht. It should work though.
This module is designed for the use with the coastdat2 weather data ... | pd.DataFrame(wind_types) | pandas.DataFrame |
from strava_segment_rank.util.strava_api.strava_api_helpers import compute_athlete_segment_frequency
from strava_segment_rank.util.strava_selenium.strava_selenium_helpers import strava_scrape_segment_leaderboard
from strava_segment_rank.util.strava_selenium.strava_selenium_helpers import strava_login
from strava_segmen... | pandas.DataFrame(segment_leadboard_datas) | pandas.DataFrame |
import os
import argparse
import tables as h5
import pandas as pd
import numpy as np
import gvar as gv
import matplotlib as mpl
import matplotlib.pyplot as plt
# now module for Madras-Sokal autocorr time
from emcee import autocorr
# Figure formatting for paper
fig_width = 6.75 # in inches, 2x as wide as APS column
gr ... | pd.DataFrame(data=dataset_trj,index=index_trj) | pandas.DataFrame |
import jax.numpy as np
import qtensornetwork.components
import qtensornetwork.circuit
import qtensornetwork.ansatz
import qtensornetwork.util
import qtensornetwork.optimizer
from qtensornetwork.gate import *
from jax.config import config
config.update("jax_enable_x64", True)
import tensorflow as tf
from tensorflow im... | pd.DataFrame(columns=["label"]) | pandas.DataFrame |
from flask import Flask, render_template, request, redirect, url_for, session
import pandas as pd
import pymysql
import os
import io
#from werkzeug.utils import secure_filename
from pulp import *
import numpy as np
import pymysql
import pymysql.cursors
from pandas.io import sql
#from sqlalchemy import create... | pd.DataFrame(reshapedf) | pandas.DataFrame |
# We use word2vec instead of glove embedding in this file
# This word2vec is a self-trained one
import argparse
import json
import os
import pickle
from itertools import chain
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas
import seaborn as sns
from gensim.models... | pandas.DataFrame(columns=['id', 'labels']) | pandas.DataFrame |
###
# This code provides a way to approximate the probability of
# finding two features together using von Neumann Diffusion Kernel.
# Also plots a cluster heatmap of the normalized von Neumann diffusion
#
# by: <NAME>, 09/15/2016
#
# required modules:
# scipy, numpy, matplotlib, pandas, seaborn
#
#
# How it works:
# m... | pd.read_csv(args[0]) | pandas.read_csv |
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class Str... | pandas.Series([True, True, True, True, True]) | pandas.Series |
"""Filter copy number segments."""
import functools
import logging
import numpy as np
import pandas as pd
import hashlib
from .descriptives import weighted_median
def require_column(*colnames):
"""Wrapper to coordinate the segment-filtering functions.
Verify that the given columns are in the CopyNumArray t... | pd.Series(levels) | pandas.Series |
from manifesto_data import get_manifesto_texts
import warnings,json,gzip,re
import os, glob
from scipy.sparse import hstack, vstack
import scipy as sp
import pandas as pd
import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.feature_extraction.text import HashingVectorizer, CountVectorizer
from... | pd.DataFrame(learning_curves) | pandas.DataFrame |
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.tests.extension.base.base import BaseExtensionTests
class BaseGroupbyTests(BaseExtensionTests):
"""Groupby-specific tests."""
def test_grouping_grouper(self, data_for_grouping):
df = pd.DataFrame(
{"A"... | pd.Index(index, name="B") | pandas.Index |
#! /usr/bin/env python3
'''
HERO - Highways Enumerated by Recombination Observations
Author - <NAME>
'''
from argparse import ArgumentParser
from Bio.SeqIO import parse as BioParse
from itertools import product
import math
import multiprocessing
import os
import pandas as pd
from plotnine import *
from random import... | pd.read_csv(file_loc, header=0) | pandas.read_csv |
#Version 2.0
#Version 1.1.3
#--Updated from development version: 6/24/21
#Description:
#Module toolkit used for the gridded temperature map production and post-processing
#Development notes:
#2021-06-24
#--Updated version to 1.1
#--Deprecated 1.0 versions of removeOutlier, get_predictors, and makeModel
#--Added new fu... | pd.concat([train_only_df[STN_IDX_NAME],train_meta,train_only_df[train_only_df.columns[1:]]],axis=1) | pandas.concat |
# Training code for D4D Boston Crash Model project
# Developed by: bpben
import numpy as np
import pandas as pd
import scipy.stats as ss
from sklearn.metrics import roc_auc_score
import os
import json
import argparse
import yaml
from .model_utils import format_crash_data
from .model_classes import Indata, Tuner, Teste... | pd.get_dummies(data_segs[f]) | pandas.get_dummies |
import os
from random import uniform
import matplotlib
import pandas as pd
from geopy import Point
import uuid as IdGenerator
from geopy import distance
import multiprocessing as mp
from math import sin, cos, atan2, floor, sqrt, radians
def histogram(path, layers, show=True, max_x=None, save_log=True, **kwargs):
i... | pd.read_csv(filename, header=0) | pandas.read_csv |
import numpy as np
from tspdb.src.database_module.sql_imp import SqlImplementation
from tspdb.src.pindex.predict import get_prediction_range, get_prediction
from tspdb.src.pindex.pindex_managment import TSPI, load_pindex
from tspdb.src.pindex.pindex_utils import index_ts_mapper
import time
interface = SqlImple... | pd.read_csv('tspdb/tests/testdata/tables/%s.csv'%table) | pandas.read_csv |
import logging
import random
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
import torch.utils.data as data
from torch.autograd import Variable
import torchvision.transforms.functional as FT
log = logging.getLogger(__name__)
INPUT_DIM = 224
MAX_PIXEL_VAL = 255
MEAN = 58.09
STDD... | pd.notnull(df) | pandas.notnull |
# -*- coding: utf-8 -*-
"""
Autor: <NAME>
Email: <EMAIL>
Functions that implement the ensemble of models
"""
import sys
sys.path.insert(0,'../') # including the path to deep-tasks folder
sys.path.insert(0,'./utils') # including the path to deep-tasks folder
from constants import TOPSIS_PATH
sys.path.insert(0,TOPSIS_... | pd.DataFrame(df_values, columns=df_cols) | pandas.DataFrame |
# %% [markdown]
# # FOI-based hospital/ICU beds data analysis
import pandas
import altair
altair.data_transformers.disable_max_rows()
# %% [markdown]
# ## BHSCT FOI data
#
# * weekly totals, beds data is summed (i.e. bed days)
bhsct_beds = pandas.read_excel('../data/BHSCT/10-11330 Available_Occupied Beds & ED Atts 20... | pandas.read_excel('../data/NHSCT/20210208_PB080121_Response_Attachment_IJ.xlsx', engine='openpyxl', header=6, sheet_name='Non ICU Wards') | pandas.read_excel |
from typing import Tuple
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from etna.datasets import generate_ar_df
from etna.datasets.tsdataset import TSDataset
from etna.transforms import DateFlagsTransform
from etna.transforms import TimeSeriesImputerTransform
@py... | pd.DataFrame({"timestamp": timestamp, "target": 2, "segment": "segment_2"}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
import string
from collections import OrderedDict
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.uuid... | pd.Series([1, 2], dtype=np.int64) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
import nose
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Index
from pandas.lib import Timestamp
from pandas.compat import StringIO
class UsecolsTests(obj... | tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 12 17:13:29 2018
@author: pamelaanderson
"""
from difflib import SequenceMatcher
import json
import numpy as np
import os
import operator
import pandas as pd
def load_adverse_events(path, year, q):
""" Loading adverse drug events while perfor... | pd.concat([df_adverse_ev, df_adverse_ev_json]) | pandas.concat |
#Soccer Dataset Analysis_______________________________________________________
#Import libraries
import sqlite3
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
| pd.set_option('display.precision', 3) | pandas.set_option |
"""
# Extracting twitter data
Uses package tweepy (v4.5.0).
Note that Twitter API was recently updated, and articles like
[this one](https://realpython.com/twitter-bot-python-tweepy/)
are now probably out of date?
References:
- https://dev.to/twitterdev/a-comprehensive-guide-for-using-the-twitter-api-v2-using-tw... | pd.DataFrame({"tweet_text": tweets.data}) | pandas.DataFrame |
# split into words
import os
import sys
import pandas as pd
import numpy as np
from ast import literal_eval
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import string
from collections import Counter
from keras.preprocessing.text import Tokenizer
... | pd.read_csv(load_from, sep=';', header=0) | pandas.read_csv |
# Created by <NAME>
# email : <EMAIL>
import json
import os
import time
from concurrent import futures
from copy import deepcopy
from pathlib import Path
from typing import IO, Union, List
from collections import defaultdict
import re
from itertools import tee
import logging
# Non standard libraries
import pandas as p... | pd.DataFrame() | pandas.DataFrame |
import os
import pandas as pd
import networkx as nx
import numpy as np
from sklearn.preprocessing import PowerTransformer
from src.utils.utils_s3 import read_s3_graphml, write_s3_graphml
class PanelDataETL:
def __init__(self,input_filepath, output_filepath):
self.input_filepath = input_filep... | pd.concat(all_years) | pandas.concat |
# being a bit too dynamic
# pylint: disable=E1101
import datetime
import warnings
import re
from math import ceil
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
import numpy as np
from pandas.util.decorators import cache_readonly, deprecate_kwarg
im... | AbstractMethodError(self) | pandas.core.common.AbstractMethodError |
"""Tests for the cost bounds."""
import pytest
import uclasm
from uclasm import Graph, MatchingProblem
from uclasm.matching import *
from uclasm.matching import *
import numpy as np
from scipy.sparse import csr_matrix
import pandas as pd
@pytest.fixture
def smp():
"""Create a subgraph matching problem."""
adj... | pd.DataFrame(['a', 'b', 'c'], columns=[Graph.node_col]) | pandas.DataFrame |
import pandas as pd
from pandas._testing import assert_frame_equal
import pytest
import numpy as np
from scripts.normalize_data import (
remove_whitespace_from_column_names,
normalize_expedition_section_cols,
remove_bracket_text,
remove_whitespace,
ddm2dec,
remove_empty_unnamed_columns,
nor... | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
import cv2
import sys
import os
from keras.models import Sequential
from keras.callbacks import Callback, ModelCheckpoint
from keras.layers import (Flatten, Dense, Convolution2D, MaxPool2D,
BatchNormalization, Dropout, Activation, Cropping2D, Lambda)
from keras.optimizers import... | pd.concat([df_with_zero, df_without_zero]) | pandas.concat |
from __future__ import division
import copy
import bt
from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
def te... | pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100) | pandas.DataFrame |
from pso.APSO_01 import APSO
import numpy as np
import time
import pandas as pd
np.random.seed(42)
def Sphere(x):
if x.ndim == 1:
x = x.reshape(1, -1)
return np.sum(x ** 2, axis=1)
def Schwefel_P222(x):
if x.ndim == 1:
x = x.reshape(1, -1)
return np.sum(np.abs(x), axis=1) + np.prod(... | pd.DataFrame(table) | pandas.DataFrame |
# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... | pd.read_csv(f, error_bad_lines=False, index_col=False) | pandas.read_csv |
import os
import numpy as np
import holoviews as hv
hv.extension('bokeh')
from collections import defaultdict
from fcsy.fcs import write_fcs
from sklearn.preprocessing import MinMaxScaler
from sklearn import cluster
from sklearn import mixture
from scipy.stats import gaussian_kde
from ssc.cluster import selfrepresentat... | pd.concat(list_df) | pandas.concat |
import pandas as pd
snhp = pd.read_csv("raw.csv")
ref = pd.read_csv("./persistent_data/snhp2014.csv")
lookup = | pd.read_csv("./persistent_data/sc/COUNCIL AREA 2011 LOOKUP.csv") | pandas.read_csv |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.u... | assert_series_equal(result, s1) | pandas.util.testing.assert_series_equal |
# -*- coding: utf-8 -*-
import os
import re
import sys
from datetime import datetime
from random import randint
from time import sleep
import numpy as np
import pandas.util.testing as tm
import pytest
import pytz
from pandas import DataFrame, NaT, compat
from pandas.compat import range, u
from pandas.compat.numpy imp... | tm.assert_frame_equal(df, expected) | pandas.util.testing.assert_frame_equal |
import pytest
import numpy as np
import pandas as pd
EXP_IDX = pd.MultiIndex(levels=[['model_a'], ['scen_a', 'scen_b']],
codes=[[0, 0], [0, 1]], names=['model', 'scenario'])
def test_set_meta_no_name(test_df):
idx = pd.MultiIndex(levels=[['a_scenario'], ['a_model'], ['some_region']],
... | pd.Series(data=[0.3, np.nan], index=EXP_IDX, name='meta_values') | pandas.Series |
import unittest
import pandas as pd
from enda.backtesting import BackTesting
class TestBackTesting(unittest.TestCase):
def test_yield_train_test_1(self):
df = pd.date_range(
start=pd.to_datetime('2015-01-01 00:00:00+01:00').tz_convert('Europe/Paris'),
end=pd.to_datetime('2021-01-... | pd.to_datetime('2019-01-01 00:00:00+01:00') | pandas.to_datetime |