Schema (three string columns):

    column      type    min length  max length
    prompt      string  19          1.03M
    completion  string  4           2.12k
    api         string  8           90

Each record below spans three lines: a truncated code prompt, the completion (the pandas call that continues it), and the fully qualified API name of that call.
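The columns read like a next-call prediction corpus: `prompt` holds a truncated code context, `completion` the pandas expression that continues it, and `api` the fully qualified name of the call. A minimal sketch of walking such records, assuming they are stored as JSON Lines with those three keys (the file name and storage format are assumptions, not stated by this dump):

```python
import json

# Hypothetical file name and layout: one JSON object per line with
# the keys "prompt", "completion", and "api" from the schema above.
with open("pandas_api_completions.jsonl", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        # Mirrors the three-line records below: code context,
        # next pandas call, fully qualified API name.
        print(record["api"], "->", record["completion"][:60])
```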
import pytest import pandas as pd import pandas._testing as tm @pytest.mark.parametrize( "values, dtype", [ ([1, 2, 3], "int64"), ([1.0, 2.0, 3.0], "float64"), (["a", "b", "c"], "object"), (["a", "b", "c"], "string"), ([1, 2, 3], "datetime64[ns]"), ([1, 2, 3], ...
pd.Series(mask, index=ser.index)
pandas.Series
import numpy as np import pandas as pd; pd.options.mode.chained_assignment = None import matplotlib.pyplot as plt from tqdm import tqdm from scipy.stats import pearsonr from scipy.stats import spearmanr from scipy.optimize import minimize from scipy.optimize import least_squares import os def is_const(x): ...
pd.read_csv(csv2)
pandas.read_csv
# Copyright (c) 2019, MD2K Center of Excellence # - <NAME> <<EMAIL>>, <NAME> <<EMAIL>> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above co...
pd.DataFrame(all_dict, columns=[centroid_id_name, 'centroid_latitude', 'centroid_longitude', 'centroid_area'])
pandas.DataFrame
# -*- coding: utf-8 -*- """ Functions for cleaning mdredze Sandy Twitter dataset. """ import matplotlib.pyplot as plt import numpy as np import pandas as pd from statsmodels.graphics.tsaplots import plot_acf from twitterinfrastructure.tools import cross_corr, output, query def create_timeseries_diff(df, col1, col2...
pd.to_timedelta(s_y1.index.values, unit='h')
pandas.to_timedelta
import shutil import random import tempfile import pandas as pd from catalyst.exchange.exchange_bundle import ExchangeBundle from catalyst.exchange.exchange_bcolz import BcolzExchangeBarWriter, \ BcolzExchangeBarReader from catalyst.exchange.bundle_utils import get_df_from_arrays from nose.tools import assert_eq...
pd.to_datetime('2015-04-01 00:00')
pandas.to_datetime
import pandas from dmscripts.models.writecsv import csv_path from dmscripts.models.modeltrawler import ModelTrawler def base_model(base_model, keys, get_data_kwargs, client, logger=None, limit=None): """Fetch all the data for a given Digital Marketplace model from the api. :param base_model: A Digital Marke...
pandas.DataFrame(columns=keys)
pandas.DataFrame
import fcntl import sys import time import numpy as np import pandas as pd from keras.layers import Dense, BatchNormalization, Dropout from keras.models import Sequential from keras.optimizers import SGD from keras.wrappers.scikit_learn import KerasClassifier from sklearn.model_selection import GridSearchCV from sklea...
pd.DataFrame(data={'probability': results})
pandas.DataFrame
import bedrock.viz import bedrock.common from sklearn.ensemble import ExtraTreesClassifier from sklearn.feature_selection import RFE from sklearn.naive_bayes import BernoulliNB, MultinomialNB from tpot import TPOTClassifier from sklearn.svm import LinearSVC from sklearn.feature_selection import SelectFwe, f_classif fro...
pd.DataFrame({'y': y_df, 'y_hat': y_hat, 'group': groups_df[y_df.index]})
pandas.DataFrame
import pandas as pd import numpy as np import datetime def min2day_v2(df,lag_ps): intraday = df; #preparation intraday['range1']=intraday['high'].rolling(lag_ps).max()-intraday['close'].rolling(lag_ps).min() intraday['range2']=intraday['close'].rolling(lag_ps).max()-intraday['low'].rolling(lag_ps).mi...
pd.Series(signals['signals'])
pandas.Series
# coding=utf-8 # pylint: disable-msg=E1101,W0612 from datetime import datetime, timedelta from numpy import nan import numpy as np import pandas as pd from pandas.types.common import is_integer, is_scalar from pandas import Index, Series, DataFrame, isnull, date_range from pandas.core.index import MultiIndex from pa...
Series(1, index=['a', 'a', 'b', 'b', 'c'])
pandas.Series
#!/usr/bin/env python # encoding: utf-8 import os import numpy as np import scipy as sp import matplotlib as mpl mpl.use("TkAgg") mpl.rcParams['pdf.fonttype'] = 42 import matplotlib.pylab as plt import seaborn as sns import pandas as pd from IPython import embed as shell from tqdm import tqdm from sim_tools import ge...
pd.DataFrame()
pandas.DataFrame
import glob import os from networkx.readwrite import json_graph import json import networkx as nx import pandas as pd from subs2network.utils import add_prefix_to_dict_keys from subs2network.imdb_dataset import imdb_data from subs2network.consts import MOVIE_YEAR def get_node_features(g): closeness = nx.closenes...
pd.DataFrame(res)
pandas.DataFrame
# pylint: disable=too-many-lines """Field class.""" import os import sys from copy import deepcopy import weakref from functools import partial from string import Template import logging import numpy as np import pandas as pd import h5py import pyvista as pv from anytree import PreOrderIter from deprecated.sphinx impor...
pd.to_datetime(self.meta['START'])
pandas.to_datetime
import pytest from mapping import mappings from pandas.util.testing import assert_frame_equal, assert_series_equal import pandas as pd from pandas import Timestamp as TS import numpy as np from pandas.tseries.offsets import BDay @pytest.fixture def dates(): return pd.Series( [
TS('2016-10-20')
pandas.Timestamp
import pandas as pd import numpy as np import time import sys import json from jsmin import jsmin from collections import Counter import os.path from xlrd.biffh import XLRDError from aenum import IntEnum import time # set up logging (to console) import logging logger = logging.getLogger(__name__) logger.setLevel(loggi...
pd.concat(years_dat, sort=False)
pandas.concat
"""Amazon Neptune Module.""" import logging import re from typing import Any import pandas as pd from gremlin_python.process.graph_traversal import GraphTraversalSource, __ from gremlin_python.process.translator import Translator from gremlin_python.process.traversal import Cardinality, T from gremlin_python.structur...
pd.concat([df, expanded], axis=1)
pandas.concat
import io import os import re import sys import time import pandas import datetime import requests import mplfinance from matplotlib import dates # Basic Data file_name = __file__[:-3] absolute_path = os.path.dirname(os.path.abspath(__file__)) # <editor-fold desc='common'> def load_json_config(): global file_dir...
pandas.concat([stock_open_old, stock_open_new], join='outer')
pandas.concat
#!/usr/bin/env python # Author: <NAME> (jsh) [<EMAIL>] import itertools import joblib import logging import os.path import pathlib import random import shutil import sys import numpy as np import pandas as pd from pandas.api.types import CategoricalDtype from sklearn import preprocessing as skpreproc from keras.layer...
pd.DataFrame(voframe.loc[matchmask])
pandas.DataFrame
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Sep 12 17:13:29 2018 @author: pamelaanderson """ from difflib import SequenceMatcher import json import numpy as np import os import operator import pandas as pd def load_adverse_events(path, year, q): """ Loading adverse drug events while perfor...
pd.DataFrame()
pandas.DataFrame
#!/usr/bin/env python # -*- coding: utf-8 -*- # # QTPyLib: Quantitative Trading Python Library # https://github.com/ranaroussi/qtpylib # # Copyright 2016-2018 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a ...
pd.to_datetime(df['datetime'], utc=True)
pandas.to_datetime
import subprocess, gzip, datetime, pickle, glob, os, openpyxl, shutil, math import pandas as pd from plotly.subplots import make_subplots from pathlib import Path from joblib import Parallel, delayed import plotly.graph_objects as go import plotly.express as px from statistics import mean from statistics import median ...
pd.DataFrame()
pandas.DataFrame
""" test fancy indexing & misc """ from datetime import datetime import re import weakref import numpy as np import pytest import pandas.util._test_decorators as td from pandas.core.dtypes.common import ( is_float_dtype, is_integer_dtype, ) import pandas as pd from pandas import ( DataFrame, Index,...
Index(["b", "a", "a"])
pandas.Index
import pytest import jax.numpy as np import pandas as pd from pzflow import Flow from pzflow.bijectors import Chain, Reverse, Scale from pzflow.distributions import * @pytest.mark.parametrize( "data_columns,bijector,info,file", [ (None, None, None, None), (("x", "y"), None, None, None), ...
pd.DataFrame(xarray, columns=("redshift", "y", "y_err", "redshift_err"))
pandas.DataFrame
import requests import pandas as pd from io import StringIO, BytesIO from lxml import etree as et API_KEY = '<GREATSCHOOLS.ORG API KEY GOES HERE>' def generate_file(name, response): d = {} df =
pd.DataFrame()
pandas.DataFrame
import numpy as np import pandas as pd import random from rpy2.robjects.packages import importr utils = importr('utils') prodlim = importr('prodlim') survival = importr('survival') #KMsurv = importr('KMsurv') #cvAUC = importr('pROC') #utils.install_packages('pseudo') #utils.install_packages('prodlim') #utils...
pd.get_dummies(long_test_clindata, columns=['time_point'])
pandas.get_dummies
#!/usr/bin/python import time import numpy as np import pandas as pd import argparse from math import exp from math import sqrt from datetime import datetime from os import listdir import sys # BOKEH from bokeh import events from bokeh.io import output_file, show from bokeh.models import CustomJS, HoverT...
pd.DataFrame()
pandas.DataFrame
# pylint: disable-msg=E1101,W0612 from datetime import datetime, timedelta import os import operator import unittest import cStringIO as StringIO import nose from numpy import nan import numpy as np import numpy.ma as ma from pandas import Index, Series, TimeSeries, DataFrame, isnull, notnull from pandas.core.index...
assert_series_equal(result, expected)
pandas.util.testing.assert_series_equal
from collections import OrderedDict import numpy as np import pytest from pandas import ( DataFrame, Index, MultiIndex, Series, ) import pandas._testing as tm from pandas.core.construction import create_series_with_explicit_dtype class TestFromDict: # Note: these tests are specif...
DataFrame.from_dict(a, orient="columns")
pandas.DataFrame.from_dict
# -*- coding: utf-8 -*- """project3.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1PW90I5c1X5VipzIvowFpbLOAtjLw7-co """ import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns """1.**transforming data csv ...
pd.read_csv("/content/drive/MyDrive/simplilearn/python with data science /project3/Comcast_telecom_complaints_data.csv")
pandas.read_csv
import pandas as pd from autor import Author from excel import ExcelFile from individuos import Student, Egress from verifica_autores import em_lista_autores, trata_exceçoes from valores import ND, quadrennium from PyscopusModified import ScopusModified from pprint import pprint from excecoes import excecoes_artigos_sc...
pd.read_csv("Qualis/QualisCC_2013_2016.csv", sep=";", encoding='iso-8859-1')
pandas.read_csv
import numpy as np import pandas as pd from numba import njit, typeof from numba.typed import List from datetime import datetime, timedelta import pytest import vectorbt as vbt from vectorbt.portfolio.enums import * from vectorbt.generic.enums import drawdown_dt from vectorbt import settings from vectorbt.utils.random...
pd.Index(['first', 'second'], dtype='object', name='group')
pandas.Index
import pandas as pd import os import warnings import pickle from nltk.corpus import stopwords from nltk.tokenize import RegexpTokenizer from collections import namedtuple Fact = namedtuple("Fact", "uid fact file") answer_key_map = {"A": 0, "B": 1, "C": 2, "D": 3, "E": 4, "F": 5} tables_dir = "annotation/expl-tablesto...
pd.isna(s)
pandas.isna
import streamlit as st import streamlit.components.v1 as stc import time from random import random import numpy as np import pandas as pd import altair as alt from altair import Chart, X, Y, Axis, SortField, OpacityValue # 2020-10-25 edit@ from st.annotated_text import annotated_text from annotated_text import annota...
pd.Series(res)
pandas.Series
import os import pandas as pd BASE_DIR = os.path.abspath(os.path.dirname(__file__)) def open_csv(filepath, header_names=None): """Opens CSV file with option to add header names.""" if header_names and hasattr(header_names, "__iter__"): return
pd.read_csv(filepath, sep=",", header=0, names=header_names)
pandas.read_csv
# -*- coding: utf-8 -*- """ @author: <NAME> - https://www.linkedin.com/in/adamrvfisher/ """ #This is a strategy tester #pandas_datareader is deprecated, use YahooGrabber #Import modules from pandas_datareader import data import pandas as pd import numpy as np #Assign ticker ticker = '^GSPC' #Re...
pd.Series(AroonUp, index=s.index)
pandas.Series
# coding=utf-8 # pylint: disable-msg=E1101,W0612 import numpy as np import pytest from pandas.compat import lrange, range import pandas as pd from pandas import DataFrame, Index, Series import pandas.util.testing as tm from pandas.util.testing import assert_series_equal def test_get(): # GH 6383 s = Series...
tm.assert_frame_equal(result, expected)
pandas.util.testing.assert_frame_equal
import argparse import os import matplotlib.pyplot as plt import pandas as pd import seaborn as sns def parse_args(args): """define arguments""" parser = argparse.ArgumentParser(description="TATA_enrichment_plots") parser.add_argument( "file_names", type=str, help="Name of folder ...
pd.read_table(gat_output2, sep="\t", header=0)
pandas.read_table
""" PFRA Module for working with HEC-RAS model output files """ import gdal from time import time import geopandas as gpd from geopandas.tools import sjoin from shapely.ops import cascaded_union from shapely.geometry import Point, LineString, Polygon import numpy as np import pandas as pd import h5py from matplotlib im...
pd.Index(desired_columns)
pandas.Index
import pandas as pd import json import os import sys import datetime from datetime import time from src.util import logger def loadIntradayData(filepath): data =
pd.read_csv(filepath, parse_dates=[0], names=['datetime', 'value'])
pandas.read_csv
# -*- coding: utf-8 -*- """ @file:base_6900.py @time:2019/7/6 21:49 @author:Tangj @software:Pycharm @Desc """ import pandas as pd import numpy as np from sklearn.model_selection import StratifiedKFold from lightgbm.sklearn import LGBMClassifier from sklearn.metrics import mean_squared_error, mean_absolute_error, log_lo...
pd.read_csv('../data/user_behavior_logs.csv', parse_dates=['behavior_time'])
pandas.read_csv
from ..parsers import get_parsing_function import pandas as pd from tqdm import tqdm from ..DataStructures import reg_fixed_fileds from ...settings import get_regions_batch_size # Loading strategy loading_strategy = "single_core" batch = get_regions_batch_size() def load_regions(collected_result): # get the num...
pd.DataFrame.from_dict(result)
pandas.DataFrame.from_dict
""" Module for static data retrieval. These functions were performed once during the initial project creation. Resulting data is now provided in bulk at the url above. """ import datetime import json from math import sin, cos, sqrt, atan2, radians import re import requests import pandas as pd from riverrunner import s...
pd.unique(group.STATION)
pandas.unique
""" Machine learning examples with SciPy and scikit-learn. """ from pandas import Categorical, DataFrame, Series from scipy.cluster.hierarchy import fcluster, linkage from sklearn import linear_model class Classify: """ Train, use, and re-use an automatic classifier. Input training data, then call with ne...
Series(data, index=clues.index, name="class")
pandas.Series
# Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by ...
melt(data, id_vars=['y'], var_name='x')
pandas.melt
import pandas as pd import matplotlib.pyplot as plt def plot_results_for_probability_changes(): df1 = pd.read_csv("base.csv") df2 = pd.read_csv("base_pc_100_pm_80.csv") df3 = pd.read_csv("base_pc_80_pm_5.csv") df_iterations = pd.DataFrame({ "90%% crossover, 40%% mutação": df1["iterations"], ...
pd.read_csv("pmx_pc_100_pm_80_pop_200.csv")
pandas.read_csv
import pandas as pd from datetime import date from pandas.core.indexes import category import config as config from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler, MaxAbsScaler from main_table import MainInsert class AlgoInsert: def __init__(self): self.category = config.Config.CA...
pd.merge(camping_data, last_df, how="left", left_on = 'place_id', right_on='index')
pandas.merge
import pandas as pd import numpy as np def frequency_encoding(df,feature): map_dict=df[feature].value_counts().to_dict() df[feature]=df[feature].map(map_dict) def target_guided_encoding(df,feature,target): order=df.groupby([feature])[target].mean().sort_values().index map_dic={k:i for i,k in enumerate(order,0)...
pd.DataFrame(order)
pandas.DataFrame
from pathlib import Path import pandas as pd import typer from jinja2 import Environment, FileSystemLoader from reki.data_finder import find_local_file from reki_data_tool.postprocess.grid.gfs.ne.config import OUTPUT_DIRECTORY from reki_data_tool.postprocess.grid.gfs.util import get_random_start_time, get_random_fore...
pd.to_datetime(start_time, format="%Y%m%d%H")
pandas.to_datetime
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, ...
isna(x)
pandas.isna
# Python correlation and regression analysis test import numpy as np import pandas as pd # read the CSV file hdr = ['V1','V2','V3','V4','V5','V6','V7','V8','V9'] df =
pd.read_csv('c:/java/phone-02.csv', header=None,names=hdr)
pandas.read_csv
from caes import ICAES2 import pandas as pd from joblib import Parallel, delayed, parallel_backend import time import os from datetime import datetime # ===================== # function to enable sensitivity analysis # ===================== def sizing_and_sensitivity(wrkdir, xlsx_filename, sheet_name, capacity, durat...
pd.Series()
pandas.Series
from datetime import ( datetime, timedelta, ) from importlib import reload import string import sys import numpy as np import pytest from pandas._libs.tslibs import iNaT import pandas.util._test_decorators as td from pandas import ( NA, Categorical, CategoricalDtype, Index, Interval, ...
Series(["2010-01-04 00:00:00-05:00"])
pandas.Series
import re import time import requests import pandas as pd from bs4 import BeautifulSoup class stackScrape(object): def __init__(self): pass def extractDataFromUrl(self, url): ''' Returns the scraped data from the target URL in raw format (HTML), which can be stackoverflow or stackexchange P...
pd.Series(bagOfWordsViews)
pandas.Series
import csv import json import multiprocessing import os import queue import subprocess import warnings from datetime import datetime, timedelta from glob import glob from time import time import joblib import numpy as np import pandas as pd import psutil # import wfdb from sklearn.model_selection import train_test_sp...
pd.DataFrame({})
pandas.DataFrame
import warnings from copy import deepcopy from typing import Dict from typing import List from typing import Optional from typing import Union import numpy as np import pandas as pd from sklearn.base import TransformerMixin from etna.core import StringEnumWithRepr from etna.transforms.base import Transform from etna....
pd.concat([x[segment] for segment in segments])
pandas.concat
"""Functions for transofrmation of films and books datasets. Functions --------- get_books_ratings - transform books dataset get_films_ratings - transform films dataset generate_datasets - generate films and books datasets """ from typing import Set import pandas as pd from pathlib im...
pd.read_csv(location_1, sep='\t', low_memory=False)
pandas.read_csv
import streamlit as st import pandas as pd import numpy as np import plotly.graph_objects as go from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from MulticoreTSNE import MulticoreTSNE as TSNE import umap from sklearn.cluster import KMeans SEED = 100 # @st.cache is speedup opti...
pd.read_csv(uploaded_file)
pandas.read_csv
import types from functools import wraps import numpy as np import datetime import collections from pandas.compat import( zip, builtins, range, long, lzip, OrderedDict, callable ) from pandas import compat from pandas.core.base import PandasObject from pandas.core.categorical import Categorical from pandas.co...
DataFrame(output, index=obj.index, columns=columns)
pandas.core.frame.DataFrame
#!/usr/bin/env python # coding: utf-8 # In[1]: from pandas import ExcelFile from pandas import ExcelWriter from scipy import ndimage from scipy.stats import randint as sp_randint from sklearn.base import BaseEstimator from sklearn.base import TransformerMixin from sklearn.ensemble import ExtraTreesClassifier from s...
pd.DataFrame(Average_output)
pandas.DataFrame
import pandas as pd import numpy as np def get_rating_summary(df,num_users = None, num_items = None): ''' print summary of user-item matrix args: df: data frame which contains userId & itemId columns ''' if num_users == None: num_users = len(df['userId'].unique()) if num_items ...
pd.DataFrame({'userId':old_userId,'new_userId':new_userId})
pandas.DataFrame
# for each 1-minute window from the training data, apply the ensemble model to get the score. Sort the scores and find out what score s is at a specific percentile p. e.g., if p=10, it means that 10% of scores are <= than s # --- Imports --- from sklearn.preprocessing import MinMaxScaler import scipy.integrate as in...
pd.DataFrame(scores_topk, columns=COL_NAMES_RANKING)
pandas.DataFrame
import pandas as pd import numpy as np from random import gauss, uniform def get_makespan(curr_plan, num_resources, workflow_inaccur, positive=False, dynamic_res=False): ''' Calculate makespan ''' under = False reactive_resource_usage = [0] * num_resources resource_usage = [0] * num_resources ...
pd.read_csv('../Data/heft/DynHeteroResources_StHomoCampaignsHEFT.csv')
pandas.read_csv
from __future__ import absolute_import, division, print_function import os import numpy as np import pandas as pd import shutil import requests import numpy.testing as npt import pytest import skimage.io as skio from .. import argus_shapes as shapes import pulse2percept.implants as p2pi try: FileNotFoundError e...
pd.DataFrame(data)
pandas.DataFrame
import sys import pandas as pd import numpy as np from scipy import stats from itertools import compress import statsmodels.stats.multitest as smt import scikits.bootstrap as bootstrap from sklearn.decomposition import PCA from .scaler import scaler from .imputeData import imputeData class statistics: usage = """G...
pd.DataFrame({'Percent_Total_Missing': totalMissing})
pandas.DataFrame
import codecs import math import os import re import gensim import jieba.posseg as jieba import numpy as np import pandas as pd from sklearn.cluster import KMeans # return the feature word vectors def getWordVecs(wordList, model): name = [] vecs = [] for word in wordList: word = word.replace('\n', '') try: ...
pd.DataFrame(name, columns=['word'])
pandas.DataFrame
import matplotlib.image as mpimg import matplotlib.style as style import matplotlib.pyplot as plt from matplotlib import rcParams from simtk.openmm.app import * from simtk.openmm import * from simtk.unit import * from sys import stdout import seaborn as sns from math import exp import pandas as pd import mdtraj as md i...
pd.DataFrame(data_c1, columns=["bins", "pA_c1"])
pandas.DataFrame
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue Aug 18 17:54:55 2020 @author: RredRrobin """ import os import tkinter as tk import tkinter.filedialog as filedialog import tkinter.ttk as ttk import pandas as pd from datetime import datetime, timedelta import numpy as np class TextScrollCombo(tk.Fra...
pd.to_timedelta(self.df2['time'])
pandas.to_timedelta
"""Tests for the sdv.constraints.tabular module.""" import numpy as np import pandas as pd import pytest from sdv.constraints.errors import MissingConstraintColumnError from sdv.constraints.tabular import ( ColumnFormula, CustomConstraint, GreaterThan, UniqueCombinations) def dummy_transform(): pass def d...
pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
pandas.to_datetime
"""Data abstractions.""" from abc import abstractmethod from collections import defaultdict, namedtuple import copy import os import time import numpy as np import pandas as pd import torch from torch.utils.data import Dataset, IterableDataset TYPE_NORMAL_ATTR = 0 TYPE_INDICATOR = 1 TYPE_FANOUT = 2 def time_this(f...
pd.DataFrame({c.name: c.data for c in self.columns})
pandas.DataFrame
"""Build daily-level feature sets, stitching together weather datasets and defining features. """ import numpy as np import pandas as pd import geopandas as gpd from dask import dataframe as dd from loguru import logger from shapely.ops import nearest_points from src.data.gfs.utils import grb2gdf from src.conf import...
pd.NamedAgg(column="t_mean", aggfunc="mean")
pandas.NamedAgg
import pandas as pd import numpy as np # from.tools import * from Multivariate_Markov_Switching_Model.tools import * from Multivariate_Markov_Switching_Model.core import * from Multivariate_Markov_Switching_Model.tools import _2dim import numpy as np import os # os.chdir("Multivariate_Markov_Switching_Model") """ tes...
pd.DataFrame(data)
pandas.DataFrame
# Classification # SVM # -*- coding: utf-8 -*- ### import the basic libraries import pandas as pd import seaborn as sns ''' [Step 1] Data preparation / basic setup ''' # convert to a DataFrame using the load_dataset function df = sns.load_dataset('titanic') # IPython display setting - raise the limit on the number of columns to print
pd.set_option('display.max_columns', 15)
pandas.set_option
# This script generates the scoring and schema files # Creates the schema, and holds the init and run functions needed to # operationalize the chestXray model import os, sys, pickle, base64 import keras.models import keras.layers import keras_contrib.applications.densenet import pandas as pd import numpy as np impor...
pd.DataFrame(data=[[encoded_image]], columns=[as_string_b64encoded_pickled_data_column_name])
pandas.DataFrame
import json import logging import os import pandas as pd import wandb import yaml from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score from transformers import RobertaConfig, RobertaTokenizerFast, RobertaForSequenceClassification, Trainer, \ TrainingArguments from ClassificationDat...
pd.read_csv("./data/dev/in.tsv", delimiter='\t', header=None, encoding="utf8", quoting=0)
pandas.read_csv
################################################################################ ### Python port of rlassoEffects.R ### https://github.com/cran/hdm/blob/master/R/rlassoEffects.R ################################################################################ ############################################################...
pd.DataFrame(se, index=idx)
pandas.DataFrame
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Module to parse/process/visualize/export GTFS feed. @author: ikespand """ import pandas as pd import zipfile # from keplergl_cli.keplergl_cli import Visualize from rasta.rasta_kepler import RastaKepler from shapely.geometry import Point, LineString import geopandas a...
pd.concat(geo_df)
pandas.concat
import numpy as np import pytest import pandas as pd from pandas import ( DataFrame, DatetimeIndex, Series, date_range, ) import pandas._testing as tm from pandas.core.api import Int64Index class TestDataFrameTruncate: def test_truncate(self, datetime_frame, frame_or_series): ts = datetim...
tm.assert_equal(result, expected)
pandas._testing.assert_equal
import os import pickle import numpy as np import torch import torch.nn.functional as F from collections import Counter from collections import OrderedDict import copy from sys import argv import json import pandas as pd import argparse from tqdm.auto import tqdm from IPython.core.debugger import Pdb split_files = { ...
pd.DataFrame(df)
pandas.DataFrame
import pandas as pd from functools import reduce def load(): print("Cargando datos") datos ={} """ Seguridad y convivencia """ datos['Convivencia'] = data_convivencia = pd.read_excel('./data/datos separados.xlsx', 'Indicadores de convivencia decr') datos['Seguridad'] = data_seguridad = p...
pd.read_excel('./data/datos separados.xlsx', 'Producción de agua')
pandas.read_excel
import numpy as np import pandas as pd from numba import njit from datetime import datetime import pytest from itertools import product from sklearn.model_selection import TimeSeriesSplit import vectorbt as vbt from vectorbt.generic import nb seed = 42 day_dt = np.timedelta64(86400000000000) df = pd.DataFrame({ ...
pd.RangeIndex(start=0, stop=4, step=1)
pandas.RangeIndex
import numpy as np import pandas as pd from datetime import datetime import random as rd from pandas import DataFrame from math import sqrt from scipy.stats import norm from pandas import DataFrame from functools import wraps class create_data(): '''create data e.g. s = pd.to_datetime('01-01-2019') cr...
DataFrame(dates, columns=['Date'])
pandas.DataFrame
# -*- coding: utf-8 -*- """ Main module, contains the base object that host all the different analysis Authors: B.G. 18/11/2018 """ # This module manages raster I/O operations, based on rasterio (which itself depends on GDAL) from lsdtopytools import raster_loader as rl from lsdtopytools import lsdtopytools_utilitie...
pd.DataFrame(Dict_of_ksn)
pandas.DataFrame
# coding=utf-8 # Author: <NAME> # Date: Sept 11, 2019 # # Description: Indexes meta-genes to select core meiotic genes. # Pipeline: Only mammal (HS & MM) conserved genes that Up/Down Regulated. # # import math import numpy as np import pandas as pd
pd.set_option('display.max_rows', 100)
pandas.set_option
import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec import numpy as np import pandas as pd from adjustText import adjust_text from pylab import cm from matplotlib import colors def PCA_var_explained_plots(adata): n_rows = 1 n_cols = 2 fig = plt.figure(figsize=(n_cols*4.5, n...
pd.isnull(s)
pandas.isnull
""" Name : c9_44_equal_weighted_vs_value_weighted.py Book : Python for Finance (2nd ed.) Publisher: Packt Publishing Ltd. Author : <NAME> Date : 6/6/2017 email : <EMAIL> <EMAIL> """ import pandas as pd import scipy as sp x=pd.read_pickle("c:/temp/yanMonthly.pkl") def ret_f(t...
pd.DataFrame(p[1:],index=ddate)
pandas.DataFrame
from __future__ import print_function import collections import os import re import sys import numpy as np import pandas as pd from sklearn.preprocessing import Imputer from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler file_path = os.path.dirname(os.path.realpath(__file__)) lib_path = os....
pd.read_csv(path, engine='c', dtype=np.float32)
pandas.read_csv
# -*- coding: utf-8 -*- import unittest import platform import pandas as pd import numpy as np import pyarrow.parquet as pq import hpat from hpat.tests.test_utils import ( count_array_REPs, count_parfor_REPs, count_array_OneDs, get_start_end) from hpat.tests.gen_test_data import ParquetGenerator from numba import ...
pd.Series(data)
pandas.Series
from matplotlib import cm, rcParams import matplotlib.pyplot as plt import matplotlib.colors as colors import matplotlib as matplotlib import numpy as np import math as math import random as rand import os, sys, csv import pandas as pd #matplotlib.pyplot.xkcd(scale=.5, length=100, randomness=2) c = ['#aa3863', '#d9702...
pd.Series(phis4_uncorr)
pandas.Series
from typing import Iterable, Optional import pandas as pd import numpy as np from scipy.special import expit def get_expanded_df(df, event_type_col='J', duration_col='X', pid_col='pid'): """ This function gets a dataframe describing each sample the time of the observed events, and returns an expanded dat...
pd.concat(temp_series, axis=1)
pandas.concat
# coding: utf-8 # In[69]: import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import random get_ipython().magic('matplotlib inline') # In[70]: PokemonDf=pd.read_csv('pokemon/Pokemon.csv') # In[71]: PokemonDf.head(200000) # In[72]: PokemonDf.describe() PokemonDf.Name...
pd.concat([PokemonDf['Attack'],PokemonDf['HP']])
pandas.concat
import jieba import pandas as pd import wordcloud # read the danmaku (bullet-comment) txt file with open("dan_mu.txt", encoding="utf-8") as f: txt = f.read() danmu_list = txt.split("\n") # tokenize with jieba danmu_cut = [jieba.lcut(item) for item in danmu_list] # load the stopwords with open("baidu_stopwords.txt",encoding="utf-8") as f: stop = f.r...
pd.Series(all_words)
pandas.Series
from typing import Union, Optional, List, Dict import faiss import pickle import torch.distributed # noqa: WPS301 import numpy as np import pandas as pd from time import time from pathlib import Path from catalyst.dl import IRunner, CallbackOrder, Callback from catalyst.utils.torch import get_activation_fn from s...
pd.read_excel(doev2_xlsx, sheet_name=None)
pandas.read_excel
from ioUtils import getFile, saveFile from timeUtils import clock, elapsed from numpy import isnan from pandas import DataFrame, Series def testManualEntries(fast=False, saveit=False): if fast: start, cmt = clock("Testing Manual Entries Pickle File") else: start, cmt = clock("Testing Manual ...
DataFrame(manualEntries)
pandas.DataFrame
# Copyright (c) 2019-2020, NVIDIA CORPORATION. import datetime as dt import re import cupy as cp import numpy as np import pandas as pd import pyarrow as pa import pytest from pandas.util.testing import ( assert_frame_equal, assert_index_equal, assert_series_equal, ) import cudf from cudf.core import Data...
pd.Series([None, None], dtype="datetime64[ns]")
pandas.Series
import numpy as np import pytest import pandas as pd from pandas.core.sorting import nargsort import pandas.util.testing as tm from .base import BaseExtensionTests class BaseMethodsTests(BaseExtensionTests): """Various Series and DataFrame methods.""" @pytest.mark.parametrize('dropna', [True, False]) d...
pd.Series(orig_data2)
pandas.Series
""" Getting most discussed stocks from r/wallstreetbets hot """ import json import os import re import time from collections import ChainMap, Counter from datetime import datetime import pandas as pd import requests from dotenv import load_dotenv load_dotenv() CLIENT_ID = os.getenv("CLIENT_ID") SECRET = os.getenv("S...
pd.DataFrame()
pandas.DataFrame
from datetime import datetime import operator import numpy as np import pytest from pandas import DataFrame, Index, Series, bdate_range import pandas._testing as tm from pandas.core import ops class TestSeriesLogicalOps: @pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor]) def te...
tm.assert_series_equal(result, expected)
pandas._testing.assert_series_equal
""" Extracts path data for a user or a set of users and analyses with pathpy. """ import csv import json import os import numpy as np import matplotlib.pyplot as plt import igraph import pathpy as pp from scipy.stats import chi2 from collections import Counter from pandas import DataFrame import seaborn as sns from sci...
DataFrame(eval_list)
pandas.DataFrame
import matplotlib matplotlib.use('Agg') import re import argparse from datetime import datetime, timedelta, time import matplotlib.pyplot as plt import matplotlib.lines as mlines import matplotlib.patches as mpatches import numpy as np import pandas as pd from pandas.plotting import register_matplotlib_converters regis...
pd.read_csv(labelDictCSV, usecols=['annotation', labelDictCol])
pandas.read_csv
from mpl_toolkits import mplot3d import sys, os import numpy as np import matplotlib.pyplot as plt import pandas as pd from plotnine import * import copy, math dist = 10 def find_min_discm_each_hyperparam(df): x = df.sort_values(by=['Discm_percent', 'Points-Removed']).groupby("Model-count", as_index=False).first(...
pd.concat([df_noremoval, df_nosensitive, df_massaging, df_ps, df_lfr, df_DIR, df_adver, df_our], sort=True)
pandas.concat
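Note that completions refer to pandas through whatever alias the prompt imports: `pd.`, bare `pandas.`, `tm.` for the testing module, or names pulled in with `from pandas import ...` such as `TS` or `Series`. Matching a completion to its `api` value therefore needs the prompt's import context. A minimal sketch for the common `import pandas as pd` case only (the alias table and helper are illustrative, not part of the dataset):

```python
import re

# Illustrative alias map; real prompts also alias submodules,
# e.g. `import pandas.util.testing as tm`.
ALIASES = {"pd": "pandas", "pandas": "pandas"}

def qualify(completion):
    """Map a call such as `pd.read_csv(...)` to `pandas.read_csv`."""
    match = re.match(r"(\w+)\.([\w.]+)\(", completion.strip())
    if not match:
        return None
    root, attr = match.groups()
    base = ALIASES.get(root)
    return f"{base}.{attr}" if base else None

assert qualify("pd.read_csv(csv2)") == "pandas.read_csv"
assert qualify("TS('2016-10-20')") is None  # resolving TS needs the prompt's imports
```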