prompt: string, lengths 19 to 1.03M
completion: string, lengths 4 to 2.12k
api: string, lengths 8 to 90
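Each row below is a (prompt, completion, api) record: a truncated source-code context, the pandas call that completes it, and the fully-qualified API name that the call resolves to. As a minimal sketch of working with records in this shape, assuming they are stored as JSON Lines in a hypothetical file named rows.jsonl:

import json

# Read (prompt, completion, api) records from a JSON Lines file.
# The filename "rows.jsonl" is an assumption for illustration.
with open("rows.jsonl", encoding="utf-8") as f:
    records = [json.loads(line) for line in f]

# Inspect one record: the truncated code context, the completion call,
# and the fully-qualified pandas API it maps to.
first = records[0]
print(first["prompt"][:80])
print(first["completion"])  # e.g. pd.DataFrame(...)
print(first["api"])         # e.g. pandas.DataFrame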
# -*- coding: utf-8 -*- import numpy as np import pandas as pd import quantipy as qp # from matplotlib import pyplot as plt # import matplotlib.image as mpimg import string import pickle import warnings try: import seaborn as sns from PIL import Image except: pass from quantipy.core.cache import Cac...
pd.DataFrame(self.valdiffs, index=self.ypairs, columns=self.xdef)
pandas.DataFrame
import folium import pandas as pd #helper method to setup any input dataframe into dictionaries that can be input into OSR isochrone methods and folium maps def dictSetup(dataframe): station_dict = dataframe.to_dict(orient='index') for name, station in station_dict.items(): station['locations'] = [stati...
pd.DataFrame.from_dict(maps[i][1])
pandas.DataFrame.from_dict
#! /usr/bin/python import datetime import json import os import pandas import urllib.request import time #define constants workingDir = os.getcwd() print(workingDir) stationID ='114' yesterdayDate = (datetime.date.today() - datetime.timedelta(1)) todayDate = datetime.date.today() yesterdayYear = yesterdayDate.year ur...
pandas.to_datetime(weatherData["Date"])
pandas.to_datetime
from collections import Counter from os import getenv from pathlib import Path from matplotlib.pyplot import savefig from pandas import DataFrame from . import database from ..crawler.models import Article current_path = Path(__file__).parent.resolve() def test_rank(): # The test is not suitable for CI if ...
DataFrame(common)
pandas.DataFrame
# Implementation of random stuff import json import torch import pandas as pd import pickle from torch_geometric.data import Data from pathlib import Path from itertools import repeat from collections import OrderedDict class MetricTracker: """ Class implementation for tracking all the metrics. """ de...
pd.DataFrame(index=keys, columns=['total', 'counts', 'average'])
pandas.DataFrame
import xgboost as xgb from sklearn.impute import SimpleImputer import numpy as np import pandas as pd from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_squared_error, mean_absolute_error from src.utils.io import load, save from src.visualization.visualize import * def get_X_y(data): ...
pd.DataFrame(data=values, columns=['feature_labels', 'feature_importance'])
pandas.DataFrame
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Sep 16 17:37:51 2020 @author: sawleen """ import pandas as pd from selenium import webdriver from selenium.webdriver.chrome.options import Options import os os.chdir('/Users/sawleen/Documents/Leen/Python/stock_analysis') import data.get_yf_data as get_y...
pd.concat([saved_health_metrics, health_metric_symbol])
pandas.concat
import pandas as pd import math from numpy import nanmin,nanmax #maximum number of records in a parquet file (except the index file) max_rows = 500000 states = ["ACT", "NSW", "NT", "OT", "QLD", "SA", "TAS", "VIC", "WA"] #states = ["ACT", "WA"] #initiate the index file index_file =
pd.DataFrame(columns=['IDX','STREET_NAME','STREET_TYPE_CODE','LOCALITY_NAME','STATE','POSTCODE','FILE_NAME','ADDRESS_COUNT','MIN_STREET_NUMBER','MAX_STREET_NUMBER'])
pandas.DataFrame
import pydoc import pandas as pd import os import random def read_excel(): df = pd.read_excel('/Users/ls/Downloads/babycare11-1.xlsx') data = df.head(2) print(str(data)) # print(df.head(2)) def merge_excel(): dfs = [] dir = '/Users/ls/babycare/' des = '/Users/ls/babycare/babycare-stats-...
pd.concat(dfs)
pandas.concat
import streamlit as st import datetime import pytz from datetime import date from utils.metrics import log_runtime import pandas as pd import timeit short_title = "iterrows() and itertuples()" long_title = "iterrows() and itertuples()" key = 6 content_date = datetime.datetime(2021, 10, 5).astimezone(pytz.timezone("US/...
pd.to_datetime(df['date'], format='%Y-%m-%d')
pandas.to_datetime
import pandas as pd from output.helpers import * from datetime import datetime import emoji import re import string import nltk from nltk import ngrams, FreqDist from nltk.sentiment import SentimentIntensityAnalyzer from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer from sklearn.feature_select...
pd.read_csv(input_path)
pandas.read_csv
# general imports import numpy as np import pandas as pd import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import seaborn as sns sns.set() import cv2 import os from PIL import Image from pprint import pprint import time from tqdm import tqdm # torch and torchvision import torch import tor...
pd.read_csv(cfg['train_csv_path'])
pandas.read_csv
""" This file is for playing around with song data from the MSD data set. In particular, we are interesting in getting all of the data out in an exportable manner. We can't get all of the information from the summary file, we have to open all files and extract the data to do this. """ import os import pandas as pd im...
pd.read_csv(training_file)
pandas.read_csv
import json import os import matplotlib from matplotlib import pyplot as plt from matplotlib.gridspec import GridSpec from numpy import array as nparray import countryinfo import pandas as pd import datetime import folium import torch import numpy as np def compare_models(): """ Output a table showing final tr...
pd.DataFrame(cellText)
pandas.DataFrame
from sys import set_asyncgen_hooks import streamlit as st import plotly.graph_objects as go import pandas as pd import numpy as np featuresAbbrev = {'Points' : 'pts', 'Goal Scored' : 'gs_cum', 'Goal Conceded' : 'gc_cum', 'Goal difference' : 'gd', 'Form' ...
pd.concat([homeTeamData, awayTeamData])
pandas.concat
from pathlib import Path import numpy as np import pandas as pd import pytest from pandas.testing import assert_series_equal from src.contact_models.contact_model_functions import _draw_nr_of_contacts from src.contact_models.contact_model_functions import _draw_potential_vacation_contacts from src.contact_models.cont...
pd.Series(False, index=a_saturday.index)
pandas.Series
import urllib.request import xmltodict, json # import pygrib import numpy as np import pandas as pd from datetime import datetime, timedelta import time import urllib.request import xmltodict # Query to extract parameter forecasts for one particular place (point) # # http://data.fmi.fi/fmi-apikey/f96cb70b-64d1-4bbc-9...
pd.DataFrame(columns=['Measurement_Number', 'Name', 'DateTime', 'Lat', 'Long', 'Value'])
pandas.DataFrame
import warnings import numpy as np import pandas as pd import re import string @pd.api.extensions.register_dataframe_accessor('zookeeper') class ZooKeeper: def __init__(self, pandas_obj): # validate and assign object self._validate(pandas_obj) self._obj = pandas_obj # define incor...
pd.to_numeric(vals_not_null, errors='coerce')
pandas.to_numeric
from bert_embedding import BertEmbedding #from bert_serving.client import BertClient from flask import Flask, render_template, request import os import json import requests import pickle import joblib import numpy as np import pandas as pd #import tensorflow as tf #all packages import nltk import string ...
pd.DataFrame(data,columns=['text'])
pandas.DataFrame
from datetime import date as dt import numpy as np import pandas as pd import pytest import talib import os from finance_tools_py.simulation import Simulation from finance_tools_py.simulation.callbacks import talib as cb_talib from finance_tools_py.simulation import callbacks @pytest.fixture def init_global_data(): ...
pd.Series.equals(real, pytest.global_data[col])
pandas.Series.equals
#------------------------------------------------------------- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under...
pd.DataFrame(X)
pandas.DataFrame
# -*- coding: utf-8 -*- """ Created on Mon May 14 17:29:16 2018 @author: jdkern """ from __future__ import division import pandas as pd import numpy as np def exchange(year): df_data = pd.read_csv('../Time_series_data/Synthetic_demand_pathflows/Sim_daily_interchange.csv',header=0) paths = ['SALBRYNB', 'ROSET...
pd.read_excel('Path_setup/NEISO_path_export_profiles.xlsx',sheet_name='SALBRYNB',header=None)
pandas.read_excel
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Jan 21 23:24:11 2021 @author: rayin """ import os, sys import pandas as pd import matplotlib.pyplot as plt import numpy as np import math import re import random from collections import Counter from pprint import pprint os.chdir("/Users/rayin/Google ...
pd.Series(aa)
pandas.Series
#!/usr/bin/python # -*- coding: utf-8 -*- import subprocess import os.path import time import sys import pandas as pd work_path = sys.path[0]+'/work_space' def hetero_to_homo(filepath,jobid,role,garblers): """Convert a heterogeneous graph to a homogeneous graph and store it""" hetero_df = pd.read_csv(filepath,index_col=None,header=None,sep=" ") fold_name = o...
pd.read_csv(path,index_col=None,header=None,sep=" ",engine = "python")
pandas.read_csv
from ctypes import sizeof import traceback from matplotlib.pyplot import axis import pandas as pd import numpy as np from datetime import datetime from time import sleep from tqdm import tqdm import random import warnings from sklearn.metrics import accuracy_score from sklearn.metrics import mean_squared_error from skl...
pd.concat(data_collected_dfs, axis=0)
pandas.concat
#!/usr/bin/python3 # # Data Indexer # This script sweeps the file index and consolidate channel and site information. # - Read files on designated folder # Import standard libraries import pandas as pd import h5py # Import specific libraries used by the cortex system import h5_spectrum as H5 import cortex_names as ...
pd.DataFrame()
pandas.DataFrame
from gensim import corpora import gensim from gensim.matutils import hellinger import pyLDAvis import pyLDAvis.gensim_models as gensimvis from IPython.core.display import HTML from collections import defaultdict from sklearn.cluster import KMeans from sklearn.feature_extraction.text import TfidfVectorizer import pprint...
pd.DataFrame(columns=['Course', 'Topics'])
pandas.DataFrame
import numpy as np import pytest from pandas.compat import range, u, zip import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series import pandas.core.common as com from pandas.core.indexing import IndexingError from pandas.util import testing as tm @pytest.fixture def frame_random_data_integer_mul...
Series([1, 2, 3])
pandas.Series
import pandas as pd import sparse import numpy as np class AnnotationData: """ Contains all the segmentation and assignment data WARNING: self.assignments['Clusternames'] will contain neurite ids (as strings) rather than names """ # Todo: if we can preserve segments instead of merging them when two...
pd.DataFrame({"Time": [], "Segment": [], "x": [], "y": [], "z": []}, dtype=int)
pandas.DataFrame
# -------------- # import the libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split import warnings warnings.filterwarnings('ignore') # Code starts here df=
pd.read_json(path,lines=True)
pandas.read_json
#!/usr/bin/env python # coding: utf-8 import torch import numpy as np from sklearn import metrics import pandas as pd import torch.utils.data as Data import sklearn from sklearn import tree from sklearn.metrics import precision_score, recall_score, f1_score from sklearn.ensemble import RandomForestClassifier, AdaBoos...
pd.DataFrame(x_test)
pandas.DataFrame
import numpy as np import pandas as pd from sklearn.decomposition import NMF class ClusterModel: @property def clusters(self): return len(self._cluster_names) @property def cluster_names(self): return self._cluster_names @clusters.setter def clusters(self, value): se...
pd.DataFrame(self._H, index=self._cluster_names, columns=self._X.columns)
pandas.DataFrame
import json import pytest import numpy as np import pandas as pd import scipy.spatial.distance as scipy_distance from whatlies import Embedding, EmbeddingSet from .common import validate_plot_general_properties """ *Guide* Here are the plot's properties which could be checked (some of them may not be applicable f...
pd.DataFrame(chart["datasets"][chart["data"]["name"]])
pandas.DataFrame
# coding: utf-8 # In[1]: import pandas as pd import findspark findspark.init('spark24') from pyspark.sql import SparkSession import numpy as np import matplotlib.pyplot as plt # In[2]: reviews = pd.read_csv("/home/yashika/Downloads/zomato.csv") reviews.head(3) # In[3]: #pd.show_versions() #reviews.value_...
pd.DataFrame(data)
pandas.DataFrame
#!/usr/bin/python # _____________________________________________________________________________ # ---------------- # import libraries # ---------------- # standard libraries # ----- import torch import numpy as np import os import pandas as pd import matplotlib.pyplot as plt from torch.utils.data import Dataset, D...
pd.concat(streambits, ignore_index=True)
pandas.concat
import sys import pandas as pd from sqlalchemy import create_engine def load_data(messages_filepath, categories_filepath): ''' Load the datasets and merge them to generate a dataframe to be used for analysis Args: messages_filepath: The path of messages dataset. categories_filepat...
pd.read_csv(categories_filepath)
pandas.read_csv
import warnings import yfinance as yf from pathlib import Path import numpy as np import pandas as pd import requests import seaborn as sns import matplotlib as mpl from matplotlib import pyplot as plt from datetime import datetime, date from yahooquery import Ticker from tensorflow.keras.callbacks import ModelCheckpoi...
pd.DataFrame(X, index=y.index)
pandas.DataFrame
from datetime import datetime, timedelta import inspect import numpy as np import pytest from pandas.core.dtypes.common import ( is_categorical_dtype, is_interval_dtype, is_object_dtype, ) from pandas import ( Categorical, DataFrame, DatetimeIndex, Index, IntervalIndex, MultiIndex...
tm.assert_produces_warning(FutureWarning)
pandas.util.testing.assert_produces_warning
from sklearn import metrics import numpy as np import pandas as pd import seaborn as sns from .stats import * from .scn_train import * import matplotlib import matplotlib.pyplot as plt def divide_sampTab(sampTab, prop, dLevel="cell_ontology_class"): cts = set(sampTab[dLevel]) trainingids = np.empty(0) for...
pd.crosstab(true_label, pred_label)
pandas.crosstab
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import numpy as np from collections import namedtuple import math import geopy.distance pd.set_option('display.max_rows', 10000) def generate_dataset_gps(): # tx_coord = (63.4073927,10.4775050) #old tx_coord = (63.40742, 10.47752) #ole...
pd.DataFrame(measurements)
pandas.DataFrame
#!/usr/bin/env python # coding: utf-8 # In[ ]: # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra i...
pd.read_csv("../../../input/ronitf_heart-disease-uci/heart.csv")
pandas.read_csv
#!/usr/bin/env python # coding=utf-8 # vim: set filetype=python: from __future__ import print_function from __future__ import absolute_import from __future__ import division import os import posixpath import sys import math import datetime import string from functools import wraps import traceback import xlrd3 as xl...
pd.DataFrame()
pandas.DataFrame
import pytest import numpy as np import pandas as pd from pandas import Categorical, Series, CategoricalIndex from pandas.core.dtypes.concat import union_categoricals from pandas.util import testing as tm class TestUnionCategoricals(object): def test_union_categorical(self): # GH 13361 data = [ ...
Categorical(['x', 'y', 'z'])
pandas.Categorical
from sqlalchemy import true import FinsterTab.W2020.DataForecast import datetime as dt from FinsterTab.W2020.dbEngine import DBEngine import pandas as pd import sqlalchemy as sal import numpy from datetime import datetime, timedelta, date import pandas_datareader.data as dr def get_past_data(self): """ Get raw...
pd.read_sql_query(query, self.engine)
pandas.read_sql_query
""" To extract compile time and runtime data from evo-suite dataset Version 0.3.0 - Project metric computation has been omitted. To be used in CodART project """ import multiprocessing import sys import os import subprocess import threading from collections import Counter from functools import wraps import warnings ...
pd.DataFrame(data=[dummy_data], columns=columns)
pandas.DataFrame
import sys import re import requests from bs4 import BeautifulSoup as soup import pandas as pd def ItemResults(item): ''' Function for scraping the list of items available for the desired one. The function scrapes: - Item name - Item link - Item price per piece - Minimum orde...
pd.DataFrame(columns=['name','item','link','price','min-order'])
pandas.DataFrame
# %% import math import multiprocessing as mp import numpy as np import pandas as pd import pickle import string from sklearn.dummy import DummyClassifier from sklearn.calibration import CalibratedClassifierCV from sklearn.ensemble import RandomForestClassifier, VotingClassifier from sklearn.linear_model import Logist...
pd.read_csv('train\hosts-210311.txt', delim_whitespace=True, usecols=[1], names=['hostname'], skiprows=39, skipfooter=11)
pandas.read_csv
import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.preprocessing import StandardScale...
pd.DataFrame(gs_knn.cv_results_)
pandas.DataFrame
# -*- coding: utf-8 -*- # pylint: disable=E1101 # flake8: noqa from datetime import datetime import csv import os import sys import re import nose import platform from multiprocessing.pool import ThreadPool from numpy import nan import numpy as np from pandas.io.common import DtypeWarning from pandas import DataFr...
StringIO(data)
pandas.compat.StringIO
#!/usr/bin/env python3 # coding: utf-8 """Abstract command classes for hicstuff This module contains all classes related to hicstuff commands: -iteralign (iterative mapping) -digest (genome chunking) -cutsite (preprocess fastq by cutting reads into digestion products) -filter (Hi-C 'event' sorting: l...
pd.DataFrame({"size": size})
pandas.DataFrame
############################################################# # ActivitySim verification against TM1 # <NAME>, <EMAIL>, 02/22/19 # C:\projects\activitysim\verification>python compare_results.py ############################################################# import pandas as pd import openmatrix as omx ################...
pd.read_csv(tm1_per_filename)
pandas.read_csv
import sys import time import numpy as np import pandas as pd from sklearn.metrics import accuracy_score from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import GridSearchCV from glob import glob from sklearn.neural_network import MLPClassifier def load_data(file_name): print('FI...
pd.DataFrame(y_train)
pandas.DataFrame
import pandas as pd import scipy.io as sio import scipy.interpolate import numpy as np import scipy.sparse import scipy import gzip import subprocess import collections from collections import defaultdict, Counter import scipy.sparse as sp_sparse import warnings import pickle import os #warnings.filterwarnings('ignore'...
pd.Series(stat_dict)
pandas.Series
# pylint: disable=E1101,E1103,W0232 from datetime import datetime, timedelta from pandas.compat import range, lrange, lzip, u, zip import operator import re import nose import warnings import os import numpy as np from numpy.testing import assert_array_equal from pandas import period_range, date_range from pandas.c...
tm.assertRaisesRegexp(ValueError, length_error)
pandas.util.testing.assertRaisesRegexp
import numpy as np import pandas as pd import matplotlib.pyplot as plt import warnings import calendar import seaborn as sns sns.set(style='white', palette='deep') plt.style.use('grayscale') warnings.filterwarnings('ignore') width = 0.35 # Functions def autolabel(rects,ax, df): #autolabel for rect in rects: ...
pd.read_excel('Banco de Dados - WDO.xlsx')
pandas.read_excel
""" Market Data Presenter. This module contains implementations of the DataPresenter abstract class, which is responsible for presenting data in the form of mxnet tensors. Each implementation presents a different subset of the available data, allowing different models to make use of similar data. """ from typing impo...
pd.Series.ewm(data['close'], span=period)
pandas.Series.ewm
# pylint: disable=E1101 from datetime import time, datetime from datetime import timedelta import numpy as np from pandas.core.index import Index, Int64Index from pandas.tseries.frequencies import infer_freq, to_offset from pandas.tseries.offsets import DateOffset, generate_range, Tick from pandas.tseries.tools impo...
Index.intersection(self, other)
pandas.core.index.Index.intersection
import asyncio import sqlite3 import logging.config from typing import List, Any from datetime import datetime from pathlib import Path import aiohttp import xmltodict import yaml import pandas as pd from credentials.credentials import GOODREADS_KEY # configuring logging with open('log_config.yaml', 'r') as f: ...
pd.DataFrame(data, columns=['book_id', 'book_title', 'title_without_series', 'publication_year', 'publication_month'])
pandas.DataFrame
import pandas as pd import numpy as np import pdb class ArucoCorner: """ Object which holds corner data for a specific aruco tag id """ def __init__(self, id_num, corners, data_attributes=None, file_folder=None): """ Creates the object """ # TODO: add aruco dictio...
pd.DataFrame(reshaped_c, columns=["x1","y1","x2","y2","x3","y3","x4","y4"])
pandas.DataFrame
#libraries import numpy as np import pandas as pd from datetime import datetime as dt import time import datetime import os import warnings warnings.filterwarnings("ignore") import logging logging.basicConfig(filename='log.txt',level=logging.DEBUG, format='%(asctime)s %(message)s') pd.set_option('max_colwidth', 50...
pd.concat([train, test], ignore_index=True)
pandas.concat
import pandas as pd from collections import defaultdict import os import requirements import numpy as np import xmlrpc.client as xc client = xc.ServerProxy('https://pypi.python.org/pypi') packages = client.list_packages() datadict = defaultdict(list) with open('requirements.txt', 'r') as infile: new_package = Tru...
pd.DataFrame(data=datadict)
pandas.DataFrame
# starpar.py import numpy as np import pandas as pd from ..load_sim import LoadSim from ..util.mass_to_lum import mass_to_lum class StarPar(): @LoadSim.Decorators.check_pickle def read_starpar_all(self, prefix='starpar_all', savdir=None, force_override=False): rr = dict() ...
pd.DataFrame(rr)
pandas.DataFrame
# Copyright Contributors to the Pyro-Cov project. # SPDX-License-Identifier: Apache-2.0 import argparse from collections import defaultdict import numpy as np import pandas as pd import torch from pyrocov.sarscov2 import aa_mutation_to_position # compute moran statistic def moran(values, distances, lengthscale): ...
pd.DataFrame(data=results, index=index, columns=columns)
pandas.DataFrame
# -*- coding: utf-8 -*- import pandas as pd import numpy as np import matplotlib.pyplot as plt import keras from keras.models import Sequential from keras.layers import Dense, Activation from keras.optimizers import SGD from sklearn.metrics import classification_report, confusion_matrix df = pd.read_csv("data/iris.c...
pd.DataFrame.from_dict(trained_model.history)
pandas.DataFrame.from_dict
import math import requests import os import pandas as pd import matplotlib.pyplot as plt import os import numpy as np import sys import math from datetime import datetime from glob import glob from datetime import timedelta plt.style.use('ggplot') from mpl_toolkits.basemap import Basemap from igrf12py.igrf12fun impor...
pd.read_csv(mpath, header=0, sep=' ', parse_dates=0, index_col=0, low_memory=False)
pandas.read_csv
# pylint: disable-msg=E1101,W0612 from datetime import datetime, time, timedelta, date import sys import os import operator from distutils.version import LooseVersion import nose import numpy as np randn = np.random.randn from pandas import (Index, Series, TimeSeries, DataFrame, isnull, date_ran...
tm.assert_frame_equal(result, expected)
pandas.util.testing.assert_frame_equal
from flask import * import pandas as pd import os from pandas.tseries.holiday import USFederalHolidayCalendar from pandas.tseries.offsets import CustomBusinessDay from keras.models import load_model from sklearn.preprocessing import MinMaxScaler from flask_ngrok import run_with_ngrok import numpy as np app = Flask(__n...
USFederalHolidayCalendar()
pandas.tseries.holiday.USFederalHolidayCalendar
from datetime import datetime from typing import List import pandas as pd import pytest from hyperwave import ( HyperwaveWeekLenghtGrouping, HyperwavePhaseGrouper, HyperwaveGroupingPhasePercent, HyperwaveGroupingPhaseAggregator, HyperwaveGroupingToPhase4, HyperwaveGrouperByMedianSlopeIncrease, ...
pd.DataFrame(raw_data)
pandas.DataFrame
import os from pathlib import Path from typing import List, Tuple, Optional, Sequence, Any, Union, Generator import numpy as np import pandas as pd import matplotlib.pyplot as plt import penguins as pg from penguins import dataset as ds # for type annotations class Experiment: """ Generic interface for expe...
pd.DataFrame.from_records(peaklist, columns=("f1", "f2"))
pandas.DataFrame.from_records
''' NMF learns topics of documents. In the video, you learned that when NMF is applied to documents, the components correspond to topics of documents, and the NMF features reconstruct the documents from the topics. Verify this for yourself for the NMF model that you built earlier using the Wikipedia articles. Previously, yo...
pd.DataFrame(model.components_, columns=words)
pandas.DataFrame
#import the pandas library and aliasing as pd import pandas as pd import numpy as np # Create an Empty DataFrame df = pd.DataFrame() print (df) # Create a DataFrame from Lists data = [1,2,3,4,5] df = pd.DataFrame(data) print (df) data = [['Ankit',21],['Bob',24],['Clarke',20]] df = pd.DataFrame(data,col...
pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
pandas.Series
# Arithmetic tests for DataFrame/Series/Index/Array classes that should # behave identically. # Specifically for datetime64 and datetime64tz dtypes from datetime import ( datetime, time, timedelta, ) from itertools import ( product, starmap, ) import operator import warnings import numpy as np impo...
tm.assert_series_equal(result, exp)
pandas._testing.assert_series_equal
import itertools import pandas as pd import numpy as np from log import Log def gen_EMA(data: pd.Series, n=20): alpha = 1 / (n + 1) EMA = [] for t in range(len(data.index)): if t == 0: EMA_t = data.iat[t] else: EMA_t = alpha * data.iat[t] + (1 - alpha) * EMA[-1] ...
pd.Series(multinomial, index=data.index)
pandas.Series
# -*- coding: utf-8; py-indent-offset:4 -*- import os, sys import datetime as dt import tabulate as tb import numpy as np import pandas as pd import matplotlib.pyplot as plt from ..core import get_cn_fund_list, get_cn_fund_daily, get_cn_fund_manager, get_cn_fund_company, get_all_symbol_name, get_daily from ..utils im...
pd.read_excel(excel_file, dtype=str)
pandas.read_excel
import pandas as pd import datetime import numpy as np import icd def get_age(row): """Calculate the age of patient by row Arg: row: the row of pandas dataframe. return the patient age """ raw_age = row['DOD'].year - row['DOB'].year if (row['DOD'].month < row['DOB'].month) or ((row['...
pd.to_datetime(patient['DOB'])
pandas.to_datetime
#!/usr/bin/python import sys, os; import argparse; from os.path import expanduser; import pandas as pd; import math; from datetime import datetime as dt; from datetime import timedelta; __author__ = "<NAME>" def main(): parser = argparse.ArgumentParser(description="This script normalizes the Binance buy history ...
pd.read_excel(args.foreignexchange, sheet_name="sheet1")
pandas.read_excel
from datetime import timedelta import numpy as np import pandas as pd import pickle def generate_data(df, freq: str, scenario: int, regr_vars = None, multiplier = None, baseline = None, look_back = None, look_ahead = None): ''' fr...
pd.DataFrame(x_test_scaled, columns=testX.columns, index=testX.index)
pandas.DataFrame
import pandas as pd import argparse import pickle from collections import defaultdict COL2Label = {0:'transcript', 1: 'dna', 2: 'protein'} parser = argparse.ArgumentParser(description='Variant Results.') parser.add_argument('--results_file', type = str, required = True, help = 'paths results') parser.add_argument('-...
pd.DataFrame(summary)
pandas.DataFrame
# coding: utf-8 import pandas as pd from pandas import Series,DataFrame import numpy as np import itertools import matplotlib.pyplot as plt get_ipython().magic('matplotlib inline') from collections import Counter import re import datetime as dt from datetime import date from datetime import datetime i...
pd.to_datetime(tweets['date'])
pandas.to_datetime
import sqlite3 import pandas as pd import numpy as np from datetime import datetime class Rankings: def run(self, database): print("Starting product ranking...") start_time = datetime.now() conn = sqlite3.connect(database) query = conn.execute("SELECT * From reviews") cols...
pd.merge(average_by_asin, count, on='asin')
pandas.merge
""" Tests dtype specification during parsing for all of the parsers defined in parsers.py """ from io import StringIO import numpy as np import pytest from pandas import Categorical, DataFrame, Index, MultiIndex, Series, concat import pandas._testing as tm def test_dtype_all_columns_empty(all_parsers): # see gh...
Series([], dtype="timedelta64[ns]")
pandas.Series
#!/usr/bin/env python # -*- coding: utf-8 -*- import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.finance as mpf ''' Read the OHLCV data of one stock for the specified years. Inputs: baseDir and stockCode are strings; startYear and yearNum are integers. Output: dataframe ''' def readWSDFile(baseDir, stockCode, startYear, yearNum=1): # parse dates datepa...
pd.datetime.strptime(x, '%Y-%m-%d')
pandas.datetime.strptime
# coding=utf-8 from datetime import datetime from wit import Wit from string import Template from time import sleep from collections import namedtuple from pathlib import Path import pandas as pd import deepcut import os import glob import pickle import config toq_key = config.toq_key say_key = config.say_key sub_key ...
pd.Series(analyse.sAppear)
pandas.Series
""" Pull information using python ColecticaPortal api """ from io import StringIO import xml.etree.ElementTree as ET import pandas as pd import json import api def remove_xml_ns(xml): """ Read xml from string, remove namespaces, return root """ it = ET.iterparse(StringIO(xml)) for _, el in it...
pd.DataFrame(columns=['response_type', 'Value', 'Name', 'ID', 'Label'])
pandas.DataFrame
import json import os import sqlite3 import pyAesCrypt import pandas from os import stat from datetime import datetime import time import numpy # Global variables for use by this file bufferSize = 64*1024 password = os.environ.get('ENCRYPTIONPASSWORD') # py -c 'import databaseAccess; databaseAccess.reset()' def reset...
pandas.DataFrame()
pandas.DataFrame
import numpy as np import scipy.stats as sp import os import pandas as pd import h5py import bokeh.io as bkio import bokeh.layouts as blay import bokeh.models as bmod import bokeh.plotting as bplt from bokeh.palettes import Category20 as palette from bokeh.palettes import Category20b as paletteb import plot_results a...
pd.HDFStore(in_h5_file)
pandas.HDFStore
# Author : <EMAIL> # Date : 2020-12-03 import logging import numpy as np import pandas as pd import os, glob, time, datetime import pickle import gzip import copy import json import cv2 import random import torch from sklearn.model_selection import train_test_split from sklearn.model_selection import StratifiedKFold,...
pd.read_csv(f'{input_csv}{val_fold}.csv')
pandas.read_csv
# Copyright (c) 2018-2020, NVIDIA CORPORATION. from __future__ import division import operator import random from itertools import product import numpy as np import pandas as pd import pytest import cudf from cudf.core import Series from cudf.core.index import as_index from cudf.tests import utils from cudf.utils.d...
pd.DataFrame({})
pandas.DataFrame
import os import pandas_datareader os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' from tensorflow import keras import pandas import pandas as pd import plotly.express as px import pandas_datareader.data as web from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler ...
pd.to_datetime(End)
pandas.to_datetime
import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.metrics import mean_squared_error from math import sqrt from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt import statsmodels.api as sm #Importing data # dataframe df = pd.read_csv("<NAME>.txt", sep='\t')...
pd.to_datetime(train.DATE,format="%Y-%m-%d")
pandas.to_datetime
from collections.abc import Sequence from functools import partial from math import isnan, nan import pytest from hypothesis import given import hypothesis.strategies as st from hypothesis.extra.pandas import indexes, columns, data_frames import pandas as pd import tahini.core.base import tahini.testing names_index_...
pd.Index([(0, 1)])
pandas.Index
from datetime import datetime import pandas as pd from bs4 import BeautifulSoup import cloudscraper from datetime import timedelta class CalendarDataFeed: def __init__(self, startYear, endYear, calendarSite = "https://www.forexfactory.com/calendar?day=" ): self.startYear = startYear self.en...
pd.DataFrame()
pandas.DataFrame
from rpy2.robjects import pandas2ri import numpy as np import pandas as pd import wrfpywind.data_preprocess as pp import xarray as xr from .util import _get_r_module, _attach_obs, _xr2pd, _fxda, _fxda_grid def fmt_training_data(wrfda, obsda): # Get and format data for only north buoy at 100m data_n = _attach...
pd.DateOffset(days=sim_len)
pandas.DateOffset
from django.http import JsonResponse from collections import Counter import pandas as pd import json from datetime import date, timedelta from django.contrib.auth.decorators import login_required from django.utils.decorators import method_decorator from django.urls import reverse from django.db.models import Avg, Sum, ...
pd.to_timedelta("0 days")
pandas.to_timedelta
""" OneSeries is an extended variant of pandas.Seres, which also inherits all the pandas.Series features and ready to use. It contains many useful methods for a better experience on data analysis. WARNING: Because this module is still pre-alpha, so many features are unstable. """ import pandas as pd from pandas impor...
pd.concat([self, other], axis=1)
pandas.concat
from ...utils import constants import pandas as pd import geopandas as gpd import numpy as np import shapely import pytest from contextlib import ExitStack from sklearn.metrics import mean_absolute_error from ...models.geosim import GeoSim from ...core.trajectorydataframe import TrajDataFrame def global_variables(): ...
pd.to_datetime('2020/01/01 08:00:00')
pandas.to_datetime
from nltk.stem import WordNetLemmatizer from nltk.tokenize import word_tokenize from nltk.corpus import stopwords from nltk.corpus import wordnet from datetime import datetime from numpy.linalg import norm from tqdm.auto import tqdm from glob import glob import pandas as pd import numpy as np import subprocess import ...
pd.concat([subreddits, vectors], axis=1)
pandas.concat
#!/usr/bin/env python # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or...
DataFrame(columns=table_curr.columns)
pandas.DataFrame
import numpy as np from RecSearch.DataInterfaces.Recommenders.Abstract import IMatrixRankRecommender from itertools import combinations, permutations import pandas as pd class IXCourseDiffRankRecommend(IMatrixRankRecommender): def iget_recommendation(self, who: dict, possible: pd.DataFrame, n_column: str, ir_colu...
pd.merge(distance_df, neg_df, how='left', on=['Course1', 'Course2'])
pandas.merge
# Runs after normalization and per_person_ratio_and_factor and pre_plot_aggregation. import shutil from pathlib import Path import itertools import numpy as np import pandas as pd from matplotlib import pyplot as plt import collections def PlotWithSlices(df, data_name, output_dir): for group_name in ['Gender', 'Age...
pd.read_csv(input_base_dir / 'S_all_plot_raw_data.csv')
pandas.read_csv