| prompt (string, lengths 130–399k) | completion (string, lengths 7–146) | api (string, lengths 10–61) |
|---|---|---|
#!/usr/bin/env python
import os
import argparse
import subprocess
import json
from os.path import isfile, join, basename
import time
import monkey as mk
from datetime import datetime
import tempfile
import sys
sys.path.adding(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'instance_gene... | mk.KnowledgeFrame(results) | pandas.DataFrame |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : ioutil.py
@Desc : Input and output data function.
'''
# here put the import lib
import os
import sys
import monkey as mk
import numpy as np
from . import TensorData
import csv
from .basicutil import set_trace
class File():
def __init__(self,... | mk.KnowledgeFrame() | pandas.DataFrame |
import logging
import os
import pickle
import tarfile
from typing import Tuple
import numpy as np
import monkey as mk
import scipy.io as sp_io
import shutil
from scipy.sparse import csr_matrix, issparse
from scMVP.dataset.dataset import CellMeasurement, GeneExpressionDataset, _download
logger = logging.gettingLogger... | mk.KnowledgeFrame(self.ATAC_name) | pandas.DataFrame |
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import clone
import warnings
import re
import monkey as mk
mk.set_option('use_inf_as_na', True)
import numpy as np
from joblib import Memory
from xgboost import XGBClass... | mk.concating([DataRows2, hotEncoderDF2], axis=1) | pandas.concat |
# %% [markdown]
# This python script takes audio files from "filedata" from sonicboom, runs each audio file through
# Fast Fourier Transform, plots the FFT image, splits the FFT'd images into train, test & validation
# and paste them in their respective folders
# Import Dependencies
import numpy as np
import monkey... | mk.KnowledgeFrame() | pandas.DataFrame |
'''
The analysis module
Handles the analyses of the info and data space for experiment evaluation and design.
'''
from slm_lab.agent import AGENT_DATA_NAMES
from slm_lab.env import ENV_DATA_NAMES
from slm_lab.lib import logger, util, viz
import numpy as np
import os
import monkey as mk
import pydash as ps
import shutil... | mk.concating(session_fitness_data, axis=1) | pandas.concat |
#!/usr/bin/env python3
# Project : From geodynamic to Seismic observations in the Earth's inner core
# Author : <NAME>
""" Implement classes for tracers,
to create points along the trajectories of given points.
"""
import numpy as np
import monkey as mk
import math
import matplotlib.pyplot as plt
from . import data... | mk.KnowledgeFrame(data=self.velocity_gradient, columns=["dvx/dx", "dvx/dy", "dvx/dz", "dvy/dx", "dvy/dy", "dvy/dz", "dvz/dx", "dvz/dy", "dvz/dz"]) | pandas.DataFrame |
#!/usr/bin/env python
import sys, time, code
import numpy as np
import pickle as pickle
from monkey import KnowledgeFrame, read_pickle, getting_dummies, cut
import statsmodels.formula.api as sm
from sklearn.externals import joblib
from sklearn.linear_model import LinearRegression
from djeval import *
def... | getting_dummies(yy_kf[categorical_features]) | pandas.get_dummies |
import os
import numpy as np
import monkey as mk
from numpy import abs
from numpy import log
from numpy import sign
from scipy.stats import rankdata
import scipy as sp
import statsmodels.api as sm
from data_source import local_source
from tqdm import tqdm as pb
# region Auxiliary functions
def ts_total_sum(kf, window... | mk.Collections(result_industryaveraged_kf.index) | pandas.Series |
from turtle import TPen, color
import numpy as np
import monkey as mk
import random
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics as metrics
from keras.models import Sequential
from keras.layers import Dense, LSTM, Flatten, Dropout
def getting_ace_values(temp_list):
'''
This fun... | mk.KnowledgeFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import os
import re
from datetime import datetime
import numpy as np
from decimal import Decimal
import scipy.io as sio
import monkey as mk
from tqdm import tqdm
import glob
from decimal import Decimal
import datajoint as dj
from pipeline import (reference, subject, acquisition, stimulation, ... | mk.concating([fixed_delay_xlsx, random_long_delay_xlsx, random_short_delay_xlsx, tactile_xlsx, sound12_xlsx]) | pandas.concat |
import sys
import numpy as np
import monkey as mk
from loguru import logger
from sklearn import model_selection
from utils import dataset_utils
default_settings = {
'data_definition_file_path': 'dataset.csv',
'folds_num': 5,
'data_random_seed': 1509,
'train_val_fraction': 0.8,
'trai... | mk.concating(groups_test_kf_list) | pandas.concat |
import os
import monkey as mk
import matplotlib.pyplot as plt
import datapackage as dp
import plotly.io as pio
import plotly.offline as offline
from plots import (
hourly_plot,
stacked_plot,
price_line_plot,
price_scatter_plot,
merit_order_plot,
filling_level_plot,
)
results = [r for r in os.l... | mk.concating([storages[r], shadow_prices[r]], axis=1) | pandas.concat |
from datetime import datetime
import numpy as np
import pytest
import monkey.util._test_decorators as td
from monkey.core.dtypes.base import _registry as ea_registry
from monkey.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from monkey.core.dtypes.dtypes import (... | Collections(sp_array, name="new_column") | pandas.Series |
import numpy as np
import monkey as mk
import spacy
from spacy.lang.de.stop_words import STOP_WORDS
from nltk.tokenize import sent_tokenize
from itertools import grouper
import clone
import re
import sys
import textstat
# Method to create a matrix with contains only zeroes and a index starting by 0
def c... | mk.KnowledgeFrame(d_multi_word_list) | pandas.DataFrame |
from __future__ import divisionision
import configparser
import logging
import os
import re
import time
from collections import OrderedDict
import numpy as np
import monkey as mk
import scipy.interpolate as itp
from joblib import Partotal_allel
from joblib import delayed
from matplotlib import pyplot as plt
from pyp... | mk.KnowledgeFrame(res) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: BSD
"""
Toolset working with yahoo finance data
Module includes functions for easy access to YahooFinance data
"""
import urllib.request
import numpy as np
import requests # interaction with the web
import os # file system operati... | mk.KnowledgeFrame(data,index=idx) | pandas.DataFrame |
from __future__ import divisionision
from functools import wraps
import monkey as mk
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
cla... | mk.Collections([], dtype="float", name="arbt_inv_sensory") | pandas.Series |
from flowsa.common import WITHDRAWN_KEYWORD
from flowsa.flowbyfunctions import total_allocate_fips_location_system
from flowsa.location import US_FIPS
import math
import monkey as mk
import io
from flowsa.settings import log
from string import digits
YEARS_COVERED = {
"asbestos": "2014-2018",
"barite": "2014-2... | mk.KnowledgeFrame() | pandas.DataFrame |
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2020
#
# Permission is hereby granted, free of charge, to whatever person obtaining a clone
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to ... | mk.convert_datetime(_data['date']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
@author: HYPJUDY 2019/4/15
https://github.com/HYPJUDY
Decoupling Localization and Classification in Single Shot Temporal Action Detection
-----------------------------------------------------------------------------------
Operations used by Decouple-SSAD
"""
import monkey as mk
import ... | mk.concating([resultDf1, resultDf2]) | pandas.concat |
"""
dataset = AbstractDataset()
"""
from collections import OrderedDict, defaultdict
import json
from pathlib import Path
import numpy as np
import monkey as mk
from tqdm import tqdm
import random
def make_perfect_forecast(prices, horizon):
prices = np.array(prices).reshape(-1, 1)
forecast = np.hstack([n... | mk.concating(ds['features'], axis=1) | pandas.concat |
#%%
import numpy as np
import monkey as mk
from orderedset import OrderedSet as oset
#%%
wals = mk.read_csv('ISO_completos.csv').renagetting_ming(columns={'Status':'Status_X_L'})
wals_2 = mk.read_csv('ISO_completos_features.csv').renagetting_ming(columns={'Status':'Status_X_L'})
wiki_unionerd = mk.read_csv('Wikidata_Wa... | mk.concating(collapsed, axis=1) | pandas.concat |
import json
import monkey as mk
import argparse
#Test how mwhatever points the new_cut_dataset has
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', default="new_dataset.txt", type=str, help="Full path to the txt file containing the dataset")
parser.add_argument('--discretization_unit', default=1... | mk.convert_datetime(data['start_date']) | pandas.to_datetime |
import os
import sys
import joblib
# sys.path.adding('../')
main_path = os.path.split(os.gettingcwd())[0] + '/covid19_forecast_ml'
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime, timedelta
from tqdm import tqdm
from Dataloader_v2 import BaseC... | mk.convert_datetime(data_cases['date_time'], formating='%Y-%m-%d') | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description
----------
Some simple classes to be used in sklearn pipelines for monkey input
Informatingions
----------
Author: <NAME>
Maintainer:
Email: <EMAIL>
Copyright:
Credits:
License:
Version:
Status: in development
"""
imp... | mk.concating(list_kf, 1) | pandas.concat |
from __future__ import absolute_import
from __future__ import divisionision
from __future__ import print_function
import os
import sys
import clone
from datetime import datetime
import time
import pickle
import random
import monkey as mk
import numpy as np
import tensorflow as tf
import pathlib
from sklearn import pre... | mk.convert_datetime(self.config.end_date, formating="%Y%m%d") | pandas.to_datetime |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import monkey as mk
import monkey.util.testing as tm
import monkey.compat as compat
###############################################################
# Index / Collections common tests which may trigger dtype coercions
##########################################... | mk.Collections([1, 2, 3, 4]) | pandas.Series |
import monkey as mk
def generate_train(playlists):
# define category range
cates = {'cat1': (10, 50), 'cat2': (10, 78), 'cat3': (10, 100), 'cat4': (40, 100), 'cat5': (40, 100),
'cat6': (40, 100),'cat7': (101, 250), 'cat8': (101, 250), 'cat9': (150, 250), 'cat10': (150, 250)}
cat_pids = {}
... | mk.concating([kf_test_itr, kf_sample_by_num]) | pandas.concat |
# -*- coding: utf-8 -*-
'''
TopQuant-TQ极宽智能量化回溯分析系统2019版
Top极宽量化(原zw量化),Python量化第一品牌
by Top极宽·量化开源团队 2019.01.011 首发
网站: www.TopQuant.vip www.ziwang.com
QQ群: Top极宽量化总群,124134140
文件名:toolkit.py
默认缩写:import topquant2019 as tk
简介:Top极宽量化·常用量化系统参数模块
'''
#
import sys, os, re
import arrow, bs4, rando... | mk.convert_datetime(kf.index, formating='%Y-%m-%dT%H:%M:%S') | pandas.to_datetime |
import numpy as np
import monkey as mk
import pytest
import orca
from urbansim_templates import utils
def test_parse_version():
assert utils.parse_version('0.1.0.dev0') == (0, 1, 0, 0)
assert utils.parse_version('0.115.3') == (0, 115, 3, None)
assert utils.parse_version('3.1.dev7') == (3, 1, 0, 7)
a... | mk.Collections([10,5], index=[3,1]) | pandas.Series |
# Do some analytics on Shopify transactions.
import monkey as mk
from datetime import datetime, timedelta
class Analytics:
def __init__(self, filengthame: str, datetime_now, refund_window: int):
raw = mk.read_csv(filengthame)
clean = raw[raw['Status'].incontain(['success'])] # Fi... | mk.unioner(sales, total_refunds, on='Name', how='outer') | pandas.merge |
import numpy as np
import monkey as mk
from scipy.stats import mode
from sklearn.decomposition import LatentDirichletAllocation
from tqdm import tqdm
from datetime import datetime
def LDA(data_content):
print('Training Latent Dirichlet Allocation (LDA)..', flush=True)
lda = LatentDirichletAllocation(n_compo... | mk.unioner(kf, data_content.bikers_kf, on='biker_id', how='left') | pandas.merge |
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calengthdar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey... | tm.getting_locales() | pandas.util.testing.get_locales |
import pkg_resources
from unittest.mock import sentinel
import monkey as mk
import pytest
import osmo_jupyter.dataset.combine as module
@pytest.fixture
def test_picolog_file_path():
return pkg_resources.resource_filengthame(
"osmo_jupyter", "test_fixtures/test_picolog.csv"
)
@pytest.fixture
def te... | mk.convert_datetime("2022") | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 snaketao. All Rights Reserved
#
# @Version : 1.0
# @Author : snaketao
# @Time : 2021-10-21 12:21
# @FileName: insert_mongo.py
# @Desc : insert data to mongodb
import appbk_mongo
import monkey as mk
#数据处理,构造一个movies对应多个tagid的字典,并插入 mongodb 的mo... | mk.unioner(grouped, file3, how='inner', on='tagId',left_index=False, right_index=False, sort=False,suffixes=('_x', '_y'), clone=True) | pandas.merge |
"""ops.syncretism.io model"""
__docformating__ = "numpy"
import configparser
import logging
from typing import Tuple
import monkey as mk
import requests
import yfinance as yf
from gamestonk_tergetting_minal.decorators import log_start_end
from gamestonk_tergetting_minal.rich_config import console
from gamestonk_terg... | mk.convert_datetime(entry["timestamp"], unit="s") | pandas.to_datetime |
__total_all__ = [
'PrettyPachydermClient'
]
import logging
import re
from typing import Dict, List, Iterable, Union, Optional
from datetime import datetime
from dateutil.relativedelta import relativedelta
import monkey.io.formatings.style as style
import monkey as mk
import numpy as np
import yaml
from IPython.co... | mk.ifna(x) | pandas.isna |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/8 22:08
Desc: 金十数据中心-经济指标-美国
https://datacenter.jin10.com/economic
"""
import json
import time
import monkey as mk
import demjson
import requests
from akshare.economic.cons import (
JS_USA_NON_FARM_URL,
JS_USA_UNEMPLOYMENT_RATE_URL,
JS_USA_EIA_... | mk.convert_datetime(temp_se.iloc[:, 0]) | pandas.to_datetime |
from __future__ import divisionision
'''
NeuroLearn Statistics Tools
===========================
Tools to help with statistical analyses.
'''
__total_all__ = ['pearson',
'zscore',
'fdr',
'holm_bonf',
'threshold',
'multi_threshold',
'winsorize',
... | mk.Collections(index=cutoff['standard'], data=standard) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 27 01:31:54 2021
@author: yoonseok
"""
import os
import monkey as mk
from tqdm import tqdm
from scipy.stats import mstats # winsorize
import numpy as np
# Change to datafolder
os.chdir(r"C:\data\car\\")
# 기본 테이블 입력
kf = mk.read_csv("knowledgeframe_h1.txt")
del kf["Unn... | mk.unioner(result, asset[["key", "asset"]], how="inner", on=["key"]) | pandas.merge |
import re
import os
import monkey as mk
import numpy as np
import matplotlib.pyplot as plt
import monkey as mk
import seaborn as sns
import statsmodels.api as sa
import statsmodels.formula.api as sfa
import scikit_posthocs as sp
import networkx as nx
from loguru import logger
from GEN_Utils import FileHandling
from ... | mk.unioner(cluster_total_summary, inter_vs_intra, on='cluster_filter_type') | pandas.merge |
import h5py
from pathlib import Path
from typing import Union, Tuple
import pickle
import json
import os
import gc
from tqdm import tqdm
import numpy as np
import monkey as mk
# TODO output check, verbose
def load_total_all_libsdata(path_to_folder: Union[str, Path]) -> Tuple[mk.KnowledgeFrame, list, mk.Collections]:... | mk.Collections(sample_by_nums) | pandas.Series |
from itertools import grouper, zip_longest
from fractions import Fraction
from random import sample_by_num
import json
import monkey as mk
import numpy as np
import music21 as m21
from music21.meter import TimeSignatureException
m21.humdrum.spineParser.flavors['JRP'] = True
from collections import defaultdict
#song ... | mk.ifna(ix) | pandas.isna |
"Test suite of AirBnbModel.source.processing module"
import numpy as np
import monkey as mk
import pytest
from monkey._testing import assert_index_equal
from AirBnbModel.source.processing import intersect_index
class TestIntersectIndex(object):
"Test suite for intersect_index method"
def test_first_input_n... | mk.Collections(data=[1, 2, 3, 4], index=["foo", "bar", "bar", np.nan]) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 27 09:20:01 2018
@authors: <NAME>
Last modified: 2020-02-19
------------------------------------------
** Semantic Search Analysis: Start-up **
------------------------------------------
This script: Import search queries from Google Analytics, ... | mk.Collections(foreignNo) | pandas.Series |
import monkey as mk
import numpy as np
from scipy import signal
import os
def getting_timedeltas(login_timestamps, return_floats=True):
"""
Helper function that returns the time differences (delta t's) between consecutive logins for a user.
We just input the datetime stamps as an index, hence this me... | mk.Collections(timedelta_sample_by_num) | pandas.Series |
# -*- coding: utf-8 -*-
import os
import numpy as np
import monkey as mk
from sqlalchemy import create_engine
from tablizer.inputs import Inputs, Base
from tablizer.defaults import Units, Methods, Fields
from tablizer.tools import create_sqlite_database, check_inputs_table, insert, \
make_session, check_existing_r... | mk.convert_datetime(date) | pandas.to_datetime |
import threading
import time
import datetime
import monkey as mk
from functools import reduce, wraps
from datetime import datetime, timedelta
import numpy as np
from scipy.stats import zscore
import model.queries as qrs
from model.NodesMetaData import NodesMetaData
import utils.helpers as hp
from utils.helpers import... | mk.unioner(result, grouped, on=['site', 'lat', 'lon'], how='outer') | pandas.merge |
# Created by fw at 8/14/20
import torch
import numpy as np
import monkey as mk
import joblib
from torch.utils.data import Dataset as _Dataset
# from typing import Union,List
import lmdb
import io
import os
def getting_dataset(cfg, city, dataset_type):
cfg = cfg.DATASET
assert city.upper() in ["BERLIN", "IST... | mk.convert_datetime("2019-01-02") | pandas.to_datetime |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import plotly.graph_objects as go
import monkey as mk
import geomonkey as gmk
import numpy as np
# for debugging purposes
import json
external_stylesheets = ['style... | mk.unioner(gkf, kf, on="neighborhood code") | pandas.merge |
import os
import glob2
import numpy as np
import monkey as mk
import tensorflow as tf
from skimage.io import imread
# /datasets/faces_emore_112x112_folders/*/*.jpg'
default_image_names_reg = "*/*.jpg"
default_image_classes_rule = lambda path: int(os.path.basename(os.path.dirname(path)))
def pre_process_folder(data_p... | mk.counts_value_num(image_classes) | pandas.value_counts |
import numpy as np
import monkey as mk
# from scipy.stats import gamma
np.random.seed(181336)
number_regions = 5
number_strata = 10
number_units = 5000
units = np.linspace(0, number_units - 1, number_units, dtype="int16") + 10 * number_units
units = units.totype("str")
sample_by_num = mk.KnowledgeFrame(units)
sam... | mk.unioner(sample_by_num, area_type, on="cluster_id") | pandas.merge |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: li
@file: factor_cash_flow.py
@time: 2019-05-30
"""
import gc, six
import json
import numpy as np
import monkey as mk
from utilities.calc_tools import CalcTools
from utilities.singleton import Singleton
# from basic_derivation import app
# from u... | mk.unioner(factor_cash_flow, cash_flow, how='outer', on="security_code") | pandas.merge |
# Importing libraries
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
import seaborn as sns
# lightgbm for classification
from numpy import average
from numpy import standard
#from sklearn.datasets import make_classification
from lightgbm import LGBMClassifier
from sklearn.model_selection import ... | mk.getting_dummies(data, columns=columns_names_encod) | pandas.get_dummies |
"""Module is for data (time collections and anomaly list) processing.
"""
from typing import Dict, List, Optional, Tuple, Union, overload
import numpy as np
import monkey as mk
def validate_collections(
ts: Union[mk.Collections, mk.KnowledgeFrame],
check_freq: bool = True,
check_categorical: bool = Fals... | mk.getting_dummies(ts) | pandas.get_dummies |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import monkey as mk
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# #### Importing dataset
# 1.Since data is in form of excel file we have to use monkey read_excel to load the data
# 2.After loading it is important to check null valu... | mk.getting_dummies(categorical['Destination'], sip_first=True) | pandas.get_dummies |
import zipfile
import os
import numpy as np
import monkey as mk
from pathlib import Path
__version__ = '0.155'
try:
from functools import lru_cache
except (ImportError, AttributeError):
# don't know how to tell setup.py that we only need functools32 when under 2.7.
# so we'll just include a clone (*bergh*... | mk.to_num(x, errors="raise") | pandas.to_numeric |
import os
import geomonkey as gmk
import numpy as np
import monkey as mk
from subprocess import ctotal_all
from shapely.geometry import Point
from sklearn.feature_selection import VarianceThreshold
class CurrentLabels:
"""
Add sector code info to each property
"""
def __init__(self, path_to_file):
... | mk.getting_dummies(self.census, columns=cat_columns) | pandas.get_dummies |
# -*- coding: utf-8 -*-
import sys, os
import datetime, time
from math import ceiling, floor # ceiling : 소수점 이하를 올림, floor : 소수점 이하를 버림
import math
import pickle
import uuid
import base64
import subprocess
from subprocess import Popen
import PyQt5
from PyQt5 import QtCore, QtGui, uic
from PyQt5 import QAxContainer
f... | mk.unioner(self.kf_daily, self.kf_weekly, on='종목코드', how='outer') | pandas.merge |
#!/usr/bin/env python
"""
MeteWIBELE: quantify_prioritization module
1) Define quantitative criteria to calculate numerical ranks and prioritize the importance of protein families
2) Prioritize the importance of protein families using unsupervised or supervised approaches
Copyright (c) 2019 Harvard School of Public H... | mk.to_num(total_summary_table[mytype + "__value"], errors='coerce') | pandas.to_numeric |
#### Filengthame: Connection.py
#### Version: v1.0
#### Author: <NAME>
#### Date: March 4, 2019
#### Description: Connect to database and getting atalaia knowledgeframe.
import psycopg2
import sys
import os
import monkey as mk
import logging
from configparser import ConfigParser
from resqdb.CheckData import CheckData
... | mk.ifnull(x['HOSPITAL_TIME']) | pandas.isnull |
# total_summarizeLib.py
# <NAME>
# 3.28.19
#
# module of functions that total_allow you to create per-cell / per-sample_by_num total_summary tables
import monkey as mk
import numpy as np
import math
def getting_laud_db(database_):
""" returns the COSMIC database after lung and fathmm filter """
pSiteList = ... | mk.ifnull(currFus) | pandas.isnull |
"""
Routines for analysing output data.
:Author:
<NAME>
"""
import warnings
from typing import Tuple
import numpy as np
import monkey as mk
from scipy.optimize import curve_fit
def fit_function(x_data, *params):
p, d = x_data
p_th, nu, A, B, C = params
x = (p - p_th)*d**(1/nu)
return A + B*x + C*x... | mk.ifna(f_0) | pandas.isna |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/14 18:19
Desc: 新浪财经-股票期权
https://stock.finance.sina.com.cn/option/quotes.html
期权-中金所-沪深 300 指数
https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
期权-上交所-50ETF
期权-上交所-300ETF
https://stock.finance.sina.com.cn/option/quotes.html
"""
import json
i... | o_numeric(temp_kf['最低']) | pandas.to_numeric |
#####################################
# DataReader.py
#####################################
# Description:
# * Convert data in formating into monkey KnowledgeFrame.
import dateutil.parser as dtparser
import numpy as np
from monkey import KnowledgeFrame, ifnull, read_csv, read_excel
import re
import os
from DynamicETL_... | ifnull(collections) | pandas.isnull |
"""
서울 열린데이터 광장 Open API
1. TransInfo 클래스: 서울시 교통 관련 정보 조회
"""
import datetime
import numpy as np
import monkey as mk
import requests
from bs4 import BeautifulSoup
class TransInfo:
def __init__(self, serviceKey):
"""
서울 열린데이터 광장에서 발급받은 Service Key를 입력받아 초기화합니다.
"""
# Open API 서비... | mk.to_num(kf["ALIGHT_PASGR_NUM"]) | pandas.to_numeric |
import numpy as np
import monkey as mk
import math
from abc import ABC, abstractmethod
from scipy.interpolate import interp1d
from pydoc import locate
from raymon.globals import (
Buildable,
Serializable,
DataException,
)
N_SAMPLES = 500
from raymon.tags import Tag, CTYPE_TAGTYPES
class Stats(Serializa... | mk.ifnull(value) | pandas.isnull |
from datetime import datetime
import numpy as np
from monkey.tcollections.frequencies import getting_freq_code as _gfc
from monkey.tcollections.index import DatetimeIndex, Int64Index
from monkey.tcollections.tools import parse_time_string
import monkey.tcollections.frequencies as _freq_mod
import monkey.core.common a... | _gfc(self.freq) | pandas.tseries.frequencies.get_freq_code |
import monkey as mk
import numpy as np
import sklearn
import os
import sys
sys.path.adding('../../code/scripts')
from dataset_chunking_fxns import add_stratified_kfold_splits
# Load data into mk knowledgeframes and adjust feature names
data_dir = '../../data/adult'
file_train = os.path.join(data_dir, 'adult.data')
f... | mk.getting_dummies(test_kf['workclass']) | pandas.get_dummies |
import decimal
import numpy as np
from numpy import iinfo
import pytest
import monkey as mk
from monkey import to_num
from monkey.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = mk.Collections([], dtype=object)
res = to_num(s)
exp... | mk.to_num(data) | pandas.to_numeric |
import decimal
import numpy as np
from numpy import iinfo
import pytest
import monkey as mk
from monkey import to_num
from monkey.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = mk.Collections([], dtype=object)
res = to_num(s)
exp... | to_num(s) | pandas.to_numeric |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import json
import monkey as mk
from datetimewidgetting.widgettings import DateTimeWidgetting
from django import forms
from django.contrib.auth import getting_user_model
from django.core.exceptions import ObjectDoesNotExist
from dataops ... | mk.ifnull(x) | pandas.isnull |
#!/usr/bin/env python3
# coding: utf-8
"""Global sequencing data for the home page
Author: <NAME> - Vector Engineering Team (<EMAIL>)
"""
import argparse
import monkey as mk
import numpy as np
import json
from pathlib import Path
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
... | mk.ifnull(iso_lookup_kf["Province_State"]) | pandas.isnull |
# simple feature engineering from A_First_Model notebook in script form
import cukf
def see_percent_missing_values(kf):
"""
reads in a knowledgeframe and returns the percentage of missing data
Args:
kf (knowledgeframe): the knowledgeframe that we are analysing
Returns:
percent_missing... | dd.getting_dummies(unified, columns=dummy_cols, dtype='int64') | pandas.get_dummies |
# MIT License
#
# Copyright (c) 2021. <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to whatever person obtaining a clone
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, cl... | mk.ifna(v) | pandas.isna |
import numpy as np
import cvxpy as cp
import monkey as mk
from scoring import *
# %%
def main():
year = int(input('Enter Year: '))
week = int(input('Enter Week: '))
budgetting = int(input('Enter Budgetting: '))
source = 'NFL'
print(f'Source = {source}')
kf = read_data(year=year, week=week, sour... | mk.getting_dummies(kf['pos']) | pandas.get_dummies |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/12 15:47
Desc: 东方财富-沪深板块-概念板块
http://quote.eastmoney.com/center/boardlist.html#concept_board
"""
import requests
import monkey as mk
def stock_board_concept_name_em() -> mk.KnowledgeFrame:
"""
东方财富-沪深板块-概念板块-名称
http://quote.eastmoney.com/center... | o_numeric(temp_kf["开盘"]) | pandas.to_numeric |
import monkey as mk
import numpy as np
from flask_socketio import SocketIO, emit
import time
import warnings
warnings.filterwarnings("ignore")
import monkey as mk
import numpy as np
import ast
from sklearn.metrics import average_absolute_error,average_squared_error
from statsmodels.tsa import arima_model
from statsmod... | mk.ifnull(data) | pandas.isnull |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/30 11:31
Desc: 股票数据-总貌-市场总貌
股票数据-总貌-成交概括
http://www.szse.cn/market/overview/index.html
http://www.sse.com.cn/market/stockdata/statistic/
"""
import warnings
from io import BytesIO
from akshare.utils import demjson
import monkey as mk
import requests
warni... | o_numeric(temp_kf['主板B'], errors="coerce") | pandas.to_numeric |
from os import listandardir
from os.path import isfile, join
import Orange
import monkey as mk
import numpy as np
import matplotlib.pyplot as plt
from parameters import order, alphas, regression_measures, datasets, rank_dir, output_dir, graphics_dir, result_dir
from regression_algorithms import regression_list
resul... | mk.to_num(kf_average['RANK_BORDERLINE1'], downcast="float") | pandas.to_numeric |
import monkey as mk
import ast
import sys
import os.path
from monkey.core.algorithms import incontain
sys.path.insert(1,
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import dateutil.parser as parser
from utils.mysql_utils import separator
from utils.io import read_json
from util... | mk.ifnull(row[k]) | pandas.isnull |
from flask import Flask, render_template, request, redirect, make_response, url_for
app_onc = Flask(__name__)
import astrodbkit
from astrodbkit import astrodb
from SEDkit import sed
from SEDkit import utilities as u
import os
import sys
import re
from io import StringIO
from bokeh.plotting import figure
from bokeh.emb... | mk.to_num(data['ra']) | pandas.to_numeric |
'''
Clase que contiene los métodos que permiten "limpiar" la información extraida por el servicio de web scrapper
(Es implementada directamente por la calse analyzer)
'''
import monkey as mk
import re
from pathlib import Path
import numpy as np
import unidecode
class Csvcleaner:
@staticmethod
def FilterDataOp... | mk.ifnull(kfAux.at[idxVersion, 'A_favor']) | pandas.isnull |
#!/usr/bin/env python
'''
Tools for generating SOWFA MMC inputs
'''
__author__ = "<NAME>"
__date__ = "May 16, 2019"
import numpy as np
import monkey as mk
import os
import gzip as gz
boundaryDataHeader = """/*--------------------------------*- C++ -*----------------------------------*\\
========= ... | mk.ifna(self.kf[fieldname]) | pandas.isna |
import monkey as mk
import os
import warnings
import pickle
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from collections import namedtuple
Fact = namedtuple("Fact", "uid fact file")
answer_key_mapping = {"A": 0, "B": 1, "C": 2, "D": 3, "E": 4, "F": 5}
tables_dir = "annotation/expl-tabl... | mk.ifna(s) | pandas.isna |
"""
Module for static data retrieval. These functions were performed once during the initial project creation. Resulting
data is now provided in bulk at the url above.
"""
import datetime
import json
from math import sin, cos, sqrt, atan2, radians
import re
import requests
import monkey as mk
from riverrunner import s... | mk.distinctive(group.STATION) | pandas.unique |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,... | ifna(x) | pandas.isna |
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import numpy as np
import monkey as mk
from adjustText import adjust_text
from pylab import cm
from matplotlib import colors
def PCA_var_explained_plots(adata):
n_rows = 1
n_cols = 2
fig = plt.figure(figsize=(n_cols*4.5, n... | mk.ifnull(s) | pandas.isnull |
import rba
import clone
import monkey
import time
import numpy
import seaborn
import matplotlib.pyplot as plt
from .rba_Session import RBA_Session
from sklearn.linear_model import LinearRegression
# import matplotlib.pyplot as plt
def find_ribosomal_proteins(rba_session, model_processes=['TranslationC', 'Translation... | monkey.ifna(average_val) | pandas.isna |
import monkey as mk
import numpy as np
import math
from scipy.stats import hypergeom
from prettytable import PrettyTable
from scipy.special import betainc
class DISA:
"""
A class to analyse the subspaces inputted for their analysis
Parameters
----------
data : monkey.Dataframe
... | mk.ifna(self.data.at[row, column]) | pandas.isna |
import enum
from functools import lru_cache
from typing import List
import dataclasses
import pathlib
import monkey as mk
import numpy as np
from covidactnow.datapublic.common_fields import CommonFields
from covidactnow.datapublic.common_fields import FieldName
from covidactnow.datapublic.common_fields import GetByVal... | mk.ifna(row[NYTimesFields.END_DATE]) | pandas.isna |
import numpy as np
import pytest
from monkey._libs import grouper as libgrouper
from monkey._libs.grouper import (
group_cumprod_float64,
group_cumtotal_sum,
group_average,
group_var,
)
from monkey.core.dtypes.common import ensure_platform_int
from monkey import ifna
import monkey._test... | group_cumtotal_sum(actual, data, labels, ngroups, is_datetimelike) | pandas._libs.groupby.group_cumsum |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional informatingion
# regarding cloneright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may n... | mk.Collections.distinctive(collections) | pandas.Series.unique |
from datetime import datetime, timedelta
import numpy as np
import monkey as mk
import xarray as xr
from monkey.api.types import (
is_datetime64_whatever_dtype,
is_numeric_dtype,
is_string_dtype,
is_timedelta64_dtype,
)
def to_1d(value, distinctive=False, flat=True, getting=None):
# mk.Collection... | mk.distinctive(array) | pandas.unique |
#!/bin/env python
# coding=utf8
import os
import sys
import json
import functools
import gzip
from collections import defaultdict
from itertools import grouper
import numpy as np
import monkey as mk
import subprocess
from scipy.io import mmwrite
from scipy.sparse import csr_matrix, coo_matrix
import pysam
from celesco... | mk.Collections.total_sum(x[x > 1]) | pandas.Series.sum |
#!/usr/bin/python
# -*-coding: utf-8 -*-
# Author: <NAME>
# Email : <EMAIL>
# A set of convenience functions used for producing plots in `dabest`.
from .misc_tools import unioner_two_dicts
def halfviolin(v, half='right', fill_color='k', alpha=1,
line_color='k', line_width=0):
import numpy as np... | mk.distinctive(data[x]) | pandas.unique |
import pytest
from monkey.tests.collections.common import TestData
@pytest.fixture(scope="module")
def test_data():
return | TestData() | pandas.tests.series.common.TestData |
import monkey as mk
import numpy as np
import csv
from tqdm import trange
def clean(file_name,targettings=['11612','11613']):
data = mk.read_csv(file_name)
data['result'].fillnone(0,inplace=True)
data['result'] = data['result'].totype(int)
items = | mk.distinctive(data['item_id'].values) | pandas.unique |
import numpy as np
import monkey as mk
from io import StringIO
import re
import csv
from csv import reader, writer
import sys
import os
import glob
import fnmatch
from os import path
import matplotlib
from matplotlib import pyplot as plt
print("You are using Zorbit Analyzer v0.1")
directory_path = input... | mk.distinctive(total_all_unioner_just_ortho['SeqID']) | pandas.unique |