| code (string, lengths 114–1.05M) | path (string, lengths 3–312) | quality_prob (float64, 0.5–0.99) | learning_prob (float64, 0.2–1) | filename (string, lengths 3–168) | kind (string, 1 class) |
|---|---|---|---|---|---|
import os
from subprocess import run
from tempfile import TemporaryDirectory
import pandas as pd
from reco_utils.common.constants import (
DEFAULT_USER_COL,
DEFAULT_ITEM_COL,
DEFAULT_RATING_COL,
DEFAULT_TIMESTAMP_COL,
DEFAULT_PREDICTION_COL,
)
class VW:
"""Vowpal Wabbit Class"""
def __in... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/vowpal_wabbit/vw.py | 0.617282 | 0.242127 | vw.py | pypi |
import numpy as np
import pandas as pd
import fastai
import fastprogress
from fastprogress.fastprogress import force_console_behavior
from reco_utils.common import constants as cc
def cartesian_product(*arrays):
"""Compute the Cartesian product in fastai algo. This is a helper function.
Args:
arra... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/fastai/fastai_utils.py | 0.810854 | 0.399929 | fastai_utils.py | pypi |
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from time import time
import logging
logger = logging.getLogger(__name__)
MODEL_CHECKPOINT = "model.ckpt"
class NCF:
"""Neural Collaborative Filtering (NCF) implementation
Note:
He, Xiangnan, Lizi Liao, Hanwang Zhang, ... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/ncf/ncf_singlenode.py | 0.828454 | 0.31012 | ncf_singlenode.py | pypi |
import tensorflow as tf
import six
import os
from sklearn.metrics import (
roc_auc_score,
log_loss,
mean_squared_error,
accuracy_score,
f1_score,
)
import numpy as np
import yaml
import zipfile
from reco_utils.dataset.download_utils import maybe_download
from reco_utils.recommender.deeprec.deeprec... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/newsrec/newsrec_utils.py | 0.81615 | 0.229784 | newsrec_utils.py | pypi |
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
from reco_utils.recommender.newsrec.models.base_model import BaseModel
from reco_utils.recommender.newsrec.models.layers import PersonalizedAttentivePooling
__all__ = ["NPAModel"]
class NPAModel(BaseMod... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/newsrec/models/npa.py | 0.959278 | 0.472927 | npa.py | pypi |
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
from tensorflow.keras import backend as K
class AttLayer2(layers.Layer):
"""Soft alignment attention implement.
Attributes:
dim (int): attention hidden dim
"""
def __init__(self, dim=200, seed=0, **... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/newsrec/models/layers.py | 0.945588 | 0.638159 | layers.py | pypi |
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
from reco_utils.recommender.newsrec.models.base_model import BaseModel
from reco_utils.recommender.newsrec.models.layers import AttLayer2
__all__ = ["NAMLModel"]
class NAMLModel(BaseModel):
"""NAML ... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/newsrec/models/naml.py | 0.958314 | 0.384074 | naml.py | pypi |
from os.path import join
import abc
import time
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
from reco_utils.recommender.deeprec.deeprec_utils import cal_metric
__all__ = ["BaseModel"]
class BaseModel:
"""Basic class of models
Attributes:
hparams (o... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/newsrec/models/base_model.py | 0.927211 | 0.429609 | base_model.py | pypi |
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
from reco_utils.recommender.newsrec.models.base_model import BaseModel
from reco_utils.recommender.newsrec.models.layers import AttLayer2, SelfAttention
__all__ = ["NRMSModel"]
class NRMSModel(BaseModel... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/newsrec/models/nrms.py | 0.958654 | 0.486575 | nrms.py | pypi |
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
from reco_utils.recommender.newsrec.models.base_model import BaseModel
from reco_utils.recommender.newsrec.models.layers import (
AttLayer2,
ComputeMasking,
OverwriteMasking,
)
__all__ = ["LST... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/newsrec/models/lstur.py | 0.960694 | 0.425725 | lstur.py | pypi |
import tensorflow as tf
import numpy as np
import pickle
from reco_utils.recommender.deeprec.io.iterator import BaseIterator
from reco_utils.recommender.newsrec.newsrec_utils import word_tokenize, newsample
__all__ = ["MINDIterator"]
class MINDIterator(BaseIterator):
"""Train data loader for NAML model.
Th... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/newsrec/io/mind_iterator.py | 0.75392 | 0.392744 | mind_iterator.py | pypi |
import tensorflow as tf
import numpy as np
import pickle
from reco_utils.recommender.deeprec.io.iterator import BaseIterator
from reco_utils.recommender.newsrec.newsrec_utils import word_tokenize, newsample
__all__ = ["MINDAllIterator"]
class MINDAllIterator(BaseIterator):
"""Train data loader for NAML model.
... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/newsrec/io/mind_all_iterator.py | 0.742608 | 0.300245 | mind_all_iterator.py | pypi |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import lightfm
from lightfm.evaluation import precision_at_k, recall_at_k
def model_perf_plots(df):
"""Function to plot model performance metrics.
Args:
df (pd.DataFrame): Dataframe in tidy format, with ['e... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/lightfm/lightfm_utils.py | 0.932707 | 0.556249 | lightfm_utils.py | pypi |
import tensorflow as tf
from reco_utils.common.constants import DEFAULT_USER_COL, DEFAULT_ITEM_COL
from reco_utils.common.tf_utils import MODEL_DIR
def build_feature_columns(
users,
items,
user_col=DEFAULT_USER_COL,
item_col=DEFAULT_ITEM_COL,
item_feat_col=None,
crossed_feat_dim=1000,
us... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/wide_deep/wide_deep_utils.py | 0.921979 | 0.505188 | wide_deep_utils.py | pypi |
import tensorflow as tf
from reco_utils.recommender.deeprec.models.dkn import DKN
import numpy as np
from reco_utils.recommender.deeprec.deeprec_utils import cal_metric
r"""
This new model adapts DKN's structure for item-to-item recommendations.
The tutorial can be found at: https://github.com/microsoft/recommenders/... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/deeprec/models/dkn_item2item.py | 0.930844 | 0.464841 | dkn_item2item.py | pypi |
from os.path import join
import abc
import time
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
from reco_utils.recommender.deeprec.deeprec_utils import cal_metric
__all__ = ["BaseModel"]
class BaseModel:
def __init__(self, hparams, iterator_creator, graph=None, seed=None):
... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/deeprec/models/base_model.py | 0.898589 | 0.229298 | base_model.py | pypi |
import numpy as np
import tensorflow as tf
from reco_utils.recommender.deeprec.models.base_model import BaseModel
__all__ = ["DKN"]
class DKN(BaseModel):
"""DKN model (Deep Knowledge-Aware Network)
H. Wang, F. Zhang, X. Xie and M. Guo, "DKN: Deep Knowledge-Aware Network for News
Recommendation", in P... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/deeprec/models/dkn.py | 0.933975 | 0.343975 | dkn.py | pypi |
import numpy as np
import tensorflow as tf
from reco_utils.recommender.deeprec.models.base_model import BaseModel
__all__ = ["XDeepFMModel"]
class XDeepFMModel(BaseModel):
"""xDeepFM model
J. Lian, X. Zhou, F. Zhang, Z. Chen, X. Xie, G. Sun, "xDeepFM: Combining Explicit
and Implicit Feature Interact... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/deeprec/models/xDeepFM.py | 0.892451 | 0.355299 | xDeepFM.py | pypi |
import numpy as np
import tensorflow as tf
import abc
class BaseIterator(object):
@abc.abstractmethod
def parser_one_line(self, line):
pass
@abc.abstractmethod
def load_data_from_file(self, infile):
pass
@abc.abstractmethod
def _convert_data(self, labels, features):
... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/deeprec/io/iterator.py | 0.843573 | 0.488039 | iterator.py | pypi |
import tensorflow as tf
import numpy as np
from reco_utils.recommender.deeprec.io.iterator import BaseIterator
__all__ = ["DKNTextIterator"]
class DKNTextIterator(BaseIterator):
"""Data loader for the DKN model.
DKN requires a special type of data format, where each instance contains a label, the candidat... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/deeprec/io/dkn_iterator.py | 0.810741 | 0.377627 | dkn_iterator.py | pypi |
import tensorflow as tf
import numpy as np
from reco_utils.recommender.deeprec.io.dkn_iterator import DKNTextIterator
r"""
This new iterator is for DKN's item-to-item recommendations version.
The tutorial can be found at: https://github.com/microsoft/recommenders/blob/kdd2020_tutorial/scenarios/academic/KDD2020-tuto... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/deeprec/io/dkn_item2item_iterator.py | 0.792986 | 0.367866 | dkn_item2item_iterator.py | pypi |
import tensorflow as tf
import numpy as np
import json
import pickle as pkl
import random
import os
import time
from reco_utils.recommender.deeprec.io.iterator import BaseIterator
from reco_utils.recommender.deeprec.deeprec_utils import load_dict
__all__ = ["SequentialIterator"]
class SequentialIterator(BaseItera... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/deeprec/io/sequential_iterator.py | 0.782829 | 0.201754 | sequential_iterator.py | pypi |
import tensorflow as tf
import numpy as np
import json
import pickle as pkl
import random
import os
import time
from reco_utils.recommender.deeprec.io.sequential_iterator import SequentialIterator
from reco_utils.recommender.deeprec.deeprec_utils import load_dict
__all__ = ["NextItNetIterator"]
class NextItNetIter... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/deeprec/io/nextitnet_iterator.py | 0.756447 | 0.240017 | nextitnet_iterator.py | pypi |
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from reco_utils.common.constants import (
DEFAULT_ITEM_COL,
DEFAULT_USER_COL,
DEFAULT_RATING_COL,
DEFAULT_TIMESTAMP_COL,
)
class RLRMCdataset(object):
"""RLRMC dataset implementation. Creates sparse data structures for RL... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/rlrmc/RLRMCdataset.py | 0.857813 | 0.436202 | RLRMCdataset.py | pypi |
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from transformers import BertTokenizer
import re, string, unicodedata
import pandas as pd
import numpy as np
import nltk
from nltk.stem.porter import PorterStemmer
class TfidfRecommender:
"""Term Freq... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/tfidf/tfidf_utils.py | 0.875734 | 0.323574 | tfidf_utils.py | pypi |
import pandas as pd
import numpy as np
from reco_utils.common.constants import (
DEFAULT_USER_COL,
DEFAULT_ITEM_COL,
DEFAULT_PREDICTION_COL,
)
def predict(
model,
data,
usercol=DEFAULT_USER_COL,
itemcol=DEFAULT_ITEM_COL,
predcol=DEFAULT_PREDICTION_COL,
):
"""Computes predictions ... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/recommender/cornac/cornac_utils.py | 0.802362 | 0.44089 | cornac_utils.py | pypi |
import sys
import os
import glob
from numba import cuda
from numba.cuda.cudadrv.error import CudaSupportError
DEFAULT_CUDA_PATH_LINUX = "/usr/local/cuda/version.txt"
def get_number_gpus():
"""Get the number of GPUs in the system.
Returns:
int: Number of GPUs.
"""
try:
return le... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/common/gpu_utils.py | 0.516108 | 0.188959 | gpu_utils.py | pypi |
import matplotlib.pyplot as plt
def line_graph(
values,
labels,
x_guides=None,
x_name=None,
y_name=None,
x_min_max=None,
y_min_max=None,
legend_loc=None,
subplot=None,
plot_size=(5, 5),
):
"""Plot line graph(s).
Args:
values (list(list(float or tuple)) or list(... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/common/plot.py | 0.75401 | 0.545346 | plot.py | pypi |
import logging
import numpy as np
from scipy import sparse
logger = logging.getLogger()
def exponential_decay(value, max_val, half_life):
"""Compute decay factor for a given value based on an exponential decay.
Values greater than `max_val` will be set to 1.
Args:
value (numeric): va... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/common/python_utils.py | 0.949236 | 0.72208 | python_utils.py | pypi |
import itertools
import numpy as np
import pandas as pd
import tensorflow as tf
MODEL_DIR = "model_checkpoints"
OPTIMIZERS = dict(
adadelta=tf.train.AdadeltaOptimizer,
adagrad=tf.train.AdagradOptimizer,
adam=tf.train.AdamOptimizer,
ftrl=tf.train.FtrlOptimizer,
momentum=tf.train.MomentumOptimizer... | /recommender_utils-2021.2.post1623854186-py3-none-any.whl/reco_utils/common/tf_utils.py | 0.847842 | 0.363252 | tf_utils.py | pypi |
# Recommenders
[](https://microsoft-recommenders.readthedocs.io/en/latest/?badge=latest)
## What's New (July, 2022)
We have a new release [Recommenders 1.1.1](https://github.com/microsoft/recommenders/releases/tag/1... | /recommenders-1.1.1.tar.gz/recommenders-1.1.1/README.md | 0.926728 | 0.979433 | README.md | pypi |
Contributors to Recommenders
============================
Recommenders is developed and maintained by a community of people interested in exploring recommendation algorithms and how best to deploy them in industry settings. The goal is to accelerate the workflow of any individual or organization working on recommender... | /recommenders-1.1.1.tar.gz/recommenders-1.1.1/AUTHORS.md | 0.651466 | 0.911771 | AUTHORS.md | pypi |
# Recompyle
This package provides tools that can be used to rewrite and recompile source code, using the transformed version of the code at runtime. The initial proof-of-concept targets functions only, and only calls within them, but this project is structured to eventually expand to other forms of code rewriting.
Re... | /recompyle-0.1.1.tar.gz/recompyle-0.1.1/README.md | 0.774754 | 0.941169 | README.md | pypi |
from __future__ import annotations
import sys
from dataclasses import dataclass
from enum import Enum
from functools import cached_property
from os import PathLike
from textwrap import dedent
from typing import Any, Literal, Optional, Union
import pandas as pd
from recon.utils import ensure_df
FilePath = Union[str,... | /recon_cli-0.0.5-py3-none-any.whl/recon/reconcile.py | 0.837885 | 0.34505 | reconcile.py | pypi |
from pathlib import Path
import typer
from rich.progress import Progress, SpinnerColumn, TextColumn
from typing_extensions import Annotated
from recon.reconcile import Reconcile
def main(
left: Annotated[
Path,
typer.Argument(
default=...,
help="Path to the left dataset (... | /recon_cli-0.0.5-py3-none-any.whl/recon/main.py | 0.785309 | 0.378143 | main.py | pypi |
__author__ = 'Scott Burns <scott.s.burns@vanderbilt.edu>'
__copyright__ = 'Copyright 2012 Vanderbilt University. All Rights Reserved'
from os.path import basename
class Measure(object):
"""Basic class for storing statistical measures"""
def __init__(self, structure, measure, value, units, descrip=None,
... | /recon-stats_ldax-0.0.4.tar.gz/recon-stats_ldax-0.0.4/recon_stats/io.py | 0.754825 | 0.272179 | io.py | pypi |
from collections import defaultdict
from typing import List, Optional, Tuple
import chess
from reconchess_tools.strategy import SENSE_SQUARES
from reconchess_tools.utilities import (
possible_requested_moves,
simulate_move,
simulate_sense,
)
def board_fingerprint(board: chess.Board):
"""Compute a fi... | /reconchess-tools-0.2.1.tar.gz/reconchess-tools-0.2.1/reconchess_tools/mht.py | 0.829906 | 0.651646 | mht.py | pypi |
from typing import Dict, List, Optional, Tuple
import chess
from reconchess.utilities import move_actions, revise_move
from reconchess_tools.utilities import simulate_move
# Sensing on the edge of the board is never a good idea
SENSE_SQUARES = [
square
for square in chess.SQUARES
if 0 < chess.square_file... | /reconchess-tools-0.2.1.tar.gz/reconchess-tools-0.2.1/reconchess_tools/strategy.py | 0.857141 | 0.475118 | strategy.py | pypi |
import random
from math import sqrt
from typing import List
import chess
import pkg_resources
import pygame
LIGHT_COLOR = (240, 217, 181)
DARK_COLOR = (181, 136, 99)
PIECE_IMAGES = {}
for color in chess.COLORS:
for piece_type in chess.PIECE_TYPES:
piece = chess.Piece(piece_type, color)
img_path ... | /reconchess-tools-0.2.1.tar.gz/reconchess-tools-0.2.1/reconchess_tools/ui/__init__.py | 0.692018 | 0.271101 | __init__.py | pypi |
"""Action base representing the smallest procedures for a Plan to carry out."""
from .Context import ContextMixin
class ActionMixin:
"""Abstract base representing the smallest procedures for a Plan to carry out.
Actions are assembled by Plans and executed by Schedulers.
    Plans should architect the overal... | /reconcile-0.0.3.tar.gz/reconcile-0.0.3/pyReconcile/mixins/Action.py | 0.938449 | 0.464294 | Action.py | pypi |
from deepdiff import DeepDiff
class DeclarativeStateMixin(object):
"""The core representation of state for reconciliation."""
def __init__(
self,
declarative_state_data,
declarative_state_ignore_order=True,
declarative_state_report_repetition=False,
**kwargs
):
... | /reconcile-0.0.3.tar.gz/reconcile-0.0.3/pyReconcile/mixins/DeclarativeState.py | 0.889046 | 0.699434 | DeclarativeState.py | pypi |
"""Abstract base that carries out orchestrating one iteration of a reconciliation procedure."""
from .Context import ContextMixin
from .Action import ActionMixin
class PlanMixin:
"""Abstract base that carries out orchestrating one iteration of a reconciliation procedure.
Plans create lists of Actions to be e... | /reconcile-0.0.3.tar.gz/reconcile-0.0.3/pyReconcile/mixins/Plan.py | 0.91854 | 0.472197 | Plan.py | pypi |
import sys
import ftplib
import socket
from typing import Optional, Callable, TypeVar, Union, List, Iterable, Tuple, Dict, Any # pylint: disable=unused-import
class Access:
"""
Represents access information to the FTP server.
"""
def __init__(self):
self.hostname = ''
self.port = 0
... | /reconnecting_ftp-1.1.1.tar.gz/reconnecting_ftp-1.1.1/reconnecting_ftp/__init__.py | 0.619586 | 0.171581 | __init__.py | pypi |
from pathlib import Path
from typing import Any, Dict, List, Union, cast
import srsly
from recon.types import Example
from recon.util import ensure_path
class ExampleStore:
def __init__(self, examples: List[Example] = []):
self._map: Dict[int, Example] = {}
for e in examples:
self.ad... | /reconner-0.14.0.tar.gz/reconner-0.14.0/recon/store.py | 0.930134 | 0.4831 | store.py | pypi |
from typing import TYPE_CHECKING, Callable, Tuple
import xxhash
if TYPE_CHECKING:
from recon.dataset import Dataset
from recon.types import Example, PredictionError, Span, Token
def token_hash(token: "Token") -> int:
"""Hash of Token type
Args:
token (Token): Token to hash
Returns:
... | /reconner-0.14.0.tar.gz/reconner-0.14.0/recon/hashing.py | 0.937819 | 0.563798 | hashing.py | pypi |
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast
import spacy
import srsly
from spacy.tokens import Doc
from wasabi import Printer
from recon.hashing import dataset_hash
from recon.loaders import from_spacy, read_jsonl, to_spacy
fro... | /reconner-0.14.0.tar.gz/reconner-0.14.0/recon/dataset.py | 0.918183 | 0.599895 | dataset.py | pypi |
import math
from collections import defaultdict
from typing import Any, DefaultDict, Dict, List, Optional, Sequence, Union, cast
import numpy as np
from scipy.spatial.distance import jensenshannon
from scipy.stats import entropy as scipy_entropy
from recon.constants import NOT_LABELED
from recon.types import EntityCo... | /reconner-0.14.0.tar.gz/reconner-0.14.0/recon/stats.py | 0.946929 | 0.617369 | stats.py | pypi |
from pathlib import Path
from typing import Any, List, Optional, Tuple, Union
import srsly
from recon.dataset import Dataset
from recon.operations import Operation
from recon.store import ExampleStore
from recon.types import CorpusApplyResult, CorpusMeta, Example, StatsProtocol
from recon.util import ensure_path
cl... | /reconner-0.14.0.tar.gz/reconner-0.14.0/recon/corpus.py | 0.92632 | 0.599427 | corpus.py | pypi |
import inspect
import warnings
from collections import Counter, defaultdict
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Tuple, Union
import catalogue
from tqdm import tqdm
from wasabi import Printer
from recon.preprocess import PreProcessor
from recon.preprocess import registry as pre_regis... | /reconner-0.14.0.tar.gz/reconner-0.14.0/recon/operations.py | 0.858881 | 0.408365 | operations.py | pypi |
import tempfile
from pathlib import Path
from typing import Iterable, Iterator, List, Set
from spacy.language import Language
from spacy.training.corpus import Corpus as SpacyCorpus
from wasabi import Printer
from recon.loaders import to_spacy
from recon.types import Example, Scores, Span, Token
class EntityRecogni... | /reconner-0.14.0.tar.gz/reconner-0.14.0/recon/recognizer.py | 0.900475 | 0.590218 | recognizer.py | pypi |
from typing import Any, Callable, Dict, List, Optional
import numpy as np
from recon.operations import operation
from recon.types import Example, Span
def mask_1d(length: int, prob: float = 0.5) -> np.ndarray:
if prob < 0 or prob > 1:
raise ValueError(
f"Prob of {prob} is not allowed. Allowe... | /reconner-0.14.0.tar.gz/reconner-0.14.0/recon/augmentation.py | 0.926462 | 0.549641 | augmentation.py | pypi |
from typing import Any, Dict, List, cast
from spacy.tokens import Span as SpacySpan
from wasabi import msg
from recon.operations import operation
from recon.types import Correction, Example, Span, Token
@operation("recon.rename_labels.v1")
def rename_labels(example: Example, label_map: Dict[str, str]) -> Example:
... | /reconner-0.14.0.tar.gz/reconner-0.14.0/recon/corrections.py | 0.925006 | 0.544317 | corrections.py | pypi |
from typing import List
from recon.operations import operation
from recon.types import Example, Span
@operation("recon.upcase_labels.v1")
def upcase_labels(example: Example) -> Example:
"""Convert all span labels to uppercase to normalize
Args:
example (Example): Input Example
Returns:
... | /reconner-0.14.0.tar.gz/reconner-0.14.0/recon/validation.py | 0.928141 | 0.621225 | validation.py | pypi |
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, cast
import spacy
import srsly
from spacy.language import Language
from spacy.tokens import Doc, DocBin
from spacy.util import get_words_and_spaces
from recon.types import Example, Span, Token
def read_jsonl(path: Path) -> List[Example... | /reconner-0.14.0.tar.gz/reconner-0.14.0/recon/loaders.py | 0.904098 | 0.614336 | loaders.py | pypi |
import random
from collections import defaultdict
from typing import Any, Dict, List, Tuple
from recon.types import Example
def hash_example_meta(
example: Example, fields: List[str] = [], ignore_field_absence: bool = False
) -> Tuple:
"""Create a hash out of the metadata of an example
Args:
exa... | /reconner-0.14.0.tar.gz/reconner-0.14.0/recon/sample.py | 0.865622 | 0.547585 | sample.py | pypi |
from collections import defaultdict
from typing import Any, Callable, Dict, Iterable, List, Optional
import catalogue
import spacy
from spacy.language import Language
from recon.linker import BaseEntityLinker, EntityLinker
from recon.types import Entity, Example
class registry:
preprocessors = catalogue.create(... | /reconner-0.14.0.tar.gz/reconner-0.14.0/recon/preprocess.py | 0.888408 | 0.273568 | preprocess.py | pypi |
from collections import defaultdict
from typing import DefaultDict, Dict, List, Set, Tuple
import numpy as np
from spacy.scorer import PRFScore
from wasabi import Printer
from recon.constants import NOT_LABELED
from recon.recognizer import EntityRecognizer
from recon.types import (
Example,
ExampleDiff,
L... | /reconner-0.14.0.tar.gz/reconner-0.14.0/recon/insights.py | 0.939927 | 0.547706 | insights.py | pypi |
from typing import Any, Dict, Iterable, Iterator, List, Optional, Union
import prodigy
from prodigy.components.db import connect
from prodigy.components.loaders import get_stream
from prodigy.components.preprocess import add_tokens
from prodigy.types import TaskType
from prodigy.util import (
get_labels,
log,... | /reconner-0.14.0.tar.gz/reconner-0.14.0/recon/prodigy/recipes.py | 0.789031 | 0.379752 | recipes.py | pypi |
from typing import List
from recon.types import Example
def to_prodigy(
examples: List[Example],
prodigy_dataset: str,
overwrite_dataset: bool = False,
add_hash: bool = True,
) -> None:
"""Save a list of examples to Prodigy
Args:
examples (List[Example]): Input examples
prodi... | /reconner-0.14.0.tar.gz/reconner-0.14.0/recon/prodigy/utils.py | 0.800263 | 0.565689 | utils.py | pypi |
__author__ = "Chris Nasr"
__copyright__ = "OuroborosCoding"
__license__ = "Apache"
__version__ = "1.0.0"
__maintainer__ = "Chris Nasr"
__email__ = "ouroboroscode@gmail.com"
# Import python core modules
import math
import re
import sys
# Import pip modules
import rethinkdb as r
# Compile index regex
_INDEX_REGEX ... | /reconsider-1.0.1.tar.gz/reconsider-1.0.1/Reconsider/__init__.py | 0.458349 | 0.202838 | __init__.py | pypi |
from __future__ import absolute_import, print_function
import logging
import re
import time
import urllib3
from elasticsearch import exceptions as esd_exceptions
from elasticsearch import Elasticsearch
from .utils import get_week_dates
urllib3.disable_warnings()
logger = logging.getLogger(__name__)
class Elasti... | /record-recommender-0.0.2.tar.gz/record-recommender-0.0.2/record_recommender/fetcher.py | 0.548432 | 0.160562 | fetcher.py | pypi |
from __future__ import absolute_import, print_function
import hashlib
import logging
from collections import defaultdict
from six import iteritems
logger = logging.getLogger(__name__)
class Profiles(object):
"""Create user profiles from pageviews and downloads."""
def __init__(self, storage, config=None):... | /record-recommender-0.0.2.tar.gz/record-recommender-0.0.2/record_recommender/profiles.py | 0.600774 | 0.160694 | profiles.py | pypi |
import numpy as np
from record3d import Record3DStream
import cv2
from threading import Event
class DemoApp:
def __init__(self):
self.event = Event()
self.session = None
self.DEVICE_TYPE__TRUEDEPTH = 0
self.DEVICE_TYPE__LIDAR = 1
def on_new_frame(self):
"""
Thi... | /record3d-1.3.1-2.tar.gz/record3d-1.3.1.post2/demo-main.py | 0.575827 | 0.218742 | demo-main.py | pypi |
# Kubernetes Support
## Goals
The intent of this subdirectory is to add support for tracing libraries and to produce results in a reproducible manner on cloud hardware. Previously I had been running everything locally, but this becomes time-prohibitive when the runs last many hours and also requires a lot of human int... | /record_api-1.3.2.tar.gz/record_api-1.3.2/k8/README.md | 0.653127 | 0.972389 | README.md | pypi |
from typing import *
@overload
def hash_pandas_object(
obj: pandas.core.series.Series,
index: bool,
encoding: Literal["utf8"],
hash_key: None,
categorize: bool,
):
"""
usage.dask: 1
"""
...
@overload
def hash_pandas_object(
obj: pandas.core.frame.DataFrame,
index: bool,
... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.util.hashing.py | 0.810779 | 0.476397 | pandas.core.util.hashing.py | pypi |
from typing import *
class Categorical:
@overload
@classmethod
def from_codes(
cls,
/,
codes: numpy.ndarray,
categories: List[Literal["2014-01-03.csv", "2014-01-02.csv", "2014-01-01.csv"]],
):
"""
usage.dask: 1
"""
...
@overload
... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.arrays.categorical.py | 0.861217 | 0.486392 | pandas.core.arrays.categorical.py | pypi |
from typing import *
@overload
def get_dummies(data: pandas.core.series.Series):
"""
usage.dask: 2
"""
...
@overload
def get_dummies(
data: pandas.core.series.Series,
prefix: None,
prefix_sep: Literal["_"],
dummy_na: bool,
columns: None,
sparse: bool,
drop_first: bool,
... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.reshape.reshape.py | 0.658418 | 0.59246 | pandas.core.reshape.reshape.py | pypi |
from typing import *
@overload
def merge(
_0: dask.dataframe.core.DataFrame,
_1: dask.dataframe.core.DataFrame,
/,
*,
how: Literal["inner"],
indicator: bool,
left_index: bool,
left_on: None,
npartitions: None,
on: Literal["idx"],
right_index: bool,
right_on: None,
s... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.reshape.merge.py | 0.811527 | 0.48749 | pandas.core.reshape.merge.py | pypi |
from typing import *
# usage.dask: 1
MaskError: object
# usage.matplotlib: 17
# usage.pandas: 2
# usage.scipy: 6
# usage.sklearn: 2
# usage.xarray: 6
MaskedArray: object
# usage.scipy: 1
add: object
# usage.dask: 2
# usage.matplotlib: 19
# usage.pandas: 1
# usage.scipy: 41
# usage.skimage: 5
# usage.sklearn: 4
# us... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/numpy.ma.py | 0.815269 | 0.685032 | numpy.ma.py | pypi |
from typing import *
# usage.dask: 1
__name__: object
@overload
def fft(a: pandas.core.series.Series):
"""
usage.pandas: 1
"""
...
@overload
def fft(a: List[float]):
"""
usage.scipy: 3
"""
...
@overload
def fft(a: numpy.ndarray):
"""
usage.dask: 11
usage.matplotlib: 3
... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/numpy.fft.py | 0.853119 | 0.579638 | numpy.fft.py | pypi |
from typing import *
class Timedelta:
# usage.dask: 1
__module__: ClassVar[object]
# usage.xarray: 1
__name__: ClassVar[object]
@overload
def __add__(self, _0: pandas._libs.tslibs.timestamps.Timestamp, /):
"""
usage.xarray: 2
"""
...
@overload
def __... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas._libs.tslibs.timedeltas.py | 0.849144 | 0.459925 | pandas._libs.tslibs.timedeltas.py | pypi |
from typing import *
@overload
def isna(obj: None):
"""
usage.xarray: 1
"""
...
@overload
def isna(obj: numpy.float64):
"""
usage.dask: 3
usage.xarray: 3
"""
...
@overload
def isna(obj: numpy.ndarray):
"""
usage.dask: 8
usage.xarray: 22
"""
...
@overload
d... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.dtypes.missing.py | 0.801509 | 0.606149 | pandas.core.dtypes.missing.py | pypi |
from typing import *
@overload
def date_range(start: Literal["2000-01-01"], periods: int):
"""
usage.xarray: 27
"""
...
@overload
def date_range(start: Literal["1999-01-05"], periods: int):
"""
usage.xarray: 1
"""
...
@overload
def date_range(start: Literal["2000-02-01"], periods: ... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.indexes.datetimes.py | 0.836087 | 0.53437 | pandas.core.indexes.datetimes.py | pypi |
from typing import *
class RangeIndex:
# usage.dask: 1
__module__: ClassVar[object]
# usage.dask: 2
__name__: ClassVar[object]
# usage.dask: 1
array: object
# usage.dask: 11
# usage.xarray: 4
dtype: object
# usage.dask: 1
is_all_dates: object
# usage.dask: 1
#... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.indexes.range.py | 0.880778 | 0.634543 | pandas.core.indexes.range.py | pypi |
from typing import *
@overload
def to_timedelta(arg: numpy.int64, unit: Literal["D"]):
"""
usage.xarray: 2
"""
...
@overload
def to_timedelta(arg: numpy.ndarray, unit: Literal["ns"]):
"""
usage.xarray: 1
"""
...
@overload
def to_timedelta(arg: numpy.float64, unit: Literal["D"]):
... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.tools.timedeltas.py | 0.847983 | 0.759315 | pandas.core.tools.timedeltas.py | pypi |
from typing import *
@overload
def _check_fill_value(fill_value: int, ndtype: numpy.dtype):
"""
usage.dask: 1
"""
...
@overload
def _check_fill_value(fill_value: float, ndtype: numpy.dtype):
"""
usage.dask: 1
"""
...
def _check_fill_value(fill_value: Union[float, int], ndtype: nump... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/numpy.ma.core.py | 0.845783 | 0.737655 | numpy.ma.core.py | pypi |
from typing import *
class CategoricalDtype:
# usage.dask: 1
__module__: ClassVar[object]
# usage.dask: 10
categories: object
# usage.dask: 1
# usage.sklearn: 6
kind: object
# usage.sklearn: 1
name: object
# usage.dask: 4
ordered: object
@overload
def __eq__(s... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.dtypes.dtypes.py | 0.878783 | 0.545165 | pandas.core.dtypes.dtypes.py | pypi |
from typing import *
@overload
def read_csv(filepath_or_buffer: _io.BytesIO):
"""
usage.dask: 4
"""
...
@overload
def read_csv(filepath_or_buffer: _io.BytesIO, usecols: List[Literal["id", "name"]]):
"""
usage.dask: 1
"""
...
@overload
def read_csv(filepath_or_buffer: _io.BytesIO, s... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.io.parsers.py | 0.641759 | 0.392279 | pandas.io.parsers.py | pypi |
from typing import *
class CategoricalIndex:
# usage.dask: 1
__module__: ClassVar[object]
# usage.dask: 2
__name__: ClassVar[object]
# usage.dask: 1
array: object
# usage.dask: 11
# usage.xarray: 1
categories: object
# usage.dask: 2
codes: object
# usage.dask: 4
... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.indexes.category.py | 0.865636 | 0.654163 | pandas.core.indexes.category.py | pypi |
from typing import *
@overload
def to_offset(_0: Literal["S"], /):
"""
usage.dask: 1
"""
...
@overload
def to_offset(_0: Literal["W"], /):
"""
usage.dask: 1
"""
...
@overload
def to_offset(_0: Literal["B"], /):
"""
usage.dask: 1
"""
...
@overload
def to_offset(_0:... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas._libs.tslibs.offsets.py | 0.815343 | 0.462837 | pandas._libs.tslibs.offsets.py | pypi |
from typing import *
class CFTimeIndex:
def copy(self, /, deep: bool):
"""
usage.xarray: 1
"""
...
def equals(self, /, other: xarray.coding.cftimeindex.CFTimeIndex):
"""
usage.xarray: 11
"""
...
@overload
def get_indexer(self, /, target... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/xarray.coding.cftimeindex.py | 0.873269 | 0.531331 | xarray.coding.cftimeindex.py | pypi |
from typing import *
class Rolling:
# usage.dask: 1
min_periods: object
# usage.dask: 1
win_type: object
# usage.dask: 1
window: object
@overload
def aggregate(self, /, func: List[Callable]):
"""
usage.dask: 4
"""
...
@overload
def aggregate... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.window.rolling.py | 0.85741 | 0.648703 | pandas.core.window.rolling.py | pypi |
from typing import *
class NpzFile:
@overload
def __getitem__(self, _0: Literal["arr_0"], /):
"""
usage.skimage: 1
"""
...
@overload
def __getitem__(self, _0: Literal["autolevel"], /):
"""
usage.skimage: 1
"""
...
@overload
def ... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/numpy.lib.npyio.py | 0.830147 | 0.19063 | numpy.lib.npyio.py | pypi |
from typing import *
class Timestamp:
# usage.dask: 1
__module__: ClassVar[object]
# usage.dask: 1
__name__: ClassVar[object]
# usage.dask: 1
dtype: object
# usage.dask: 1
freq: object
# usage.dask: 4
# usage.xarray: 1
tz: object
# usage.dask: 1
# usage.xarray... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas._libs.tslibs.timestamps.py | 0.875021 | 0.451689 | pandas._libs.tslibs.timestamps.py | pypi |
from typing import *
class DataFrameGroupBy:
# usage.dask: 1
__name__: ClassVar[object]
# usage.dask: 1
A: object
# usage.dask: 1
B: object
# usage.dask: 1
_selected_obj: object
# usage.dask: 24
a: object
# usage.dask: 20
b: object
# usage.dask: 1
e: obje... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.groupby.generic.py | 0.838448 | 0.553083 | pandas.core.groupby.generic.py | pypi |
from typing import *
@overload
def cholesky(a: numpy.ndarray):
"""
usage.scipy: 3
"""
...
@overload
def cholesky(a: List[List[float]]):
"""
usage.scipy: 2
"""
...
def cholesky(a: Union[List[List[float]], numpy.ndarray]):
"""
usage.scipy: 5
"""
...
@overload
def co... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/numpy.linalg.py | 0.842118 | 0.688868 | numpy.linalg.py | pypi |
from typing import *
@overload
def period_range(start: Literal["2000-01-01"], periods: int, freq: Literal["B"]):
"""
usage.dask: 1
usage.xarray: 1
"""
...
@overload
def period_range(start: Literal["2000-01-01"], periods: int):
"""
usage.xarray: 2
"""
...
@overload
def period_ra... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.indexes.period.py | 0.877122 | 0.633354 | pandas.core.indexes.period.py | pypi |
from typing import *
class TimedeltaArray:
# usage.dask: 1
asi8: object
# usage.dask: 1
dtype: object
def __add__(
self,
_0: Union[
numpy.ndarray,
numpy.float64,
numpy.datetime64,
numpy.float32,
numpy.timedelta64,
... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.arrays.timedeltas.py | 0.843847 | 0.536313 | pandas.core.arrays.timedeltas.py | pypi |
from typing import *
class MultiIndex:
# usage.dask: 1
__module__: ClassVar[object]
# usage.dask: 3
# usage.xarray: 1
__name__: ClassVar[object]
@overload
@classmethod
def from_arrays(cls, /, arrays: List[xarray.coding.cftimeindex.CFTimeIndex]):
"""
usage.xarray: 1
... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.indexes.multi.py | 0.828523 | 0.528412 | pandas.core.indexes.multi.py | pypi |
from typing import *
@overload
def as_strided(x: numpy.ndarray, shape: Tuple[int, int], strides: Tuple[int, int]):
"""
usage.matplotlib: 5
usage.scipy: 13
usage.skimage: 1
"""
...
@overload
def as_strided(
x: numpy.ndarray, shape: Tuple[int, int, int], strides: Tuple[int, int, int]
):
... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/numpy.lib.stride_tricks.py | 0.893135 | 0.680618 | numpy.lib.stride_tricks.py | pypi |
from typing import *
@overload
def concat(objs: List[pandas.core.frame.DataFrame], join: Literal["outer"], sort: bool):
"""
usage.dask: 12
"""
...
@overload
def concat(objs: List[pandas.core.series.Series], axis: int):
"""
usage.dask: 7
"""
...
@overload
def concat(objs: List[panda... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.reshape.concat.py | 0.840684 | 0.699434 | pandas.core.reshape.concat.py | pypi |
from typing import *
def is_bool_dtype(arr_or_dtype: pandas.core.series.Series):
"""
usage.dask: 1
"""
...
@overload
def is_categorical_dtype(arr_or_dtype: numpy.dtype):
"""
usage.dask: 47
"""
...
@overload
def is_categorical_dtype(arr_or_dtype: pandas.core.dtypes.dtypes.Categorica... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.dtypes.common.py | 0.806358 | 0.598635 | pandas.core.dtypes.common.py | pypi |
from typing import *
class SparseArray:
# usage.dask: 1
__module__: ClassVar[object]
# usage.sklearn: 1
__name__: ClassVar[object]
# usage.sklearn: 1
__class__: object
def __and__(self, _0: numpy.ndarray, /):
"""
usage.pandas: 2
"""
...
def __eq__(s... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.arrays.sparse.array.py | 0.833426 | 0.521471 | pandas.core.arrays.sparse.array.py | pypi |
from typing import *
@overload
def timedelta_range(start: int, periods: int):
"""
usage.xarray: 1
"""
...
@overload
def timedelta_range(start: Literal["1 days"], periods: int, freq: Literal["D"]):
"""
usage.dask: 1
"""
...
@overload
def timedelta_range(start: Literal["1 day"], peri... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.indexes.timedeltas.py | 0.868827 | 0.5169 | pandas.core.indexes.timedeltas.py | pypi |
from typing import *
class NaTType:
@overload
def __add__(self, _0: pandas._libs.tslibs.timestamps.Timestamp, /):
"""
usage.xarray: 2
"""
...
@overload
def __add__(self, _0: Union[numpy.timedelta64, numpy.ndarray], /):
"""
usage.pandas: 9
"""
... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas._libs.tslibs.nattype.py | 0.861042 | 0.384392 | pandas._libs.tslibs.nattype.py | pypi |
from typing import *
class DataFrame:
# usage.dask: 4
__module__: ClassVar[object]
# usage.dask: 4
__name__: ClassVar[object]
# usage.dask: 6
# usage.sklearn: 39
shape: ClassVar[object]
# usage.sklearn: 2
sparse: ClassVar[object]
@overload
@classmethod
def __ne__(c... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.frame.py | 0.870129 | 0.676493 | pandas.core.frame.py | pypi |
from typing import *
@overload
def apply_along_axis(
func1d: Callable,
axis: int,
arr: numpy.ma.core.MaskedArray,
*args: Literal["v", "t"],
):
"""
usage.scipy: 6
"""
...
@overload
def apply_along_axis(func1d: Callable, axis: int, arr: numpy.ma.core.MaskedArray):
"""
usage.sci... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/numpy.ma.extras.py | 0.87289 | 0.649162 | numpy.ma.extras.py | pypi |
from typing import *
@overload
def to_datetime(arg: numpy.ndarray):
"""
usage.xarray: 1
"""
...
@overload
def to_datetime(arg: List[Literal["NaT", "2000-01-02", "2000-01-01"]]):
"""
usage.xarray: 2
"""
...
@overload
def to_datetime(arg: List[Literal["NaT"]]):
"""
usage.xarr... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.tools.datetimes.py | 0.795975 | 0.724249 | pandas.core.tools.datetimes.py | pypi |
from typing import *
@overload
def array(
data: List[Union[pandas._libs.missing.NAType, Literal["a"]]],
dtype: pandas.core.arrays.string_.StringDtype,
):
"""
usage.dask: 1
"""
...
@overload
def array(data: List[Union[None, int]], dtype: pandas.core.arrays.integer.Int32Dtype):
"""
usa... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.construction.py | 0.796609 | 0.647046 | pandas.core.construction.py | pypi |
from typing import *
class BooleanArray:
# usage.dask: 1
__module__: ClassVar[object]
def __and__(self, _0: numpy.bool_, /):
"""
usage.pandas: 1
"""
...
def __eq__(self, _0: numpy.bool_, /):
"""
usage.pandas: 1
"""
...
def __ior__... | /record_api-1.3.2.tar.gz/record_api-1.3.2/data/typing/pandas.core.arrays.boolean.py | 0.868213 | 0.458349 | pandas.core.arrays.boolean.py | pypi |
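Each row above follows the six-column schema from the header: a code snippet, its archive path, two classifier-style probability scores, a filename, and a constant `kind`. As a minimal sketch of how such a dump could be consumed — the file name `code_rows.jsonl` and the threshold values are illustrative assumptions, not part of the source — the rows can be loaded and filtered with pandas:

```python
import pandas as pd

# Assumption: the rows were exported as JSON Lines with the six columns
# shown in the header (code, path, quality_prob, learning_prob, filename, kind).
df = pd.read_json("code_rows.jsonl", lines=True)

# Keep snippets scored as both high quality and instructive; the 0.8 / 0.5
# thresholds are illustrative, not taken from the source.
filtered = df[(df["quality_prob"] > 0.8) & (df["learning_prob"] > 0.5)]

# Every row shown here has kind == "pypi", so this filter is a no-op on this
# dump, but it documents the expected schema.
filtered = filtered[filtered["kind"] == "pypi"]

print(filtered[["filename", "quality_prob", "learning_prob"]].head())
```

Filtering on `quality_prob` and `learning_prob` is the natural use of these columns: they read as model-assigned scores for code quality and educational value, which is presumably how high-value snippets would be selected from the full set.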