code stringlengths 458 137k | apis sequence | extract_api stringlengths 287 754k |
|---|---|---|
import io
import os
import re
import shutil
import sys
import typing
import webbrowser
from time import sleep
import click
import pandas as pd
from whylogs.app import SessionConfig, WriterConfig
from whylogs.app.session import session_from_config
from whylogs.cli import (
OBSERVATORY_EXPLANATION,
PIPELINE_DESCRIPTION,
PROJECT_DESCRIPTION,
generate_notebooks,
)
LENDING_CLUB_CSV = "lending_club_1000.csv"


def echo(message: typing.Union[str, list], **styles):
    """Emit one styled message, or each message of a list, via click.secho."""
    messages = message if isinstance(message, list) else [message]
    for line in messages:
        click.secho(line, **styles)


NAME_FORMAT = re.compile(r"^(\w|-|_)+$")
class NameParamType(click.ParamType):
    """Click parameter type that only accepts word characters, dashes and underscores."""

    def convert(self, value, param, ctx):
        # Guard clause: accept the value as soon as it matches the whole-string pattern.
        if NAME_FORMAT.fullmatch(value) is not None:
            return value
        raise click.BadParameter(
            "must contain only alphanumeric, underscore and dash characters"
        )
@click.command()
@click.option(
    "--project-dir",
    "-d",
    default="./",
    help="The root of the new WhyLogs profiling project.",
)
def init(project_dir):
    """
    Initialize and configure a new WhyLogs project.
    This guided input walks the user through setting up a new project and also
    onboards a new developer in an existing project.
    It scaffolds directories, sets up notebooks, creates a project file, and
    appends to a `.gitignore` file.
    """
    # NOTE(review): the SHOUTING_CASE message constants used below
    # (INTRO_MESSAGE, EMPTY_PATH_WARNING, DONE, ...) are not defined in this
    # excerpt -- presumably imported/declared elsewhere in the module; confirm.
    echo(INTRO_MESSAGE, fg="green")
    project_dir = os.path.abspath(project_dir)
    echo(f"Project path: {project_dir}")
    is_project_dir_empty = len(os.listdir(path=project_dir)) == 0
    if not is_project_dir_empty:
        # Target directory already has content: confirm before scaffolding over it.
        echo(EMPTY_PATH_WARNING, fg="yellow")
        if not click.confirm(OVERRIDE_CONFIRM, default=False, show_default=True):
            echo(DOING_NOTHING_ABORTING)
            sys.exit(0)
    os.chdir(project_dir)
    echo(BEGIN_WORKFLOW)
    # --- gather project name, pipeline name and output location interactively ---
    echo(PROJECT_DESCRIPTION)
    project_name = click.prompt(PROJECT_NAME_PROMPT, type=NameParamType())
    echo(f"Using project name: {project_name}", fg="green")
    echo(PIPELINE_DESCRIPTION)
    pipeline_name = click.prompt(
        "Pipeline name (leave blank for default pipeline name)",
        type=NameParamType(),
        default="default-pipeline",
    )
    echo(f"Using pipeline name: {pipeline_name}", fg="green")
    output_path = click.prompt(
        "Specify the WhyLogs output path", default="output", show_default=True
    )
    echo(f"Using output path: {output_path}", fg="green")
    # Persist the session configuration to <project_dir>/whylogs.yml.
    writer = WriterConfig("local", ["all"], output_path)
    session_config = SessionConfig(
        project_name, pipeline_name, writers=[writer], verbose=False
    )
    config_yml = os.path.join(project_dir, "whylogs.yml")
    with open(file=config_yml, mode="w") as f:
        session_config.to_yaml(f)
    echo(f"Config YAML file was written to: {config_yml}\n")
    if click.confirm(INITIAL_PROFILING_CONFIRM, default=True):
        echo(DATA_SOURCE_MESSAGE)
        # Only one data source is currently offered, hence the assert below.
        choices = [
            "CSV on the file system",
        ]
        for i in range(len(choices)):
            echo(f"\t{i + 1}. {choices[i]}")
        choice = click.prompt("", type=click.IntRange(min=1, max=len(choices)))
        assert choice == 1
        full_input = profile_csv(session_config, project_dir)
        echo(
            f"You should find the WhyLogs output under: {os.path.join(project_dir, output_path, project_name)}",
            fg="green",
        )
        echo(GENERATE_NOTEBOOKS)
        # Hack: Takes first all numeric directory as generated datetime for now
        # NOTE(review): re.match("[0-9]*", x) matches *every* string (zero or
        # more digits), so the filter below is a no-op and this simply takes
        # the first entry of the directory listing.
        output_full_path = os.path.join(project_dir, output_path)
        generated_datetime = list(
            filter(lambda x: re.match("[0-9]*", x), os.listdir(output_full_path))
        )[0]
        full_output_path = os.path.join(output_path, generated_datetime)
        generate_notebooks(
            project_dir,
            {
                "INPUT_PATH": full_input,
                "PROFILE_DIR": full_output_path,
                "GENERATED_DATETIME": generated_datetime,
            },
        )
        echo(
            f'You should find the output under: {os.path.join(project_dir, "notebooks")}'
        )
        echo(OBSERVATORY_EXPLANATION)
        echo("Your original data (CSV file) will remain locally.")
        should_upload = click.confirm(
            "Would you like to proceed with sending us your statistic data?",
            default=False,
            show_default=True,
        )
        if should_upload:
            # Demo flow: the "upload" is simulated and opens a Figma prototype.
            echo("Uploading data to WhyLabs Observatory...")
            sleep(5)
            webbrowser.open(
                "https://www.figma.com/proto/QBTk0N6Ad0D9hRijjhBaE0/Usability-Study-Navigation?node-id=1%3A90&viewport=185%2C235%2C0.25&scaling=min-zoom"
            )
        else:
            echo("Skip uploading")
        echo(DONE)
    else:
        echo("Skip initial profiling and notebook generation")
        echo(DONE)
def profile_csv(session_config: SessionConfig, project_dir: str) -> str:
    """Prompt for a CSV path (or fall back to the bundled demo dataset),
    profile it with WhyLogs, and return the absolute path of the input CSV.

    Parameters
    ----------
    session_config : SessionConfig
        Configuration used to create the logging session.
    project_dir : str
        Directory the demo CSV is copied into when no path is supplied.

    Returns
    -------
    str
        Absolute path of the CSV file that was profiled.
    """
    package_nb_path = os.path.join(os.path.dirname(__file__), "notebooks")
    demo_csv = os.path.join(package_nb_path, LENDING_CLUB_CSV)
    # An empty StringIO is used as the prompt default so "user pressed enter"
    # can be detected and the demo dataset substituted.
    file: io.TextIOWrapper = click.prompt(
        "CSV input path (leave blank to use our demo dataset)",
        type=click.File(mode="rt"),
        default=io.StringIO(),
        show_default=False,
    )
    # BUGFIX/idiom: use isinstance instead of the `type(x) is T` anti-pattern.
    if isinstance(file, io.StringIO):
        echo("Using the demo Lending Club Data (1K randomized samples)", fg="green")
        destination_csv = os.path.join(project_dir, LENDING_CLUB_CSV)
        echo("Copying the demo file to: %s" % destination_csv)
        shutil.copy(demo_csv, destination_csv)
        full_input = os.path.realpath(destination_csv)
    else:
        # The file object from click is only needed for its name; close it early.
        file.close()
        full_input = os.path.realpath(file.name)
    echo(f"Input file: {full_input}")
    echo(RUN_PROFILING)
    session = session_from_config(session_config)
    df = pd.read_csv(full_input)
    session.log_dataframe(df)
    session.close()
    return full_input
| [
"whylogs.cli.generate_notebooks",
"whylogs.app.WriterConfig",
"whylogs.app.session.session_from_config",
"whylogs.app.SessionConfig"
] | [((647, 673), 're.compile', 're.compile', (['"""^(\\\\w|-|_)+$"""'], {}), "('^(\\\\w|-|_)+$')\n", (657, 673), False, 'import re\n'), ((962, 977), 'click.command', 'click.command', ([], {}), '()\n', (975, 977), False, 'import click\n'), ((979, 1088), 'click.option', 'click.option', (['"""--project-dir"""', '"""-d"""'], {'default': '"""./"""', 'help': '"""The root of the new WhyLogs profiling project."""'}), "('--project-dir', '-d', default='./', help=\n 'The root of the new WhyLogs profiling project.')\n", (991, 1088), False, 'import click\n'), ((1495, 1523), 'os.path.abspath', 'os.path.abspath', (['project_dir'], {}), '(project_dir)\n', (1510, 1523), False, 'import os\n'), ((1851, 1872), 'os.chdir', 'os.chdir', (['project_dir'], {}), '(project_dir)\n', (1859, 1872), False, 'import os\n'), ((2346, 2434), 'click.prompt', 'click.prompt', (['"""Specify the WhyLogs output path"""'], {'default': '"""output"""', 'show_default': '(True)'}), "('Specify the WhyLogs output path', default='output',\n show_default=True)\n", (2358, 2434), False, 'import click\n'), ((2516, 2559), 'whylogs.app.WriterConfig', 'WriterConfig', (['"""local"""', "['all']", 'output_path'], {}), "('local', ['all'], output_path)\n", (2528, 2559), False, 'from whylogs.app import SessionConfig, WriterConfig\n'), ((2581, 2656), 'whylogs.app.SessionConfig', 'SessionConfig', (['project_name', 'pipeline_name'], {'writers': '[writer]', 'verbose': '(False)'}), '(project_name, pipeline_name, writers=[writer], verbose=False)\n', (2594, 2656), False, 'from whylogs.app import SessionConfig, WriterConfig\n'), ((2688, 2728), 'os.path.join', 'os.path.join', (['project_dir', '"""whylogs.yml"""'], {}), "(project_dir, 'whylogs.yml')\n", (2700, 2728), False, 'import os\n'), ((2879, 2933), 'click.confirm', 'click.confirm', (['INITIAL_PROFILING_CONFIRM'], {'default': '(True)'}), '(INITIAL_PROFILING_CONFIRM, default=True)\n', (2892, 2933), False, 'import click\n'), ((5109, 5156), 'os.path.join', 'os.path.join', 
(['package_nb_path', 'LENDING_CLUB_CSV'], {}), '(package_nb_path, LENDING_CLUB_CSV)\n', (5121, 5156), False, 'import os\n'), ((5875, 5910), 'whylogs.app.session.session_from_config', 'session_from_config', (['session_config'], {}), '(session_config)\n', (5894, 5910), False, 'from whylogs.app.session import session_from_config\n'), ((5920, 5943), 'pandas.read_csv', 'pd.read_csv', (['full_input'], {}), '(full_input)\n', (5931, 5943), True, 'import pandas as pd\n'), ((600, 630), 'click.secho', 'click.secho', (['message'], {}), '(message, **styles)\n', (611, 630), False, 'import click\n'), ((1723, 1788), 'click.confirm', 'click.confirm', (['OVERRIDE_CONFIRM'], {'default': '(False)', 'show_default': '(True)'}), '(OVERRIDE_CONFIRM, default=False, show_default=True)\n', (1736, 1788), False, 'import click\n'), ((1835, 1846), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1843, 1846), False, 'import sys\n'), ((3591, 3629), 'os.path.join', 'os.path.join', (['project_dir', 'output_path'], {}), '(project_dir, output_path)\n', (3603, 3629), False, 'import os\n'), ((3787, 3832), 'os.path.join', 'os.path.join', (['output_path', 'generated_datetime'], {}), '(output_path, generated_datetime)\n', (3799, 3832), False, 'import os\n'), ((3841, 3979), 'whylogs.cli.generate_notebooks', 'generate_notebooks', (['project_dir', "{'INPUT_PATH': full_input, 'PROFILE_DIR': full_output_path,\n 'GENERATED_DATETIME': generated_datetime}"], {}), "(project_dir, {'INPUT_PATH': full_input, 'PROFILE_DIR':\n full_output_path, 'GENERATED_DATETIME': generated_datetime})\n", (3859, 3979), False, 'from whylogs.cli import OBSERVATORY_EXPLANATION, PIPELINE_DESCRIPTION, PROJECT_DESCRIPTION, generate_notebooks\n'), ((4318, 4435), 'click.confirm', 'click.confirm', (['"""Would you like to proceed with sending us your statistic data?"""'], {'default': '(False)', 'show_default': '(True)'}), "('Would you like to proceed with sending us your statistic data?',\n default=False, show_default=True)\n", (4331, 4435), 
False, 'import click\n'), ((5054, 5079), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5069, 5079), False, 'import os\n'), ((5510, 5553), 'os.path.join', 'os.path.join', (['project_dir', 'LENDING_CLUB_CSV'], {}), '(project_dir, LENDING_CLUB_CSV)\n', (5522, 5553), False, 'import os\n'), ((5625, 5663), 'shutil.copy', 'shutil.copy', (['demo_csv', 'destination_csv'], {}), '(demo_csv, destination_csv)\n', (5636, 5663), False, 'import shutil\n'), ((5685, 5718), 'os.path.realpath', 'os.path.realpath', (['destination_csv'], {}), '(destination_csv)\n', (5701, 5718), False, 'import os\n'), ((5771, 5798), 'os.path.realpath', 'os.path.realpath', (['file.name'], {}), '(file.name)\n', (5787, 5798), False, 'import os\n'), ((555, 581), 'click.secho', 'click.secho', (['msg'], {}), '(msg, **styles)\n', (566, 581), False, 'import click\n'), ((823, 912), 'click.BadParameter', 'click.BadParameter', (['"""must contain only alphanumeric, underscore and dash characters"""'], {}), "(\n 'must contain only alphanumeric, underscore and dash characters')\n", (841, 912), False, 'import click\n'), ((1597, 1625), 'os.listdir', 'os.listdir', ([], {'path': 'project_dir'}), '(path=project_dir)\n', (1607, 1625), False, 'import os\n'), ((4578, 4586), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (4583, 4586), False, 'from time import sleep\n'), ((4599, 4763), 'webbrowser.open', 'webbrowser.open', (['"""https://www.figma.com/proto/QBTk0N6Ad0D9hRijjhBaE0/Usability-Study-Navigation?node-id=1%3A90&viewport=185%2C235%2C0.25&scaling=min-zoom"""'], {}), "(\n 'https://www.figma.com/proto/QBTk0N6Ad0D9hRijjhBaE0/Usability-Study-Navigation?node-id=1%3A90&viewport=185%2C235%2C0.25&scaling=min-zoom'\n )\n", (4614, 4763), False, 'import webbrowser\n'), ((5277, 5298), 'click.File', 'click.File', ([], {'mode': '"""rt"""'}), "(mode='rt')\n", (5287, 5298), False, 'import click\n'), ((5316, 5329), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (5327, 5329), False, 'import io\n'), ((3360, 
3412), 'os.path.join', 'os.path.join', (['project_dir', 'output_path', 'project_name'], {}), '(project_dir, output_path, project_name)\n', (3372, 3412), False, 'import os\n'), ((3717, 3745), 'os.listdir', 'os.listdir', (['output_full_path'], {}), '(output_full_path)\n', (3727, 3745), False, 'import os\n'), ((4137, 4175), 'os.path.join', 'os.path.join', (['project_dir', '"""notebooks"""'], {}), "(project_dir, 'notebooks')\n", (4149, 4175), False, 'import os\n'), ((3694, 3715), 're.match', 're.match', (['"""[0-9]*"""', 'x'], {}), "('[0-9]*', x)\n", (3702, 3715), False, 'import re\n')] |
import json
from uuid import uuid4
import datetime
from whylogs.core.datasetprofile import DatasetProfile, array_profile, dataframe_profile
from whylogs.core.annotation_profiling import TrackBB, BB_ATTRIBUTES
import os
# Absolute path of the shared "testdata" directory three levels above this file.
TEST_DATA_PATH = os.path.abspath(os.path.join(os.path.realpath(
    os.path.dirname(__file__)), os.pardir, os.pardir, os.pardir, "testdata"))
def test_track_bb_annotation():
    """Profiling a YOLO bounding-box JSONL file tracks every BB attribute."""
    session_timestamp = datetime.datetime.utcnow()
    session_id = uuid4().hex
    annotation_file = os.path.join(
        TEST_DATA_PATH, "files", "yolo_bounding_box.jsonl")
    # total_default_features = num_image_features + num_metadata_features
    profile = DatasetProfile(name="test",
                             session_id=session_id,
                             session_timestamp=session_timestamp,
                             tags={"key": "value"},
                             metadata={"key": "x1"},)
    TrackBB(annotation_file)(profile)
    tracked_columns = profile.columns
    assert len(tracked_columns) == len(BB_ATTRIBUTES)
    # Per-image attributes are tracked once per image (100 images); the rest
    # are tracked once per individual bounding box (4183 boxes).
    per_image_attrs = ("annotation_count", "area_coverage", "annotation_density")
    for attribute in BB_ATTRIBUTES:
        assert tracked_columns.get(attribute, None) is not None
        expected_count = 100 if attribute in per_image_attrs else 4183
        assert tracked_columns[attribute].number_tracker.count == expected_count
def test_track_json_annotation():
    """Profiling pre-parsed JSON annotation objects tracks every BB attribute."""
    now = datetime.datetime.utcnow()
    shared_session_id = uuid4().hex
    test_annotation_path = os.path.join(
        TEST_DATA_PATH, "files", "yolo_bounding_box.jsonl")
    profile_1 = DatasetProfile(name="test",
                             session_id=shared_session_id,
                             session_timestamp=now,
                             tags={"key": "value"},
                             metadata={"key": "x1"},)
    # BUGFIX: the file handle was previously opened inside the comprehension
    # and never closed; a context manager guarantees it is released.
    with open(test_annotation_path, "r") as annotation_file:
        objs = [json.loads(eachline) for eachline in annotation_file]
    trackbb = TrackBB(obj=objs)
    trackbb(profile_1)
    columns = profile_1.columns
    assert len(columns) == len(BB_ATTRIBUTES)
    # 100 images in the fixture -> annotation_count tracked once per image.
    assert columns["annotation_count"].number_tracker.count == 100
| [
"whylogs.core.annotation_profiling.TrackBB",
"whylogs.core.datasetprofile.DatasetProfile"
] | [((409, 435), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (433, 435), False, 'import datetime\n'), ((500, 564), 'os.path.join', 'os.path.join', (['TEST_DATA_PATH', '"""files"""', '"""yolo_bounding_box.jsonl"""'], {}), "(TEST_DATA_PATH, 'files', 'yolo_bounding_box.jsonl')\n", (512, 564), False, 'import os\n'), ((665, 797), 'whylogs.core.datasetprofile.DatasetProfile', 'DatasetProfile', ([], {'name': '"""test"""', 'session_id': 'shared_session_id', 'session_timestamp': 'now', 'tags': "{'key': 'value'}", 'metadata': "{'key': 'x1'}"}), "(name='test', session_id=shared_session_id, session_timestamp\n =now, tags={'key': 'value'}, metadata={'key': 'x1'})\n", (679, 797), False, 'from whylogs.core.datasetprofile import DatasetProfile, array_profile, dataframe_profile\n'), ((932, 961), 'whylogs.core.annotation_profiling.TrackBB', 'TrackBB', (['test_annotation_path'], {}), '(test_annotation_path)\n', (939, 961), False, 'from whylogs.core.annotation_profiling import TrackBB, BB_ATTRIBUTES\n'), ((1460, 1486), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1484, 1486), False, 'import datetime\n'), ((1592, 1656), 'os.path.join', 'os.path.join', (['TEST_DATA_PATH', '"""files"""', '"""yolo_bounding_box.jsonl"""'], {}), "(TEST_DATA_PATH, 'files', 'yolo_bounding_box.jsonl')\n", (1604, 1656), False, 'import os\n'), ((1683, 1815), 'whylogs.core.datasetprofile.DatasetProfile', 'DatasetProfile', ([], {'name': '"""test"""', 'session_id': 'shared_session_id', 'session_timestamp': 'now', 'tags': "{'key': 'value'}", 'metadata': "{'key': 'x1'}"}), "(name='test', session_id=shared_session_id, session_timestamp\n =now, tags={'key': 'value'}, metadata={'key': 'x1'})\n", (1697, 1815), False, 'from whylogs.core.datasetprofile import DatasetProfile, array_profile, dataframe_profile\n'), ((2045, 2062), 'whylogs.core.annotation_profiling.TrackBB', 'TrackBB', ([], {'obj': 'objs'}), '(obj=objs)\n', (2052, 2062), False, 'from 
whylogs.core.annotation_profiling import TrackBB, BB_ATTRIBUTES\n'), ((460, 467), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (465, 467), False, 'from uuid import uuid4\n'), ((1511, 1518), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1516, 1518), False, 'from uuid import uuid4\n'), ((1949, 1969), 'json.loads', 'json.loads', (['eachline'], {}), '(eachline)\n', (1959, 1969), False, 'import json\n'), ((290, 315), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (305, 315), False, 'import os\n')] |
import logging
import re
from typing import List, Mapping, Optional
from google.protobuf.json_format import Parse
from whylogs.proto import (
DatasetConstraintMsg,
DatasetProperties,
NumberSummary,
Op,
SummaryConstraintMsg,
SummaryConstraintMsgs,
ValueConstraintMsg,
ValueConstraintMsgs,
)
from whylogs.util.protobuf import message_to_json
logger = logging.getLogger(__name__)  # module-level logger, named after this module
"""
Dict indexed by constraint operator.
These help translate from constraint schema to language-specific functions that are faster to evaluate.
This is just a form of currying, and I chose to bind the boolean comparison operator first.
"""
_value_funcs = {
    # functions that compare an incoming feature value to a literal value.
    Op.LT: lambda x: lambda v: v < x,  # assert incoming value 'v' is less than some fixed value 'x'
    Op.LE: lambda x: lambda v: v <= x,
    Op.EQ: lambda x: lambda v: v == x,
    Op.NE: lambda x: lambda v: v != x,
    Op.GE: lambda x: lambda v: v >= x,
    Op.GT: lambda x: lambda v: v > x,  # assert incoming value 'v' is greater than some fixed value 'x'
    # regex operators: 'x' is the pattern bound first, 'v' the incoming string
    Op.MATCH: lambda x: lambda v: re.match(x, v) is not None,
    Op.NOMATCH: lambda x: lambda v: re.match(x, v) is None,
}
# Bind a NumberSummary field name 'f' and a literal 'v'; the returned closure
# compares summary.<f> against the literal.
_summary_funcs1 = {
    # functions that compare a summary field to a literal value.
    Op.LT: lambda f, v: lambda s: getattr(s, f) < v,
    Op.LE: lambda f, v: lambda s: getattr(s, f) <= v,
    Op.EQ: lambda f, v: lambda s: getattr(s, f) == v,
    Op.NE: lambda f, v: lambda s: getattr(s, f) != v,
    Op.GE: lambda f, v: lambda s: getattr(s, f) >= v,
    Op.GT: lambda f, v: lambda s: getattr(s, f) > v,
}
# Bind two NumberSummary field names; the returned closure compares
# summary.<f> against summary.<f2> on the same summary object.
_summary_funcs2 = {
    # functions that compare two summary fields.
    Op.LT: lambda f, f2: lambda s: getattr(s, f) < getattr(s, f2),
    Op.LE: lambda f, f2: lambda s: getattr(s, f) <= getattr(s, f2),
    Op.EQ: lambda f, f2: lambda s: getattr(s, f) == getattr(s, f2),
    Op.NE: lambda f, f2: lambda s: getattr(s, f) != getattr(s, f2),
    Op.GE: lambda f, f2: lambda s: getattr(s, f) >= getattr(s, f2),
    Op.GT: lambda f, f2: lambda s: getattr(s, f) > getattr(s, f2),
}
class ValueConstraint:
    """
    ValueConstraints express a binary boolean relationship between an implied numeric value and a literal.
    When associated with a ColumnProfile, the relation is evaluated for every incoming value that is processed by whylogs.
    Parameters
    ----------
    op : whylogs.proto.Op (required)
        Enumeration of binary comparison operator applied between static value and incoming stream.
        Enum values are mapped to operators like '==', '<', and '<=', etc.
    value : (one-of)
        Static value to compare against incoming stream using operator specified in `op`.
        Exactly one of `value` or `regex_pattern` must be supplied.
    regex_pattern : str (one-of)
        Regex pattern an incoming string value is matched against (Op.MATCH / Op.NOMATCH).
    name : str
        Name of the constraint used for reporting
    verbose : bool
        If true, log every application of this constraint that fails.
        Useful to identify specific streaming values that fail the constraint.
    """

    def __init__(self, op: Op, value=None, regex_pattern: str = None, name: str = None, verbose=False):
        self._name = name
        self._verbose = verbose
        self.op = op
        self.total = 0
        self.failures = 0
        if value is not None and regex_pattern is None:
            # numeric value
            self.value = value
            self.func = _value_funcs[op](value)
        elif regex_pattern is not None and value is None:
            # Regex pattern
            self.regex_pattern = regex_pattern
            self.func = _value_funcs[op](regex_pattern)
        else:
            raise ValueError("Value constraint must specify a numeric value or regex pattern, but not both")

    @property
    def name(self):
        if self._name is not None:
            return self._name
        # BUGFIX: use hasattr instead of truthiness -- a falsy numeric value
        # (e.g. 0) previously fell into the regex branch and raised
        # AttributeError because `regex_pattern` is unset for value constraints.
        if hasattr(self, "value"):
            return f"value {Op.Name(self.op)} {self.value}"
        return f"value {Op.Name(self.op)} {self.regex_pattern}"

    def update(self, v) -> bool:
        """Apply the constraint to one incoming value, recording totals and failures."""
        self.total += 1
        if self.op in [Op.MATCH, Op.NOMATCH] and not isinstance(v, str):
            # regex operators only apply to strings; any other type is a failure
            self.failures += 1
            if self._verbose:
                logger.info(f"value constraint {self.name} failed: value {v} not a string")
        elif not self.func(v):
            self.failures += 1
            if self._verbose:
                logger.info(f"value constraint {self.name} failed on value {v}")

    @staticmethod
    def from_protobuf(msg: ValueConstraintMsg) -> "ValueConstraint":
        """Deserialize a ValueConstraintMsg, restoring either the numeric value
        or the regex pattern.

        BUGFIX: the regex pattern was previously dropped on round-trip -- the
        numeric `value` field (0.0 by proto3 default) was always passed instead.
        """
        if msg.regex_pattern:
            return ValueConstraint(msg.op, regex_pattern=msg.regex_pattern, name=msg.name, verbose=msg.verbose)
        return ValueConstraint(msg.op, msg.value, name=msg.name, verbose=msg.verbose)

    def to_protobuf(self) -> ValueConstraintMsg:
        """Serialize to ValueConstraintMsg, writing whichever operand kind is set."""
        if hasattr(self, "value"):
            return ValueConstraintMsg(
                name=self.name,
                op=self.op,
                value=self.value,
                verbose=self._verbose,
            )
        else:
            return ValueConstraintMsg(
                name=self.name,
                op=self.op,
                regex_pattern=self.regex_pattern,
                verbose=self._verbose,
            )

    def report(self):
        """Return (name, total evaluated, failure count)."""
        return (self.name, self.total, self.failures)
class SummaryConstraint:
    """
    Summary constraints specify a relationship between a summary field and a static value,
    or between two summary fields.
    e.g.     'min' < 6
             'std_dev' < 2.17
             'min' > 'avg'
    Parameters
    ----------
    first_field : str
        Name of field in NumberSummary that will be compared against either a second field or a static value.
    op : whylogs.proto.Op (required)
        Enumeration of binary comparison operator applied between summary values.
        Enum values are mapped to operators like '==', '<', and '<=', etc.
    value :  (one-of)
        Static value to be compared against summary field specified in `first_field`.
        Only one of `value` or `second_field` should be supplied.
    second_field :  (one-of)
        Name of second field in NumberSummary to be compared against summary field specified in `first_field`.
        Only one of `value` or `second_field` should be supplied.
    name : str
        Name of the constraint used for reporting
    verbose : bool
        If true, log every application of this constraint that fails.
        Useful to identify specific streaming values that fail the constraint.
    """

    def __init__(
        self,
        first_field: str,
        op: Op,
        value=None,
        second_field: str = None,
        name: str = None,
        verbose=False,
    ):
        self._verbose = verbose
        self._name = name
        self.op = op
        self.first_field = first_field
        self.second_field = second_field
        self.total = 0
        self.failures = 0
        if value is not None and second_field is None:
            # field-value summary comparison
            self.value = value
            self.func = _summary_funcs1[op](first_field, value)
        elif second_field is not None and value is None:
            # field-field summary comparison (second_field already stored above)
            self.func = _summary_funcs2[op](first_field, second_field)
        else:
            raise ValueError("Summary constraint must specify a second value or field name, but not both")

    @property
    def name(self):
        if self._name is not None:
            return self._name
        # BUGFIX: `self.value` is only set for field-vs-value constraints;
        # getattr avoids the AttributeError previously raised when building the
        # default name of a field-vs-field constraint.
        value = getattr(self, "value", None)
        return f"summary {self.first_field} {Op.Name(self.op)} {value}/{self.second_field}"

    def update(self, summ: NumberSummary) -> bool:
        """Apply the constraint to one NumberSummary, recording totals and failures."""
        self.total += 1
        if not self.func(summ):
            self.failures += 1
            if self._verbose:
                logger.info(f"summary constraint {self.name} failed")

    @staticmethod
    def from_protobuf(msg: SummaryConstraintMsg) -> "SummaryConstraint":
        """Deserialize a SummaryConstraintMsg; exactly one operand kind must be set."""
        if msg.HasField("value") and not msg.HasField("second_field"):
            return SummaryConstraint(
                msg.first_field,
                msg.op,
                value=msg.value,
                name=msg.name,
                verbose=msg.verbose,
            )
        elif msg.HasField("second_field") and not msg.HasField("value"):
            return SummaryConstraint(
                msg.first_field,
                msg.op,
                second_field=msg.second_field,
                name=msg.name,
                verbose=msg.verbose,
            )
        else:
            raise ValueError("SummaryConstraintMsg must specify a value or second field name, but not both")

    def to_protobuf(self) -> SummaryConstraintMsg:
        """Serialize to SummaryConstraintMsg, writing whichever operand kind is set."""
        if self.second_field is None:
            msg = SummaryConstraintMsg(
                name=self.name,
                first_field=self.first_field,
                op=self.op,
                value=self.value,
                verbose=self._verbose,
            )
        else:
            msg = SummaryConstraintMsg(
                name=self.name,
                first_field=self.first_field,
                op=self.op,
                second_field=self.second_field,
                verbose=self._verbose,
            )
        return msg

    def report(self):
        """Return (name, total evaluated, failure count)."""
        return (self.name, self.total, self.failures)
class ValueConstraints:
    """Collection of ValueConstraint objects applied to each raw value of a column."""

    def __init__(self, constraints: Optional[List["ValueConstraint"]] = None):
        # BUGFIX: the previous `constraints=[]` default was a shared mutable
        # default argument -- every instance built without an explicit list
        # aliased the same list object.
        self.constraints = [] if constraints is None else constraints

    @staticmethod
    def from_protobuf(msg: "ValueConstraintMsgs") -> "ValueConstraints":
        """Deserialize; returns None when the message holds no constraints."""
        v = [ValueConstraint.from_protobuf(c) for c in msg.constraints]
        if len(v) > 0:
            return ValueConstraints(v)
        return None

    def to_protobuf(self) -> "ValueConstraintMsgs":
        """Serialize; returns None when there are no constraints."""
        v = [c.to_protobuf() for c in self.constraints]
        if len(v) > 0:
            vcmsg = ValueConstraintMsgs()
            vcmsg.constraints.extend(v)
            return vcmsg
        return None

    def update(self, v):
        """Feed one raw value to every constraint."""
        for c in self.constraints:
            c.update(v)

    def report(self) -> List[tuple]:
        """Return per-constraint (name, total, failures) tuples, or None when empty."""
        v = [c.report() for c in self.constraints]
        if len(v) > 0:
            return v
        return None
class SummaryConstraints:
    """Collection of SummaryConstraint objects applied to a column's summary."""

    def __init__(self, constraints: List[SummaryConstraint]):
        self.constraints = constraints

    @staticmethod
    def from_protobuf(msg: SummaryConstraintMsgs) -> "SummaryConstraints":
        """Deserialize; returns None when the message holds no constraints."""
        parsed = [SummaryConstraint.from_protobuf(c) for c in msg.constraints]
        return SummaryConstraints(parsed) if parsed else None

    def to_protobuf(self) -> SummaryConstraintMsgs:
        """Serialize; returns None when there are no constraints."""
        serialized = [constraint.to_protobuf() for constraint in self.constraints]
        if not serialized:
            return None
        msg = SummaryConstraintMsgs()
        msg.constraints.extend(serialized)
        return msg

    def update(self, v):
        """Feed one summary to every constraint."""
        for constraint in self.constraints:
            constraint.update(v)

    def report(self) -> List[tuple]:
        """Return per-constraint (name, total, failures) tuples, or None when empty."""
        reports = [constraint.report() for constraint in self.constraints]
        return reports if reports else None
class DatasetConstraints:
    """Bundle of per-column value and summary constraints for one dataset.

    Plain lists supplied in either mapping are wrapped in ValueConstraints /
    SummaryConstraints. BUGFIX: normalization now builds fresh dicts instead
    of writing back into the caller's mappings.
    """

    def __init__(
        self,
        props: "DatasetProperties",
        value_constraints: Optional[Mapping[str, "ValueConstraints"]] = None,
        summary_constraints: Optional[Mapping[str, "SummaryConstraints"]] = None,
    ):
        self.dataset_properties = props
        # repackage lists of constraints if necessary, without mutating the input
        if value_constraints is None:
            value_constraints = dict()
        self.value_constraint_map = {
            k: ValueConstraints(v) if isinstance(v, list) else v
            for k, v in value_constraints.items()
        }
        if summary_constraints is None:
            summary_constraints = dict()
        self.summary_constraint_map = {
            k: SummaryConstraints(v) if isinstance(v, list) else v
            for k, v in summary_constraints.items()
        }

    def __getitem__(self, key):
        """Return the ValueConstraints for `key`, or None when the column has none."""
        if key in self.value_constraint_map:
            return self.value_constraint_map[key]
        return None

    @staticmethod
    def from_protobuf(msg: "DatasetConstraintMsg") -> "DatasetConstraints":
        """Deserialize a DatasetConstraintMsg into a DatasetConstraints object."""
        vm = {k: ValueConstraints.from_protobuf(v) for k, v in msg.value_constraints.items()}
        sm = {k: SummaryConstraints.from_protobuf(v) for k, v in msg.summary_constraints.items()}
        return DatasetConstraints(msg.properties, vm, sm)

    @staticmethod
    def from_json(data: str) -> "DatasetConstraints":
        """Parse a JSON-serialized DatasetConstraintMsg."""
        msg = Parse(data, DatasetConstraintMsg())
        return DatasetConstraints.from_protobuf(msg)

    def to_protobuf(self) -> "DatasetConstraintMsg":
        # Map each column name to its serialized constraint collection.
        vm = {k: v.to_protobuf() for k, v in self.value_constraint_map.items()}
        sm = {k: s.to_protobuf() for k, s in self.summary_constraint_map.items()}
        return DatasetConstraintMsg(
            properties=self.dataset_properties,
            value_constraints=vm,
            summary_constraints=sm,
        )

    def to_json(self) -> str:
        """Return the protobuf message rendered as JSON."""
        return message_to_json(self.to_protobuf())

    def report(self):
        """Return (column, report) pairs for all value then summary constraints."""
        l1 = [(k, v.report()) for k, v in self.value_constraint_map.items()]
        l2 = [(k, s.report()) for k, s in self.summary_constraint_map.items()]
        return l1 + l2
| [
"whylogs.proto.DatasetConstraintMsg",
"whylogs.proto.SummaryConstraintMsg",
"whylogs.proto.Op.Name",
"whylogs.proto.SummaryConstraintMsgs",
"whylogs.proto.ValueConstraintMsg",
"whylogs.proto.ValueConstraintMsgs"
] | [((384, 411), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (401, 411), False, 'import logging\n'), ((12934, 13041), 'whylogs.proto.DatasetConstraintMsg', 'DatasetConstraintMsg', ([], {'properties': 'self.dataset_properties', 'value_constraints': 'vm', 'summary_constraints': 'sm'}), '(properties=self.dataset_properties, value_constraints=\n vm, summary_constraints=sm)\n', (12954, 13041), False, 'from whylogs.proto import DatasetConstraintMsg, DatasetProperties, NumberSummary, Op, SummaryConstraintMsg, SummaryConstraintMsgs, ValueConstraintMsg, ValueConstraintMsgs\n'), ((4729, 4821), 'whylogs.proto.ValueConstraintMsg', 'ValueConstraintMsg', ([], {'name': 'self.name', 'op': 'self.op', 'value': 'self.value', 'verbose': 'self._verbose'}), '(name=self.name, op=self.op, value=self.value, verbose=\n self._verbose)\n', (4747, 4821), False, 'from whylogs.proto import DatasetConstraintMsg, DatasetProperties, NumberSummary, Op, SummaryConstraintMsg, SummaryConstraintMsgs, ValueConstraintMsg, ValueConstraintMsgs\n'), ((4929, 5037), 'whylogs.proto.ValueConstraintMsg', 'ValueConstraintMsg', ([], {'name': 'self.name', 'op': 'self.op', 'regex_pattern': 'self.regex_pattern', 'verbose': 'self._verbose'}), '(name=self.name, op=self.op, regex_pattern=self.\n regex_pattern, verbose=self._verbose)\n', (4947, 5037), False, 'from whylogs.proto import DatasetConstraintMsg, DatasetProperties, NumberSummary, Op, SummaryConstraintMsg, SummaryConstraintMsgs, ValueConstraintMsg, ValueConstraintMsgs\n'), ((8651, 8775), 'whylogs.proto.SummaryConstraintMsg', 'SummaryConstraintMsg', ([], {'name': 'self.name', 'first_field': 'self.first_field', 'op': 'self.op', 'value': 'self.value', 'verbose': 'self._verbose'}), '(name=self.name, first_field=self.first_field, op=self.\n op, value=self.value, verbose=self._verbose)\n', (8671, 8775), False, 'from whylogs.proto import DatasetConstraintMsg, DatasetProperties, NumberSummary, Op, SummaryConstraintMsg, 
SummaryConstraintMsgs, ValueConstraintMsg, ValueConstraintMsgs\n'), ((8898, 9036), 'whylogs.proto.SummaryConstraintMsg', 'SummaryConstraintMsg', ([], {'name': 'self.name', 'first_field': 'self.first_field', 'op': 'self.op', 'second_field': 'self.second_field', 'verbose': 'self._verbose'}), '(name=self.name, first_field=self.first_field, op=self.\n op, second_field=self.second_field, verbose=self._verbose)\n', (8918, 9036), False, 'from whylogs.proto import DatasetConstraintMsg, DatasetProperties, NumberSummary, Op, SummaryConstraintMsg, SummaryConstraintMsgs, ValueConstraintMsg, ValueConstraintMsgs\n'), ((9747, 9768), 'whylogs.proto.ValueConstraintMsgs', 'ValueConstraintMsgs', ([], {}), '()\n', (9766, 9768), False, 'from whylogs.proto import DatasetConstraintMsg, DatasetProperties, NumberSummary, Op, SummaryConstraintMsg, SummaryConstraintMsgs, ValueConstraintMsg, ValueConstraintMsgs\n'), ((10625, 10648), 'whylogs.proto.SummaryConstraintMsgs', 'SummaryConstraintMsgs', ([], {}), '()\n', (10646, 10648), False, 'from whylogs.proto import DatasetConstraintMsg, DatasetProperties, NumberSummary, Op, SummaryConstraintMsg, SummaryConstraintMsgs, ValueConstraintMsg, ValueConstraintMsgs\n'), ((12489, 12511), 'whylogs.proto.DatasetConstraintMsg', 'DatasetConstraintMsg', ([], {}), '()\n', (12509, 12511), False, 'from whylogs.proto import DatasetConstraintMsg, DatasetProperties, NumberSummary, Op, SummaryConstraintMsg, SummaryConstraintMsgs, ValueConstraintMsg, ValueConstraintMsgs\n'), ((1142, 1156), 're.match', 're.match', (['x', 'v'], {}), '(x, v)\n', (1150, 1156), False, 'import re\n'), ((1206, 1220), 're.match', 're.match', (['x', 'v'], {}), '(x, v)\n', (1214, 1220), False, 'import re\n'), ((7459, 7475), 'whylogs.proto.Op.Name', 'Op.Name', (['self.op'], {}), '(self.op)\n', (7466, 7475), False, 'from whylogs.proto import DatasetConstraintMsg, DatasetProperties, NumberSummary, Op, SummaryConstraintMsg, SummaryConstraintMsgs, ValueConstraintMsg, ValueConstraintMsgs\n'), 
((3838, 3854), 'whylogs.proto.Op.Name', 'Op.Name', (['self.op'], {}), '(self.op)\n', (3845, 3854), False, 'from whylogs.proto import DatasetConstraintMsg, DatasetProperties, NumberSummary, Op, SummaryConstraintMsg, SummaryConstraintMsgs, ValueConstraintMsg, ValueConstraintMsgs\n'), ((3954, 3970), 'whylogs.proto.Op.Name', 'Op.Name', (['self.op'], {}), '(self.op)\n', (3961, 3970), False, 'from whylogs.proto import DatasetConstraintMsg, DatasetProperties, NumberSummary, Op, SummaryConstraintMsg, SummaryConstraintMsgs, ValueConstraintMsg, ValueConstraintMsgs\n')] |
import numpy as np
import pandas as pd
import pytest
from whylogs.core.types import TypedDataConverter
_TEST_NULL_DATA = [
([None, np.nan, None] * 3, 9),
([pd.Series(data={"a": None, "b": None}, index=["x", "y"]), pd.Series(data={"c": None, "d": 1}, index=["x", "y"])], 2),
([[None, np.nan], [np.nan], [None]], 3),
([[None, 1], [None]], 1),
([np.zeros(3)], 0),
]
def test_invalid_yaml_returns_string():
x = " \tSr highway safety Specialist"
assert x == TypedDataConverter.convert(x)
# Just verify that `x` is invalid yaml
import yaml
try:
yaml.safe_load(x)
raise RuntimeError("Should raise exception")
except yaml.scanner.ScannerError:
pass
@pytest.mark.parametrize("data,nulls_expected", _TEST_NULL_DATA)
def test_are_nulls(data, nulls_expected):
null_count = 0
for val in data:
if TypedDataConverter._are_nulls(val):
null_count += 1
assert null_count == nulls_expected
| [
"whylogs.core.types.TypedDataConverter._are_nulls",
"whylogs.core.types.TypedDataConverter.convert"
] | [((718, 781), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data,nulls_expected"""', '_TEST_NULL_DATA'], {}), "('data,nulls_expected', _TEST_NULL_DATA)\n", (741, 781), False, 'import pytest\n'), ((485, 514), 'whylogs.core.types.TypedDataConverter.convert', 'TypedDataConverter.convert', (['x'], {}), '(x)\n', (511, 514), False, 'from whylogs.core.types import TypedDataConverter\n'), ((593, 610), 'yaml.safe_load', 'yaml.safe_load', (['x'], {}), '(x)\n', (607, 610), False, 'import yaml\n'), ((875, 909), 'whylogs.core.types.TypedDataConverter._are_nulls', 'TypedDataConverter._are_nulls', (['val'], {}), '(val)\n', (904, 909), False, 'from whylogs.core.types import TypedDataConverter\n'), ((166, 222), 'pandas.Series', 'pd.Series', ([], {'data': "{'a': None, 'b': None}", 'index': "['x', 'y']"}), "(data={'a': None, 'b': None}, index=['x', 'y'])\n", (175, 222), True, 'import pandas as pd\n'), ((224, 277), 'pandas.Series', 'pd.Series', ([], {'data': "{'c': None, 'd': 1}", 'index': "['x', 'y']"}), "(data={'c': None, 'd': 1}, index=['x', 'y'])\n", (233, 277), True, 'import pandas as pd\n'), ((365, 376), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (373, 376), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
TODO:
* Date parsing compatible with EasyDateTimeParser (Java)
"""
from logging import getLogger
from whylogs.util import protobuf
CSV_READER_BATCH_SIZE = int(1e4)
OUTPUT_DATE_FORMAT = "%Y/%m/%d"
LOGGER = "whylogs.logs.profiler"
def write_protobuf(vals: list, fname):
"""
Write a list of objects with a `to_protobuf()` method to a binary file.
`vals` must be iterable
"""
serialized = [x.to_protobuf() for x in vals]
getLogger(LOGGER).info("Writing to protobuf binary file: {}".format(fname))
protobuf.write_multi_msg(serialized, fname)
def df_to_records(df, dropna=True):
"""
Convert a dataframe to a list of dictionaries, one per row, dropping null
values
"""
import pandas as pd
records = df.to_dict(orient="records")
if dropna:
records = [{k: v for k, v in m.items() if pd.notnull(v)} for m in records]
return records
def csv_reader(f, date_format: str = None, dropna=False, infer_dtypes=False, **kwargs):
"""
Wrapper for `pandas.read_csv` to return an iterator to return dict
records for a CSV file
See also `pandas.read_csv`
Parameters
----------
f : str, path object, or file-like object
File to read from. See `pandas.read_csv` documentation
date_format : str
If specified, string format for the date. See `pd.datetime.strptime`
dropna : bool
Remove null values from returned records
infer_dtypes : bool
Automatically infer data types (standard pandas behavior). Else,
return all items as strings (except specified date columns)
**kwargs : passed to `pandas.read_csv`
"""
import pandas as pd
date_parser = None
if date_format is not None:
def date_parser(x):
return pd.datetime.strptime(x, date_format) # noqa pep8
opts = {
"chunksize": CSV_READER_BATCH_SIZE,
"date_parser": date_parser,
}
if not infer_dtypes:
# HACKY way not parse any entries and return strings
opts["converters"] = {i: str for i in range(10000)}
opts.update(kwargs)
for batch in pd.read_csv(f, **opts):
records = df_to_records(batch, dropna=dropna)
for record in records:
yield record
def run(
input_path,
datetime: str = None,
delivery_stream=None,
fmt=None,
limit=-1,
output_prefix=None,
region=None,
separator=None,
dropna=False,
infer_dtypes=False,
):
"""
Run the profiler on CSV data
Output Notes
------------
<output_prefix>_<name>_summary.csv
Dataset profile. Contains scalar statistics per column
<output_prefix>_<name>_histogram.json
Histograms for each column for dataset `name`
<output_prefix>_<name>_strings.json
Frequent strings
<output_prefix>.json
DatasetSummaries, nested JSON summaries of dataset statistics
<output_prefix>.bin
Binary protobuf output of DatasetProfile
Parameters
----------
input_path : str
Input CSV file
datetime : str
Column containing timestamps. If missing, we assume the dataset is
running in batch mode
delivery_stream : str
[IGNORED] The delivery stream name
fmt : str
Format of the datetime column, used if `datetime` is specified.
If not specified, the format will be attempt to be inferred.
limit : int
Limit the number of entries to processes
output_prefix : str
Specify a prefix for the output files. By default, this will be
derived from the input path to generate files in the input directory.
Can include folders
region : str
[IGNORED] AWS region name for Firehose
separator : str
Record separator. Default = ','
dropna : bool
Drop null values when reading
infer_dtypes : bool
Infer input datatypes when reading. If false, treat inputs as
un-converted strings.
"""
datetime_col = datetime # don't shadow the standard module name
import os
from datetime import datetime
from whylogs.logs import DatasetProfile, DatasetSummaries
from whylogs.util import message_to_json
logger = getLogger(LOGGER)
# Parse arguments
if separator is None:
separator = ","
name = os.path.basename(input_path)
parse_dates = False
if datetime_col is not None:
parse_dates = [datetime_col]
nrows = None
if limit > 0:
nrows = limit
if output_prefix is None:
import random
import time
parent_folder = os.path.dirname(os.path.realpath(input_path))
basename = os.path.splitext(os.path.basename(input_path))[0]
epoch_minutes = int(time.time() / 60)
output_base = "{}.{}-{}-{}".format(
basename,
epoch_minutes,
random.randint(100000, 999999),
random.randint(100000, 999999),
)
output_prefix = os.path.join(parent_folder, output_base)
output_base = output_prefix
binary_output_path = output_base + ".bin"
json_output_path = output_base + ".json"
# Process records
reader = csv_reader(
input_path,
fmt,
parse_dates=parse_dates,
nrows=nrows,
sep=separator,
dropna=dropna,
infer_dtypes=infer_dtypes,
)
profiles = {}
for record in reader:
dt = record.get(datetime_col, datetime.now(datetime.timezone.utc))
assert isinstance(dt, datetime)
dt_str = dt.strftime(OUTPUT_DATE_FORMAT)
try:
ds = profiles[dt_str]
except KeyError:
ds = DatasetProfile(name, dt)
profiles[dt_str] = ds
ds.track(record)
logger.info("Finished collecting statistics")
# Build summaries for the JSON output
summaries = DatasetSummaries(profiles={k: v.to_summary() for k, v in profiles.items()})
with open(json_output_path, "wt") as fp:
logger.info("Writing JSON summaries to: {}".format(json_output_path))
fp.write(message_to_json(summaries))
# Write the protobuf binary file
write_protobuf(profiles.values(), binary_output_path)
return profiles
if __name__ == "__main__":
import argh
from whylogs.logs import display_logging
display_logging("DEBUG")
argh.dispatch_command(run)
| [
"whylogs.logs.display_logging",
"whylogs.logs.DatasetProfile",
"whylogs.util.message_to_json",
"whylogs.util.protobuf.write_multi_msg"
] | [((558, 601), 'whylogs.util.protobuf.write_multi_msg', 'protobuf.write_multi_msg', (['serialized', 'fname'], {}), '(serialized, fname)\n', (582, 601), False, 'from whylogs.util import protobuf\n'), ((2149, 2171), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f, **opts)\n', (2160, 2171), True, 'import pandas as pd\n'), ((4250, 4267), 'logging.getLogger', 'getLogger', (['LOGGER'], {}), '(LOGGER)\n', (4259, 4267), False, 'from logging import getLogger\n'), ((4352, 4380), 'os.path.basename', 'os.path.basename', (['input_path'], {}), '(input_path)\n', (4368, 4380), False, 'import os\n'), ((6339, 6363), 'whylogs.logs.display_logging', 'display_logging', (['"""DEBUG"""'], {}), "('DEBUG')\n", (6354, 6363), False, 'from whylogs.logs import display_logging\n'), ((6368, 6394), 'argh.dispatch_command', 'argh.dispatch_command', (['run'], {}), '(run)\n', (6389, 6394), False, 'import argh\n'), ((5005, 5045), 'os.path.join', 'os.path.join', (['parent_folder', 'output_base'], {}), '(parent_folder, output_base)\n', (5017, 5045), False, 'import os\n'), ((478, 495), 'logging.getLogger', 'getLogger', (['LOGGER'], {}), '(LOGGER)\n', (487, 495), False, 'from logging import getLogger\n'), ((1811, 1847), 'pandas.datetime.strptime', 'pd.datetime.strptime', (['x', 'date_format'], {}), '(x, date_format)\n', (1831, 1847), True, 'import pandas as pd\n'), ((4645, 4673), 'os.path.realpath', 'os.path.realpath', (['input_path'], {}), '(input_path)\n', (4661, 4673), False, 'import os\n'), ((4895, 4925), 'random.randint', 'random.randint', (['(100000)', '(999999)'], {}), '(100000, 999999)\n', (4909, 4925), False, 'import random\n'), ((4939, 4969), 'random.randint', 'random.randint', (['(100000)', '(999999)'], {}), '(100000, 999999)\n', (4953, 4969), False, 'import random\n'), ((5474, 5509), 'datetime.datetime.now', 'datetime.now', (['datetime.timezone.utc'], {}), '(datetime.timezone.utc)\n', (5486, 5509), False, 'from datetime import datetime\n'), ((6099, 6125), 'whylogs.util.message_to_json', 
'message_to_json', (['summaries'], {}), '(summaries)\n', (6114, 6125), False, 'from whylogs.util import message_to_json\n'), ((4711, 4739), 'os.path.basename', 'os.path.basename', (['input_path'], {}), '(input_path)\n', (4727, 4739), False, 'import os\n'), ((4772, 4783), 'time.time', 'time.time', ([], {}), '()\n', (4781, 4783), False, 'import time\n'), ((5689, 5713), 'whylogs.logs.DatasetProfile', 'DatasetProfile', (['name', 'dt'], {}), '(name, dt)\n', (5703, 5713), False, 'from whylogs.logs import DatasetProfile, DatasetSummaries\n'), ((878, 891), 'pandas.notnull', 'pd.notnull', (['v'], {}), '(v)\n', (888, 891), True, 'import pandas as pd\n')] |
"import copy\nimport datetime\nimport json\nimport logging\nimport numbers\nimport re\nfrom typing i(...TRUNCATED) | ["whylogs.core.summaryconverters.ks_test_compute_p_value","whylogs.core.statistics.hllsketch.HllSket(...TRUNCATED) | "[((1343, 1370), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\\n', (136(...TRUNCATED) |
"import datetime\nimport json\nimport os\nfrom uuid import uuid4\n\nimport pytest\nimport numpy as n(...TRUNCATED) | ["whylogs.core.datasetprofile.DatasetProfile.parse_delimited_single","whylogs.util.protobuf.message_(...TRUNCATED) | "[((506, 524), 'numpy.zeros', 'np.zeros', (['[100, 1]'], {}), '([100, 1])\\n', (514, 524), True, 'im(...TRUNCATED) |
"# -*- coding: utf-8 -*-\n\"\"\"The app module, containing the app factory function.\"\"\"\n\nimport(...TRUNCATED) | [
"whylogs.get_or_create_session"
] | "[((431, 444), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\\n', (442, 444), False, 'from dote(...TRUNCATED) |
"from logging import getLogger\nfrom typing import List, Union\n\nimport numpy as np\nfrom sklearn.u(...TRUNCATED) | ["whylogs.core.statistics.NumberTracker.from_protobuf","whylogs.core.statistics.NumberTracker","whyl(...TRUNCATED) | "[((422, 442), 'logging.getLogger', 'getLogger', (['\"\"\"whylogs\"\"\"'], {}), \"('whylogs')\\n\", (...TRUNCATED) |
"from typing import List, Union\n\nfrom whylogs.core.metrics.confusion_matrix import ConfusionMatrix(...TRUNCATED) | ["whylogs.core.metrics.confusion_matrix.ConfusionMatrix.from_protobuf","whylogs.core.metrics.confusi(...TRUNCATED) | "[((1773, 1888), 'whylogs.core.metrics.confusion_matrix.ConfusionMatrix', 'ConfusionMatrix', (['labe(...TRUNCATED) |
End of preview. Expand
in Data Studio
README.md exists but content is empty.
- Downloads last month
- 6