id int64 1 6.07M | name stringlengths 1 295 | code stringlengths 12 426k | language stringclasses 1
value | source_file stringlengths 5 202 | start_line int64 1 158k | end_line int64 1 158k | repo dict |
|---|---|---|---|---|---|---|---|
701 | mssql_backend | def mssql_backend():
... | python | tests/pytest_tests/system_tests/test_importers/conftest.py | 194 | 195 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
702 | mysql_backend | def mysql_backend():
... | python | tests/pytest_tests/system_tests/test_importers/conftest.py | 199 | 200 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
703 | postgres_backend | def postgres_backend():
... | python | tests/pytest_tests/system_tests/test_importers/conftest.py | 204 | 205 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
704 | file_backend | def file_backend(tmp_path):
yield tmp_path / "mlruns" | python | tests/pytest_tests/system_tests/test_importers/conftest.py | 209 | 210 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
705 | sqlite_backend | def sqlite_backend():
yield "sqlite:///mlflow.db" | python | tests/pytest_tests/system_tests/test_importers/conftest.py | 214 | 215 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
706 | mlflow_backend | def mlflow_backend(request):
yield request.getfixturevalue(request.param) | python | tests/pytest_tests/system_tests/test_importers/conftest.py | 228 | 229 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
707 | file_artifacts | def file_artifacts(tmp_path):
yield tmp_path / "mlartifacts" | python | tests/pytest_tests/system_tests/test_importers/conftest.py | 233 | 234 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
708 | s3_artifacts | def s3_artifacts():
yield ... | python | tests/pytest_tests/system_tests/test_importers/conftest.py | 238 | 239 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
709 | mlflow_artifacts_destination | def mlflow_artifacts_destination(request):
yield request.getfixturevalue(request.param) | python | tests/pytest_tests/system_tests/test_importers/conftest.py | 248 | 249 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
710 | get_free_port | def get_free_port():
import socket
sock = socket.socket()
sock.bind(("", 0))
return str(sock.getsockname()[1]) | python | tests/pytest_tests/system_tests/test_importers/conftest.py | 252 | 257 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
711 | mlflow_server | def mlflow_server(mlflow_backend, mlflow_artifacts_destination):
new_port = get_free_port()
modified_base_url = MLFLOW_BASE_URL.replace("4040", new_port)
start_cmd = [
"mlflow",
"server",
"-p",
new_port,
"--backend-store-uri",
mlflow_backend,
"--artifacts-destination",
mlflow_artifacts_destination,
]
process = subprocess.Popen(start_cmd) # process
healthy = check_mlflow_server_health(
modified_base_url, MLFLOW_HEALTH_ENDPOINT, num_retries=30
)
if healthy:
yield modified_base_url
else:
raise Exception("MLflow server is not healthy")
kill_child_processes(process.pid) | python | tests/pytest_tests/system_tests/test_importers/conftest.py | 261 | 285 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
712 | prelogged_mlflow_server | def prelogged_mlflow_server(mlflow_server):
log_to_mlflow(mlflow_server, EXPERIMENTS, RUNS_PER_EXPERIMENT, STEPS)
yield mlflow_server, EXPERIMENTS, RUNS_PER_EXPERIMENT, STEPS | python | tests/pytest_tests/system_tests/test_importers/conftest.py | 289 | 291 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
713 | teardown | def teardown():
yield
wandb.finish()
if os.path.isdir("wandb"):
shutil.rmtree("wandb")
if os.path.isdir("artifacts"):
shutil.rmtree("artifacts") | python | tests/standalone_tests/artifact_benchmark.py | 14 | 20 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
714 | test_benchmark_upload_artifact | def test_benchmark_upload_artifact(
tmp_path: pathlib.Path,
benchmark,
_async_upload_concurrency_limit: Optional[int],
num_files: int,
):
data_dir = tmp_path / "data"
data_dir.mkdir()
num_file_digits = len(str(num_files - 1))
for filenum in range(num_files):
(data_dir / f"file{filenum:0{num_file_digits}d}.txt").write_bytes(
np.random.bytes(40_000)
)
with patch.dict(os.environ, {"WANDB_CACHE_DIR": str(tmp_path / "cache")}):
with wandb.init(
settings={
"_async_upload_concurrency_limit": _async_upload_concurrency_limit
},
) as run:
artifact = wandb.Artifact("benchmark", "benchmark")
artifact.add_dir(data_dir)
run.log_artifact(artifact)
benchmark.pedantic(
target=artifact.wait,
rounds=1,
iterations=1,
) | python | tests/standalone_tests/artifact_benchmark.py | 25 | 52 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
715 | main | def main():
wandb.init(name=pathlib.Path(__file__).stem)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(3, 3, activation="relu", input_shape=(28, 28, 1)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(10, activation="softmax"))
model.compile(
loss="sparse_categorical_crossentropy", optimizer="sgd", metrics=["accuracy"]
)
model.fit(
np.ones((10, 28, 28, 1)),
np.ones((10,)),
epochs=7,
validation_split=0.2,
callbacks=[WandbCallback()],
) | python | tests/standalone_tests/mixed_keras.py | 10 | 27 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
716 | main | def main():
run = wandb.init()
print("config", wandb.config)
print("resumed", run.resumed)
config_len = len(wandb.config.keys())
conf_update = {}
conf_update[str(config_len)] = random.random()
wandb.config.update(conf_update) | python | tests/standalone_tests/resume-empty.py | 10 | 17 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
717 | update_versions | def update_versions(version=1):
root = f"./versions{version}"
os.makedirs(root, exist_ok=True)
with open(f"{root}/every.txt", "w") as f:
f.write(f"{PREFIX} every version {version}")
if version % 2 == 0:
with open(f"{root}/even.txt", "w") as f:
f.write(f"{PREFIX} even version {version}")
else:
with open(f"{root}/odd.txt", "w") as f:
f.write(f"{PREFIX} odd version {version}")
return root | python | tests/standalone_tests/artifact_references.py | 20 | 31 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
718 | sync_buckets | def sync_buckets(root):
# Sync up
os.system(f"gsutil rsync {root} {GCS_REMOTE}")
os.system(f"aws s3 sync {root} {S3_REMOTE}")
# Sync down
os.system(f"gsutil rsync {GCS_REMOTE} {root}")
os.system(f"aws s3 sync {S3_REMOTE} {root}") | python | tests/standalone_tests/artifact_references.py | 34 | 40 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
719 | log_artifacts | def log_artifacts():
gcs_art = wandb.Artifact(name=GCS_NAME, type="dataset")
s3_art = wandb.Artifact(name=S3_NAME, type="dataset")
gcs_art.add_reference(GCS_REMOTE)
s3_art.add_reference(S3_REMOTE)
run = wandb.init(project="artifact-references", entity=ENTITY, reinit=True)
run.log_artifact(gcs_art)
run.log_artifact(s3_art)
return gcs_art, s3_art | python | tests/standalone_tests/artifact_references.py | 43 | 51 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
720 | download_artifacts | def download_artifacts(gcs_alias="v0", s3_alias="v0"):
api = wandb.Api()
gcs_art = api.artifact(
name=f"{ENTITY}/artifact-references/{GCS_NAME}:{gcs_alias}", type="dataset"
)
s3_art = api.artifact(
name=f"{ENTITY}/artifact-references/{S3_NAME}:{s3_alias}", type="dataset"
)
gcs_art.download()
s3_art.download()
return gcs_art, s3_art | python | tests/standalone_tests/artifact_references.py | 54 | 64 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
721 | main | def main(argv):
v1_root = update_versions()
sync_buckets(v1_root)
log_artifacts()
v2_root = update_versions(2)
sync_buckets(v2_root)
log_artifacts()
print("Sleeping for arts to get processed...")
time.sleep(1)
gcs_v1_art, s3_v1_art = download_artifacts()
gcs_v2_art, s3_v2_art = download_artifacts("v1", "v1")
gcs_latest_art, s3_latest_art = download_artifacts("latest", "latest")
v1_gcs_cmp = dircmp(gcs_v1_art.cache_dir, v1_root)
v1_s3_cmp = dircmp(s3_v1_art.cache_dir, v1_root)
v2_gcs_cmp = dircmp(gcs_v2_art.cache_dir, v2_root)
v2_s3_cmp = dircmp(s3_v2_art.cache_dir, v2_root)
latest_gcs_cmp = dircmp(gcs_latest_art.cache_dir, v2_root)
latest_s3_cmp = dircmp(s3_latest_art.cache_dir, v2_root)
print("v0 GCS")
v1_gcs_cmp.report()
print("v0 S3")
v1_s3_cmp.report()
print("v1 GCS")
v2_gcs_cmp.report()
print("v1 S3")
v2_s3_cmp.report()
print("latest GCS")
latest_gcs_cmp.report()
print("latest S3")
latest_s3_cmp.report()
assert v1_gcs_cmp.common == ["even.txt", "every.txt", "odd.txt"]
assert v1_s3_cmp.common == ["even.txt", "every.txt", "odd.txt"]
assert v2_gcs_cmp.common == ["even.txt", "every.txt", "odd.txt"]
assert v2_s3_cmp.common == ["even.txt", "every.txt", "odd.txt"]
assert latest_gcs_cmp.common == ["even.txt", "every.txt", "odd.txt"]
assert latest_s3_cmp.common == ["even.txt", "every.txt", "odd.txt"] | python | tests/standalone_tests/artifact_references.py | 67 | 116 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
722 | gen_point | def gen_point(theta, chi, i):
p = sin(theta) * 4.5 * sin(i + 1 / 2 * (i * i + 2)) + cos(chi) * 7 * sin(
(2 * i - 4) / 2 * (i + 2)
)
x = p * sin(chi) * cos(theta)
y = p * sin(chi) * sin(theta)
z = p * cos(chi)
r = sin(theta) * 120 + 120
g = sin(x) * 120 + 120
b = cos(y) * 120 + 120
return [x, y, z, r, g, b] | python | tests/standalone_tests/point_cloud.py | 29 | 42 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
723 | wave_pattern | def wave_pattern(i):
return np.array([gen_point(theta, chi, i) for [theta, chi] in theta_chi]) | python | tests/standalone_tests/point_cloud.py | 45 | 46 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
724 | main | def main():
run = wandb.init()
# Tests 3d OBJ
# wandb.log({"gltf": wandb.Object3D(open(os.path.join(DIR, "assets", "Duck.gltf"))),
# "obj": wandb.Object3D(open(os.path.join(DIR, "assets", "cube.obj")))})
artifact = wandb.Artifact("pointcloud_test_2", "dataset")
table = wandb.Table(
["ID", "Model"],
)
# Tests numpy clouds
for i in range(0, 20, 10):
table.add_data("Cloud " + str(i), wandb.Object3D(wave_pattern(i)))
wandb.log(
{
"Clouds": [
wandb.Object3D(point_cloud_1),
wandb.Object3D(point_cloud_2),
],
"Colored_Cloud": wandb.Object3D(wave_pattern(i)),
}
)
artifact.add(table, "table")
run.log_artifact(artifact) | python | tests/standalone_tests/point_cloud.py | 49 | 76 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
725 | main | def main():
# wandb.init(project="tf2", sync_tensorboard=True, resume=True)
wandb.init(sync_tensorboard=True, resume=True)
wandb.config["nice"] = "So cool fun"
class Logger(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
time.sleep(2)
wandb.log({"wild_metrics": logs, "interval": epoch * 10})
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(3, 3, activation="relu", input_shape=(28, 28, 1)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(10, activation="softmax"))
model.compile(
loss="sparse_categorical_crossentropy", optimizer="sgd", metrics=["accuracy"]
)
model.fit(
np.ones((10, 28, 28, 1)),
np.ones((10,)),
epochs=17,
validation_split=0.2,
callbacks=[Logger(), tf.keras.callbacks.TensorBoard()],
) | python | tests/standalone_tests/keras_tensorboard.py | 8 | 33 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
726 | on_epoch_end | def on_epoch_end(self, epoch, logs):
time.sleep(2)
wandb.log({"wild_metrics": logs, "interval": epoch * 10}) | python | tests/standalone_tests/keras_tensorboard.py | 15 | 17 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
727 | make_scene | def make_scene(vecs):
return wandb.Object3D(
{
"type": "lidar/beta",
"vectors": np.array(vecs),
"points": points,
"boxes": np.array(
[
{
"corners": [
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[1, 1, 0],
[0, 1, 1],
[1, 0, 1],
[1, 1, 1],
],
# "label": "Tree",
"color": [123, 321, 111],
},
{
"corners": [
[0, 0, 0],
[0, 2, 0],
[0, 0, 2],
[2, 0, 0],
[2, 2, 0],
[0, 2, 2],
[2, 0, 2],
[2, 2, 2],
],
# "label": "Card",
"color": [111, 321, 0],
},
]
),
}
) | python | tests/standalone_tests/point_cloud_scene.py | 14 | 53 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
728 | main | def main():
vectors = [
{"start": [1, 1, 1], "end": [1, 1.5, 1]},
{"start": [1, 1, 1], "end": [1, 1, 1.5]},
{"start": [1, 1, 1], "end": [1.2, 1.5, 1.5]},
]
vectors_2 = [
{"start": [2, 2, 2], "end": [1, 1.5, 1], "color": [255, 255, 0]},
{
"start": [2, 2, 2],
"end": [1, 1, 1.5],
"color": [255, 255, 0],
},
{"start": [2, 2, 2], "end": [1.2, 1.5, 1.5], "color": [255, 255, 0]},
]
vectors_all = vectors + vectors_2
wandb.log(
{
"separate_vectors": [make_scene([v]) for v in vectors],
"color_vectors": make_scene(vectors_2),
"all_vectors": make_scene(vectors_all),
}
) | python | tests/standalone_tests/point_cloud_scene.py | 56 | 81 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
729 | get_init_count | def get_init_count():
global init_count
current_count = init_count
init_count += 1
return current_count | python | tests/standalone_tests/artifact_tests.py | 13 | 17 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
730 | teardown | def teardown():
wandb.finish()
if os.path.isdir("wandb"):
shutil.rmtree("wandb")
if os.path.isdir("artifacts"):
shutil.rmtree("artifacts") | python | tests/standalone_tests/artifact_tests.py | 20 | 25 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
731 | _run_eq | def _run_eq(run_a, run_b):
return (
run_a.id == run_b.id
and run_a.entity == run_b.entity
and run_a.project == run_b.project
) | python | tests/standalone_tests/artifact_tests.py | 28 | 33 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
732 | _runs_eq | def _runs_eq(runs_a, runs_b):
return all([_run_eq(run_a, run_b) for run_a, run_b in zip(runs_a, runs_b)]) | python | tests/standalone_tests/artifact_tests.py | 36 | 37 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
733 | test_artifact_run_lookup_apis | def test_artifact_run_lookup_apis():
artifact_1_name = f"a1-{str(time.time())}"
artifact_2_name = f"a2-{str(time.time())}"
# Initial setup
run_1 = wandb.init(name=f"{run_name_base}-{get_init_count()}")
artifact = wandb.Artifact(artifact_1_name, "test_type")
artifact.add(wandb.Image(np.random.randint(0, 255, (10, 10))), "image")
run_1.log_artifact(artifact)
artifact = wandb.Artifact(artifact_2_name, "test_type")
artifact.add(wandb.Image(np.random.randint(0, 255, (10, 10))), "image")
run_1.log_artifact(artifact)
run_1.finish()
# Create a second version for a1
run_2 = wandb.init(name=f"{run_name_base}-{get_init_count()}")
artifact = wandb.Artifact(artifact_1_name, "test_type")
artifact.add(wandb.Image(np.random.randint(0, 255, (10, 10))), "image")
run_2.log_artifact(artifact)
run_2.finish()
# Use both
run_3 = wandb.init(name=f"{run_name_base}-{get_init_count()}")
a1 = run_3.use_artifact(artifact_1_name + ":latest")
assert _runs_eq(a1.used_by(), [run_3])
assert _run_eq(a1.logged_by(), run_2)
a2 = run_3.use_artifact(artifact_2_name + ":latest")
assert _runs_eq(a2.used_by(), [run_3])
assert _run_eq(a2.logged_by(), run_1)
run_3.finish()
# Use both
run_4 = wandb.init(name=f"{run_name_base}-{get_init_count()}")
a1 = run_4.use_artifact(artifact_1_name + ":latest")
assert _runs_eq(a1.used_by(), [run_3, run_4])
a2 = run_4.use_artifact(artifact_2_name + ":latest")
assert _runs_eq(a2.used_by(), [run_3, run_4])
run_4.finish() | python | tests/standalone_tests/artifact_tests.py | 40 | 77 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
734 | test_artifact_creation_with_diff_type | def test_artifact_creation_with_diff_type():
artifact_name = f"a1-{str(time.time())}"
# create
with wandb.init(name=f"{run_name_base}-{get_init_count()}") as run:
artifact = wandb.Artifact(artifact_name, "artifact_type_1")
artifact.add(wandb.Image(np.random.randint(0, 255, (10, 10))), "image")
run.log_artifact(artifact)
# update
with wandb.init(name=f"{run_name_base}-{get_init_count()}") as run:
artifact = wandb.Artifact(artifact_name, "artifact_type_1")
artifact.add(wandb.Image(np.random.randint(0, 255, (10, 10))), "image")
run.log_artifact(artifact)
# invalid
with wandb.init(name=f"{run_name_base}-{get_init_count()}") as run:
artifact = wandb.Artifact(artifact_name, "artifact_type_2")
artifact.add(wandb.Image(np.random.randint(0, 255, (10, 10))), "image_2")
did_err = False
try:
run.log_artifact(artifact)
except ValueError as err:
did_err = True
assert (
str(err)
== f"Artifact {artifact_name} already exists with type artifact_type_1; cannot create another with type artifact_type_2"
)
assert did_err
with wandb.init(name=f"{run_name_base}-{get_init_count()}") as run:
artifact = run.use_artifact(artifact_name + ":latest")
# should work
image = artifact.get("image")
assert image is not None
# should not work
image_2 = artifact.get("image_2")
assert image_2 is None | python | tests/standalone_tests/artifact_tests.py | 80 | 117 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
735 | random_image | def random_image():
return wandb.Image(np.random.randint(255, size=(32, 32))) | python | tests/standalone_tests/offline_artifacts.py | 25 | 26 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
736 | make_dataset | def make_dataset():
return wandb.Table(
data=[[str(i), random_image()] for i in range(dataset_size)],
columns=["id", "input_image"],
) | python | tests/standalone_tests/offline_artifacts.py | 29 | 33 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
737 | make_linked_table | def make_linked_table(dataset):
tab = wandb.Table(
data=[
[str(np.random.choice(range(dataset_size)).tolist()), i, random_image()]
for i in range(pred_size)
],
columns=["fk_id", "tab_id", "pred_img"],
)
tab.set_fk("fk_id", dataset, "id")
return tab | python | tests/standalone_tests/offline_artifacts.py | 36 | 45 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
738 | make_run | def make_run():
return wandb.init(project=project, mode=mode) | python | tests/standalone_tests/offline_artifacts.py | 48 | 49 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
739 | init_dataset_run | def init_dataset_run():
run = make_run()
dataset = make_dataset()
art = wandb.Artifact("A", "B")
art.add(dataset, "dataset")
run.log_artifact(art)
run.finish()
return dataset | python | tests/standalone_tests/offline_artifacts.py | 55 | 62 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
740 | init_ref_dataset_run | def init_ref_dataset_run():
run = make_run()
dataset = make_dataset()
tab = make_linked_table(dataset)
run.log({"tab": tab})
run.finish()
return dataset | python | tests/standalone_tests/offline_artifacts.py | 65 | 71 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
741 | do_ref_dataset_run_grouped | def do_ref_dataset_run_grouped():
run = make_run()
dataset = make_dataset()
tab = make_linked_table(dataset)
run.log({"dataset": dataset, "tab": tab})
run.finish()
return dataset | python | tests/standalone_tests/offline_artifacts.py | 77 | 83 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
742 | do_ref_dataset_run_ordered | def do_ref_dataset_run_ordered():
run = make_run()
dataset = make_dataset()
tab = make_linked_table(dataset)
run.log({"dataset": dataset})
run.log({"tab": tab})
run.finish()
return dataset | python | tests/standalone_tests/offline_artifacts.py | 86 | 93 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
743 | do_ref_dataset_run_reversed | def do_ref_dataset_run_reversed():
run = make_run()
dataset = make_dataset()
tab = make_linked_table(dataset)
run.log({"tab": tab})
run.log({"dataset": dataset})
run.finish()
return dataset | python | tests/standalone_tests/offline_artifacts.py | 96 | 103 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
744 | do_dep_dataset_run | def do_dep_dataset_run():
dataset = init_dataset_run()
run = make_run()
run.log({"dataset": dataset})
run.finish()
return run | python | tests/standalone_tests/offline_artifacts.py | 109 | 114 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
745 | do_dep_ref_dataset_run | def do_dep_ref_dataset_run():
dataset = init_dataset_run()
run = make_run()
tab = make_linked_table(dataset)
run.log({"tab": tab})
run.finish()
return run | python | tests/standalone_tests/offline_artifacts.py | 117 | 123 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
746 | do_dep_ref_dataset_run_grouped | def do_dep_ref_dataset_run_grouped():
dataset = init_dataset_run()
run = make_run()
tab = make_linked_table(dataset)
run.log({"dataset": dataset, "tab": tab})
run.finish()
return run | python | tests/standalone_tests/offline_artifacts.py | 126 | 132 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
747 | do_dep_ref_dataset_run_ordered | def do_dep_ref_dataset_run_ordered():
dataset = init_dataset_run()
run = make_run()
tab = make_linked_table(dataset)
run.log({"dataset": dataset})
run.log({"tab": tab})
run.finish()
return run | python | tests/standalone_tests/offline_artifacts.py | 135 | 142 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
748 | do_dep_ref_dataset_run_reversed | def do_dep_ref_dataset_run_reversed():
dataset = init_dataset_run()
run = make_run()
tab = make_linked_table(dataset)
run.log({"tab": tab})
run.log({"dataset": dataset})
run.finish()
return run | python | tests/standalone_tests/offline_artifacts.py | 145 | 152 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
749 | do_r_dep_dataset_run | def do_r_dep_dataset_run():
dataset = init_ref_dataset_run()
run = make_run()
run.log({"dataset": dataset})
run.finish()
return run | python | tests/standalone_tests/offline_artifacts.py | 158 | 163 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
750 | do_r_dep_ref_dataset_run | def do_r_dep_ref_dataset_run():
dataset = init_ref_dataset_run()
run = make_run()
tab = make_linked_table(dataset)
run.log({"tab": tab})
run.finish()
return run | python | tests/standalone_tests/offline_artifacts.py | 166 | 172 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
751 | do_r_dep_ref_dataset_run_grouped | def do_r_dep_ref_dataset_run_grouped():
dataset = init_ref_dataset_run()
run = make_run()
tab = make_linked_table(dataset)
run.log({"dataset": dataset, "tab": tab})
run.finish()
return run | python | tests/standalone_tests/offline_artifacts.py | 175 | 181 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
752 | do_r_dep_ref_dataset_run_ordered | def do_r_dep_ref_dataset_run_ordered():
dataset = init_ref_dataset_run()
run = make_run()
tab = make_linked_table(dataset)
run.log({"dataset": dataset})
run.log({"tab": tab})
run.finish()
return run | python | tests/standalone_tests/offline_artifacts.py | 184 | 191 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
753 | do_r_dep_ref_dataset_run_reversed | def do_r_dep_ref_dataset_run_reversed():
dataset = init_ref_dataset_run()
run = make_run()
tab = make_linked_table(dataset)
run.log({"tab": tab})
run.log({"dataset": dataset})
run.finish()
return run | python | tests/standalone_tests/offline_artifacts.py | 194 | 201 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
754 | sync_all | def sync_all():
print("Syncing...")
ctx = CliRunner()
result = ctx.invoke(cli.sync, args=["--sync-all"])
assert result.exit_code == 0
print("...Syncing Complete") | python | tests/standalone_tests/offline_artifacts.py | 204 | 209 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
755 | main | def main():
# Base Cases
init_dataset_run()
init_ref_dataset_run()
# Alt Log Ordering
do_ref_dataset_run_grouped()
do_ref_dataset_run_ordered()
do_ref_dataset_run_reversed()
# Depend on base case 1
do_dep_dataset_run()
do_dep_ref_dataset_run()
do_dep_ref_dataset_run_grouped()
do_dep_ref_dataset_run_ordered()
do_dep_ref_dataset_run_reversed()
# Depend on base case 2
do_r_dep_dataset_run()
do_r_dep_ref_dataset_run()
do_r_dep_ref_dataset_run_grouped()
do_r_dep_ref_dataset_run_ordered()
do_r_dep_ref_dataset_run_reversed()
sync_all() | python | tests/standalone_tests/offline_artifacts.py | 212 | 235 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
756 | main | def main(args):
run_id = runid.generate_id()
try:
wandb.init(project="resuming", resume="must", id=run_id)
except wandb.Error:
print("Confirmed we can't resume a non-existent run with must")
wandb.init(project="resuming", resume="allow", id=run_id)
print("Run start time: ", wandb.run.start_time)
for i in range(10):
print(f"Logging step {i}")
wandb.log({"metric": i})
time.sleep(1)
wandb.join()
print("Run finished at: ", int(time.time()))
print("Sleeping 5 seconds...")
time.sleep(5)
wandb.init(project="resuming", resume="allow", id=run_id, reinit=True)
print("Run starting step: ", wandb.run.history._step)
print("Run start time: ", int(wandb.run.start_time))
print("Time travel: ", int(time.time() - wandb.run.start_time))
for i in range(10):
print("Resumed logging step %i" % i)
wandb.log({"metric": i})
time.sleep(1)
wandb.join()
try:
wandb.init(project="resuming", resume="never", id=run_id, reinit=True)
raise ValueError("I was allowed to resume!")
except wandb.Error:
print("Confirmed we can't resume run when never")
api = wandb.Api()
run = api.run(f"resuming/{run_id}")
# TODO: This is showing a beast bug, we're not syncing the last history row
print("History")
print(run.history())
print("System Metrics")
print(run.history(stream="system")) | python | tests/standalone_tests/resuming_and_reinit.py | 8 | 51 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
757 | poke | def poke():
f = requests.get(URL)
data = f.text
print("GOT:", data) | python | tests/standalone_tests/sweep_check.py | 18 | 21 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
758 | train | def train(**kwargs):
print("train", kwargs)
if kwargs.get("chdir"):
try:
os.makedirs("./test_chdir")
except Exception as e:
print(e)
os.chdir("./test_chdir")
run = wandb.init()
with run:
c = dict(run.config)
run.name = "{}-{}-{}".format(c.get("param0"), c.get("param1"), c.get("param2"))
run_id = run.id
print("SweepID", run.sweep_id)
length = run.config.get("length", L)
epochs = run.config.get("epochs", 27)
delay = run.config.get("delay", 0)
for e in range(epochs):
n = float(length) * (float(e + 1) / epochs)
val = run.config.param0 + run.config.param1 * n + run.config.param2 * n * n
wandb.log(dict(val_acc=val))
if delay:
time.sleep(delay)
if POKE_LOCAL:
poke()
shutil.copyfile("wandb/debug.log", "wandb/debug-%s.log" % run_id) | python | tests/standalone_tests/sweep_check.py | 24 | 49 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
759 | train_and_check_chdir | def train_and_check_chdir(**kwargs):
if "test_chdir" not in os.getcwd():
try:
os.makedirs("./test_chdir")
except Exception as e:
print(e)
os.chdir("./test_chdir")
run = wandb.init()
with run:
c = dict(run.config)
run.name = "{}-{}-{}".format(c.get("param0"), c.get("param1"), c.get("param2"))
print("SweepID", run.sweep_id)
length = run.config.get("length", L)
epochs = run.config.get("epochs", 27)
for e in range(epochs):
n = float(length) * (float(e + 1) / epochs)
val = run.config.param0 + run.config.param1 * n + run.config.param2 * n * n
wandb.log(dict(val_acc=val))
files = os.listdir(run.dir)
# TODO: Add a check to restoring from another run in this case, WB-3715. Should restore to run.dir
# check files were saved to the right place
assert set(files) == {
"requirements.txt",
"output.log",
"config.yaml",
"wandb-summary.json",
"wandb-metadata.json",
}, print(files)
# ensure run dir does not contain test_chdir, and no files were saved there
assert "test_chdir" not in run.dir
for _, _, files in os.walk("."):
assert files == [], print(files) | python | tests/standalone_tests/sweep_check.py | 53 | 84 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def check(sweep_id, num=None, result=None, stopped=None):
    """Fetch a finished sweep through the public API and sanity-check it.

    Args:
        sweep_id: sweep to inspect (looked up under PROJECT).
        num: if given, assert the sweep has exactly this many runs.
        result: if given, assert the best val_acc equals this value.
        stopped: if given, report (but do not assert) how many runs were
            early-stopped.
    """
    api = wandb.Api(overrides=wandb.InternalApi().settings())
    sweep = api.sweep(f"{PROJECT}/{sweep_id}")
    runs = sorted(
        sweep.runs, key=lambda r: r.summary.get("val_acc", 0), reverse=True
    )
    if num is not None:
        print(f"CHECKING: runs, saw: {len(runs)}, expecting: {num}")
        assert len(runs) == num
    best = None
    n_stopped = 0
    for r in runs:
        print("stop debug", r.id, getattr(r, "stopped", None), r.state)
        if getattr(r, "stopped", None) or r.state == "stopped":
            n_stopped += 1
        acc = r.summary.get("val_acc")
        assert acc is not None
        if best is None or acc > best:
            best = acc
    if stopped is not None:
        print(f"NOT CHECKING: stopped, saw: {n_stopped}, expecting: {stopped}")
        # todo: turn on stopped run state
    if result is not None:
        print(f"CHECKING: metric, saw: {best}, expecting: {result}")
        assert best == result
    print("ALL GOOD")
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def sweep_quick(args):
    """Smoke test: run a single random-search trial and verify it exists."""
    sweep_config = {
        "method": "random",
        "parameters": {
            "param0": {"values": [2]},
            "param1": {"values": [0, 1, 4]},
            "param2": {"values": [0, 0.5, 1.5]},
            "epochs": {"value": 4},
        },
    }
    sweep_id = wandb.sweep(sweep_config, project=PROJECT)
    print("sweep:", sweep_id)
    wandb.agent(sweep_id, function=train, count=1)
    check(sweep_id, num=1)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def sweep_grid(args):
    """Exhaustive 1x3x3 grid sweep; best metric must be the analytic max."""
    sweep_config = {
        "method": "grid",
        "parameters": {
            "param0": {"values": [2]},
            "param1": {"values": [0, 1, 4]},
            "param2": {"values": [0, 0.5, 1.5]},
            "epochs": {"value": 4},
        },
    }
    sweep_id = wandb.sweep(sweep_config, project=PROJECT)
    print("sweep:", sweep_id)
    wandb.agent(sweep_id, function=train)
    check(sweep_id, num=9, result=2 + 4 * L + 1.5 * L * L)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def sweep_bayes(args):
    """Bayes sweep over the full 9-point grid; with count=9 it must visit
    every combination, so the best metric equals the analytic max."""
    sweep_config = {
        "method": "bayes",
        "metric": {"name": "val_acc", "goal": "maximize"},
        "parameters": {
            "param0": {"values": [2]},
            "param1": {"values": [0, 1, 4]},
            "param2": {"values": [0, 0.5, 1.5]},
        },
    }
    sweep_id = wandb.sweep(sweep_config, project=PROJECT)
    print("sweep:", sweep_id)
    wandb.agent(sweep_id, function=train, count=9)
    check(sweep_id, num=9, result=2 + 4 * L + 1.5 * L * L)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def sweep_bayes_nested(args):
    """Same as sweep_bayes but optimizing a nested metric key
    ("feat1.val_acc")."""
    sweep_config = {
        "method": "bayes",
        "metric": {"name": "feat1.val_acc", "goal": "maximize"},
        "parameters": {
            "param0": {"values": [2]},
            "param1": {"values": [0, 1, 4]},
            "param2": {"values": [0, 0.5, 1.5]},
        },
    }
    sweep_id = wandb.sweep(sweep_config, project=PROJECT)
    print("sweep:", sweep_id)
    wandb.agent(sweep_id, function=train, count=9)
    check(sweep_id, num=9, result=2 + 4 * L + 1.5 * L * L)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def sweep_grid_hyperband(args):
    """Grid sweep with hyperband early termination; expects some runs to be
    stopped early while the best metric is still the analytic max."""
    sweep_config = {
        "method": "grid",
        "metric": {"name": "val_acc", "goal": "maximize"},
        "parameters": {
            "param0": {"values": [2]},
            "param1": {"values": [4, 1, 0]},
            "param2": {"values": [1.5, 0.5, 0]},
            # delay slows each epoch so hyperband has time to stop runs
            "delay": {"value": args.grid_hyper_delay or 1},
            "epochs": {"value": 27},
        },
        "early_terminate": {"type": "hyperband", "max_iter": 27, "s": 2, "eta": 3},
    }
    sweep_id = wandb.sweep(sweep_config, project=PROJECT)
    print("sweep:", sweep_id)
    wandb.agent(sweep_id, function=train, count=9)
    # TODO(check stopped)
    check(sweep_id, num=9, result=2 + 4 * L + 1.5 * L * L, stopped=3)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def sweep_chdir(args):
    """Run two trials of the chdir-checking train function, then clean up
    the ./test_chdir scratch directory."""
    sweep_config = {
        "method": "grid",
        "parameters": {
            "param0": {"values": [2]},
            "param1": {"values": [0, 1, 4]},
            "param2": {"values": [0, 0.5, 1.5]},
            "epochs": {"value": 4},
        },
        "root": os.getcwd(),
    }
    sweep_id = wandb.sweep(sweep_config, project=PROJECT)
    wandb.agent(sweep_id, function=train_and_check_chdir, count=2)
    # clean up
    os.chdir("../")
    os.removedirs("./test_chdir")
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def main():
    """CLI entry point: run the selected sweep scenarios against PROJECT."""
    global POKE_LOCAL
    # os.environ["WANDB_DEBUG"] = "true"
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--test", default="", type=str)
    parser.add_argument("-x", "--exclude", default="", type=str)
    parser.add_argument("--grid_hyper_delay", type=int)
    parser.add_argument("--dryrun", dest="dryrun", action="store_true")
    parser.add_argument("--local", dest="local", action="store_true")
    args = parser.parse_args()
    all_tests = {
        "quick": sweep_quick,
        "grid": sweep_grid,
        "bayes": sweep_bayes,
        "grid_hyper": sweep_grid_hyperband,
        "chdir": sweep_chdir,
    }
    default_tests = ("quick", "grid", "bayes", "chdir")
    test_list = args.test.split(",") if args.test else default_tests
    exclude_list = args.exclude.split(",") if args.exclude else []
    for name in test_list:
        POKE_LOCAL = False
        if name in exclude_list:
            continue
        print(f"Testing: {name}")
        fn = all_tests.get(name)
        if fn is None:
            raise Exception("Unknown test: %s" % name)
        if args.dryrun:
            continue
        if args.local and name == "grid_hyper":
            # poke the local backend while the hyperband test runs
            POKE_LOCAL = True
        fn(args)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def make_exit_data(data):
    """Build a RunExitRecord proto from a plain dict (key: exit_code)."""
    record = wandb_internal_pb2.RunExitRecord()
    record.exit_code = data.get("exit_code", 0)
    return record
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def make_log_data(data):
    """Build a HistoryRecord proto; each dict value is JSON-encoded."""
    record = wandb_internal_pb2.HistoryRecord()
    for key, value in data.items():
        entry = record.item.add()
        entry.key = key
        entry.value_json = json.dumps(value)
    return record
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def make_config(config_dict, obj=None):
    """Fill (or create) a ConfigRecord proto with JSON-encoded updates.

    Args:
        config_dict: key -> value mapping to record.
        obj: optional existing ConfigRecord (e.g. a RunRecord.config
            submessage) to populate in place.
    """
    record = obj or wandb_internal_pb2.ConfigRecord()
    for key, value in config_dict.items():
        entry = record.update.add()
        entry.key = key
        entry.value_json = json.dumps(value)
    return record
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def _pbmap_apply_dict(
    m: "MessageMap[str, spb.SettingsValue]", d: Dict[str, Any]
) -> None:
    """Copy a plain dict into a proto map of SettingsValue (in place).

    datetime and Enum values are skipped — they have no SettingsValue
    mapping here. Unhandled types produce an empty SettingsValue.
    """
    for k, v in d.items():
        if isinstance(v, datetime.datetime):
            continue
        if isinstance(v, enum.Enum):
            continue
        sv = spb.SettingsValue()
        if v is None:
            sv.null_value = True
        # BUG FIX: bool must be checked before int. bool is a subclass of
        # int, so the previous order sent True/False into int_value and the
        # bool branch was unreachable.
        elif isinstance(v, bool):
            sv.bool_value = v
        elif isinstance(v, int):
            sv.int_value = v
        elif isinstance(v, float):
            sv.float_value = v
        elif isinstance(v, str):
            sv.string_value = v
        elif isinstance(v, tuple):
            sv.tuple_value.string_values.extend(v)
        m[k].CopyFrom(sv)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def make_settings(settings_dict, obj):
    """Apply a settings dict onto a proto settings map `obj` (in place)."""
    _pbmap_apply_dict(obj, settings_dict)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def make_run_data(data):
    """Build a RunRecord proto from a dict of optional run attributes.

    Recognized keys: run_id, entity, project, group, job_type, config.
    Falsy or missing values are simply omitted from the record.
    """
    rdata = wandb_internal_pb2.RunRecord()
    run_id = data.get("run_id")
    if run_id:
        rdata.run_id = run_id
    entity = data.get("entity")
    if entity:
        rdata.entity = entity
    project = data.get("project")
    if project:
        rdata.project = project
    run_group = data.get("group")
    if run_group:
        rdata.run_group = run_group
    job_type = data.get("job_type")
    if job_type:
        rdata.job_type = job_type
    # (removed an accidental duplicate of this lookup)
    config_dict = data.get("config")
    if config_dict:
        make_config(config_dict, obj=rdata.config)
    return rdata
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def make_summary(summary_dict, obj=None):
    """Fill (or create) a SummaryRecord proto with JSON-encoded updates."""
    record = obj or wandb_internal_pb2.SummaryRecord()
    for key, value in summary_dict.items():
        entry = record.update.add()
        entry.key = key
        entry.value_json = json.dumps(value)
    return record
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def make_output(name, data):
    """Build an OutputRecord proto for a console stream line.

    Args:
        name: "stdout" or "stderr".
        data: the console line text.

    Raises:
        ValueError: if name is not a known stream. (Previously an unknown
        name only printed a message and then crashed with an unbound local
        `otype`; the old TODO asked for a real error.)
    """
    if name == "stdout":
        otype = wandb_internal_pb2.OutputRecord.OutputType.STDOUT
    elif name == "stderr":
        otype = wandb_internal_pb2.OutputRecord.OutputType.STDERR
    else:
        raise ValueError("unknown type: %s" % name)
    outdata = wandb_internal_pb2.OutputRecord(output_type=otype, line=data)
    outdata.timestamp.GetCurrentTime()
    return outdata
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def __init__(self):
    # Nothing is live yet: connect() fills channel/stub, and the first
    # _inform_init() fills the stream id.
    self._channel = self._stub = self._stream_id = None
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def connect(self, address="localhost:50051"):
    """Open an insecure gRPC channel to the wandb internal service.

    Args:
        address: host:port of the service. Defaults to the previously
            hard-coded local endpoint, so existing callers are unaffected.
    """
    self._channel = grpc.insecure_channel(address)
    self._stub = wandb_server_pb2_grpc.InternalServiceStub(self._channel)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def _apply_stream(self, obj):
    """Stamp a request proto with this client's stream (run) id.

    Requires that _inform_init() has already set self._stream_id.
    """
    assert self._stream_id
    obj._info.stream_id = self._stream_id
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def _inform_init(self, settings):
    """Send ServerInformInit once, adopting settings.run_id as the stream id.

    Subsequent calls are no-ops.
    """
    if self._stream_id:
        return  # already initialized
    run_id = settings.run_id
    assert run_id
    self._stream_id = run_id
    settings_dict = dict(settings)
    settings_dict["_log_level"] = logging.DEBUG
    req = spb.ServerInformInitRequest()
    make_settings(settings_dict, req._settings_map)
    self._apply_stream(req)
    self._stub.ServerInformInit(req)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def run_start(self, run_id):
    """Prepare local run directories and announce the run to the server.

    Args:
        run_id: identifier to register for this run/stream.
    """
    settings = wandb.Settings()
    settings._set_run_start_time()
    settings.update(run_id=run_id)
    # exist_ok so a retried client does not crash on leftover directories
    os.makedirs(settings.files_dir, exist_ok=True)
    os.makedirs(settings.log_user, exist_ok=True)
    self._inform_init(settings)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def run_update(self, data):
    """Send a RunUpdate built from `data`; return the server's run result."""
    req = make_run_data(data)
    self._apply_stream(req)
    return self._stub.RunUpdate(req)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def log(self, data):
    """Log one history row (a dict of metric values)."""
    req = make_log_data(data)
    self._apply_stream(req)
    self._stub.Log(req)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def config(self, data):
    """Send a config update (dict of key -> value)."""
    req = make_config(data)
    self._apply_stream(req)
    self._stub.Config(req)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def summary(self, data):
    """Send a summary update (dict of key -> value)."""
    req = make_summary(data)
    self._apply_stream(req)
    self._stub.Summary(req)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def output(self, outtype, data):
    """Send one console line; outtype is "stdout" or "stderr"."""
    req = make_output(outtype, data)
    self._apply_stream(req)
    self._stub.Output(req)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def exit(self, data):
    """Mark the run finished (data may carry exit_code)."""
    req = make_exit_data(data)
    self._apply_stream(req)
    self._stub.RunExit(req)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def server_status(self):
    """Ping the server with a status request (response ignored)."""
    self._stub.ServerStatus(spb.ServerStatusRequest())
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def server_shutdown(self):
    """Ask the server process to shut down."""
    self._stub.ServerShutdown(spb.ServerShutdownRequest())
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def main():
    """Drive a full run lifecycle against a locally running wandb gRPC server:
    init, run update, history/config/summary/output, exit, shutdown."""
    client = WandbInternalClient()
    client.connect()
    run_id = os.environ.get("WANDB_RUN_ID", "junk123")
    entity = os.environ.get("WANDB_ENTITY")  # noqa: F841
    project = os.environ.get("WANDB_PROJECT")
    group = os.environ.get("WANDB_RUN_GROUP")
    job_type = os.environ.get("WANDB_JOB_TYPE")
    run_data = dict(
        run_id=run_id,
        project=project,
        group=group,
        job_type=job_type,
        config=dict(parm1=2, param2=3),
    )
    client.run_start(run_id)
    run = client.run_update(run_data).run
    base_url = "https://app.wandb.ai"
    print(
        f"Monitor your run ({run.display_name}) at: "
        f"{base_url}/{run.entity}/{run.project}/runs/{run.run_id}"
    )
    for step, value in ((1, 2), (2, 3), (3, 4)):
        client.log(dict(this=value, _step=step))
    client.config(dict(parm5=55, parm6=66))
    client.summary(dict(sum2=4, sum3=3))
    client.output("stdout", "Hello world\n")
    client.output("stderr", "I am an error\n")
    print("delay for 30 seconds...")
    time.sleep(30)
    print(
        f"Your run ({run.display_name}) is complete: "
        f"{base_url}/{run.entity}/{run.project}/runs/{run.run_id}"
    )
    client.exit(dict(exit_code=0))
    client.server_shutdown()
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def main():
    """Smoke-test wandb's TensorBoard sync with a small PyTorch conv net."""
    wandb.init(tensorboard=True)

    class ConvNet(nn.Module):
        # Minimal MNIST-shaped CNN: (N, 1, 28, 28) in, 10 log-probs out.
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
            self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
            self.conv2_drop = nn.Dropout2d()
            self.fc1 = nn.Linear(320, 50)
            self.fc2 = nn.Linear(50, 10)

        def forward(self, x):
            x = F.relu(F.max_pool2d(self.conv1(x), 2))
            x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
            x = x.view(-1, 320)  # flatten conv output for the FC layers
            x = F.relu(self.fc1(x))
            x = F.dropout(x, training=self.training)
            x = self.fc2(x)
            return F.log_softmax(x, dim=1)

    writer = SummaryWriter()
    net = ConvNet()
    wandb.watch(net, log_freq=2)  # log gradients/params every 2 steps
    for i in range(10):
        output = net(torch.ones((64, 1, 28, 28)))
        loss = F.mse_loss(output, torch.ones((64, 10)))
        output.backward(torch.ones(64, 10))
        # write to TensorBoard so wandb's sync path is exercised
        writer.add_scalar("loss", loss / 64, i + 1)
        writer.add_image("example", torch.ones((1, 28, 28)), i + 1)
    writer.close()
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def __init__(self):
    """Small MNIST-shaped CNN: two conv layers, dropout, two FC layers."""
    super().__init__()
    self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
    self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
    self.conv2_drop = nn.Dropout2d()
    # 320 matches the flattened conv output used by forward()'s view(-1, 320)
    self.fc1 = nn.Linear(320, 50)
    self.fc2 = nn.Linear(50, 10)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def forward(self, x):
    """Conv/pool twice, flatten, two FC layers, return log-probabilities."""
    out = F.relu(F.max_pool2d(self.conv1(x), 2))
    out = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(out)), 2))
    out = out.view(-1, 320)
    out = F.relu(self.fc1(out))
    out = F.dropout(out, training=self.training)
    return F.log_softmax(self.fc2(out), dim=1)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def main():
    """Train a Naive Bayes tweet-emotion classifier and exercise wandb's
    sklearn-style plots (ROC, PR, confusion matrix) in their variants."""
    wandb.init(name=pathlib.Path(__file__).stem)

    # Get a pandas DataFrame object of all the data in the csv file:
    df = pd.read_csv(pathlib.Path(__file__).parent.resolve() / "tweets.csv")

    # Get pandas Series object of the "tweet text" column:
    text = df["tweet_text"]

    # Get pandas Series object of the "emotion" column:
    target = df["is_there_an_emotion_directed_at_a_brand_or_product"]

    # Remove the blank rows from the series:
    target = target[pd.notnull(text)]
    text = text[pd.notnull(text)]

    # Perform feature extraction:
    from sklearn.feature_extraction.text import CountVectorizer

    count_vect = CountVectorizer()
    count_vect.fit(text)
    counts = count_vect.transform(text)

    # counts_train = counts[:6000]
    # target_train = target[:6000]
    counts_test = counts[6000:]
    target_test = target[6000:]

    # Train with this data with a Naive Bayes classifier:
    from sklearn.naive_bayes import MultinomialNB

    nb = MultinomialNB()
    nb.fit(counts, target)

    X_test = counts_test  # noqa: N806
    y_test = target_test
    y_probas = nb.predict_proba(X_test)
    y_pred = nb.predict(X_test)
    print("y", y_probas.shape)

    # ROC
    wandb.log({"roc": wandb.plot.roc_curve(y_test, y_probas, nb.classes_)})
    wandb.log(
        {
            "roc_with_title": wandb.plot.roc_curve(
                y_test, y_probas, nb.classes_, title="MY ROC TITLE"
            )
        }
    )

    # Precision Recall
    wandb.log({"pr": wandb.plot.pr_curve(y_test, y_probas, nb.classes_)})
    wandb.log(
        {
            "pr_with_title": wandb.plot.pr_curve(
                y_test, y_probas, nb.classes_, title="MY PR TITLE"
            )
        }
    )

    # Confusion Matrix
    # Map class label -> integer index; confusion_matrix takes index lists.
    class_ind_map = {}
    for i, class_name in enumerate(nb.classes_):
        class_ind_map[class_name] = i
    y_pred_inds = [class_ind_map[class_name] for class_name in y_pred]
    y_true_inds = [class_ind_map[class_name] for class_name in y_test]

    # test workflow with classes
    wandb.log(
        {
            "conf_mat": wandb.plot.confusion_matrix(
                preds=y_pred_inds, y_true=y_true_inds, class_names=nb.classes_
            )
        }
    )

    # test workflow without classes
    wandb.log(
        {
            "conf_mat_noclass": wandb.plot.confusion_matrix(
                preds=y_pred_inds, y_true=y_true_inds
            )
        }
    )

    # test workflow with multiples of inds
    y_pred_mult = [y_pred_ind * 5 for y_pred_ind in y_pred_inds]
    y_true_mult = [y_true_ind * 5 for y_true_ind in y_true_inds]
    wandb.log(
        {
            "conf_mat_noclass_mult": wandb.plot.confusion_matrix(
                preds=y_pred_mult, y_true=y_true_mult, title="I HAVE A TITLE"
            )
        }
    )

    # test probs workflow
    wandb.log(
        {
            "conf_mat_probs": wandb.plot.confusion_matrix(
                probs=y_probas, y_true=y_true_inds, class_names=nb.classes_
            )
        }
    )
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def main(argv):
    """Exercise wandb's basic custom-chart helpers (line, line_series,
    histogram, scatter, bar) and log one of each to a run."""
    # wandb.init(entity="wandb", project="new-plots-test-5")
    wandb.init(name=pathlib.Path(__file__).stem)
    data = [[i, random.random() + math.sin(i / 10)] for i in range(100)]
    table = wandb.Table(data=data, columns=["step", "height"])
    line_plot = wandb.plot.line(
        table, x="step", y="height", title="what a great line plot"
    )

    # Four series: two over x = 0..99, two over even x = 0..98.
    xs = []
    ys = []
    keys = [f"y_{i}" for i in range(4)]
    xs.append([j for j in range(100)])
    xs.append([j for j in range(100)])
    xs.append([2 * j for j in range(50)])
    xs.append([2 * j for j in range(50)])
    ys.append([random.random() + math.sin(i / 10) for i in range(100)])
    ys.append([math.sin(i / 10) for i in range(100)])
    ys.append([math.cos(i / 10) for i in range(50)])
    ys.append([random.random() - math.cos(i / 10) for i in range(50)])
    line_series_plot = wandb.plot.line_series(
        xs, ys, keys, "Get serial With keys now!", "step"
    )
    line_series_plot_no_title_no_keys = wandb.plot.line_series(xs, ys, xname="step")
    # Single shared x array applied to every y series:
    line_series_singular_x_array = wandb.plot.line_series(
        [i for i in range(100)], ys, title="Get serial with one x", xname="step"
    )
    histogram = wandb.plot.histogram(table, value="height", title="my-histo")
    scatter = wandb.plot.scatter(table, x="step", y="height", title="scatter!")

    bar_table = wandb.Table(
        data=[
            ["car", random.random()],
            ["bus", random.random()],
            ["road", random.random()],
            ["person", random.random()],
            ["cyclist", random.random()],
            ["tree", random.random()],
            ["sky", random.random()],
        ],
        columns=["class", "acc"],
    )
    bar = wandb.plot.bar(bar_table, label="class", value="acc", title="bar")
    wandb.log(
        {
            "line1": line_plot,
            "line_series1": line_series_plot,
            "line_series_no_title_no_keys": line_series_plot_no_title_no_keys,
            "line_series_single_x": line_series_singular_x_array,
            "histogram1": histogram,
            "scatter1": scatter,
            "bar1": bar,
        }
    )
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def main():
    """Train a tiny n-gram language model whose embedding layer is sparse,
    to exercise wandb.watch() with sparse gradient tensors."""
    wandb.init()

    # We will use Shakespeare Sonnet 2
    test_sentence = """When forty winters shall besiege thy brow,
And dig deep trenches in thy beauty's field,
Thy youth's proud livery so gazed on now,
Will be a totter'd weed of small worth held:
Then being asked, where all thy beauty lies,
Where all the treasure of thy lusty days;
To say, within thine own deep sunken eyes,
Were an all-eating shame, and thriftless praise.
How much more praise deserv'd thy beauty's use,
If thou couldst answer 'This fair child of mine
Shall sum my count, and make my old excuse,'
Proving his beauty by succession thine!
This were to be new made when thou art old,
And see thy blood warm when thou feel'st it cold.""".split()

    # we should tokenize the input, but we will ignore that for now
    # build a list of tuples.  Each tuple is ([ word_i-2, word_i-1 ], target word)
    trigrams = [
        ([test_sentence[i], test_sentence[i + 1]], test_sentence[i + 2])
        for i in range(len(test_sentence) - 2)
    ]
    vocab = set(test_sentence)
    word_to_ix = {word: i for i, word in enumerate(vocab)}

    class NGramLanguageModeler(nn.Module):
        def __init__(self, vocab_size, embedding_dim, context_size):
            super().__init__()
            # sparse=True is the point of this test: the embedding table
            # produces sparse gradients for wandb.watch() to handle.
            self.embeddings = nn.Embedding(vocab_size, embedding_dim, sparse=True)
            self.linear1 = nn.Linear(context_size * embedding_dim, 128)
            self.linear2 = nn.Linear(128, vocab_size)

        def forward(self, inputs):
            embeds = self.embeddings(inputs).view((1, -1))
            out = F.relu(self.linear1(embeds))
            out = self.linear2(out)
            log_probs = F.log_softmax(out, dim=1)
            return log_probs

    has_cuda = torch.cuda.is_available()
    losses = []
    loss_function = nn.NLLLoss()
    model = NGramLanguageModeler(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)
    model = model.cuda() if has_cuda else model
    optimizer = optim.SGD(model.parameters(), lr=0.001)
    wandb.watch(model, log="all", log_freq=100)
    for _ in range(100):
        total_loss = 0
        for context, target in trigrams:
            # Step 1. Prepare the inputs to be passed to the model (i.e, turn the words
            # into integer indices and wrap them in tensors)
            context_idxs = torch.tensor(
                [word_to_ix[w] for w in context], dtype=torch.long
            )
            context_idxs = context_idxs.cuda() if has_cuda else context_idxs

            # Step 2. Recall that torch *accumulates* gradients. Before passing in a
            # new instance, you need to zero out the gradients from the old
            # instance
            model.zero_grad()

            # Step 3. Run the forward pass, getting log probabilities over next
            # words
            log_probs = model(context_idxs)

            # Step 4. Compute your loss function. (Again, Torch wants the target
            # word wrapped in a tensor)
            target = torch.tensor([word_to_ix[target]], dtype=torch.long)
            target = target.cuda() if has_cuda else target
            loss = loss_function(log_probs, target)

            # Step 5. Do the backward pass and update the gradient
            loss.backward()
            optimizer.step()

            # Get the Python number from a 1-element Tensor by calling tensor.item()
            total_loss += loss.item()
            wandb.log({"batch_loss": loss.item()})
        losses.append(total_loss)
    print(losses)  # The loss decreased every iteration over the training data!
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def __init__(self, vocab_size, embedding_dim, context_size):
    """N-gram LM: sparse embedding -> hidden(128) -> vocab-sized logits."""
    super().__init__()
    # sparse=True makes the embedding emit sparse gradients (what this
    # standalone test exercises via wandb.watch)
    self.embeddings = nn.Embedding(vocab_size, embedding_dim, sparse=True)
    self.linear1 = nn.Linear(context_size * embedding_dim, 128)
    self.linear2 = nn.Linear(128, vocab_size)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def forward(self, inputs):
    """Embed the context word ids, run the 2-layer MLP, return log-probs."""
    hidden = F.relu(self.linear1(self.embeddings(inputs).view((1, -1))))
    return F.log_softmax(self.linear2(hidden), dim=1)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
798 | main | def main():
run = wandb.init()
config = run.config
config.img_size = 50
config.batch_size = 32
config.epochs = 0
config.train_path = os.path.join("simpsons", "train")
config.test_path = os.path.join("simpsons", "test")
# download the data if it doesn't exist
if not os.path.exists("simpsons"):
print("Downloading Simpsons dataset...")
subprocess.check_output(
"curl https://storage.googleapis.com/wandb-production.appspot.com/mlclass/simpsons.tar.gz | tar xvz",
shell=True,
)
# this is the augmentation configuration we will use for training
# see: https://keras.io/preprocessing/image/#imagedatagenerator-class
train_datagen = ImageDataGenerator(rescale=1.0 / 255)
# only rescaling augmentation for testing:
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
# this is a generator that will read pictures found in
# subfolers of 'data/train', and indefinitely generate
# batches of augmented image data
train_generator = train_datagen.flow_from_directory(
config.train_path,
target_size=(config.img_size, config.img_size),
batch_size=config.batch_size,
)
# this is a similar generator, for validation data
test_generator = test_datagen.flow_from_directory(
config.test_path,
target_size=(config.img_size, config.img_size),
batch_size=config.batch_size,
)
model = Sequential()
model.add(
Conv2D(
32,
(3, 3),
input_shape=(config.img_size, config.img_size, 3),
activation="relu",
)
)
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(50, activation="relu"))
model.add(Dropout(0.4))
model.add(Dense(13, activation="softmax"))
model.compile(
optimizer=optimizers.Adam(),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
def results_data_frame(test_datagen, model):
gen = test_datagen.flow_from_directory(
config.test_path,
target_size=(config.img_size, config.img_size),
batch_size=config.batch_size,
shuffle=False,
)
class_cols = []
class_names = []
for class_col, _ in sorted(gen.class_indices.items(), key=lambda c_i: c_i[1]):
class_cols.append(class_col)
class_names.append(class_col.replace("_", " "))
cards = []
true_class_is = []
true_classes = []
true_probs = []
pred_classes = []
pred_probs = []
class_probs = [[] for c in class_names]
num_batches = int(math.ceil(len(gen.filenames) / float(gen.batch_size)))
# num_batches = 1
for batch_i in range(num_batches):
examples, truth = next(gen)
preds = model.predict(np.stack(examples))
this_true_class_is = [np.argmax(probs) for probs in truth]
true_class_is.extend(this_true_class_is)
true_classes.extend(class_names[i] for i in this_true_class_is)
true_probs.extend(ps[i] for ps, i in zip(preds, true_class_is))
pred_classes.extend(class_names[np.argmax(probs)] for probs in preds)
pred_probs.extend(np.max(probs) for probs in preds)
for cp, p in zip(class_probs, preds.T):
cp.extend(p)
base_i = batch_i * gen.batch_size
for i in range(base_i, base_i + len(examples)):
cards.append(
"""```Predicted:
{pred_class} ({pred_prob:.2%})
Actual:
{true_class} ({true_prob:.2%})

```""".format(
true_class=true_classes[i],
true_prob=true_probs[i],
pred_class=pred_classes[i],
pred_prob=pred_probs[i],
idx=i,
)
)
all_cols = [
"wandb_example_id",
"image",
"card",
"true_class",
"true_prob",
"pred_class",
"pred_prob",
] + class_cols
frame_dict = {
"wandb_example_id": [str(s) for s in gen.filenames[: len(cards)]],
"image": [
wandb.Image(os.path.join(config.test_path, f))
for f in gen.filenames[: len(cards)]
],
"card": cards,
"true_class": true_classes,
"true_prob": true_probs,
"pred_class": pred_classes,
"pred_prob": pred_probs,
}
for c, col in zip(class_cols, class_probs):
frame_dict[c] = col
table = pandas.DataFrame(frame_dict, columns=all_cols)
number_cols = ["true_prob", "pred_prob"] + class_cols
table[number_cols] = table[number_cols].apply(pandas.to_numeric)
# from IPython import embed; embed()
return table
class ResultsDataFrameCallback(keras.callbacks.Callback):
    """Keras callback that rebuilds the test-set results table after every epoch.

    The table is recomputed from scratch over the whole test generator and
    published to the W&B run summary under the key "results".
    """

    def on_epoch_end(self, epoch, logs=None):
        # Recompute the per-example prediction table for the current weights
        # and store it; `run`, `test_datagen` and `model` are module globals.
        table = results_data_frame(test_datagen, model)
        run.summary["results"] = table
# Train the model; the callback refreshes the W&B results table at the end
# of each epoch. One full pass over each generator per epoch.
model.fit_generator(
    train_generator,
    validation_data=test_generator,
    steps_per_epoch=len(train_generator),
    validation_steps=len(test_generator),
    epochs=config.epochs,
    workers=4,
    callbacks=[ResultsDataFrameCallback()],
)
# With zero epochs fit_generator never fires the epoch-end callback, so
# compute and log the results table once here instead.
if config.epochs == 0:
    run.summary.update({"results3": results_data_frame(test_datagen, model)})
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def results_data_frame(test_datagen, model):
    """Run *model* over the whole test set and build a per-example results table.

    Args:
        test_datagen: Keras ``ImageDataGenerator`` used to stream test images
            from ``config.test_path`` (shuffle is disabled so row order matches
            ``gen.filenames``).
        model: Trained Keras model whose ``predict`` returns one probability
            per class, in the order given by ``gen.class_indices``.

    Returns:
        ``pandas.DataFrame`` with one row per test image: example id,
        ``wandb.Image``, a markdown "card" summary, true/predicted class names
        and probabilities, plus one numeric column per class holding that
        class's predicted probability.
    """
    gen = test_datagen.flow_from_directory(
        config.test_path,
        target_size=(config.img_size, config.img_size),
        batch_size=config.batch_size,
        shuffle=False,  # keep batch order aligned with gen.filenames
    )
    # Class names sorted by the generator's class index so they line up with
    # the columns of model.predict output.
    class_cols = []
    class_names = []
    for class_col, _ in sorted(gen.class_indices.items(), key=lambda c_i: c_i[1]):
        class_cols.append(class_col)
        class_names.append(class_col.replace("_", " "))
    cards = []
    true_class_is = []
    true_classes = []
    true_probs = []
    pred_classes = []
    pred_probs = []
    class_probs = [[] for _ in class_names]
    num_batches = int(math.ceil(len(gen.filenames) / float(gen.batch_size)))
    for batch_i in range(num_batches):
        examples, truth = next(gen)
        preds = model.predict(np.stack(examples))
        this_true_class_is = [np.argmax(probs) for probs in truth]
        true_class_is.extend(this_true_class_is)
        true_classes.extend(class_names[i] for i in this_true_class_is)
        # BUG FIX: zip against THIS batch's true indices. The original zipped
        # `preds` with the accumulated `true_class_is`, so every batch after
        # the first was paired with batch 0's labels (zip stops at the
        # shorter sequence, i.e. the first len(preds) accumulated entries).
        true_probs.extend(ps[i] for ps, i in zip(preds, this_true_class_is))
        pred_classes.extend(class_names[np.argmax(probs)] for probs in preds)
        pred_probs.extend(np.max(probs) for probs in preds)
        # preds.T iterates per-class columns; append each to its class list.
        for cp, p in zip(class_probs, preds.T):
            cp.extend(p)
        base_i = batch_i * gen.batch_size
        for i in range(base_i, base_i + len(examples)):
            # Markdown card shown in the W&B UI for this example. The literal
            # is intentionally left-aligned: triple-quoted strings keep
            # leading whitespace verbatim.
            cards.append(
                """```Predicted:
{pred_class} ({pred_prob:.2%})
Actual:
{true_class} ({true_prob:.2%})

```""".format(
                    true_class=true_classes[i],
                    true_prob=true_probs[i],
                    pred_class=pred_classes[i],
                    pred_prob=pred_probs[i],
                )
            )
    all_cols = [
        "wandb_example_id",
        "image",
        "card",
        "true_class",
        "true_prob",
        "pred_class",
        "pred_prob",
    ] + class_cols
    frame_dict = {
        "wandb_example_id": [str(s) for s in gen.filenames[: len(cards)]],
        "image": [
            wandb.Image(os.path.join(config.test_path, f))
            for f in gen.filenames[: len(cards)]
        ],
        "card": cards,
        "true_class": true_classes,
        "true_prob": true_probs,
        "pred_class": pred_classes,
        "pred_prob": pred_probs,
    }
    for c, col in zip(class_cols, class_probs):
        frame_dict[c] = col
    table = pandas.DataFrame(frame_dict, columns=all_cols)
    # Probability columns arrive as numpy scalars; normalize to numeric dtype.
    number_cols = ["true_prob", "pred_prob"] + class_cols
    table[number_cols] = table[number_cols].apply(pandas.to_numeric)
    return table
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def on_epoch_end(self, epoch, logs=None):
    """Publish a freshly computed test-set results table to the run summary."""
    # Recompute over the full test generator with the current epoch's weights.
    table = results_data_frame(test_datagen, model)
    run.summary["results"] = table
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.