id int64 1 6.07M | name stringlengths 1 295 | code stringlengths 12 426k | language stringclasses 1
value | source_file stringlengths 5 202 | start_line int64 1 158k | end_line int64 1 158k | repo dict |
|---|---|---|---|---|---|---|---|
401 | mocks_from_args | def mocks_from_args(mocker, args, mock_server):
if args["k8s"] is not None:
mock_server.ctx["k8s"] = args["k8s"]
args["env"].update(utils.mock_k8s(mocker))
if args["sagemaker"]:
args["env"].update(utils.mock_sagemaker(mocker)) | python | tests/pytest_tests/unit_tests_old/conftest.py | 475 | 480 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
402 | wandb_init_run | def wandb_init_run(request, runner, mocker, mock_server):
marker = request.node.get_closest_marker("wandb_args")
args = default_wandb_args()
if marker:
args.update(marker.kwargs)
try:
mocks_from_args(mocker, args, mock_server)
with mock.patch.dict(os.environ, {k: v for k, v in args["env"].items()}):
# TODO: likely not the right thing to do, we shouldn't be setting this
wandb._IS_INTERNAL_PROCESS = False
run = wandb.init(
settings=dict(console="off", mode="offline", _except_exit=False),
**args["wandb_init"],
)
yield run
run.finish()
finally:
unset_globals() | python | tests/pytest_tests/unit_tests_old/conftest.py | 484 | 501 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
403 | wandb_init | def wandb_init(request, runner, mocker, mock_server):
def init(*args, **kwargs):
try:
mocks_from_args(mocker, default_wandb_args(), mock_server)
# TODO: likely not the right thing to do, we shouldn't be setting this
wandb._IS_INTERNAL_PROCESS = False
return wandb.init(
settings=dict(console="off", mode="offline", _except_exit=False),
*args,
**kwargs,
)
finally:
unset_globals()
return init | python | tests/pytest_tests/unit_tests_old/conftest.py | 505 | 519 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
404 | init | def init(*args, **kwargs):
try:
mocks_from_args(mocker, default_wandb_args(), mock_server)
# TODO: likely not the right thing to do, we shouldn't be setting this
wandb._IS_INTERNAL_PROCESS = False
return wandb.init(
settings=dict(console="off", mode="offline", _except_exit=False),
*args,
**kwargs,
)
finally:
unset_globals() | python | tests/pytest_tests/unit_tests_old/conftest.py | 506 | 517 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
405 | restore_version | def restore_version():
save_current_version = wandb.__version__
yield
wandb.__version__ = save_current_version
try:
del wandb.__hack_pypi_latest_version__
except AttributeError:
pass | python | tests/pytest_tests/unit_tests_old/conftest.py | 523 | 530 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
406 | parse_ctx | def parse_ctx():
"""Fixture providing class to parse context data."""
def parse_ctx_fn(ctx, run_id=None):
return utils.ParseCTX(ctx, run_id=run_id)
yield parse_ctx_fn | python | tests/pytest_tests/unit_tests_old/conftest.py | 534 | 540 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
407 | parse_ctx_fn | def parse_ctx_fn(ctx, run_id=None):
return utils.ParseCTX(ctx, run_id=run_id) | python | tests/pytest_tests/unit_tests_old/conftest.py | 537 | 538 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
408 | record_q | def record_q():
return queue.Queue() | python | tests/pytest_tests/unit_tests_old/conftest.py | 544 | 545 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
409 | fake_interface | def fake_interface(record_q):
return InterfaceQueue(record_q=record_q) | python | tests/pytest_tests/unit_tests_old/conftest.py | 549 | 550 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
410 | fake_backend | def fake_backend(fake_interface):
class FakeBackend:
def __init__(self):
self.interface = fake_interface
yield FakeBackend() | python | tests/pytest_tests/unit_tests_old/conftest.py | 554 | 559 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
411 | __init__ | def __init__(self):
self.interface = fake_interface | python | tests/pytest_tests/unit_tests_old/conftest.py | 556 | 557 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
412 | fake_run | def fake_run(fake_backend):
def run_fn():
s = wandb.Settings()
run = wandb_sdk.wandb_run.Run(settings=s)
run._set_backend(fake_backend)
return run
yield run_fn | python | tests/pytest_tests/unit_tests_old/conftest.py | 563 | 570 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
413 | run_fn | def run_fn():
s = wandb.Settings()
run = wandb_sdk.wandb_run.Run(settings=s)
run._set_backend(fake_backend)
return run | python | tests/pytest_tests/unit_tests_old/conftest.py | 564 | 568 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
414 | records_util | def records_util():
def records_fn(q):
ru = utils.RecordsUtil(q)
return ru
yield records_fn | python | tests/pytest_tests/unit_tests_old/conftest.py | 574 | 579 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
415 | records_fn | def records_fn(q):
ru = utils.RecordsUtil(q)
return ru | python | tests/pytest_tests/unit_tests_old/conftest.py | 575 | 577 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
416 | user_test | def user_test(fake_run, record_q, records_util):
class UserTest:
pass
ut = UserTest()
ut.get_run = fake_run
ut.get_records = lambda: records_util(record_q)
yield ut | python | tests/pytest_tests/unit_tests_old/conftest.py | 583 | 591 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
417 | log_debug | def log_debug(caplog):
caplog.set_level(logging.DEBUG)
yield
# for rec in caplog.records:
# print("LOGGER", rec.message, file=sys.stderr) | python | tests/pytest_tests/unit_tests_old/conftest.py | 603 | 607 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
418 | internal_result_q | def internal_result_q():
return queue.Queue() | python | tests/pytest_tests/unit_tests_old/conftest.py | 616 | 617 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
419 | internal_sender_q | def internal_sender_q():
return queue.Queue() | python | tests/pytest_tests/unit_tests_old/conftest.py | 621 | 622 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
420 | internal_writer_q | def internal_writer_q():
return queue.Queue() | python | tests/pytest_tests/unit_tests_old/conftest.py | 626 | 627 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
421 | internal_process | def internal_process():
# FIXME: return mocked process (needs is_alive())
return MockProcess() | python | tests/pytest_tests/unit_tests_old/conftest.py | 631 | 633 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
422 | __init__ | def __init__(self):
self._alive = True | python | tests/pytest_tests/unit_tests_old/conftest.py | 637 | 638 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
423 | is_alive | def is_alive(self):
return self._alive | python | tests/pytest_tests/unit_tests_old/conftest.py | 640 | 641 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
424 | internal_mailbox | def internal_mailbox():
return Mailbox() | python | tests/pytest_tests/unit_tests_old/conftest.py | 645 | 646 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
425 | _internal_sender | def _internal_sender(record_q, internal_result_q, internal_process, internal_mailbox):
return InterfaceQueue(
record_q=record_q,
result_q=internal_result_q,
process=internal_process,
mailbox=internal_mailbox,
) | python | tests/pytest_tests/unit_tests_old/conftest.py | 650 | 656 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
426 | _internal_context_keeper | def _internal_context_keeper():
context_keeper = context.ContextKeeper()
yield context_keeper | python | tests/pytest_tests/unit_tests_old/conftest.py | 660 | 662 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
427 | internal_sm | def internal_sm(
runner,
internal_sender_q,
internal_result_q,
test_settings,
mock_server,
_internal_sender,
_internal_context_keeper,
tmp_path,
):
with runner.isolated_filesystem(**get_temp_dir_kwargs(tmp_path)):
test_settings.update(
root_dir=os.getcwd(), source=wandb.sdk.wandb_settings.Source.INIT
)
sm = SendManager(
settings=test_settings,
record_q=internal_sender_q,
result_q=internal_result_q,
interface=_internal_sender,
context_keeper=_internal_context_keeper,
)
yield sm | python | tests/pytest_tests/unit_tests_old/conftest.py | 666 | 687 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
428 | stopped_event | def stopped_event():
stopped = threading.Event()
yield stopped | python | tests/pytest_tests/unit_tests_old/conftest.py | 691 | 693 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
429 | internal_hm | def internal_hm(
runner,
record_q,
internal_result_q,
test_settings,
mock_server,
internal_writer_q,
_internal_sender,
stopped_event,
_internal_context_keeper,
tmp_path,
):
with runner.isolated_filesystem(**get_temp_dir_kwargs(tmp_path)):
test_settings.update(
root_dir=os.getcwd(), source=wandb.sdk.wandb_settings.Source.INIT
)
hm = HandleManager(
settings=test_settings,
record_q=record_q,
result_q=internal_result_q,
stopped=stopped_event,
writer_q=internal_writer_q,
interface=_internal_sender,
context_keeper=_internal_context_keeper,
)
yield hm | python | tests/pytest_tests/unit_tests_old/conftest.py | 697 | 722 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
430 | internal_wm | def internal_wm(
runner,
internal_writer_q,
internal_result_q,
internal_sender_q,
stopped_event,
_internal_sender,
_internal_context_keeper,
test_settings,
tmp_path,
):
with runner.isolated_filesystem(**get_temp_dir_kwargs(tmp_path)):
test_settings.update(
root_dir=os.getcwd(), source=wandb.sdk.wandb_settings.Source.INIT
)
wandb_file = test_settings.sync_file
run_dir = Path(wandb_file).parent
os.makedirs(run_dir)
wm = WriteManager(
settings=test_settings,
record_q=internal_writer_q,
result_q=internal_result_q,
sender_q=internal_sender_q,
interface=_internal_sender,
context_keeper=_internal_context_keeper,
)
yield wm | python | tests/pytest_tests/unit_tests_old/conftest.py | 726 | 752 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
431 | internal_get_record | def internal_get_record():
def _get_record(input_q, timeout=None):
try:
i = input_q.get(timeout=timeout)
except queue.Empty:
return None
return i
return _get_record | python | tests/pytest_tests/unit_tests_old/conftest.py | 756 | 764 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
432 | _get_record | def _get_record(input_q, timeout=None):
try:
i = input_q.get(timeout=timeout)
except queue.Empty:
return None
return i | python | tests/pytest_tests/unit_tests_old/conftest.py | 757 | 762 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
433 | start_send_thread | def start_send_thread(
internal_sender_q, internal_get_record, stopped_event, internal_process
):
def start_send(send_manager):
def target():
try:
while True:
payload = internal_get_record(
input_q=internal_sender_q, timeout=0.1
)
if payload:
send_manager.send(payload)
elif stopped_event.is_set():
break
except Exception as e:
stopped_event.set()
print("RAISE_SEND", e)
internal_process._alive = False
t = threading.Thread(target=target)
t.name = "testing-sender"
t.daemon = True
t.start()
return t
yield start_send
stopped_event.set() | python | tests/pytest_tests/unit_tests_old/conftest.py | 768 | 794 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
434 | start_send | def start_send(send_manager):
def target():
try:
while True:
payload = internal_get_record(
input_q=internal_sender_q, timeout=0.1
)
if payload:
send_manager.send(payload)
elif stopped_event.is_set():
break
except Exception as e:
stopped_event.set()
print("RAISE_SEND", e)
internal_process._alive = False
t = threading.Thread(target=target)
t.name = "testing-sender"
t.daemon = True
t.start()
return t | python | tests/pytest_tests/unit_tests_old/conftest.py | 771 | 791 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
435 | target | def target():
try:
while True:
payload = internal_get_record(
input_q=internal_sender_q, timeout=0.1
)
if payload:
send_manager.send(payload)
elif stopped_event.is_set():
break
except Exception as e:
stopped_event.set()
print("RAISE_SEND", e)
internal_process._alive = False | python | tests/pytest_tests/unit_tests_old/conftest.py | 772 | 785 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
436 | start_write_thread | def start_write_thread(
internal_writer_q, internal_get_record, stopped_event, internal_process
):
def start_write(write_manager):
def target():
try:
while True:
payload = internal_get_record(
input_q=internal_writer_q, timeout=0.1
)
if payload:
write_manager.write(payload)
elif stopped_event.is_set():
break
except Exception as e:
stopped_event.set()
print("RAISE_WRIT", e)
internal_process._alive = False
t = threading.Thread(target=target)
t.name = "testing-writer"
t.daemon = True
t.start()
return t
yield start_write
stopped_event.set() | python | tests/pytest_tests/unit_tests_old/conftest.py | 798 | 824 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
437 | start_write | def start_write(write_manager):
def target():
try:
while True:
payload = internal_get_record(
input_q=internal_writer_q, timeout=0.1
)
if payload:
write_manager.write(payload)
elif stopped_event.is_set():
break
except Exception as e:
stopped_event.set()
print("RAISE_WRIT", e)
internal_process._alive = False
t = threading.Thread(target=target)
t.name = "testing-writer"
t.daemon = True
t.start()
return t | python | tests/pytest_tests/unit_tests_old/conftest.py | 801 | 821 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
438 | target | def target():
try:
while True:
payload = internal_get_record(
input_q=internal_writer_q, timeout=0.1
)
if payload:
write_manager.write(payload)
elif stopped_event.is_set():
break
except Exception as e:
stopped_event.set()
print("RAISE_WRIT", e)
internal_process._alive = False | python | tests/pytest_tests/unit_tests_old/conftest.py | 802 | 815 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
439 | start_handle_thread | def start_handle_thread(record_q, internal_get_record, stopped_event):
def start_handle(handle_manager):
def target():
while True:
payload = internal_get_record(input_q=record_q, timeout=0.1)
if payload:
handle_manager.handle(payload)
elif stopped_event.is_set():
break
t = threading.Thread(target=target)
t.name = "testing-handler"
t.daemon = True
t.start()
return t
yield start_handle
stopped_event.set() | python | tests/pytest_tests/unit_tests_old/conftest.py | 828 | 845 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
440 | start_handle | def start_handle(handle_manager):
def target():
while True:
payload = internal_get_record(input_q=record_q, timeout=0.1)
if payload:
handle_manager.handle(payload)
elif stopped_event.is_set():
break
t = threading.Thread(target=target)
t.name = "testing-handler"
t.daemon = True
t.start()
return t | python | tests/pytest_tests/unit_tests_old/conftest.py | 829 | 842 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
441 | target | def target():
while True:
payload = internal_get_record(input_q=record_q, timeout=0.1)
if payload:
handle_manager.handle(payload)
elif stopped_event.is_set():
break | python | tests/pytest_tests/unit_tests_old/conftest.py | 830 | 836 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
442 | _start_backend | def _start_backend(
mocked_run,
internal_hm,
internal_sm,
internal_wm,
_internal_sender,
start_handle_thread,
start_write_thread,
start_send_thread,
log_debug,
):
def start_backend_func(initial_run=True, initial_start=False):
ht = start_handle_thread(internal_hm)
wt = start_write_thread(internal_wm)
st = start_send_thread(internal_sm)
if initial_run:
run = _internal_sender.communicate_run(mocked_run)
if initial_start:
_internal_sender.communicate_run_start(run.run)
return (ht, wt, st)
yield start_backend_func | python | tests/pytest_tests/unit_tests_old/conftest.py | 849 | 870 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
443 | start_backend_func | def start_backend_func(initial_run=True, initial_start=False):
ht = start_handle_thread(internal_hm)
wt = start_write_thread(internal_wm)
st = start_send_thread(internal_sm)
if initial_run:
run = _internal_sender.communicate_run(mocked_run)
if initial_start:
_internal_sender.communicate_run_start(run.run)
return (ht, wt, st) | python | tests/pytest_tests/unit_tests_old/conftest.py | 860 | 868 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
444 | _stop_backend | def _stop_backend(
mocked_run,
internal_hm,
internal_sm,
_internal_sender,
start_handle_thread,
start_send_thread,
collect_responses,
):
def stop_backend_func(threads=None):
threads = threads or ()
handle = _internal_sender.deliver_exit(0)
record = handle.wait(timeout=30)
assert record
server_info_handle = _internal_sender.deliver_request_server_info()
result = server_info_handle.wait(timeout=30)
assert result
collect_responses.server_info_resp = result.response.server_info_response
_internal_sender.join()
for t in threads:
t.join()
yield stop_backend_func | python | tests/pytest_tests/unit_tests_old/conftest.py | 874 | 899 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
445 | stop_backend_func | def stop_backend_func(threads=None):
threads = threads or ()
handle = _internal_sender.deliver_exit(0)
record = handle.wait(timeout=30)
assert record
server_info_handle = _internal_sender.deliver_request_server_info()
result = server_info_handle.wait(timeout=30)
assert result
collect_responses.server_info_resp = result.response.server_info_response
_internal_sender.join()
for t in threads:
t.join() | python | tests/pytest_tests/unit_tests_old/conftest.py | 883 | 897 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
446 | backend_interface | def backend_interface(_start_backend, _stop_backend, _internal_sender):
@contextmanager
def backend_context(initial_run=True, initial_start=False):
threads = _start_backend(initial_run=initial_run, initial_start=initial_start)
try:
yield _internal_sender
finally:
_stop_backend(threads=threads)
return backend_context | python | tests/pytest_tests/unit_tests_old/conftest.py | 903 | 912 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
447 | backend_context | def backend_context(initial_run=True, initial_start=False):
threads = _start_backend(initial_run=initial_run, initial_start=initial_start)
try:
yield _internal_sender
finally:
_stop_backend(threads=threads) | python | tests/pytest_tests/unit_tests_old/conftest.py | 905 | 910 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
448 | publish_util | def publish_util(
mocked_run,
mock_server,
backend_interface,
parse_ctx,
):
def fn(
metrics=None,
history=None,
artifacts=None,
files=None,
begin_cb=None,
end_cb=None,
initial_start=False,
):
metrics = metrics or []
history = history or []
artifacts = artifacts or []
files = files or []
with backend_interface(initial_start=initial_start) as interface:
if begin_cb:
begin_cb(interface)
for m in metrics:
interface._publish_metric(m)
for h in history:
interface.publish_history(**h)
for a in artifacts:
interface.publish_artifact(**a)
for f in files:
interface.publish_files(**f)
if end_cb:
end_cb(interface)
ctx_util = parse_ctx(mock_server.ctx, run_id=mocked_run.id)
return ctx_util
yield fn | python | tests/pytest_tests/unit_tests_old/conftest.py | 916 | 952 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
449 | fn | def fn(
metrics=None,
history=None,
artifacts=None,
files=None,
begin_cb=None,
end_cb=None,
initial_start=False,
):
metrics = metrics or []
history = history or []
artifacts = artifacts or []
files = files or []
with backend_interface(initial_start=initial_start) as interface:
if begin_cb:
begin_cb(interface)
for m in metrics:
interface._publish_metric(m)
for h in history:
interface.publish_history(**h)
for a in artifacts:
interface.publish_artifact(**a)
for f in files:
interface.publish_files(**f)
if end_cb:
end_cb(interface)
ctx_util = parse_ctx(mock_server.ctx, run_id=mocked_run.id)
return ctx_util | python | tests/pytest_tests/unit_tests_old/conftest.py | 922 | 950 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
450 | tbwatcher_util | def tbwatcher_util(mocked_run, mock_server, internal_hm, backend_interface, parse_ctx):
def fn(write_function, logdir="./", save=True, root_dir="./"):
with backend_interface() as interface:
proto_run = pb.RunRecord()
mocked_run._make_proto_run(proto_run)
run_start = pb.RunStartRequest()
run_start.run.CopyFrom(proto_run)
request = pb.Request()
request.run_start.CopyFrom(run_start)
record = pb.Record()
record.request.CopyFrom(request)
internal_hm.handle_request_run_start(record)
internal_hm._tb_watcher.add(logdir, save, root_dir)
# need to sleep to give time for the tb_watcher delay
time.sleep(15)
write_function()
ctx_util = parse_ctx(mock_server.ctx)
return ctx_util
yield fn | python | tests/pytest_tests/unit_tests_old/conftest.py | 956 | 981 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
451 | fn | def fn(write_function, logdir="./", save=True, root_dir="./"):
with backend_interface() as interface:
proto_run = pb.RunRecord()
mocked_run._make_proto_run(proto_run)
run_start = pb.RunStartRequest()
run_start.run.CopyFrom(proto_run)
request = pb.Request()
request.run_start.CopyFrom(run_start)
record = pb.Record()
record.request.CopyFrom(request)
internal_hm.handle_request_run_start(record)
internal_hm._tb_watcher.add(logdir, save, root_dir)
# need to sleep to give time for the tb_watcher delay
time.sleep(15)
write_function()
ctx_util = parse_ctx(mock_server.ctx)
return ctx_util | python | tests/pytest_tests/unit_tests_old/conftest.py | 957 | 979 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
452 | inject_requests | def inject_requests(mock_server):
"""Fixture for injecting responses and errors to mock_server."""
# TODO(jhr): make this compatible with live_mock_server
return utils.InjectRequests(ctx=mock_server.ctx) | python | tests/pytest_tests/unit_tests_old/conftest.py | 985 | 989 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
453 | collect_responses | def collect_responses():
responses = Responses()
yield responses | python | tests/pytest_tests/unit_tests_old/conftest.py | 997 | 999 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
454 | mock_tty | def mock_tty(monkeypatch):
class WriteThread(threading.Thread):
def __init__(self, fname):
threading.Thread.__init__(self)
self._fname = fname
self._q = queue.Queue()
def run(self):
with open(self._fname, "w") as fp:
while True:
data = self._q.get()
if data == "_DONE_":
break
fp.write(data)
fp.flush()
def add(self, input_str):
self._q.put(input_str)
def stop(self):
self.add("_DONE_")
with tempfile.TemporaryDirectory() as tmpdir:
fds = dict()
def setup_fn(input_str):
fname = os.path.join(tmpdir, "file.txt")
if platform.system() != "Windows":
os.mkfifo(fname, 0o600)
writer = WriteThread(fname)
writer.start()
writer.add(input_str)
fds["writer"] = writer
monkeypatch.setattr("termios.tcflush", lambda x, y: None)
else:
# windows doesn't support named pipes, just write it
# TODO: emulate msvcrt to support input on windows
with open(fname, "w") as fp:
fp.write(input_str)
fds["stdin"] = open(fname)
monkeypatch.setattr("sys.stdin", fds["stdin"])
sys.stdin.isatty = lambda: True
sys.stdout.isatty = lambda: True
yield setup_fn
writer = fds.get("writer")
if writer:
writer.stop()
writer.join()
stdin = fds.get("stdin")
if stdin:
stdin.close()
del sys.stdin.isatty
del sys.stdout.isatty | python | tests/pytest_tests/unit_tests_old/conftest.py | 1,003 | 1,058 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
455 | __init__ | def __init__(self, fname):
threading.Thread.__init__(self)
self._fname = fname
self._q = queue.Queue() | python | tests/pytest_tests/unit_tests_old/conftest.py | 1,005 | 1,008 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
456 | run | def run(self):
with open(self._fname, "w") as fp:
while True:
data = self._q.get()
if data == "_DONE_":
break
fp.write(data)
fp.flush() | python | tests/pytest_tests/unit_tests_old/conftest.py | 1,010 | 1,017 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
457 | add | def add(self, input_str):
self._q.put(input_str) | python | tests/pytest_tests/unit_tests_old/conftest.py | 1,019 | 1,020 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
458 | stop | def stop(self):
self.add("_DONE_") | python | tests/pytest_tests/unit_tests_old/conftest.py | 1,022 | 1,023 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
459 | setup_fn | def setup_fn(input_str):
fname = os.path.join(tmpdir, "file.txt")
if platform.system() != "Windows":
os.mkfifo(fname, 0o600)
writer = WriteThread(fname)
writer.start()
writer.add(input_str)
fds["writer"] = writer
monkeypatch.setattr("termios.tcflush", lambda x, y: None)
else:
# windows doesn't support named pipes, just write it
# TODO: emulate msvcrt to support input on windows
with open(fname, "w") as fp:
fp.write(input_str)
fds["stdin"] = open(fname)
monkeypatch.setattr("sys.stdin", fds["stdin"])
sys.stdin.isatty = lambda: True
sys.stdout.isatty = lambda: True | python | tests/pytest_tests/unit_tests_old/conftest.py | 1,028 | 1,045 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
460 | api | def api(runner):
return Api() | python | tests/pytest_tests/unit_tests_old/conftest.py | 1,062 | 1,063 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
461 | get_step_metric_dict | def get_step_metric_dict(ml):
"""Get mapping from metric to preferred x-axis."""
nl = [m["1"] for m in ml]
md = {m["1"]: nl[m["5"] - 1] for m in ml if m.get("5")}
return md | python | tests/pytest_tests/unit_tests_old/utils/parse_metrics.py | 25 | 29 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
462 | matplotlib_multiple_axes_figures | def matplotlib_multiple_axes_figures(total_plot_count=3, data=[1, 2, 3]):
"""Helper generator which create a figure containing up to `total_plot_count`
axes and optionally adds `data` to each axes in a permutation-style loop.
"""
for num_plots in range(1, total_plot_count + 1):
for permutation in range(2**num_plots):
has_data = [permutation & (1 << i) > 0 for i in range(num_plots)]
fig, ax = plt.subplots(num_plots)
if num_plots == 1:
if has_data[0]:
ax.plot(data)
else:
for plot_id in range(num_plots):
if has_data[plot_id]:
ax[plot_id].plot(data)
yield fig
plt.close() | python | tests/pytest_tests/unit_tests_old/utils/dummy_data.py | 9 | 25 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
463 | matplotlib_with_image | def matplotlib_with_image():
"""Create a matplotlib figure with an image"""
fig, ax = plt.subplots(3)
ax[0].plot([1, 2, 3])
ax[1].imshow(np.random.rand(200, 200, 3))
ax[2].plot([1, 2, 3])
return fig | python | tests/pytest_tests/unit_tests_old/utils/dummy_data.py | 28 | 34 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
464 | matplotlib_without_image | def matplotlib_without_image():
"""Create a matplotlib figure without an image"""
fig, ax = plt.subplots(2)
ax[0].plot([1, 2, 3])
ax[1].plot([1, 2, 3])
return fig | python | tests/pytest_tests/unit_tests_old/utils/dummy_data.py | 37 | 42 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
465 | bokeh_plot | def bokeh_plot():
# from https://docs.bokeh.org/en/latest/docs/user_guide/quickstart.html
# prepare some data
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
# create a new plot with a title and axis labels
p = figure(title="simple line example", x_axis_label="x", y_axis_label="y")
# add a line renderer with legend and line thickness
p.line(x, y, legend_label="Temp.", line_width=2)
return p | python | tests/pytest_tests/unit_tests_old/utils/dummy_data.py | 45 | 57 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
466 | execute_cells | def execute_cells(self, cell_index=0, execution_count=None, store_history=True):
"""Execute a specific cell. Since we always execute setup.py in the first
cell we increment the index offset here
"""
if not isinstance(cell_index, list):
cell_index = [cell_index]
executed_cells = []
for idx in cell_index:
try:
cell = self.nb["cells"][idx + 1]
ecell = super().execute_cell(
cell,
idx + 1,
execution_count=execution_count,
store_history=store_history,
)
except CellExecutionError as e:
print("Cell output before exception:")
print("=============================")
for output in cell["outputs"]:
if output["output_type"] == "stream":
print(output["text"])
raise e
for output in ecell["outputs"]:
if output["output_type"] == "error":
print("Error in cell: %s" % idx + 1)
print("\n".join(output["traceback"]))
raise ValueError(output["evalue"])
executed_cells.append(ecell)
return executed_cells | python | tests/pytest_tests/unit_tests_old/utils/notebook_client.py | 6 | 37 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
467 | execute_all | def execute_all(self, store_history=True):
return self.execute_cells(list(range(len(self.nb["cells"]) - 1)), store_history) | python | tests/pytest_tests/unit_tests_old/utils/notebook_client.py | 39 | 40 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
468 | cell_output_text | def cell_output_text(self, cell_index):
"""Return cell text output
Arguments:
cell_index {int} -- cell index in notebook
Returns:
str -- Text output
"""
text = ""
outputs = self.nb["cells"][cell_index + 1]["outputs"]
for output in outputs:
if "text" in output:
text += output["text"]
return text | python | tests/pytest_tests/unit_tests_old/utils/notebook_client.py | 42 | 58 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
469 | all_output_text | def all_output_text(self):
text = ""
for i in range(len(self.nb["cells"]) - 1):
text += self.cell_output_text(i)
return text | python | tests/pytest_tests/unit_tests_old/utils/notebook_client.py | 60 | 64 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
470 | cells | def cells(self):
return iter(self.nb["cells"][1:]) | python | tests/pytest_tests/unit_tests_old/utils/notebook_client.py | 67 | 68 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
471 | cell_output | def cell_output(self, cell_index):
"""Return a cells outputs
NOTE: Since we always execute an init cell we adjust the offset by 1
Arguments:
cell_index {int} -- cell index in notebook
Returns:
list -- List of outputs for the given cell
"""
outputs = self.nb["cells"][cell_index + 1]["outputs"]
return outputs | python | tests/pytest_tests/unit_tests_old/utils/notebook_client.py | 70 | 83 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def load_modules(use_yea=False):
    """Bind the mock helper classes into module globals.

    When ``use_yea`` is set, the yea_wandb implementations are used;
    otherwise the local test utilities are imported, falling back to
    top-level module names when relative imports are unavailable.
    """
    global RequestsMock, InjectRequestsParse, ArtifactEmulator
    if use_yea:
        from yea_wandb.artifact_emu import ArtifactEmulator
        from yea_wandb.mock_requests import InjectRequestsParse, RequestsMock
        return
    try:
        from .artifact_emu import ArtifactEmulator
        from .mock_requests import InjectRequestsParse, RequestsMock
    except ImportError:
        from artifact_emu import ArtifactEmulator
        from mock_requests import InjectRequestsParse, RequestsMock
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def default_ctx():
    """Return a fresh mock-server context dict with default settings.

    The ``*_count`` keys are counters incremented by the server when the
    matching ``*_times`` settings are injected into the context by tests.
    """
    return {
        "fail_graphql_count": 0,  # used via "fail_graphql_times"
        "fail_storage_count": 0,  # used via "fail_storage_times"
        "rate_limited_count": 0,  # used via "rate_limited_times"
        "graphql_conflict": False,
        "num_search_users": 1,
        "page_count": 0,
        "page_times": 2,
        "requested_file": "weights.h5",
        "current_run": None,
        "files": {},
        "k8s": False,
        "resume": None,
        "file_bytes": {},
        "manifests_created": [],
        "artifacts": {},
        "artifacts_by_id": {},
        "artifacts_created": {},
        "portfolio_links": {},
        "upsert_bucket_count": 0,
        "out_of_date": False,
        "empty_query": False,
        "local_none": False,
        "run_queues_return_default": True,
        "run_queues": {"1": []},
        "num_popped": 0,
        "num_acked": 0,
        "max_cli_version": "0.14.0",
        "runs": {},
        "run_ids": [],
        "file_names": [],
        "emulate_artifacts": None,
        "emulate_azure": False,
        "run_state": "running",
        "run_queue_item_return_type": "queued",
        "run_script_type": "python",
        "alerts": [],
        "gorilla_supports_launch_agents": True,
        "launch_agents": {},
        "successfully_create_default_queue": True,
        "launch_agent_update_fail": False,
        "stop_launch_agent": False,
        "swappable_artifacts": False,
        "used_artifact_info": None,
        "invalid_launch_spec_project": False,
        "n_sweep_runs": 0,
        "code_saving_enabled": True,
        "sentry_events": [],
        "sentry_sessions": [],
        "run_cuda_version": None,
        # relay mode, keep track of upsert runs for validation
        "relay_run_info": {},
        "server_settings": False,
        "server_messages": None,
        "latest_arti_id": None,
    }
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def mock_server(mocker):
    """Pytest fixture: build the fake backend and patch ``requests`` everywhere.

    Creates a Flask app over a fresh default context, wraps it in a
    RequestsMock, and substitutes that mock for the ``requests`` module in
    every wandb module that performs HTTP calls.
    """
    load_modules()
    ctx = default_ctx()
    app = create_app(ctx)
    mock = RequestsMock(app, ctx)
    # We mock out all requests libraries, couldn't find a way to mock the core lib
    patch_targets = (
        # From previous wandb_gql transport library.
        "wandb_gql.transport.requests.requests",
        "wandb.wandb_sdk.lib.gql_request.requests",
        "wandb.wandb_sdk.internal.file_stream.requests",
        "wandb.wandb_sdk.internal.internal_api.requests",
        "wandb.wandb_sdk.internal.update.requests",
        "wandb.wandb_sdk.internal.sender.requests",
        "wandb.apis.public.requests",
        "wandb.util.requests",
        "wandb.wandb_sdk.wandb_artifacts.requests",
        "azure.core.pipeline.transport._requests_basic.requests",
    )
    for target in patch_targets:
        mocker.patch(target, mock)
    print("Patched requests everywhere", os.getpid())
    return mock
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def run(ctx):
    """Build a fake run node as returned by the mock GraphQL server.

    The file edge, program name and timestamps depend on the current mock
    context (``resume``, ``requested_file``, ``run_script_type``).

    Arguments:
        ctx {dict} -- mock-server context

    Returns:
        dict -- run payload suitable for a GraphQL response
    """
    if ctx["resume"]:
        # Resumed runs pretend to have been created a day ago.
        now = datetime.now()
        created_at = (now - timedelta(days=1)).isoformat()
    else:
        created_at = datetime.now().isoformat()

    stopped = ctx.get("stopped", False)
    base_url = request.url_root.rstrip("/")

    # for wandb_tests::wandb_restore_name_not_found
    # if there is a fileName query, and this query is for nofile.h5
    # return an empty file. otherwise, return the usual weights.h5
    if ctx.get("graphql"):
        fileNames = ctx["graphql"][-1]["variables"].get("fileNames")
    else:
        fileNames = None
    if fileNames == ["nofile.h5"]:
        fileNode = {
            "id": "file123",
            "name": "nofile.h5",
            "sizeBytes": 0,
            "md5": "0",
            "url": base_url + "/storage?file=nofile.h5",
        }
    else:
        fileNode = {
            "id": "file123",
            "name": ctx["requested_file"],
            "sizeBytes": 20,
            "md5": "XXX",
            "url": base_url + "/storage?file=%s" % ctx["requested_file"],
            "directUrl": base_url
            + "/storage?file=%s&direct=true" % ctx["requested_file"],
        }

    # Map each script type to a representative program name. The original
    # if/elif chain left program_name unbound (NameError) for any value
    # outside the four known types; fall back to the python program instead.
    _programs = {
        "notebook": "one_cell.ipynb",
        "shell": "test.sh",
        "python": "train.py",
        "unknown": "unknown.unk",
    }
    program_name = _programs.get(ctx["run_script_type"], "train.py")

    return {
        "id": "test",
        "name": "test",
        "displayName": "beast-bug-33",
        "state": "running",
        "config": '{"epochs": {"value": 10}}',
        "group": "A",
        "jobType": "test",
        "description": "",
        "systemMetrics": '{"cpu": 100}',
        "summaryMetrics": '{"acc": 100, "loss": 0}',
        "fileCount": 1,
        "history": [
            '{"acc": 10, "loss": 90}',
            '{"acc": 20, "loss": 80}',
            '{"acc": 30, "loss": 70}',
        ],
        "events": ['{"cpu": 10}', '{"cpu": 20}', '{"cpu": 30}'],
        "files": {
            # Special weights url by default, if requesting upload we set the name
            "edges": [
                {
                    "node": fileNode,
                }
            ]
        },
        "sampledHistory": [[{"loss": 0, "acc": 100}, {"loss": 1, "acc": 0}]],
        "shouldStop": False,
        "failed": False,
        "stopped": stopped,
        "running": True,
        "tags": [],
        "notes": None,
        "sweepName": None,
        "createdAt": created_at,
        "updatedAt": datetime.now().isoformat(),
        "runInfo": {
            "program": program_name,
            "args": [],
            "os": platform.system(),
            "python": platform.python_version(),
            "colab": None,
            "executable": None,
            "codeSaved": False,
            "cpuCount": 12,
            "gpuCount": 0,
            "git": {
                "remote": "https://foo:bar@github.com/FooTest/Foo.git",
                "commit": "HEAD",
            },
        },
    }
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def artifact(
    ctx,
    collection_name="mnist",
    state="COMMITTED",
    request_url_root="",
    id_override=None,
):
    """Build a fake artifact node for mock GraphQL responses.

    The artifact id, version index, and alias are derived from the
    context's page_count unless id_override is supplied.
    """
    version = ctx["page_count"]
    artifact_id = str(version) if id_override is None else id_override
    manifest_url = (
        request_url_root + f"/storage?file=wandb_manifest.json&id={artifact_id}"
    )
    return {
        "id": artifact_id,
        "digest": "abc123",
        "description": "",
        "state": state,
        "size": 10000,
        "fileCount": 10,
        "createdAt": datetime.now().isoformat(),
        "updatedAt": datetime.now().isoformat(),
        "versionIndex": version,
        "labels": [],
        "metadata": "{}",
        "aliases": [
            {
                "artifactCollectionName": collection_name,
                "alias": "v%i" % version,
            }
        ],
        "artifactSequence": {
            "name": collection_name,
        },
        "artifactType": {"name": "dataset"},
        "currentManifest": {"file": {"directUrl": manifest_url}},
    }
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def paginated(node, ctx, extra=None):
    """Wrap ``node`` in a single-edge GraphQL pagination envelope.

    Increments ctx["page_count"]; hasNextPage is reported as long as
    page_count is still below ctx["page_times"].

    Arguments:
        node -- payload for the single returned edge
        ctx {dict} -- mock-server context (mutated: page_count += 1)
        extra {dict} -- optional extra keys merged into the edge
            (default changed from a mutable ``{}`` to ``None``; behavior
            is identical)

    Returns:
        dict -- {"edges": [...], "pageInfo": {...}}
    """
    ctx["page_count"] += 1
    has_next = ctx["page_count"] < ctx["page_times"]
    edge = {"node": node, "cursor": "abc123"}
    if extra:
        edge.update(extra)
    return {
        "edges": [edge],
        "pageInfo": {"endCursor": "abc123", "hasNextPage": has_next},
    }
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def __init__(self, ctx):
    # Wrap a mock-server context dict so it can be swapped atomically.
    self.ctx = ctx
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def get(self):
    """Return the wrapped context dict."""
    return self.ctx
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def set(self, ctx):
    """Replace the wrapped context and persist it as the shared state."""
    self.ctx = ctx
    CTX.persist(self)
    return self.ctx
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def persist(cls, instance):
    # Save the instance's context as the process-wide shared state,
    # guarded by the class lock.
    with cls.lock:
        cls.STATE = instance.ctx
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def load(cls, default):
    """Return a CTX over the shared state, or over ``default`` if none saved."""
    with cls.lock:
        if cls.STATE is not None:
            return CTX(cls.STATE)
        else:
            return CTX(default)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def get_ctx():
    # Lazily attach a CTX wrapper to the Flask request globals (``g``)
    # on first access within a request.
    if "ctx" not in g:
        g.ctx = CTX.load(default_ctx())
    return g.ctx.get()
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def get_run_ctx(run_id):
    """Return the per-run context stored under ``run_id`` in the global context."""
    return get_ctx()["runs"][run_id]
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def set_ctx(ctx):
    # Ensure g.ctx exists before replacing its contents.
    get_ctx()
    g.ctx.set(ctx)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def _bucket_config(ctx):
    """Build a fake run-bucket response listing the run's saved files."""
    files = ["wandb-metadata.json", "diff.patch"]
    if "bucket_config" in ctx and "files" in ctx["bucket_config"]:
        files = ctx["bucket_config"]["files"]
    base_url = request.url_root.rstrip("/")

    def _file_edge(name):
        # Each file gets a signed url plus a "direct" variant.
        storage_url = base_url + "/storage?file=" + name
        return {
            "node": {
                "url": storage_url,
                "directUrl": storage_url + "&direct=true",
                "updatedAt": datetime.now().isoformat(),
                "name": name,
            }
        }

    return {
        "commit": "HEAD",
        "github": "https://github.com/vanpelt",
        "config": '{"foo":{"value":"bar"}}',
        "files": {"edges": [_file_edge(name) for name in files]},
    }
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def __init__(self, message, status_code=None, payload=None):
    """Flask error helper carrying a message, optional status, and payload.

    When status_code is None the attribute is left untouched — presumably
    a class-level default applies; confirm against the class definition.
    """
    Exception.__init__(self)
    self.message = message
    if status_code is not None:
        self.status_code = status_code
    self.payload = payload
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def to_dict(self):
    """Serialize the exception as a JSON-able dict with an "error" key."""
    result = dict(self.payload or ())
    result["error"] = self.message
    return result
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def __init__(self):
    # Number of relayed requests seen for the injected-failure run.
    self._inject_count = 0
    # Wall-clock time of the first injected-failure request.
    self._inject_time = 0.0
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def relay(self, func):
    """Decorator: optionally forward POST requests to a live backend.

    When the MOCKSERVER_RELAY env var is set, POSTs are replayed against
    the remote base url and the live response is returned; run info from
    upsertBucket responses is recorded in the context for later
    validation. Otherwise the wrapped mock handler runs unchanged.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Normal mockserver mode, disable live relay and call next function
        if not os.environ.get("MOCKSERVER_RELAY"):
            return func(*args, **kwargs)
        if request.method == "POST":
            url_path = request.path
            body = request.get_json()
            base_url = os.environ.get(
                "MOCKSERVER_RELAY_REMOTE_BASE_URL",
                "https://api.wandb.ai",
            )
            url = urllib.parse.urljoin(base_url, url_path)
            resp = requests.post(url, json=body)
            data = resp.json()
            # Extract run/project/entity names from an upsertBucket
            # response; every get() falls back to {} so non-upsert
            # responses yield run_id of None.
            run_obj = ((data.get("data") or {}).get("upsertBucket") or {}).get(
                "bucket"
            ) or {}
            project_obj = run_obj.get("project", {})
            run_id = run_obj.get("name")
            project = project_obj.get("name")
            entity = project_obj.get("entity", {}).get("name")
            ctx = get_ctx()
            if run_id:
                ctx["relay_run_info"].setdefault(run_id, {})
                ctx["relay_run_info"][run_id]["project"] = project
                ctx["relay_run_info"][run_id]["entity"] = entity
            # TODO: This is a hardcoded for now, will add inject specification to the yea file
            if run_id and run_id.startswith("inject"):
                time_now = time.time()
                if self._inject_count == 0:
                    self._inject_time = time_now
                self._inject_count += 1
                # For ~21s after the first "inject" run request: stall,
                # then fail with a 500 to exercise retry behavior.
                if time_now < self._inject_time + 21:
                    # print("INJECT", self._inject_count, time_now, self._inject_time)
                    time.sleep(12)
                    raise HttpException("some error", status_code=500)
            return make_response(jsonify(data), resp.status_code)
        assert False  # we do not support get requests yet, and likely never will :)
        return func(*args, **kwargs)

    return wrapper
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def wrapper(*args, **kwargs):
    # Closure over `func` and `self` from the enclosing relay() decorator.
    # Normal mockserver mode, disable live relay and call next function
    if not os.environ.get("MOCKSERVER_RELAY"):
        return func(*args, **kwargs)
    if request.method == "POST":
        url_path = request.path
        body = request.get_json()
        base_url = os.environ.get(
            "MOCKSERVER_RELAY_REMOTE_BASE_URL",
            "https://api.wandb.ai",
        )
        url = urllib.parse.urljoin(base_url, url_path)
        resp = requests.post(url, json=body)
        data = resp.json()
        # Extract run/project/entity names from an upsertBucket response;
        # every get() falls back to {} so non-upsert responses yield None.
        run_obj = ((data.get("data") or {}).get("upsertBucket") or {}).get(
            "bucket"
        ) or {}
        project_obj = run_obj.get("project", {})
        run_id = run_obj.get("name")
        project = project_obj.get("name")
        entity = project_obj.get("entity", {}).get("name")
        ctx = get_ctx()
        if run_id:
            ctx["relay_run_info"].setdefault(run_id, {})
            ctx["relay_run_info"][run_id]["project"] = project
            ctx["relay_run_info"][run_id]["entity"] = entity
        # TODO: This is a hardcoded for now, will add inject specification to the yea file
        if run_id and run_id.startswith("inject"):
            time_now = time.time()
            if self._inject_count == 0:
                self._inject_time = time_now
            self._inject_count += 1
            # For ~21s after the first "inject" run request: stall, then
            # fail with a 500 to exercise retry behavior.
            if time_now < self._inject_time + 21:
                # print("INJECT", self._inject_count, time_now, self._inject_time)
                time.sleep(12)
                raise HttpException("some error", status_code=500)
        return make_response(jsonify(data), resp.status_code)
    assert False  # we do not support get requests yet, and likely never will :)
    return func(*args, **kwargs)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def context_enrich(self, ctx):
    """Merge live-relay run info into the mock context.

    For every run recorded by the relay decorator, fetch the real run via
    the wandb public API and append its config and final summary to both
    the global and per-run contexts, mimicking what the mock server
    records in non-relay mode. Runs that fail to fetch are skipped.
    """
    for run_id, run_info in ctx["relay_run_info"].items():
        run_num = len(ctx["runs"])
        insert = run_id not in ctx["run_ids"]
        if insert:
            ctx["run_ids"].append(run_id)
        run_ctx = ctx["runs"].setdefault(run_id, default_ctx())

        # NOTE: not used, added for consistency with non-relay mode
        r = run_ctx.setdefault("run", {})
        r.setdefault("display_name", f"relay_name-{run_num}")
        r.setdefault("storage_id", f"storageid{run_num}")
        r.setdefault("project_name", "relay_proj")
        r.setdefault("entity_name", "relay_entity")

        # TODO: handle errors better
        try:
            import wandb

            api = wandb.Api(
                overrides={
                    "base_url": os.environ.get(
                        "MOCKSERVER_RELAY_REMOTE_BASE_URL",
                        "https://api.wandb.ai",
                    )
                }
            )
            run = api.run(f"{run_info['entity']}/{run_info['project']}/{run_id}")
        except Exception as e:
            print(f"ERROR: problem calling public api for run {run_id}", e)
            continue

        # Re-wrap each config value in {"value": ...} to match the shape
        # the mock server stores for non-relay runs.
        value_config = {k: dict(value=v) for k, v in run.rawconfig.items()}
        # TODO: need to have a correct state mapping
        exitcode = 0 if run.state == "finished" else 1
        for c in ctx, run_ctx:
            c.setdefault("config", []).append(dict(value_config))
            c.setdefault("file_stream", []).append(
                dict(
                    exitcode=exitcode,
                    files={
                        "wandb-summary.json": dict(
                            offset=0, content=[json.dumps(run.summary_metrics)]
                        )
                    },
                )
            )
        ctx["runs"][run_id] = run_ctx
    # print("SEND", ctx)
    return ctx
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
493 | create_app | def create_app(user_ctx=None):
app = Flask(__name__)
# When starting in live mode, user_ctx is a fancy object
if isinstance(user_ctx, dict):
with app.app_context():
set_ctx(user_ctx)
snoop = SnoopRelay()
@app.teardown_appcontext
def persist_ctx(exc):
if "ctx" in g:
CTX.persist(g.ctx)
@app.errorhandler(HttpException)
def handle_http_exception(error):
response = jsonify(error.to_dict())
# For azure storage
response.headers["x-ms-error-code"] = 500
response.status_code = error.status_code
return response
@app.route("/ctx", methods=["GET", "PUT", "DELETE"])
def update_ctx():
"""Updating context for live_mock_server"""
ctx = get_ctx()
# in Flask/Werkzeug 2.1.0 get_json raises an exception on
# empty json, so we try/catch here
try:
body = request.get_json()
except BadRequest:
body = None
if request.method == "GET":
ctx = snoop.context_enrich(ctx)
return json.dumps(ctx)
elif request.method == "DELETE":
app.logger.info("resetting context")
set_ctx(default_ctx())
return json.dumps(get_ctx())
else:
ctx.update(body)
# TODO: tests in CI failed on this
set_ctx(ctx)
app.logger.info("updated context %s", ctx)
return json.dumps(get_ctx())
@app.route("/graphql", methods=["POST"])
@snoop.relay
def graphql():
# TODO: in tests wandb-username is set to the test name, lets scope ctx to it
ctx = get_ctx()
base_url = request.url_root.rstrip("/")
test_name = request.headers.get("X-WANDB-USERNAME")
if test_name:
app.logger.info("Test request from: %s", test_name)
app.logger.info("graphql post")
if "fail_graphql_times" in ctx:
if ctx["fail_graphql_count"] < ctx["fail_graphql_times"]:
ctx["fail_graphql_count"] += 1
return json.dumps({"errors": ["Server down"]}), 500
if "rate_limited_times" in ctx:
if ctx["rate_limited_count"] < ctx["rate_limited_times"]:
ctx["rate_limited_count"] += 1
return json.dumps({"error": "rate limit exceeded"}), 429
if ctx["graphql_conflict"]:
return json.dumps({"error": "resource already exists"}), 409
# Setup artifact emulator (should this be somewhere else?)
emulate_random_str = ctx["emulate_artifacts"]
global ART_EMU
if emulate_random_str:
if ART_EMU is None or ART_EMU._random_str != emulate_random_str:
ART_EMU = ArtifactEmulator(
random_str=emulate_random_str, ctx=ctx, base_url=base_url
)
else:
ART_EMU = None
body = request.get_json()
app.logger.info("graphql post body: %s", body)
if body["variables"].get("run"):
ctx["current_run"] = body["variables"]["run"]
if body["variables"].get("files"):
requested_file = body["variables"]["files"][0]
ctx["requested_file"] = requested_file
emulate_azure = ctx.get("emulate_azure")
# Azure expects the request path of signed urls to have 2 parts
upload_headers = []
if emulate_azure:
url = (
base_url
+ "/storage/azure/"
+ ctx["current_run"]
+ "/"
+ requested_file
)
upload_headers.append("x-ms-blob-type:Block")
upload_headers.append("Content-MD5:{}".format("AAAA"))
else:
url = base_url + "/storage?file={}&run={}".format(
urllib.parse.quote(requested_file), ctx["current_run"]
)
return json.dumps(
{
"data": {
"model": {
"bucket": {
"id": "storageid",
"files": {
"uploadHeaders": upload_headers,
"edges": [
{
"node": {
"name": requested_file,
"url": url,
"directUrl": url + "&direct=true",
}
}
],
},
}
}
}
}
)
if "historyTail" in body["query"]:
if ctx["resume"] is True:
hist_tail = '["{\\"_step\\": 15, \\"acc\\": 1, \\"_runtime\\": 60}"]'
return json.dumps(
{
"data": {
"model": {
"bucket": {
"name": "test",
"displayName": "funky-town-13",
"id": "test",
"config": '{"epochs": {"value": 10}}',
"summaryMetrics": '{"acc": 10, "best_val_loss": 0.5, "_wandb": {"runtime": 50}}',
"logLineCount": 14,
"historyLineCount": 15,
"eventsLineCount": 0,
"historyTail": hist_tail,
"eventsTail": '["{\\"_runtime\\": 70}"]',
}
}
}
}
)
else:
return json.dumps({"data": {"model": {"bucket": None}}})
if "query Runs(" in body["query"]:
return json.dumps(
{
"data": {
"project": {
"runCount": 4,
"readOnly": False,
"runs": paginated(run(ctx), ctx),
}
}
}
)
if "reportCursor" in body["query"]:
page_count = ctx["page_count"]
return json.dumps(
{
"data": {
"project": {
"allViews": paginated(
{
"name": "test-report",
"description": "test-description",
"user": {
"username": body["variables"]["entity"],
"photoUrl": "test-url",
},
"spec": '{"version": 5}',
"updatedAt": datetime.now().isoformat(),
"pageCount": page_count,
},
ctx,
)
}
}
}
)
for query_name in [
"Run",
"RunInfo",
"RunState",
"RunFiles",
"RunFullHistory",
"RunSampledHistory",
]:
if f"query {query_name}(" in body["query"]:
# if querying state of run, change context from running to finished
if "RunFragment" not in body["query"] and "state" in body["query"]:
ret_val = json.dumps(
{"data": {"project": {"run": {"state": ctx.get("run_state")}}}}
)
ctx["run_state"] = "finished"
return ret_val
return json.dumps({"data": {"project": {"run": run(ctx)}}})
for query_name in [
"Model", # backward compatible for 0.12.10 and below
"RunConfigs",
"RunResumeStatus",
"RunStoppedStatus",
"RunUploadUrls",
"RunDownloadUrls",
"RunDownloadUrl",
]:
if f"query {query_name}(" in body["query"]:
if "project(" in body["query"]:
project_field_name = "project"
run_field_name = "run"
else:
project_field_name = "model"
run_field_name = "bucket"
if (
"commit" in body["query"]
or body["variables"].get("fileName") == "wandb-metadata.json"
):
run_config = _bucket_config(ctx)
else:
run_config = run(ctx)
return json.dumps(
{"data": {project_field_name: {run_field_name: run_config}}}
)
# Models() is backward compatible for 0.12.10 and below
if "query Models(" in body["query"] or "query EntityProjects(" in body["query"]:
return json.dumps(
{
"data": {
"models": {
"edges": [
{
"node": {
"id": "123",
"name": "myname",
"project": "myproj",
}
}
]
}
}
}
)
if "query Projects(" in body["query"]:
return json.dumps(
{
"data": {
"models": paginated(
{
"id": "1",
"name": "test-project",
"entityName": body["variables"]["entity"],
"createdAt": "now",
"isBenchmark": False,
},
ctx,
)
}
}
)
if "query Viewer " in body["query"] or "query ServerInfo" in body["query"]:
viewer_dict = {
"data": {
"viewer": {
"entity": "mock_server_entity",
"admin": False,
"email": "mock@server.test",
"username": "mock",
"teams": {"edges": []}, # TODO make configurable for cli_test
},
},
}
code_saving_enabled = ctx.get("code_saving_enabled")
if code_saving_enabled is not None:
viewer_dict["data"]["viewer"][
"flags"
] = f'{{"code_saving_enabled": {str(code_saving_enabled).lower()}}}'
server_info = {
"serverInfo": {
"cliVersionInfo": {
"max_cli_version": str(ctx.get("max_cli_version", "0.10.33"))
},
"latestLocalVersionInfo": {
"outOfDate": ctx.get("out_of_date", False),
"latestVersionString": str(ctx.get("latest_version", "0.9.42")),
},
}
}
if ctx["empty_query"]:
server_info["serverInfo"].pop("latestLocalVersionInfo")
elif ctx["local_none"]:
server_info["serverInfo"]["latestLocalVersionInfo"] = None
viewer_dict["data"].update(server_info)
return json.dumps(viewer_dict)
if "query ArtifactFiles" in body["query"]:
artifact_file = {
"id": "1",
"name": "foo",
"uploadUrl": "",
"storagePath": "x/y/z",
"uploadheaders": [],
"artifact": {"id": "1"},
}
if "storagePath" not in body["query"]:
del artifact_file["storagePath"]
return {
"data": {
"project": {
"artifactType": {
"artifact": {
"files": paginated(
artifact_file,
ctx,
)
}
}
}
}
}
if "query ProbeServerCapabilities" in body["query"]:
if ctx["empty_query"]:
return json.dumps(
{
"data": {
"QueryType": {
"fields": [
{"name": "serverInfo"},
]
},
"ServerInfoType": {
"fields": [
{"name": "cliVersionInfo"},
{"name": "exposesExplicitRunQueueAckPath"},
]
},
}
}
)
return json.dumps(
{
"data": {
"QueryType": {
"fields": [
{"name": "serverInfo"},
]
},
"ServerInfoType": {
"fields": [
{"name": "cliVersionInfo"},
{"name": "latestLocalVersionInfo"},
{"name": "exposesExplicitRunQueueAckPath"},
]
},
}
}
)
if "query ProbeServerUseArtifactInput" in body["query"]:
return json.dumps(
{
"data": {
"UseArtifactInputInfoType": {
"inputFields": [
{"name": "entityName"},
{"name": "projectName"},
{"name": "runName"},
{"name": "artifactID"},
{"name": "usedAs"},
{"name": "clientMutationId"},
]
},
}
}
)
if "query Sweep(" in body["query"] or "query SweepWithRuns(" in body["query"]:
return json.dumps(
{
"data": {
"project": {
"sweep": {
"id": "1234",
"name": "fun-sweep-10",
"state": "running",
"bestLoss": 0.33,
"config": yaml.dump(
{
"controller": {"type": "local"},
"method": "random",
"parameters": {
"param1": {
"values": [1, 2, 3],
"distribution": "categorical",
},
"param2": {
"values": [1, 2, 3],
"distribution": "categorical",
},
},
"program": "train-dummy.py",
}
),
"createdAt": datetime.now().isoformat(),
"heartbeatAt": datetime.now().isoformat(),
"updatedAt": datetime.now().isoformat(),
"earlyStopJobRunning": False,
"controller": None,
"scheduler": None,
"runs": paginated(run(ctx), ctx),
}
}
}
}
)
if "mutation UpsertSweep(" in body["query"]:
return json.dumps(
{
"data": {
"upsertSweep": {
"sweep": {
"name": "test",
"project": {
"id": "1234",
"name": "test",
"entity": {"id": "1234", "name": "test"},
},
},
"configValidationWarnings": [],
}
}
}
)
if "mutation CreateAgent(" in body["query"]:
return json.dumps(
{
"data": {
"createAgent": {
"agent": {
"id": "mock-server-agent-93xy",
}
}
}
}
)
if "mutation Heartbeat(" in body["query"]:
new_run_needed = body["variables"]["runState"] == "{}"
if new_run_needed:
ctx["n_sweep_runs"] += 1
return json.dumps(
{
"data": {
"agentHeartbeat": {
"agent": {
"id": "mock-server-agent-93xy",
},
"commands": (
json.dumps(
[
{
"type": "run",
"run_id": f"mocker-sweep-run-x9{ctx['n_sweep_runs']}",
"args": {
"a": {"value": ctx["n_sweep_runs"]}
},
}
]
)
if ctx["n_sweep_runs"] <= 4
else json.dumps([{"type": "exit"}])
)
if new_run_needed
else "[]",
}
}
}
)
if "query ProbeServerSettings" in body["query"]:
if not ctx["server_settings"]:
data = {}
else:
data = {"ServerSettingsType": {"fields": [{"name": "sdkMessages"}]}}
return json.dumps({"data": data})
if "mutation UpsertBucket(" in body["query"]:
run_id_default = "abc123"
run_id = body["variables"].get("name", run_id_default)
run_num = len(ctx["runs"])
inserted = run_id not in ctx["runs"]
if inserted:
ctx["run_ids"].append(run_id)
run_ctx = ctx["runs"].setdefault(run_id, default_ctx())
r = run_ctx.setdefault("run", {})
r.setdefault("display_name", f"lovely-dawn-{run_num + 32}")
r.setdefault("storage_id", f"storageid{run_num}")
r.setdefault("project_name", "test")
r.setdefault("entity_name", "mock_server_entity")
git_remote = body["variables"].get("repo")
git_commit = body["variables"].get("commit")
if git_commit or git_remote:
for c in ctx, run_ctx:
c.setdefault("git", {})
c["git"]["remote"] = git_remote
c["git"]["commit"] = git_commit
for c in ctx, run_ctx:
tags = body["variables"].get("tags")
if tags is not None:
c["tags"] = tags
notes = body["variables"].get("notes")
if notes is not None:
c["notes"] = notes
group = body["variables"].get("groupName")
if group is not None:
c["group"] = group
job_type = body["variables"].get("jobType")
if job_type is not None:
c["job_type"] = job_type
name = body["variables"].get("displayName")
if name is not None:
c["name"] = name
program = body["variables"].get("program")
if program is not None:
c["program"] = program
host = body["variables"].get("host")
if host is not None:
c["host"] = host
param_config = body["variables"].get("config")
if param_config:
for c in ctx, run_ctx:
c.setdefault("config", []).append(json.loads(param_config))
param_summary = body["variables"].get("summaryMetrics")
if param_summary:
for c in ctx, run_ctx:
c.setdefault("summary", []).append(json.loads(param_summary))
for c in ctx, run_ctx:
c["upsert_bucket_count"] += 1
# Update run context
ctx["runs"][run_id] = run_ctx
# support legacy tests which pass resume
if ctx["resume"] is True:
inserted = False
response = {
"data": {
"upsertBucket": {
"bucket": {
"id": r["storage_id"],
"name": run_id,
"displayName": r["display_name"],
"project": {
"name": r["project_name"],
"entity": {"name": r["entity_name"]},
},
},
"inserted": inserted,
}
}
}
if ctx["server_settings"]:
response["data"]["upsertBucket"]["serverSettings"] = {
"serverMessages": ctx["server_messages"]
}
if "mocker-sweep-run-x9" in body["variables"].get("name", ""):
response["data"]["upsertBucket"]["bucket"][
"sweepName"
] = "test-sweep-id"
return json.dumps(response)
if "mutation DeleteRun(" in body["query"]:
return json.dumps({"data": {}})
if "mutation CreateAnonymousApiKey " in body["query"]:
return json.dumps(
{
"data": {
"createAnonymousEntity": {"apiKey": {"name": "ANONYMOOSE" * 4}}
}
}
)
if "mutation DeleteFiles(" in body["query"]:
return json.dumps({"data": {"deleteFiles": {"success": True}}})
if "mutation PrepareFiles(" in body["query"]:
nodes = []
for i, file_spec in enumerate(body["variables"]["fileSpecs"]):
url = base_url + "/storage?file=%s" % file_spec["name"]
nodes.append(
{
"node": {
"id": str(i),
"name": file_spec["name"],
"displayName": file_spec["name"],
"digest": "null",
"uploadUrl": url,
"uploadHeaders": "",
}
}
)
return json.dumps({"data": {"prepareFiles": {"files": {"edges": nodes}}}})
if "mutation LinkArtifact(" in body["query"]:
if ART_EMU:
ctx["latest_arti_id"] = body["variables"].get("artifactID") or body[
"variables"
].get("clientID")
return ART_EMU.link(variables=body["variables"])
if "mutation CreateArtifact(" in body["query"]:
if ART_EMU:
res = ART_EMU.create(variables=body["variables"])
ctx["latest_arti_id"] = res["data"]["createArtifact"]["artifact"]["id"]
return res
collection_name = body["variables"]["artifactCollectionNames"][0]
app.logger.info(f"Creating artifact {collection_name}")
ctx["artifacts"] = ctx.get("artifacts", {})
ctx["artifacts"][collection_name] = ctx["artifacts"].get(
collection_name, []
)
ctx["artifacts"][collection_name].append(body["variables"])
_id = body.get("variables", {}).get("digest", "")
if _id != "":
ctx.get("artifacts_by_id")[_id] = body["variables"]
return {
"data": {
"createArtifact": {
"artifact": artifact(
ctx,
collection_name,
id_override=_id,
state="COMMITTED"
if "PENDING" not in collection_name
else "PENDING",
)
}
}
}
if "mutation updateArtifact" in body["query"]:
id = body["variables"]["artifactID"]
ctx["latest_arti_id"] = id
ctx.get("artifacts_by_id")[id] = body["variables"]
return {"data": {"updateArtifact": {"artifact": id}}}
if "mutation DeleteArtifact(" in body["query"]:
id = body["variables"]["artifactID"]
delete_aliases = body["variables"]["deleteAliases"]
art = artifact(ctx, id_override=id)
if len(art.get("aliases", [])) and not delete_aliases:
raise Exception("delete_aliases not set, but artifact has aliases")
return {
"data": {
"deleteArtifact": {
"artifact": art,
"success": True,
}
}
}
if "mutation CreateArtifactManifest(" in body["query"]:
manifest = {
"id": 1,
"type": "INCREMENTAL"
if "incremental" in body.get("variables", {}).get("name", "")
else "FULL",
"file": {
"id": 1,
"directUrl": base_url
+ "/storage?file=wandb_manifest.json&name={}".format(
body.get("variables", {}).get("name", "")
),
"uploadUrl": base_url + "/storage?file=wandb_manifest.json",
"uploadHeaders": "",
},
}
run_name = body.get("variables", {}).get("runName", "unknown")
run_ctx = ctx["runs"].setdefault(run_name, default_ctx())
for c in ctx, run_ctx:
c["manifests_created"].append(manifest)
return {
"data": {
"createArtifactManifest": {
"artifactManifest": manifest,
}
}
}
if "mutation UpdateArtifactManifest(" in body["query"]:
manifest = {
"id": 1,
"type": "INCREMENTAL"
if "incremental" in body.get("variables", {}).get("name", "")
else "FULL",
"file": {
"id": 1,
"directUrl": base_url
+ "/storage?file=wandb_manifest.json&name={}".format(
body.get("variables", {}).get("name", "")
),
"uploadUrl": base_url + "/storage?file=wandb_manifest.json",
"uploadHeaders": "",
},
}
return {
"data": {
"updateArtifactManifest": {
"artifactManifest": manifest,
}
}
}
if "mutation CreateArtifactFiles" in body["query"]:
if ART_EMU:
return ART_EMU.create_files(variables=body["variables"])
return {
"data": {
"files": [
{
"node": {
"id": idx,
"name": file["name"],
"uploadUrl": "",
"uploadheaders": [],
"artifact": {"id": file["artifactID"]},
}
for idx, file in enumerate(
body["variables"]["artifactFiles"]
)
}
],
}
}
if "mutation CommitArtifact(" in body["query"]:
return {
"data": {
"commitArtifact": {
"artifact": {"id": 1, "digest": "0000===================="}
}
}
}
if "mutation UseArtifact(" in body["query"]:
used_name = body.get("variables", {}).get("usedAs", None)
ctx["used_artifact_info"] = {"used_name": used_name}
return {"data": {"useArtifact": {"artifact": artifact(ctx)}}}
if "query ProjectArtifactType(" in body["query"]:
return {
"data": {
"project": {
"artifactType": {
"id": "1",
"name": "dataset",
"description": "",
"createdAt": datetime.now().isoformat(),
}
}
}
}
if "mutation upsertView(" in body["query"]:
return json.dumps(
{
"data": {
"upsertView": {
"view": {
"id": "VmlldzoxOTk4ODgz",
"type": "runs",
"name": "2mr65bp2bjy",
"displayName": "2mr65bp2bjy",
"description": None,
"project": {
"id": "UHJvamVjdDp2MTpyZXBvcnQtZWRpdGluZzptZWdhdHJ1b25n",
"name": "report-editing",
"entityName": "megatruong",
},
"spec": '{"version": 5, "panelSettings": {}, "blocks": [], "width": "readable", "authors": [], "discussionThreads": [], "ref": {}}',
"updatedAt": "2022-05-13T03:13:18",
},
"inserted": True,
}
}
}
)
if "mutation upsertModel(" in body["query"]:
return json.dumps(
{
"data": {
"upsertModel": {
"project": {
"id": "UHJvamVjdDp2MTpyZXBvcnQtZWRpdGluZzptZWdhdHJ1b25n",
"name": "report-editing",
"entityName": "megatruong",
"description": None,
"access": "PRIVATE",
"views": "{}",
},
"model": {
"id": "UHJvamVjdDp2MTpyZXBvcnQtZWRpdGluZzptZWdhdHJ1b25n",
"name": "report-editing",
"entityName": "megatruong",
"description": None,
"access": "PRIVATE",
"views": "{}",
},
"inserted": False,
}
}
}
)
if "query SpecificReport(" in body["query"]:
return json.dumps(
{
"data": {
"view": {
"id": "VmlldzoxOTcxMzI2",
"type": "runs",
"name": "hxrbu425ppr",
"displayName": "Copy of megatruong's Copy of megatruong's Copy of megatruong's Untitled Report",
"description": "",
"project": {
"id": "UHJvamVjdDp2MTpyZXBvcnQtZWRpdGluZzptZWdhdHJ1b25n",
"name": "report-editing",
"entityName": "megatruong",
},
"createdAt": "2022-05-09T02:18:42",
"updatedAt": "2022-05-09T02:18:45",
"spec": '{"version":5,"panelSettings":{"xAxis":"_step","smoothingWeight":0,"smoothingType":"exponential","ignoreOutliers":false,"xAxisActive":false,"smoothingActive":false,"ref":{"type":"panelSettings","viewID":"qwnlqy0ka","id":"85uruphtl"}},"blocks":[{"type":"paragraph","children":[{"text":""},{"type":"link","url":"https://docs.google.com/presentation/d/1cvcSZygln3WPOjRGcJX8XgDtj9qcW7SA6K2u-fUVXTg/edit#slide=id.g1186f921888_0_1484","children":[{"text":"https://docs.google.com/presentation/d/1cvcSZygln3WPOjRGcJX8XgDtj9qcW7SA6K2u-fUVXTg/edit#slide=id.g1186f921888_0_1484"}]},{"text":""}]},{"type":"paragraph","children":[{"text":""}]},{"type":"twitter","html":"<blockquote class=\\"twitter-tweet\\"><p lang=\\"en\\" dir=\\"ltr\\">The voice of an angel, truly. <a href=\\"https://twitter.com/hashtag/MassEffect?src=hash&ref_src=twsrc%5Etfw\\">#MassEffect</a> <a href=\\"https://t.co/nMev97Uw7F\\">pic.twitter.com/nMev97Uw7F</a></p>— Mass Effect (@masseffect) <a href=\\"https://twitter.com/masseffect/status/1428748886655569924?ref_src=twsrc%5Etfw\\">August 20, 2021</a></blockquote>\\n","children":[{"text":""}]},{"type":"spotify","spotifyType":"track","spotifyID":"7kRKlFCFLAUwt43HWtauhX","children":[{"text":""}]},{"type":"soundcloud","html":"<iframe width=\\"100%\\" height=\\"400\\" scrolling=\\"no\\" frameborder=\\"no\\" src=\\"https://w.soundcloud.com/player/?visual=true&url=https%3A%2F%2Fapi.soundcloud.com%2Ftracks%2F1076901103&show_artwork=true\\"></iframe>","children":[{"text":""}]},{"type":"video","url":"https://www.youtube.com/embed/ggqI-HH8yXc","children":[{"text":""}]},{"type":"paragraph","children":[{"text":"Normal paragraph "}]},{"type":"heading","children":[{"text":"Heading 1"}],"level":1},{"type":"heading","children":[{"text":"Heading 2"}],"level":2},{"type":"heading","children":[{"text":"Heading 3"}],"level":3},{"type":"list","children":[{"type":"list-item","children":[{"type":"paragraph","children":[{"text":"Bullet 
1"}]}]},{"type":"list-item","children":[{"type":"paragraph","children":[{"text":"Bullet 2"}]}]}]},{"type":"list","ordered":true,"children":[{"type":"list-item","children":[{"type":"paragraph","children":[{"text":"Ordered 1"}]}],"ordered":true},{"type":"list-item","children":[{"type":"paragraph","children":[{"text":"Ordered 2"}]}],"ordered":true}]},{"type":"list","children":[{"type":"list-item","children":[{"type":"paragraph","children":[{"text":"Unchecked"}]}],"checked":false},{"type":"list-item","children":[{"type":"paragraph","children":[{"text":"Checked"}]}],"checked":true}]},{"type":"block-quote","children":[{"text":"Block Quote 1\\nBlock Quote 2\\nBlock Quote 3"}]},{"type":"callout-block","children":[{"type":"callout-line","children":[{"text":"Callout 1"}]},{"type":"callout-line","children":[{"text":"Callout 2"}]},{"type":"callout-line","children":[{"text":"Callout 3"}]}]},{"type":"code-block","children":[{"type":"code-line","children":[{"text":"# python code block"}]},{"type":"code-line","children":[{"text":"for x in range(10):"}]},{"type":"code-line","children":[{"text":" pass"}]}]},{"type":"horizontal-rule","children":[{"text":""}]},{"type":"code-block","language":"yaml","children":[{"type":"code-line","children":[{"text":"this:"}],"language":"yaml"},{"type":"code-line","children":[{"text":"- is"}],"language":"yaml"},{"type":"code-line","children":[{"text":"- a"}],"language":"yaml"},{"type":"code-line","children":[{"text":"cool:"}],"language":"yaml"},{"type":"code-line","children":[{"text":"- yaml"}],"language":"yaml"},{"type":"code-line","children":[{"text":"- file"}],"language":"yaml"}]},{"type":"markdown-block","children":[{"text":""}],"content":"Markdown cell with *italics* and **bold** and $e=mc^2$"},{"type":"image","children":[{"text":"It\'s a me, 
Pikachu"}],"url":"https://api.wandb.ai/files/megatruong/images/projects/918598/350382db.gif","hasCaption":true},{"type":"paragraph","children":[{"text":""},{"type":"latex","children":[{"text":""}],"content":"y=ax^2 +bx+c"},{"text":""}]},{"type":"latex","children":[{"text":""}],"content":"\\\\gamma^2+\\\\theta^2=\\\\omega^2\\n\\\\\\\\ a^2 + b^2 = c^2","block":true},{"type":"gallery","children":[{"text":""}],"ids":[]},{"type":"weave-panel","children":[{"text":""}],"config":{"panelConfig":{"exp":{"nodeType":"output","type":{"type":"tagged","tag":{"type":"tagged","tag":{"type":"typedDict","propertyTypes":{"entityName":"string","projectName":"string"}},"value":{"type":"typedDict","propertyTypes":{"project":"project","artifactName":"string"}}},"value":"artifact"},"fromOp":{"name":"project-artifact","inputs":{"project":{"nodeType":"output","type":{"type":"tagged","tag":{"type":"typedDict","propertyTypes":{"entityName":"string","projectName":"string"}},"value":"project"},"fromOp":{"name":"root-project","inputs":{"entityName":{"nodeType":"const","type":"string","val":"megatruong"},"projectName":{"nodeType":"const","type":"string","val":"nvda-ngc"}}}},"artifactName":{"nodeType":"const","type":"string","val":"my-artifact"}}}}}}},{"type":"paragraph","children":[{"text":""}]},{"type":"panel-grid","children":[{"text":""}],"metadata":{"openViz":true,"openRunSet":0,"name":"unused-name","runSets":[{"runFeed":{"version":2,"columnVisible":{"run:name":false},"columnPinned":{},"columnWidths":{},"columnOrder":[],"pageSize":10,"onlyShowSelected":false},"enabled":true,"name":"Run 
set","search":{"query":""},"id":"pw1e1wwf6","filters":{"op":"OR","filters":[{"op":"AND","filters":[{"key":{"section":"run","name":"jobType"},"op":"=","value":"job_type2","disabled":false},{"key":{"section":"run","name":"group"},"op":"=","value":"groupA","disabled":false}]}],"ref":{"type":"filters","viewID":"vtahfagaz","id":"dpd2z5bz0"}},"grouping":[{"section":"run","name":"username"}],"sort":{"keys":[{"key":{"section":"run","name":"createdAt"},"ascending":false},{"key":{"section":"run","name":"username"},"ascending":false}],"ref":{"type":"sort","viewID":"vtahfagaz","id":"ivgiyvxox"}},"selections":{"root":1,"bounds":[],"tree":[]},"expandedRowAddresses":[],"ref":{"type":"runSet","viewID":"vtahfagaz","id":"z59jmoybe"}}],"panelBankConfig":{"state":0,"settings":{"autoOrganizePrefix":2,"showEmptySections":false,"sortAlphabetically":false},"sections":[{"name":"Hidden Panels","isOpen":false,"type":"flow","flowConfig":{"snapToColumns":true,"columnsPerPage":3,"rowsPerPage":2,"gutterWidth":16,"boxWidth":460,"boxHeight":300},"sorted":0,"localPanelSettings":{"xAxis":"_step","smoothingWeight":0,"smoothingType":"exponential","ignoreOutliers":false,"xAxisActive":false,"smoothingActive":false,"ref":{"type":"panelSettings","viewID":"vtahfagaz","id":"xa2bddtor"}},"panels":[],"localPanelSettingsRef":{"type":"panelSettings","viewID":"vtahfagaz","id":"xa2bddtor"},"panelRefs":[],"ref":{"type":"panel-bank-section-config","viewID":"vtahfagaz","id":"fz92jh3dt"}}],"ref":{"type":"panel-bank-config","viewID":"vtahfagaz","id":"1o946whn4"}},"panelBankSectionConfig":{"name":"Report 
Panels","isOpen":true,"type":"grid","flowConfig":{"snapToColumns":true,"columnsPerPage":3,"rowsPerPage":2,"gutterWidth":16,"boxWidth":460,"boxHeight":300},"sorted":0,"localPanelSettings":{"xAxis":"_step","smoothingWeight":0,"smoothingType":"exponential","ignoreOutliers":false,"xAxisActive":false,"smoothingActive":false,"ref":{"type":"panelSettings","viewID":"vtahfagaz","id":"xvr9gn2vt"}},"panels":[{"__id__":"e6mwxa1mq","viewType":"Run History Line Plot","config":{"metrics":["y"],"groupBy":"None","legendFields":["run:displayName"],"yAxisAutoRange":false,"yLogScale":false},"ref":{"type":"panel","viewID":"vtahfagaz","id":"ui3b5xcsb"},"layout":{"x":8,"y":0,"w":8,"h":6}},{"__id__":"ymceey3mu","viewType":"Run History Line Plot","config":{"metrics":["x"],"groupBy":"None","legendFields":["run:displayName"],"yAxisAutoRange":false,"yLogScale":false},"ref":{"type":"panel","viewID":"vtahfagaz","id":"yewkh6k6p"},"layout":{"x":0,"y":0,"w":8,"h":6}}],"localPanelSettingsRef":{"type":"panelSettings","viewID":"vtahfagaz","id":"xvr9gn2vt"},"panelRefs":[{"type":"panel","viewID":"vtahfagaz","id":"ui3b5xcsb"},{"type":"panel","viewID":"vtahfagaz","id":"yewkh6k6p"}],"ref":{"type":"panel-bank-section-config","viewID":"vtahfagaz","id":"5mqkb97jz"}},"customRunColors":{"ref":{"type":"run-colors","viewID":"vtahfagaz","id":"ukxc20iq0"}},"ref":{"type":"section","viewID":"vtahfagaz","id":"eg2e88znk"}}},{"type":"paragraph","children":[{"text":""}]}],"width":"readable","authors":[{"name":"Andrew Truong","username":"megatruong"}],"discussionThreads":[],"ref":{"type":"runs/draft","viewID":"qwnlqy0ka","id":"s2r3aq8j6"}}',
"previewUrl": None,
"user": {
"name": "Andrew Truong",
"username": "megatruong",
"userInfo": {
"bio": "model-registry \ninstant replay\nweeave-plot\nweeave-report\nniight\n",
"company": "Weights and Biases",
"location": "San Francisco",
"githubUrl": "",
"twitterUrl": "",
"websiteUrl": "wandb.com",
},
},
}
}
}
)
if "query ProjectArtifacts(" in body["query"]:
return {
"data": {
"project": {
"artifactTypes": paginated(
{
"id": "1",
"name": "dataset",
"description": "",
"createdAt": datetime.now().isoformat(),
},
ctx,
)
}
}
}
if "query ProjectArtifactCollections(" in body["query"]:
return {
"data": {
"project": {
"artifactType": {
"artifactCollections": paginated(
{
"id": "1",
"name": "mnist",
"description": "",
"createdAt": datetime.now().isoformat(),
},
ctx,
)
}
}
}
}
if "query ArtifactCollection(" in body["query"]:
return {
"data": {
"project": {
"artifactType": {
"artifactCollection": {
"id": "1",
"name": "mnist",
"description": "",
"createdAt": datetime.now().isoformat(),
"aliases": {"edges": [{"node": {"alias": "latest"}}]},
}
}
}
}
}
# backward compatible for 0.12.10 and below
if "query RunArtifacts(" in body["query"]:
if "inputArtifacts" in body["query"]:
key = "inputArtifacts"
else:
key = "outputArtifacts"
artifacts = paginated(artifact(ctx), ctx)
artifacts["totalCount"] = ctx["page_times"]
return {"data": {"project": {"run": {key: artifacts}}}}
if "query RunInputArtifacts(" in body["query"]:
artifacts = paginated(artifact(ctx), ctx)
artifacts["totalCount"] = ctx["page_times"]
return {"data": {"project": {"run": {"inputArtifacts": artifacts}}}}
if "query RunOutputArtifacts(" in body["query"]:
artifacts = paginated(artifact(ctx), ctx)
artifacts["totalCount"] = ctx["page_times"]
return {"data": {"project": {"run": {"outputArtifacts": artifacts}}}}
if "query Artifacts(" in body["query"]:
version = "v%i" % ctx["page_count"]
artifacts = paginated(artifact(ctx), ctx, {"version": version})
artifacts["totalCount"] = ctx["page_times"]
return {
"data": {
"project": {
"artifactType": {
"artifactCollection": {
"name": "mnist",
"artifacts": artifacts,
}
}
}
}
}
for query_name in [
"Artifact",
"ArtifactType",
"ArtifactWithCurrentManifest",
"ArtifactUsedBy",
"ArtifactCreatedBy",
]:
if f"query {query_name}(" in body["query"]:
if ART_EMU:
res = ART_EMU.query(
variables=body.get("variables", {}), query=body.get("query")
)
artifact_response = None
if res["data"].get("project") is not None:
artifact_response = res["data"]["project"]["artifact"]
else:
artifact_response = res["data"]["artifact"]
if artifact_response:
ctx["latest_arti_id"] = artifact_response.get("id")
return res
art = artifact(
ctx, request_url_root=base_url, id_override="QXJ0aWZhY3Q6NTI1MDk4"
)
if "id" in body.get("variables", {}):
art = artifact(
ctx,
request_url_root=base_url,
id_override=body.get("variables", {}).get("id"),
)
art["artifactType"] = {"id": 1, "name": "dataset"}
return {"data": {"artifact": art}}
if ctx["swappable_artifacts"] and "name" in body.get("variables", {}):
full_name = body.get("variables", {}).get("name", None)
if full_name is not None:
collection_name = full_name.split(":")[0]
art = artifact(
ctx,
collection_name=collection_name,
request_url_root=base_url,
)
# code artifacts use source-RUNID names, we return the code type
art["artifactType"] = {"id": 2, "name": "code"}
if "source" not in body["variables"]["name"]:
art["artifactType"] = {"id": 1, "name": "dataset"}
if "logged_table" in body["variables"]["name"]:
art["artifactType"] = {"id": 3, "name": "run_table"}
if "run-" in body["variables"]["name"]:
art["artifactType"] = {"id": 4, "name": "run_table"}
if "wb_validation_data" in body["variables"]["name"]:
art["artifactType"] = {"id": 4, "name": "validation_dataset"}
if "job" in body["variables"]["name"]:
art["artifactType"] = {"id": 5, "name": "job"}
if "model" in body["variables"]["name"]:
art["artifactType"] = {"id": 6, "name": "model"}
return {"data": {"project": {"artifact": art}}}
if "query ArtifactManifest(" in body["query"]:
if ART_EMU:
res = ART_EMU.query(
variables=body.get("variables", {}), query=body.get("query")
)
ctx["latest_arti_id"] = res["data"]["artifact"]["id"]
art = artifact(ctx)
art["currentManifest"] = {
"id": 1,
"file": {
"id": 1,
"directUrl": base_url
+ "/storage?file=wandb_manifest.json&name={}".format(
body.get("variables", {}).get("name", "")
),
},
}
return {"data": {"project": {"artifact": art}}}
# Project() is backward compatible for 0.12.10 and below
if ("query Project(" in body["query"] and "runQueues" in body["query"]) or (
"query ProjectRunQueues(" in body["query"] and "runQueues" in body["query"]
):
if ctx["run_queues_return_default"]:
return json.dumps(
{
"data": {
"project": {
"runQueues": [
{
"id": 1,
"name": "default",
"createdBy": "mock_server_entity",
"access": "PROJECT",
}
]
}
}
}
)
else:
return json.dumps({"data": {"project": {"runQueues": []}}})
if "query GetRunQueueItem" in body["query"]:
if ctx["run_queue_item_return_type"] == "claimed":
return json.dumps(
{
"data": {
"project": {
"runQueue": {
"runQueueItems": {
"edges": [
{
"node": {
"id": "1",
"associatedRunId": "test",
"state": "CLAIMED",
}
}
]
}
}
}
}
}
)
else:
return json.dumps(
{
"data": {
"project": {
"runQueue": {
"runQueueItems": {
"edges": [
{
"node": {
"id": "1",
"associatedRunId": None,
"state": "PENDING",
}
}
]
}
}
}
}
}
)
if "mutation createRunQueue" in body["query"]:
if not ctx["successfully_create_default_queue"]:
return json.dumps(
{"data": {"createRunQueue": {"success": False, "queueID": None}}}
)
ctx["run_queues_return_default"] = True
return json.dumps(
{"data": {"createRunQueue": {"success": True, "queueID": 1}}}
)
if "mutation popFromRunQueue" in body["query"]:
if ctx["num_popped"] != 0:
return json.dumps({"data": {"popFromRunQueue": None}})
ctx["num_popped"] += 1
if ctx["invalid_launch_spec_project"]:
return json.dumps(
{
"data": {
"popFromRunQueue": {
"runQueueItemId": "1",
"runSpec": {
"uri": "https://wandb.ai/mock_server_entity/test_project/runs/1",
"project": "test_project2",
"entity": "mock_server_entity",
"resource": "local",
},
}
}
}
)
return json.dumps(
{
"data": {
"popFromRunQueue": {
"runQueueItemId": "1",
"runSpec": {
"uri": "https://wandb.ai/mock_server_entity/test_project/runs/1",
"project": "test_project",
"entity": "mock_server_entity",
"resource": "local",
},
}
}
}
)
if "mutation pushToRunQueue" in body["query"]:
if ctx["run_queues"].get(body["variables"]["queueID"]):
ctx["run_queues"][body["variables"]["queueID"]].append(
body["variables"]["queueID"]
)
else:
ctx["run_queues"][body["variables"]["queueID"]] = [
body["variables"]["queueID"]
]
return json.dumps({"data": {"pushToRunQueue": {"runQueueItemId": "1"}}})
if "mutation ackRunQueueItem" in body["query"]:
ctx["num_acked"] += 1
return json.dumps({"data": {"ackRunQueueItem": {"success": True}}})
if "query ClientIDMapping(" in body["query"]:
return {"data": {"clientIDMapping": {"serverID": "QXJ0aWZhY3Q6NTI1MDk4"}}}
# Admin apis
if "mutation DeleteApiKey" in body["query"]:
return {"data": {"deleteApiKey": {"success": True}}}
if "mutation GenerateApiKey" in body["query"]:
return {
"data": {"generateApiKey": {"apiKey": {"id": "XXX", "name": "Z" * 40}}}
}
if "mutation DeleteInvite" in body["query"]:
return {"data": {"deleteInvite": {"success": True}}}
if "mutation CreateInvite" in body["query"]:
return {"data": {"createInvite": {"invite": {"id": "XXX"}}}}
if "mutation CreateServiceAccount" in body["query"]:
return {"data": {"createServiceAccount": {"user": {"id": "XXX"}}}}
if "mutation CreateTeam" in body["query"]:
return {
"data": {
"createTeam": {
"entity": {"id": "XXX", "name": body["variables"]["teamName"]}
}
}
}
if "mutation NotifyScriptableRunAlert" in body["query"]:
avars = body.get("variables", {})
run_name = avars.get("runName", "unknown")
adict = dict(
title=avars["title"], text=avars["text"], severity=avars["severity"]
)
atime = avars.get("waitDuration")
if atime is not None:
adict["wait"] = atime
run_ctx = ctx["runs"].setdefault(run_name, default_ctx())
for c in ctx, run_ctx:
c["alerts"].append(adict)
return {"data": {"notifyScriptableRunAlert": {"success": True}}}
if "query SearchUsers" in body["query"]:
return {
"data": {
"users": {
"edges": [
{
"node": {
"id": "XXX",
"email": body["variables"]["query"],
"apiKeys": {
"edges": [
{"node": {"id": "YYY", "name": "Y" * 40}}
]
},
"teams": {
"edges": [
{"node": {"id": "TTT", "name": "test"}}
]
},
}
}
]
* ctx["num_search_users"]
}
}
}
if "query Entity(" in body["query"]:
return {
"data": {
"entity": {
"id": "XXX",
"members": [
{
"id": "YYY",
"name": "test",
"accountType": "MEMBER",
"apiKey": None,
},
{
"id": "SSS",
"name": "Service account",
"accountType": "SERVICE",
"apiKey": "Y" * 40,
},
],
}
}
}
if "stopped" in body["query"]:
return json.dumps(
{
"data": {
"Model": {
"project": {"run": {"stopped": ctx.get("stopped", False)}}
}
}
}
)
if "mutation createLaunchAgent(" in body["query"]:
agent_id = len(ctx["launch_agents"].keys())
ctx["launch_agents"][agent_id] = "POLLING"
return json.dumps(
{
"data": {
"createLaunchAgent": {
"success": True,
"launchAgentId": agent_id,
}
}
}
)
if "mutation updateLaunchAgent(" in body["query"]:
if ctx["launch_agent_update_fail"]:
return json.dumps({"data": {"updateLaunchAgent": {"success": False}}})
status = body["variables"]["agentStatus"]
agent_id = body["variables"]["agentId"]
ctx["launch_agents"][agent_id] = status
return json.dumps({"data": {"updateLaunchAgent": {"success": True}}})
if "query LaunchAgentIntrospection" in body["query"]:
if ctx["gorilla_supports_launch_agents"]:
return json.dumps(
{"data": {"LaunchAgentType": {"name": "LaunchAgent"}}}
)
else:
return json.dumps({"data": {}})
if "query LaunchAgent" in body["query"]:
if ctx["gorilla_supports_launch_agents"]:
return json.dumps(
{
"data": {
"launchAgent": {
"name": "test_agent",
"stopPolling": ctx["stop_launch_agent"],
}
}
}
)
else:
return json.dumps({"data": {}})
if "query GetSweeps" in body["query"]:
if body["variables"]["project"] == "testnosweeps":
return {"data": {"project": {"totalSweeps": 0, "sweeps": {}}}}
return {
"data": {
"project": {
"totalSweeps": 1,
"sweeps": {
"edges": [
{
"node": {
"id": "testdatabaseid",
"name": "testid",
"bestLoss": 0.5,
"config": yaml.dump({"name": "testname"}),
}
}
]
},
}
}
}
print("MISSING QUERY, add me to tests/mock_server.py", body["query"])
error = {"message": "Not implemented in tests/mock_server.py", "body": body}
return json.dumps({"errors": [error]})
@app.route("/storage", methods=["PUT", "GET"])
@app.route("/storage/<path:extra>", methods=["PUT", "GET"])
def storage(extra=None):
    """Mock object-storage endpoint.

    Records every upload (file names, byte counts) into the shared test
    context and serves canned artifact manifests / run files on download.
    """
    ctx = get_ctx()
    # Optionally simulate a flaky storage backend so retry logic can be tested.
    if "fail_storage_times" in ctx:
        if ctx["fail_storage_count"] < ctx["fail_storage_times"]:
            ctx["fail_storage_count"] += 1
            return json.dumps({"errors": ["Server down"]}), 500
    file = request.args.get("file")
    _id = request.args.get("id", "")
    run = request.args.get("run", "unknown")
    # Grab the run from the url for azure uploads
    if extra and run == "unknown":
        run = extra.split("/")[-2]
    # Track which file names each run touched.
    ctx["storage"] = ctx.get("storage", {})
    ctx["storage"][run] = ctx["storage"].get(run, [])
    ctx["storage"][run].append(request.args.get("file"))
    size = ctx["files"].get(request.args.get("file"))
    if request.method == "GET" and size:
        # Tests only check the byte count, so random content suffices.
        return os.urandom(size), 200
    # make sure to read the data
    request.get_data(as_text=True)
    # We need to bomb out before storing the file as uploaded for tests
    inject = InjectRequestsParse(ctx).find(request=request)
    if inject:
        if inject.response:
            response = inject.response
        if inject.http_status:
            # print("INJECT", inject, inject.http_status)
            raise HttpException(
                {"code": 500, "message": "some error"},
                status_code=inject.http_status,
            )
    # Record the file name under both the global and the per-run context.
    run_ctx = ctx["runs"].setdefault(run, default_ctx())
    for c in ctx, run_ctx:
        if extra:
            file = extra.split("/")[-1]
        else:
            file = request.args.get("file", "UNKNOWN_STORAGE_FILE_PUT")
        c["file_names"].append(file)
    if request.method == "PUT":
        for c in ctx, run_ctx:
            c["file_bytes"].setdefault(file, 0)
            c["file_bytes"][file] += request.content_length
    if ART_EMU:
        res = ART_EMU.storage(request=request, arti_id=ctx["latest_arti_id"])
        return res
    if file == "wandb_manifest.json":
        # Serve a canned artifact manifest chosen by artifact id or name.
        if _id in ctx.get("artifacts_by_id"):
            art = ctx["artifacts_by_id"][_id]
            if "-validation_predictions" in art["artifactCollectionNames"][0]:
                return {
                    "version": 1,
                    "storagePolicy": "wandb-storage-policy-v1",
                    "storagePolicyConfig": {},
                    "contents": {
                        "validation_predictions.table.json": {
                            "digest": "3aaaaaaaaaaaaaaaaaaaaa==",
                            "size": 81299,
                        }
                    },
                }
            if "wb_validation_data" in art["artifactCollectionNames"][0]:
                return {
                    "version": 1,
                    "storagePolicy": "wandb-storage-policy-v1",
                    "storagePolicyConfig": {},
                    "contents": {
                        "validation_data.table.json": {
                            "digest": "3aaaaaaaaaaaaaaaaaaaaa==",
                            "size": 81299,
                        },
                        "media/tables/5aac4cea496fd061e813.table.json": {
                            "digest": "3aaaaaaaaaaaaaaaaaaaaa==",
                            "size": 81299,
                        },
                    },
                }
        if request.args.get("name") == "my-test_reference_download:latest":
            return {
                "version": 1,
                "storagePolicy": "wandb-storage-policy-v1",
                "storagePolicyConfig": {},
                "contents": {
                    "StarWars3.wav": {
                        "digest": "a90eb05f7aef652b3bdd957c67b7213a",
                        "size": 81299,
                        "ref": "https://wandb-artifacts-refs-public-test.s3-us-west-2.amazonaws.com/StarWars3.wav",
                    },
                    "file1.txt": {
                        "digest": "0000====================",
                        "size": 81299,
                    },
                },
            }
        elif (
            _id == "bb8043da7d78ff168a695cff097897d2"
            or _id == "ad4d74ac0e4167c6cf4aaad9d59b9b44"
        ):
            return {
                "version": 1,
                "storagePolicy": "wandb-storage-policy-v1",
                "storagePolicyConfig": {},
                "contents": {
                    "t1.table.json": {
                        "digest": "3aaaaaaaaaaaaaaaaaaaaa==",
                        "size": 81299,
                    }
                },
            }
        elif _id == "27c102831476c6ff7ce53c266c937612":
            return {
                "version": 1,
                "storagePolicy": "wandb-storage-policy-v1",
                "storagePolicyConfig": {},
                "contents": {
                    "logged_table.table.json": {
                        "digest": "3aaaaaaaaaaaaaaaaaaaaa==",
                        "size": 81299,
                    }
                },
            }
        elif _id == "b9a598178557aed1d89bd93ec0db989b":
            return {
                "version": 1,
                "storagePolicy": "wandb-storage-policy-v1",
                "storagePolicyConfig": {},
                "contents": {
                    "logged_table_2.table.json": {
                        "digest": "3aaaaaaaaaaaaaaaaaaaaa==",
                        "size": 81299,
                    }
                },
            }
        elif _id == "e6954815d2beb5841b3dabf7cf455c30":
            return {
                "version": 1,
                "storagePolicy": "wandb-storage-policy-v1",
                "storagePolicyConfig": {},
                "contents": {
                    "logged_table.partitioned-table.json": {
                        "digest": "3aaaaaaaaaaaaaaaaaaaaa==",
                        "size": 81299,
                    }
                },
            }
        elif _id == "0eec13efd400546f58a4530de62ed07a":
            return {
                "version": 1,
                "storagePolicy": "wandb-storage-policy-v1",
                "storagePolicyConfig": {},
                "contents": {
                    "jt.joined-table.json": {
                        "digest": "3aaaaaaaaaaaaaaaaaaaaa==",
                        "size": 81299,
                    }
                },
            }
        elif _id in [
            "2d9a7e0aa8407f0730e19e5bc55c3a45",
            "c541de19b18331a4a33b282fc9d42510",
            "6f3d6ed5417d2955afbc73bff0ed1609",
            "7d797e62834a7d72538529e91ed958e2",
            "03d3e221fd4da6c5fccb1fbd75fe475e",
            "464aa7e0d7c3f8230e3fe5f10464a2e6",
            "8ef51aeabcfcd89b719822de64f6a8bf",
        ]:
            return {
                "version": 1,
                "storagePolicy": "wandb-storage-policy-v1",
                "storagePolicyConfig": {},
                "contents": {
                    "validation_data.table.json": {
                        "digest": "3aaaaaaaaaaaaaaaaaaaaa==",
                        "size": 81299,
                    },
                    "media/tables/e14239fe.table.json": {
                        "digest": "3aaaaaaaaaaaaaaaaaaaaa==",
                        "size": 81299,
                    },
                },
            }
        elif (
            len(ctx.get("graphql", [])) >= 3
            and ctx["graphql"][2].get("variables", {}).get("name", "") == "dummy:v0"
        ) or request.args.get("name") == "dummy:v0":
            return {
                "version": 1,
                "storagePolicy": "wandb-storage-policy-v1",
                "storagePolicyConfig": {},
                "contents": {
                    "dataset.partitioned-table.json": {
                        "digest": "0aaaaaaaaaaaaaaaaaaaaa==",
                        "size": 81299,
                    },
                    "parts/1.table.json": {
                        "digest": "1aaaaaaaaaaaaaaaaaaaaa==",
                        "size": 81299,
                    },
                    "t.table.json": {
                        "digest": "2aaaaaaaaaaaaaaaaaaaaa==",
                        "size": 123,
                    },
                },
            }
        elif _id == "e04169452d5584146eb7ebb405647cc8":
            return {
                "version": 1,
                "storagePolicy": "wandb-storage-policy-v1",
                "storagePolicyConfig": {},
                "contents": {
                    "results_df.table.json": {
                        "digest": "0aaaaaaaaaaaaaaaaaaaaa==",
                        "size": 363,
                    },
                },
            }
        else:
            # Default manifest when no specific fixture matches.
            return {
                "version": 1,
                "storagePolicy": "wandb-storage-policy-v1",
                "storagePolicyConfig": {},
                "contents": {
                    "digits.h5": {
                        "digest": "TeSJ4xxXg0ohuL5xEdq2Ew==",
                        "size": 81299,
                    },
                },
            }
    elif file == "wandb-metadata.json":
        # NOTE(review): code_path stays unbound for any run_script_type other
        # than these four values — presumably tests never set anything else;
        # confirm before relying on it.
        if ctx["run_script_type"] == "notebook":
            code_path = "one_cell.ipynb"
        elif ctx["run_script_type"] == "shell":
            code_path = "test.sh"
        elif ctx["run_script_type"] == "python":
            code_path = "train.py"
        elif ctx["run_script_type"] == "unknown":
            code_path = "unknown.unk"
        result = {
            "docker": "test/docker",
            "program": "train.py",
            "codePath": code_path,
            "args": ["--test", "foo"],
            "git": ctx.get("git", {}),
        }
        if ctx["run_cuda_version"]:
            result["cuda"] = ctx["run_cuda_version"]
        return result
    elif file == "requirements.txt":
        return "numpy==1.19.5\n"
    elif file == "diff.patch":
        # TODO: make sure the patch is valid for windows as well,
        # and un skip the test in test_cli.py
        return r"""
diff --git a/patch.txt b/patch.txt
index 30d74d2..9a2c773 100644
--- a/patch.txt
+++ b/patch.txt
@@ -1 +1 @@
-test
\ No newline at end of file
+testing
\ No newline at end of file
"""
    # emulate azure when we're receiving requests from them
    if extra is not None:
        return "", 201
    return "", 200
@app.route("/artifacts/<entity>/<digest>", methods=["GET", "POST"])
def artifact_file(entity, digest):
    """Serve canned artifact file contents, selected by content digest."""
    if ART_EMU:
        return ART_EMU.file(entity=entity, digest=digest)
    if entity == "entity" or entity == "mock_server_entity":
        if (
            digest == "d1a69a69a69a69a69a69a69a69a69a69"
        ):  # "dataset.partitioned-table.json"
            return (
                json.dumps({"_type": "partitioned-table", "parts_path": "parts"}),
                200,
            )
        elif digest == "d5a69a69a69a69a69a69a69a69a69a69":  # "parts/1.table.json"
            # A one-row table whose columns are all nullable numbers.
            return (
                json.dumps(
                    {
                        "_type": "table",
                        "column_types": {
                            "params": {
                                "type_map": {
                                    "A": {
                                        "params": {
                                            "allowed_types": [
                                                {"wb_type": "none"},
                                                {"wb_type": "number"},
                                            ]
                                        },
                                        "wb_type": "union",
                                    },
                                    "B": {
                                        "params": {
                                            "allowed_types": [
                                                {"wb_type": "none"},
                                                {"wb_type": "number"},
                                            ]
                                        },
                                        "wb_type": "union",
                                    },
                                    "C": {
                                        "params": {
                                            "allowed_types": [
                                                {"wb_type": "none"},
                                                {"wb_type": "number"},
                                            ]
                                        },
                                        "wb_type": "union",
                                    },
                                }
                            },
                            "wb_type": "dictionary",
                        },
                        "columns": ["A", "B", "C"],
                        "data": [[0, 0, 1]],
                        "ncols": 3,
                        "nrows": 1,
                    }
                ),
                200,
            )
        elif digest == "d9a69a69a69a69a69a69a69a69a69a69":  # "t.table.json"
            # An empty table.
            return (
                json.dumps(
                    {
                        "_type": "table",
                        "column_types": {
                            "params": {"type_map": {}},
                            "wb_type": "dictionary",
                        },
                        "columns": [],
                        "data": [],
                        "ncols": 0,
                        "nrows": 0,
                    }
                ),
                200,
            )
        if digest == "dda69a69a69a69a69a69a69a69a69a69":
            return (
                json.dumps({"_type": "table-file", "columns": [], "data": []}),
                200,
            )
    # Fallback: echo the digest so tests can still assert the request happened.
    return "ARTIFACT %s" % digest, 200
@app.route("/files/<entity>/<project>/<run>/file_stream", methods=["POST"])
@snoop.relay
def file_stream(entity, project, run):
    """Record a file_stream POST in both the global and per-run contexts."""
    payload = request.get_json()
    app.logger.info("file_stream post body: %s", payload)
    ctx = get_ctx()
    run_ctx = get_run_ctx(run)
    for store in (ctx, run_ctx):
        store.setdefault("file_stream", []).append(payload)
    response = json.dumps({"exitcode": None, "limits": {}})
    # Tests may inject an alternate response body or a forced HTTP failure.
    inject = InjectRequestsParse(ctx).find(request=request)
    if inject:
        if inject.response:
            response = inject.response
        if inject.http_status:
            # print("INJECT", inject, inject.http_status)
            raise HttpException("some error", status_code=inject.http_status)
    return response
@app.route("/api/v1/namespaces/default/pods/test")
def k8s_pod():
    """Fake Kubernetes pod-status endpoint; 500 unless ctx["k8s"] is set."""
    if not get_ctx().get("k8s"):
        return b"", 500
    pod_image = b"docker-pullable://test@sha256:1234"
    payload = b'{"status":{"containerStatuses":[{"imageID":"%s"}]}}' % pod_image
    return payload, 200
@app.route("/api/sessions")
def jupyter_sessions():
    """Return one fake Jupyter session so notebook detection succeeds."""
    sessions = [
        {
            "kernel": {"id": "12345"},
            "notebook": {"path": "test.ipynb", "name": "test.ipynb"},
        }
    ]
    return json.dumps(sessions)
@app.route("/wandb_url", methods=["PUT"])
def spell_url():
    """Store the Spell integration payload in the context for later asserts."""
    get_ctx()["spell_data"] = request.get_json()
    return json.dumps({"success": True})
@app.route("/pypi/<library>/json")
def pypi(library):
    """Fake PyPI JSON endpoint used by the wandb version-check code."""
    # Tests can override the advertised latest version via this hack attribute.
    version = getattr(wandb, "__hack_pypi_latest_version__", wandb.__version__)
    releases = {
        "88.1.2rc2": [],
        "88.1.2rc12": [],
        "88.1.2rc3": [],
        "88.1.2rc4": [],
        "0.11.0": [],
        "0.10.32": [],
        "0.10.31": [],
        "0.10.30": [],
        "0.0.8rc6": [],
        "0.0.8rc2": [],
        "0.0.8rc3": [],
        "0.0.8rc8": [],
        "0.0.2": [{"yanked": True}],
        "0.0.3": [{"yanked": True, "yanked_reason": "just cuz"}],
        "0.0.7": [],
        "0.0.5": [],
        "0.0.6": [],
    }
    return json.dumps({"info": {"version": version}, "releases": releases})
@app.route("/api/5288891/store/", methods=["POST"])
def sentry_put():
    """Capture a gzipped-JSON Sentry event into the test context."""
    event = json.loads(gzip.decompress(request.get_data()).decode("utf-8"))
    get_ctx()["sentry_events"].append(event)
    return ""
@app.route("/api/5288891/envelope/", methods=["POST"])
def sentry_session_put():
    """Capture a Sentry envelope (gzipped newline-delimited JSON) into ctx."""
    raw = gzip.decompress(request.get_data()).decode("utf-8")
    # Each non-empty line of the envelope is an independent JSON document.
    envelope = [json.loads(line) for line in raw.splitlines() if line]
    get_ctx()["sentry_sessions"].append(envelope)
    return ""
@app.errorhandler(404)
def page_not_found(e):
    """Log unexpected requests so missing mock endpoints are easy to spot."""
    print("Got request to: {} ({})".format(request.url, request.method))
    return "Not Found", 404
return app | python | tests/pytest_tests/unit_tests_old/utils/mock_server.py | 489 | 2,252 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def persist_ctx(exc):
    """Teardown handler: write the per-request context back to shared CTX."""
    if "ctx" not in g:
        return
    CTX.persist(g.ctx)
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def handle_http_exception(error):
    """Render an HttpException as a JSON response with its status code."""
    payload = jsonify(error.to_dict())
    # Azure storage clients inspect this header on error responses.
    payload.headers["x-ms-error-code"] = 500
    payload.status_code = error.status_code
    return payload
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
def update_ctx():
    """Read (GET), reset (DELETE), or merge into (other verbs) the shared
    context used by live_mock_server."""
    ctx = get_ctx()
    try:
        body = request.get_json()
    except BadRequest:
        # Flask/Werkzeug >= 2.1.0 raises on an empty/invalid JSON body.
        body = None
    method = request.method
    if method == "GET":
        return json.dumps(snoop.context_enrich(ctx))
    if method == "DELETE":
        app.logger.info("resetting context")
        set_ctx(default_ctx())
    else:
        ctx.update(body)
        # TODO: tests in CI failed on this
        set_ctx(ctx)
        app.logger.info("updated context %s", ctx)
    return json.dumps(get_ctx())
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
497 | graphql | def graphql():
# TODO: in tests wandb-username is set to the test name, lets scope ctx to it
ctx = get_ctx()
base_url = request.url_root.rstrip("/")
test_name = request.headers.get("X-WANDB-USERNAME")
if test_name:
app.logger.info("Test request from: %s", test_name)
app.logger.info("graphql post")
if "fail_graphql_times" in ctx:
if ctx["fail_graphql_count"] < ctx["fail_graphql_times"]:
ctx["fail_graphql_count"] += 1
return json.dumps({"errors": ["Server down"]}), 500
if "rate_limited_times" in ctx:
if ctx["rate_limited_count"] < ctx["rate_limited_times"]:
ctx["rate_limited_count"] += 1
return json.dumps({"error": "rate limit exceeded"}), 429
if ctx["graphql_conflict"]:
return json.dumps({"error": "resource already exists"}), 409
# Setup artifact emulator (should this be somewhere else?)
emulate_random_str = ctx["emulate_artifacts"]
global ART_EMU
if emulate_random_str:
if ART_EMU is None or ART_EMU._random_str != emulate_random_str:
ART_EMU = ArtifactEmulator(
random_str=emulate_random_str, ctx=ctx, base_url=base_url
)
else:
ART_EMU = None
body = request.get_json()
app.logger.info("graphql post body: %s", body)
if body["variables"].get("run"):
ctx["current_run"] = body["variables"]["run"]
if body["variables"].get("files"):
requested_file = body["variables"]["files"][0]
ctx["requested_file"] = requested_file
emulate_azure = ctx.get("emulate_azure")
# Azure expects the request path of signed urls to have 2 parts
upload_headers = []
if emulate_azure:
url = (
base_url
+ "/storage/azure/"
+ ctx["current_run"]
+ "/"
+ requested_file
)
upload_headers.append("x-ms-blob-type:Block")
upload_headers.append("Content-MD5:{}".format("AAAA"))
else:
url = base_url + "/storage?file={}&run={}".format(
urllib.parse.quote(requested_file), ctx["current_run"]
)
return json.dumps(
{
"data": {
"model": {
"bucket": {
"id": "storageid",
"files": {
"uploadHeaders": upload_headers,
"edges": [
{
"node": {
"name": requested_file,
"url": url,
"directUrl": url + "&direct=true",
}
}
],
},
}
}
}
}
)
if "historyTail" in body["query"]:
if ctx["resume"] is True:
hist_tail = '["{\\"_step\\": 15, \\"acc\\": 1, \\"_runtime\\": 60}"]'
return json.dumps(
{
"data": {
"model": {
"bucket": {
"name": "test",
"displayName": "funky-town-13",
"id": "test",
"config": '{"epochs": {"value": 10}}',
"summaryMetrics": '{"acc": 10, "best_val_loss": 0.5, "_wandb": {"runtime": 50}}',
"logLineCount": 14,
"historyLineCount": 15,
"eventsLineCount": 0,
"historyTail": hist_tail,
"eventsTail": '["{\\"_runtime\\": 70}"]',
}
}
}
}
)
else:
return json.dumps({"data": {"model": {"bucket": None}}})
if "query Runs(" in body["query"]:
return json.dumps(
{
"data": {
"project": {
"runCount": 4,
"readOnly": False,
"runs": paginated(run(ctx), ctx),
}
}
}
)
if "reportCursor" in body["query"]:
page_count = ctx["page_count"]
return json.dumps(
{
"data": {
"project": {
"allViews": paginated(
{
"name": "test-report",
"description": "test-description",
"user": {
"username": body["variables"]["entity"],
"photoUrl": "test-url",
},
"spec": '{"version": 5}',
"updatedAt": datetime.now().isoformat(),
"pageCount": page_count,
},
ctx,
)
}
}
}
)
for query_name in [
"Run",
"RunInfo",
"RunState",
"RunFiles",
"RunFullHistory",
"RunSampledHistory",
]:
if f"query {query_name}(" in body["query"]:
# if querying state of run, change context from running to finished
if "RunFragment" not in body["query"] and "state" in body["query"]:
ret_val = json.dumps(
{"data": {"project": {"run": {"state": ctx.get("run_state")}}}}
)
ctx["run_state"] = "finished"
return ret_val
return json.dumps({"data": {"project": {"run": run(ctx)}}})
for query_name in [
"Model", # backward compatible for 0.12.10 and below
"RunConfigs",
"RunResumeStatus",
"RunStoppedStatus",
"RunUploadUrls",
"RunDownloadUrls",
"RunDownloadUrl",
]:
if f"query {query_name}(" in body["query"]:
if "project(" in body["query"]:
project_field_name = "project"
run_field_name = "run"
else:
project_field_name = "model"
run_field_name = "bucket"
if (
"commit" in body["query"]
or body["variables"].get("fileName") == "wandb-metadata.json"
):
run_config = _bucket_config(ctx)
else:
run_config = run(ctx)
return json.dumps(
{"data": {project_field_name: {run_field_name: run_config}}}
)
# Models() is backward compatible for 0.12.10 and below
if "query Models(" in body["query"] or "query EntityProjects(" in body["query"]:
return json.dumps(
{
"data": {
"models": {
"edges": [
{
"node": {
"id": "123",
"name": "myname",
"project": "myproj",
}
}
]
}
}
}
)
if "query Projects(" in body["query"]:
return json.dumps(
{
"data": {
"models": paginated(
{
"id": "1",
"name": "test-project",
"entityName": body["variables"]["entity"],
"createdAt": "now",
"isBenchmark": False,
},
ctx,
)
}
}
)
if "query Viewer " in body["query"] or "query ServerInfo" in body["query"]:
viewer_dict = {
"data": {
"viewer": {
"entity": "mock_server_entity",
"admin": False,
"email": "mock@server.test",
"username": "mock",
"teams": {"edges": []}, # TODO make configurable for cli_test
},
},
}
code_saving_enabled = ctx.get("code_saving_enabled")
if code_saving_enabled is not None:
viewer_dict["data"]["viewer"][
"flags"
] = f'{{"code_saving_enabled": {str(code_saving_enabled).lower()}}}'
server_info = {
"serverInfo": {
"cliVersionInfo": {
"max_cli_version": str(ctx.get("max_cli_version", "0.10.33"))
},
"latestLocalVersionInfo": {
"outOfDate": ctx.get("out_of_date", False),
"latestVersionString": str(ctx.get("latest_version", "0.9.42")),
},
}
}
if ctx["empty_query"]:
server_info["serverInfo"].pop("latestLocalVersionInfo")
elif ctx["local_none"]:
server_info["serverInfo"]["latestLocalVersionInfo"] = None
viewer_dict["data"].update(server_info)
return json.dumps(viewer_dict)
if "query ArtifactFiles" in body["query"]:
artifact_file = {
"id": "1",
"name": "foo",
"uploadUrl": "",
"storagePath": "x/y/z",
"uploadheaders": [],
"artifact": {"id": "1"},
}
if "storagePath" not in body["query"]:
del artifact_file["storagePath"]
return {
"data": {
"project": {
"artifactType": {
"artifact": {
"files": paginated(
artifact_file,
ctx,
)
}
}
}
}
}
if "query ProbeServerCapabilities" in body["query"]:
if ctx["empty_query"]:
return json.dumps(
{
"data": {
"QueryType": {
"fields": [
{"name": "serverInfo"},
]
},
"ServerInfoType": {
"fields": [
{"name": "cliVersionInfo"},
{"name": "exposesExplicitRunQueueAckPath"},
]
},
}
}
)
return json.dumps(
{
"data": {
"QueryType": {
"fields": [
{"name": "serverInfo"},
]
},
"ServerInfoType": {
"fields": [
{"name": "cliVersionInfo"},
{"name": "latestLocalVersionInfo"},
{"name": "exposesExplicitRunQueueAckPath"},
]
},
}
}
)
if "query ProbeServerUseArtifactInput" in body["query"]:
return json.dumps(
{
"data": {
"UseArtifactInputInfoType": {
"inputFields": [
{"name": "entityName"},
{"name": "projectName"},
{"name": "runName"},
{"name": "artifactID"},
{"name": "usedAs"},
{"name": "clientMutationId"},
]
},
}
}
)
if "query Sweep(" in body["query"] or "query SweepWithRuns(" in body["query"]:
return json.dumps(
{
"data": {
"project": {
"sweep": {
"id": "1234",
"name": "fun-sweep-10",
"state": "running",
"bestLoss": 0.33,
"config": yaml.dump(
{
"controller": {"type": "local"},
"method": "random",
"parameters": {
"param1": {
"values": [1, 2, 3],
"distribution": "categorical",
},
"param2": {
"values": [1, 2, 3],
"distribution": "categorical",
},
},
"program": "train-dummy.py",
}
),
"createdAt": datetime.now().isoformat(),
"heartbeatAt": datetime.now().isoformat(),
"updatedAt": datetime.now().isoformat(),
"earlyStopJobRunning": False,
"controller": None,
"scheduler": None,
"runs": paginated(run(ctx), ctx),
}
}
}
}
)
if "mutation UpsertSweep(" in body["query"]:
return json.dumps(
{
"data": {
"upsertSweep": {
"sweep": {
"name": "test",
"project": {
"id": "1234",
"name": "test",
"entity": {"id": "1234", "name": "test"},
},
},
"configValidationWarnings": [],
}
}
}
)
if "mutation CreateAgent(" in body["query"]:
return json.dumps(
{
"data": {
"createAgent": {
"agent": {
"id": "mock-server-agent-93xy",
}
}
}
}
)
if "mutation Heartbeat(" in body["query"]:
new_run_needed = body["variables"]["runState"] == "{}"
if new_run_needed:
ctx["n_sweep_runs"] += 1
return json.dumps(
{
"data": {
"agentHeartbeat": {
"agent": {
"id": "mock-server-agent-93xy",
},
"commands": (
json.dumps(
[
{
"type": "run",
"run_id": f"mocker-sweep-run-x9{ctx['n_sweep_runs']}",
"args": {
"a": {"value": ctx["n_sweep_runs"]}
},
}
]
)
if ctx["n_sweep_runs"] <= 4
else json.dumps([{"type": "exit"}])
)
if new_run_needed
else "[]",
}
}
}
)
if "query ProbeServerSettings" in body["query"]:
if not ctx["server_settings"]:
data = {}
else:
data = {"ServerSettingsType": {"fields": [{"name": "sdkMessages"}]}}
return json.dumps({"data": data})
if "mutation UpsertBucket(" in body["query"]:
run_id_default = "abc123"
run_id = body["variables"].get("name", run_id_default)
run_num = len(ctx["runs"])
inserted = run_id not in ctx["runs"]
if inserted:
ctx["run_ids"].append(run_id)
run_ctx = ctx["runs"].setdefault(run_id, default_ctx())
r = run_ctx.setdefault("run", {})
r.setdefault("display_name", f"lovely-dawn-{run_num + 32}")
r.setdefault("storage_id", f"storageid{run_num}")
r.setdefault("project_name", "test")
r.setdefault("entity_name", "mock_server_entity")
git_remote = body["variables"].get("repo")
git_commit = body["variables"].get("commit")
if git_commit or git_remote:
for c in ctx, run_ctx:
c.setdefault("git", {})
c["git"]["remote"] = git_remote
c["git"]["commit"] = git_commit
for c in ctx, run_ctx:
tags = body["variables"].get("tags")
if tags is not None:
c["tags"] = tags
notes = body["variables"].get("notes")
if notes is not None:
c["notes"] = notes
group = body["variables"].get("groupName")
if group is not None:
c["group"] = group
job_type = body["variables"].get("jobType")
if job_type is not None:
c["job_type"] = job_type
name = body["variables"].get("displayName")
if name is not None:
c["name"] = name
program = body["variables"].get("program")
if program is not None:
c["program"] = program
host = body["variables"].get("host")
if host is not None:
c["host"] = host
param_config = body["variables"].get("config")
if param_config:
for c in ctx, run_ctx:
c.setdefault("config", []).append(json.loads(param_config))
param_summary = body["variables"].get("summaryMetrics")
if param_summary:
for c in ctx, run_ctx:
c.setdefault("summary", []).append(json.loads(param_summary))
for c in ctx, run_ctx:
c["upsert_bucket_count"] += 1
# Update run context
ctx["runs"][run_id] = run_ctx
# support legacy tests which pass resume
if ctx["resume"] is True:
inserted = False
response = {
"data": {
"upsertBucket": {
"bucket": {
"id": r["storage_id"],
"name": run_id,
"displayName": r["display_name"],
"project": {
"name": r["project_name"],
"entity": {"name": r["entity_name"]},
},
},
"inserted": inserted,
}
}
}
if ctx["server_settings"]:
response["data"]["upsertBucket"]["serverSettings"] = {
"serverMessages": ctx["server_messages"]
}
if "mocker-sweep-run-x9" in body["variables"].get("name", ""):
response["data"]["upsertBucket"]["bucket"][
"sweepName"
] = "test-sweep-id"
return json.dumps(response)
if "mutation DeleteRun(" in body["query"]:
return json.dumps({"data": {}})
if "mutation CreateAnonymousApiKey " in body["query"]:
return json.dumps(
{
"data": {
"createAnonymousEntity": {"apiKey": {"name": "ANONYMOOSE" * 4}}
}
}
)
if "mutation DeleteFiles(" in body["query"]:
return json.dumps({"data": {"deleteFiles": {"success": True}}})
if "mutation PrepareFiles(" in body["query"]:
nodes = []
for i, file_spec in enumerate(body["variables"]["fileSpecs"]):
url = base_url + "/storage?file=%s" % file_spec["name"]
nodes.append(
{
"node": {
"id": str(i),
"name": file_spec["name"],
"displayName": file_spec["name"],
"digest": "null",
"uploadUrl": url,
"uploadHeaders": "",
}
}
)
return json.dumps({"data": {"prepareFiles": {"files": {"edges": nodes}}}})
if "mutation LinkArtifact(" in body["query"]:
if ART_EMU:
ctx["latest_arti_id"] = body["variables"].get("artifactID") or body[
"variables"
].get("clientID")
return ART_EMU.link(variables=body["variables"])
if "mutation CreateArtifact(" in body["query"]:
if ART_EMU:
res = ART_EMU.create(variables=body["variables"])
ctx["latest_arti_id"] = res["data"]["createArtifact"]["artifact"]["id"]
return res
collection_name = body["variables"]["artifactCollectionNames"][0]
app.logger.info(f"Creating artifact {collection_name}")
ctx["artifacts"] = ctx.get("artifacts", {})
ctx["artifacts"][collection_name] = ctx["artifacts"].get(
collection_name, []
)
ctx["artifacts"][collection_name].append(body["variables"])
_id = body.get("variables", {}).get("digest", "")
if _id != "":
ctx.get("artifacts_by_id")[_id] = body["variables"]
return {
"data": {
"createArtifact": {
"artifact": artifact(
ctx,
collection_name,
id_override=_id,
state="COMMITTED"
if "PENDING" not in collection_name
else "PENDING",
)
}
}
}
if "mutation updateArtifact" in body["query"]:
id = body["variables"]["artifactID"]
ctx["latest_arti_id"] = id
ctx.get("artifacts_by_id")[id] = body["variables"]
return {"data": {"updateArtifact": {"artifact": id}}}
if "mutation DeleteArtifact(" in body["query"]:
id = body["variables"]["artifactID"]
delete_aliases = body["variables"]["deleteAliases"]
art = artifact(ctx, id_override=id)
if len(art.get("aliases", [])) and not delete_aliases:
raise Exception("delete_aliases not set, but artifact has aliases")
return {
"data": {
"deleteArtifact": {
"artifact": art,
"success": True,
}
}
}
if "mutation CreateArtifactManifest(" in body["query"]:
manifest = {
"id": 1,
"type": "INCREMENTAL"
if "incremental" in body.get("variables", {}).get("name", "")
else "FULL",
"file": {
"id": 1,
"directUrl": base_url
+ "/storage?file=wandb_manifest.json&name={}".format(
body.get("variables", {}).get("name", "")
),
"uploadUrl": base_url + "/storage?file=wandb_manifest.json",
"uploadHeaders": "",
},
}
run_name = body.get("variables", {}).get("runName", "unknown")
run_ctx = ctx["runs"].setdefault(run_name, default_ctx())
for c in ctx, run_ctx:
c["manifests_created"].append(manifest)
return {
"data": {
"createArtifactManifest": {
"artifactManifest": manifest,
}
}
}
if "mutation UpdateArtifactManifest(" in body["query"]:
manifest = {
"id": 1,
"type": "INCREMENTAL"
if "incremental" in body.get("variables", {}).get("name", "")
else "FULL",
"file": {
"id": 1,
"directUrl": base_url
+ "/storage?file=wandb_manifest.json&name={}".format(
body.get("variables", {}).get("name", "")
),
"uploadUrl": base_url + "/storage?file=wandb_manifest.json",
"uploadHeaders": "",
},
}
return {
"data": {
"updateArtifactManifest": {
"artifactManifest": manifest,
}
}
}
if "mutation CreateArtifactFiles" in body["query"]:
if ART_EMU:
return ART_EMU.create_files(variables=body["variables"])
return {
"data": {
"files": [
{
"node": {
"id": idx,
"name": file["name"],
"uploadUrl": "",
"uploadheaders": [],
"artifact": {"id": file["artifactID"]},
}
for idx, file in enumerate(
body["variables"]["artifactFiles"]
)
}
],
}
}
if "mutation CommitArtifact(" in body["query"]:
return {
"data": {
"commitArtifact": {
"artifact": {"id": 1, "digest": "0000===================="}
}
}
}
if "mutation UseArtifact(" in body["query"]:
used_name = body.get("variables", {}).get("usedAs", None)
ctx["used_artifact_info"] = {"used_name": used_name}
return {"data": {"useArtifact": {"artifact": artifact(ctx)}}}
if "query ProjectArtifactType(" in body["query"]:
return {
"data": {
"project": {
"artifactType": {
"id": "1",
"name": "dataset",
"description": "",
"createdAt": datetime.now().isoformat(),
}
}
}
}
if "mutation upsertView(" in body["query"]:
return json.dumps(
{
"data": {
"upsertView": {
"view": {
"id": "VmlldzoxOTk4ODgz",
"type": "runs",
"name": "2mr65bp2bjy",
"displayName": "2mr65bp2bjy",
"description": None,
"project": {
"id": "UHJvamVjdDp2MTpyZXBvcnQtZWRpdGluZzptZWdhdHJ1b25n",
"name": "report-editing",
"entityName": "megatruong",
},
"spec": '{"version": 5, "panelSettings": {}, "blocks": [], "width": "readable", "authors": [], "discussionThreads": [], "ref": {}}',
"updatedAt": "2022-05-13T03:13:18",
},
"inserted": True,
}
}
}
)
if "mutation upsertModel(" in body["query"]:
return json.dumps(
{
"data": {
"upsertModel": {
"project": {
"id": "UHJvamVjdDp2MTpyZXBvcnQtZWRpdGluZzptZWdhdHJ1b25n",
"name": "report-editing",
"entityName": "megatruong",
"description": None,
"access": "PRIVATE",
"views": "{}",
},
"model": {
"id": "UHJvamVjdDp2MTpyZXBvcnQtZWRpdGluZzptZWdhdHJ1b25n",
"name": "report-editing",
"entityName": "megatruong",
"description": None,
"access": "PRIVATE",
"views": "{}",
},
"inserted": False,
}
}
}
)
if "query SpecificReport(" in body["query"]:
return json.dumps(
{
"data": {
"view": {
"id": "VmlldzoxOTcxMzI2",
"type": "runs",
"name": "hxrbu425ppr",
"displayName": "Copy of megatruong's Copy of megatruong's Copy of megatruong's Untitled Report",
"description": "",
"project": {
"id": "UHJvamVjdDp2MTpyZXBvcnQtZWRpdGluZzptZWdhdHJ1b25n",
"name": "report-editing",
"entityName": "megatruong",
},
"createdAt": "2022-05-09T02:18:42",
"updatedAt": "2022-05-09T02:18:45",
"spec": '{"version":5,"panelSettings":{"xAxis":"_step","smoothingWeight":0,"smoothingType":"exponential","ignoreOutliers":false,"xAxisActive":false,"smoothingActive":false,"ref":{"type":"panelSettings","viewID":"qwnlqy0ka","id":"85uruphtl"}},"blocks":[{"type":"paragraph","children":[{"text":""},{"type":"link","url":"https://docs.google.com/presentation/d/1cvcSZygln3WPOjRGcJX8XgDtj9qcW7SA6K2u-fUVXTg/edit#slide=id.g1186f921888_0_1484","children":[{"text":"https://docs.google.com/presentation/d/1cvcSZygln3WPOjRGcJX8XgDtj9qcW7SA6K2u-fUVXTg/edit#slide=id.g1186f921888_0_1484"}]},{"text":""}]},{"type":"paragraph","children":[{"text":""}]},{"type":"twitter","html":"<blockquote class=\\"twitter-tweet\\"><p lang=\\"en\\" dir=\\"ltr\\">The voice of an angel, truly. <a href=\\"https://twitter.com/hashtag/MassEffect?src=hash&ref_src=twsrc%5Etfw\\">#MassEffect</a> <a href=\\"https://t.co/nMev97Uw7F\\">pic.twitter.com/nMev97Uw7F</a></p>— Mass Effect (@masseffect) <a href=\\"https://twitter.com/masseffect/status/1428748886655569924?ref_src=twsrc%5Etfw\\">August 20, 2021</a></blockquote>\\n","children":[{"text":""}]},{"type":"spotify","spotifyType":"track","spotifyID":"7kRKlFCFLAUwt43HWtauhX","children":[{"text":""}]},{"type":"soundcloud","html":"<iframe width=\\"100%\\" height=\\"400\\" scrolling=\\"no\\" frameborder=\\"no\\" src=\\"https://w.soundcloud.com/player/?visual=true&url=https%3A%2F%2Fapi.soundcloud.com%2Ftracks%2F1076901103&show_artwork=true\\"></iframe>","children":[{"text":""}]},{"type":"video","url":"https://www.youtube.com/embed/ggqI-HH8yXc","children":[{"text":""}]},{"type":"paragraph","children":[{"text":"Normal paragraph "}]},{"type":"heading","children":[{"text":"Heading 1"}],"level":1},{"type":"heading","children":[{"text":"Heading 2"}],"level":2},{"type":"heading","children":[{"text":"Heading 3"}],"level":3},{"type":"list","children":[{"type":"list-item","children":[{"type":"paragraph","children":[{"text":"Bullet 
1"}]}]},{"type":"list-item","children":[{"type":"paragraph","children":[{"text":"Bullet 2"}]}]}]},{"type":"list","ordered":true,"children":[{"type":"list-item","children":[{"type":"paragraph","children":[{"text":"Ordered 1"}]}],"ordered":true},{"type":"list-item","children":[{"type":"paragraph","children":[{"text":"Ordered 2"}]}],"ordered":true}]},{"type":"list","children":[{"type":"list-item","children":[{"type":"paragraph","children":[{"text":"Unchecked"}]}],"checked":false},{"type":"list-item","children":[{"type":"paragraph","children":[{"text":"Checked"}]}],"checked":true}]},{"type":"block-quote","children":[{"text":"Block Quote 1\\nBlock Quote 2\\nBlock Quote 3"}]},{"type":"callout-block","children":[{"type":"callout-line","children":[{"text":"Callout 1"}]},{"type":"callout-line","children":[{"text":"Callout 2"}]},{"type":"callout-line","children":[{"text":"Callout 3"}]}]},{"type":"code-block","children":[{"type":"code-line","children":[{"text":"# python code block"}]},{"type":"code-line","children":[{"text":"for x in range(10):"}]},{"type":"code-line","children":[{"text":" pass"}]}]},{"type":"horizontal-rule","children":[{"text":""}]},{"type":"code-block","language":"yaml","children":[{"type":"code-line","children":[{"text":"this:"}],"language":"yaml"},{"type":"code-line","children":[{"text":"- is"}],"language":"yaml"},{"type":"code-line","children":[{"text":"- a"}],"language":"yaml"},{"type":"code-line","children":[{"text":"cool:"}],"language":"yaml"},{"type":"code-line","children":[{"text":"- yaml"}],"language":"yaml"},{"type":"code-line","children":[{"text":"- file"}],"language":"yaml"}]},{"type":"markdown-block","children":[{"text":""}],"content":"Markdown cell with *italics* and **bold** and $e=mc^2$"},{"type":"image","children":[{"text":"It\'s a me, 
Pikachu"}],"url":"https://api.wandb.ai/files/megatruong/images/projects/918598/350382db.gif","hasCaption":true},{"type":"paragraph","children":[{"text":""},{"type":"latex","children":[{"text":""}],"content":"y=ax^2 +bx+c"},{"text":""}]},{"type":"latex","children":[{"text":""}],"content":"\\\\gamma^2+\\\\theta^2=\\\\omega^2\\n\\\\\\\\ a^2 + b^2 = c^2","block":true},{"type":"gallery","children":[{"text":""}],"ids":[]},{"type":"weave-panel","children":[{"text":""}],"config":{"panelConfig":{"exp":{"nodeType":"output","type":{"type":"tagged","tag":{"type":"tagged","tag":{"type":"typedDict","propertyTypes":{"entityName":"string","projectName":"string"}},"value":{"type":"typedDict","propertyTypes":{"project":"project","artifactName":"string"}}},"value":"artifact"},"fromOp":{"name":"project-artifact","inputs":{"project":{"nodeType":"output","type":{"type":"tagged","tag":{"type":"typedDict","propertyTypes":{"entityName":"string","projectName":"string"}},"value":"project"},"fromOp":{"name":"root-project","inputs":{"entityName":{"nodeType":"const","type":"string","val":"megatruong"},"projectName":{"nodeType":"const","type":"string","val":"nvda-ngc"}}}},"artifactName":{"nodeType":"const","type":"string","val":"my-artifact"}}}}}}},{"type":"paragraph","children":[{"text":""}]},{"type":"panel-grid","children":[{"text":""}],"metadata":{"openViz":true,"openRunSet":0,"name":"unused-name","runSets":[{"runFeed":{"version":2,"columnVisible":{"run:name":false},"columnPinned":{},"columnWidths":{},"columnOrder":[],"pageSize":10,"onlyShowSelected":false},"enabled":true,"name":"Run 
set","search":{"query":""},"id":"pw1e1wwf6","filters":{"op":"OR","filters":[{"op":"AND","filters":[{"key":{"section":"run","name":"jobType"},"op":"=","value":"job_type2","disabled":false},{"key":{"section":"run","name":"group"},"op":"=","value":"groupA","disabled":false}]}],"ref":{"type":"filters","viewID":"vtahfagaz","id":"dpd2z5bz0"}},"grouping":[{"section":"run","name":"username"}],"sort":{"keys":[{"key":{"section":"run","name":"createdAt"},"ascending":false},{"key":{"section":"run","name":"username"},"ascending":false}],"ref":{"type":"sort","viewID":"vtahfagaz","id":"ivgiyvxox"}},"selections":{"root":1,"bounds":[],"tree":[]},"expandedRowAddresses":[],"ref":{"type":"runSet","viewID":"vtahfagaz","id":"z59jmoybe"}}],"panelBankConfig":{"state":0,"settings":{"autoOrganizePrefix":2,"showEmptySections":false,"sortAlphabetically":false},"sections":[{"name":"Hidden Panels","isOpen":false,"type":"flow","flowConfig":{"snapToColumns":true,"columnsPerPage":3,"rowsPerPage":2,"gutterWidth":16,"boxWidth":460,"boxHeight":300},"sorted":0,"localPanelSettings":{"xAxis":"_step","smoothingWeight":0,"smoothingType":"exponential","ignoreOutliers":false,"xAxisActive":false,"smoothingActive":false,"ref":{"type":"panelSettings","viewID":"vtahfagaz","id":"xa2bddtor"}},"panels":[],"localPanelSettingsRef":{"type":"panelSettings","viewID":"vtahfagaz","id":"xa2bddtor"},"panelRefs":[],"ref":{"type":"panel-bank-section-config","viewID":"vtahfagaz","id":"fz92jh3dt"}}],"ref":{"type":"panel-bank-config","viewID":"vtahfagaz","id":"1o946whn4"}},"panelBankSectionConfig":{"name":"Report 
Panels","isOpen":true,"type":"grid","flowConfig":{"snapToColumns":true,"columnsPerPage":3,"rowsPerPage":2,"gutterWidth":16,"boxWidth":460,"boxHeight":300},"sorted":0,"localPanelSettings":{"xAxis":"_step","smoothingWeight":0,"smoothingType":"exponential","ignoreOutliers":false,"xAxisActive":false,"smoothingActive":false,"ref":{"type":"panelSettings","viewID":"vtahfagaz","id":"xvr9gn2vt"}},"panels":[{"__id__":"e6mwxa1mq","viewType":"Run History Line Plot","config":{"metrics":["y"],"groupBy":"None","legendFields":["run:displayName"],"yAxisAutoRange":false,"yLogScale":false},"ref":{"type":"panel","viewID":"vtahfagaz","id":"ui3b5xcsb"},"layout":{"x":8,"y":0,"w":8,"h":6}},{"__id__":"ymceey3mu","viewType":"Run History Line Plot","config":{"metrics":["x"],"groupBy":"None","legendFields":["run:displayName"],"yAxisAutoRange":false,"yLogScale":false},"ref":{"type":"panel","viewID":"vtahfagaz","id":"yewkh6k6p"},"layout":{"x":0,"y":0,"w":8,"h":6}}],"localPanelSettingsRef":{"type":"panelSettings","viewID":"vtahfagaz","id":"xvr9gn2vt"},"panelRefs":[{"type":"panel","viewID":"vtahfagaz","id":"ui3b5xcsb"},{"type":"panel","viewID":"vtahfagaz","id":"yewkh6k6p"}],"ref":{"type":"panel-bank-section-config","viewID":"vtahfagaz","id":"5mqkb97jz"}},"customRunColors":{"ref":{"type":"run-colors","viewID":"vtahfagaz","id":"ukxc20iq0"}},"ref":{"type":"section","viewID":"vtahfagaz","id":"eg2e88znk"}}},{"type":"paragraph","children":[{"text":""}]}],"width":"readable","authors":[{"name":"Andrew Truong","username":"megatruong"}],"discussionThreads":[],"ref":{"type":"runs/draft","viewID":"qwnlqy0ka","id":"s2r3aq8j6"}}',
"previewUrl": None,
"user": {
"name": "Andrew Truong",
"username": "megatruong",
"userInfo": {
"bio": "model-registry \ninstant replay\nweeave-plot\nweeave-report\nniight\n",
"company": "Weights and Biases",
"location": "San Francisco",
"githubUrl": "",
"twitterUrl": "",
"websiteUrl": "wandb.com",
},
},
}
}
}
)
if "query ProjectArtifacts(" in body["query"]:
return {
"data": {
"project": {
"artifactTypes": paginated(
{
"id": "1",
"name": "dataset",
"description": "",
"createdAt": datetime.now().isoformat(),
},
ctx,
)
}
}
}
if "query ProjectArtifactCollections(" in body["query"]:
return {
"data": {
"project": {
"artifactType": {
"artifactCollections": paginated(
{
"id": "1",
"name": "mnist",
"description": "",
"createdAt": datetime.now().isoformat(),
},
ctx,
)
}
}
}
}
if "query ArtifactCollection(" in body["query"]:
return {
"data": {
"project": {
"artifactType": {
"artifactCollection": {
"id": "1",
"name": "mnist",
"description": "",
"createdAt": datetime.now().isoformat(),
"aliases": {"edges": [{"node": {"alias": "latest"}}]},
}
}
}
}
}
# backward compatible for 0.12.10 and below
if "query RunArtifacts(" in body["query"]:
if "inputArtifacts" in body["query"]:
key = "inputArtifacts"
else:
key = "outputArtifacts"
artifacts = paginated(artifact(ctx), ctx)
artifacts["totalCount"] = ctx["page_times"]
return {"data": {"project": {"run": {key: artifacts}}}}
if "query RunInputArtifacts(" in body["query"]:
artifacts = paginated(artifact(ctx), ctx)
artifacts["totalCount"] = ctx["page_times"]
return {"data": {"project": {"run": {"inputArtifacts": artifacts}}}}
if "query RunOutputArtifacts(" in body["query"]:
artifacts = paginated(artifact(ctx), ctx)
artifacts["totalCount"] = ctx["page_times"]
return {"data": {"project": {"run": {"outputArtifacts": artifacts}}}}
if "query Artifacts(" in body["query"]:
version = "v%i" % ctx["page_count"]
artifacts = paginated(artifact(ctx), ctx, {"version": version})
artifacts["totalCount"] = ctx["page_times"]
return {
"data": {
"project": {
"artifactType": {
"artifactCollection": {
"name": "mnist",
"artifacts": artifacts,
}
}
}
}
}
for query_name in [
"Artifact",
"ArtifactType",
"ArtifactWithCurrentManifest",
"ArtifactUsedBy",
"ArtifactCreatedBy",
]:
if f"query {query_name}(" in body["query"]:
if ART_EMU:
res = ART_EMU.query(
variables=body.get("variables", {}), query=body.get("query")
)
artifact_response = None
if res["data"].get("project") is not None:
artifact_response = res["data"]["project"]["artifact"]
else:
artifact_response = res["data"]["artifact"]
if artifact_response:
ctx["latest_arti_id"] = artifact_response.get("id")
return res
art = artifact(
ctx, request_url_root=base_url, id_override="QXJ0aWZhY3Q6NTI1MDk4"
)
if "id" in body.get("variables", {}):
art = artifact(
ctx,
request_url_root=base_url,
id_override=body.get("variables", {}).get("id"),
)
art["artifactType"] = {"id": 1, "name": "dataset"}
return {"data": {"artifact": art}}
if ctx["swappable_artifacts"] and "name" in body.get("variables", {}):
full_name = body.get("variables", {}).get("name", None)
if full_name is not None:
collection_name = full_name.split(":")[0]
art = artifact(
ctx,
collection_name=collection_name,
request_url_root=base_url,
)
# code artifacts use source-RUNID names, we return the code type
art["artifactType"] = {"id": 2, "name": "code"}
if "source" not in body["variables"]["name"]:
art["artifactType"] = {"id": 1, "name": "dataset"}
if "logged_table" in body["variables"]["name"]:
art["artifactType"] = {"id": 3, "name": "run_table"}
if "run-" in body["variables"]["name"]:
art["artifactType"] = {"id": 4, "name": "run_table"}
if "wb_validation_data" in body["variables"]["name"]:
art["artifactType"] = {"id": 4, "name": "validation_dataset"}
if "job" in body["variables"]["name"]:
art["artifactType"] = {"id": 5, "name": "job"}
if "model" in body["variables"]["name"]:
art["artifactType"] = {"id": 6, "name": "model"}
return {"data": {"project": {"artifact": art}}}
if "query ArtifactManifest(" in body["query"]:
if ART_EMU:
res = ART_EMU.query(
variables=body.get("variables", {}), query=body.get("query")
)
ctx["latest_arti_id"] = res["data"]["artifact"]["id"]
art = artifact(ctx)
art["currentManifest"] = {
"id": 1,
"file": {
"id": 1,
"directUrl": base_url
+ "/storage?file=wandb_manifest.json&name={}".format(
body.get("variables", {}).get("name", "")
),
},
}
return {"data": {"project": {"artifact": art}}}
# Project() is backward compatible for 0.12.10 and below
if ("query Project(" in body["query"] and "runQueues" in body["query"]) or (
"query ProjectRunQueues(" in body["query"] and "runQueues" in body["query"]
):
if ctx["run_queues_return_default"]:
return json.dumps(
{
"data": {
"project": {
"runQueues": [
{
"id": 1,
"name": "default",
"createdBy": "mock_server_entity",
"access": "PROJECT",
}
]
}
}
}
)
else:
return json.dumps({"data": {"project": {"runQueues": []}}})
if "query GetRunQueueItem" in body["query"]:
if ctx["run_queue_item_return_type"] == "claimed":
return json.dumps(
{
"data": {
"project": {
"runQueue": {
"runQueueItems": {
"edges": [
{
"node": {
"id": "1",
"associatedRunId": "test",
"state": "CLAIMED",
}
}
]
}
}
}
}
}
)
else:
return json.dumps(
{
"data": {
"project": {
"runQueue": {
"runQueueItems": {
"edges": [
{
"node": {
"id": "1",
"associatedRunId": None,
"state": "PENDING",
}
}
]
}
}
}
}
}
)
if "mutation createRunQueue" in body["query"]:
if not ctx["successfully_create_default_queue"]:
return json.dumps(
{"data": {"createRunQueue": {"success": False, "queueID": None}}}
)
ctx["run_queues_return_default"] = True
return json.dumps(
{"data": {"createRunQueue": {"success": True, "queueID": 1}}}
)
if "mutation popFromRunQueue" in body["query"]:
if ctx["num_popped"] != 0:
return json.dumps({"data": {"popFromRunQueue": None}})
ctx["num_popped"] += 1
if ctx["invalid_launch_spec_project"]:
return json.dumps(
{
"data": {
"popFromRunQueue": {
"runQueueItemId": "1",
"runSpec": {
"uri": "https://wandb.ai/mock_server_entity/test_project/runs/1",
"project": "test_project2",
"entity": "mock_server_entity",
"resource": "local",
},
}
}
}
)
return json.dumps(
{
"data": {
"popFromRunQueue": {
"runQueueItemId": "1",
"runSpec": {
"uri": "https://wandb.ai/mock_server_entity/test_project/runs/1",
"project": "test_project",
"entity": "mock_server_entity",
"resource": "local",
},
}
}
}
)
if "mutation pushToRunQueue" in body["query"]:
if ctx["run_queues"].get(body["variables"]["queueID"]):
ctx["run_queues"][body["variables"]["queueID"]].append(
body["variables"]["queueID"]
)
else:
ctx["run_queues"][body["variables"]["queueID"]] = [
body["variables"]["queueID"]
]
return json.dumps({"data": {"pushToRunQueue": {"runQueueItemId": "1"}}})
if "mutation ackRunQueueItem" in body["query"]:
ctx["num_acked"] += 1
return json.dumps({"data": {"ackRunQueueItem": {"success": True}}})
if "query ClientIDMapping(" in body["query"]:
return {"data": {"clientIDMapping": {"serverID": "QXJ0aWZhY3Q6NTI1MDk4"}}}
# Admin apis
if "mutation DeleteApiKey" in body["query"]:
return {"data": {"deleteApiKey": {"success": True}}}
if "mutation GenerateApiKey" in body["query"]:
return {
"data": {"generateApiKey": {"apiKey": {"id": "XXX", "name": "Z" * 40}}}
}
if "mutation DeleteInvite" in body["query"]:
return {"data": {"deleteInvite": {"success": True}}}
if "mutation CreateInvite" in body["query"]:
return {"data": {"createInvite": {"invite": {"id": "XXX"}}}}
if "mutation CreateServiceAccount" in body["query"]:
return {"data": {"createServiceAccount": {"user": {"id": "XXX"}}}}
if "mutation CreateTeam" in body["query"]:
return {
"data": {
"createTeam": {
"entity": {"id": "XXX", "name": body["variables"]["teamName"]}
}
}
}
if "mutation NotifyScriptableRunAlert" in body["query"]:
avars = body.get("variables", {})
run_name = avars.get("runName", "unknown")
adict = dict(
title=avars["title"], text=avars["text"], severity=avars["severity"]
)
atime = avars.get("waitDuration")
if atime is not None:
adict["wait"] = atime
run_ctx = ctx["runs"].setdefault(run_name, default_ctx())
for c in ctx, run_ctx:
c["alerts"].append(adict)
return {"data": {"notifyScriptableRunAlert": {"success": True}}}
if "query SearchUsers" in body["query"]:
return {
"data": {
"users": {
"edges": [
{
"node": {
"id": "XXX",
"email": body["variables"]["query"],
"apiKeys": {
"edges": [
{"node": {"id": "YYY", "name": "Y" * 40}}
]
},
"teams": {
"edges": [
{"node": {"id": "TTT", "name": "test"}}
]
},
}
}
]
* ctx["num_search_users"]
}
}
}
if "query Entity(" in body["query"]:
return {
"data": {
"entity": {
"id": "XXX",
"members": [
{
"id": "YYY",
"name": "test",
"accountType": "MEMBER",
"apiKey": None,
},
{
"id": "SSS",
"name": "Service account",
"accountType": "SERVICE",
"apiKey": "Y" * 40,
},
],
}
}
}
if "stopped" in body["query"]:
return json.dumps(
{
"data": {
"Model": {
"project": {"run": {"stopped": ctx.get("stopped", False)}}
}
}
}
)
if "mutation createLaunchAgent(" in body["query"]:
agent_id = len(ctx["launch_agents"].keys())
ctx["launch_agents"][agent_id] = "POLLING"
return json.dumps(
{
"data": {
"createLaunchAgent": {
"success": True,
"launchAgentId": agent_id,
}
}
}
)
if "mutation updateLaunchAgent(" in body["query"]:
if ctx["launch_agent_update_fail"]:
return json.dumps({"data": {"updateLaunchAgent": {"success": False}}})
status = body["variables"]["agentStatus"]
agent_id = body["variables"]["agentId"]
ctx["launch_agents"][agent_id] = status
return json.dumps({"data": {"updateLaunchAgent": {"success": True}}})
if "query LaunchAgentIntrospection" in body["query"]:
if ctx["gorilla_supports_launch_agents"]:
return json.dumps(
{"data": {"LaunchAgentType": {"name": "LaunchAgent"}}}
)
else:
return json.dumps({"data": {}})
if "query LaunchAgent" in body["query"]:
if ctx["gorilla_supports_launch_agents"]:
return json.dumps(
{
"data": {
"launchAgent": {
"name": "test_agent",
"stopPolling": ctx["stop_launch_agent"],
}
}
}
)
else:
return json.dumps({"data": {}})
if "query GetSweeps" in body["query"]:
if body["variables"]["project"] == "testnosweeps":
return {"data": {"project": {"totalSweeps": 0, "sweeps": {}}}}
return {
"data": {
"project": {
"totalSweeps": 1,
"sweeps": {
"edges": [
{
"node": {
"id": "testdatabaseid",
"name": "testid",
"bestLoss": 0.5,
"config": yaml.dump({"name": "testname"}),
}
}
]
},
}
}
}
print("MISSING QUERY, add me to tests/mock_server.py", body["query"])
error = {"message": "Not implemented in tests/mock_server.py", "body": body}
return json.dumps({"errors": [error]}) | python | tests/pytest_tests/unit_tests_old/utils/mock_server.py | 537 | 1,787 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
498 | storage | def storage(extra=None):
ctx = get_ctx()
if "fail_storage_times" in ctx:
if ctx["fail_storage_count"] < ctx["fail_storage_times"]:
ctx["fail_storage_count"] += 1
return json.dumps({"errors": ["Server down"]}), 500
file = request.args.get("file")
_id = request.args.get("id", "")
run = request.args.get("run", "unknown")
# Grab the run from the url for azure uploads
if extra and run == "unknown":
run = extra.split("/")[-2]
ctx["storage"] = ctx.get("storage", {})
ctx["storage"][run] = ctx["storage"].get(run, [])
ctx["storage"][run].append(request.args.get("file"))
size = ctx["files"].get(request.args.get("file"))
if request.method == "GET" and size:
return os.urandom(size), 200
# make sure to read the data
request.get_data(as_text=True)
# We need to bomb out before storing the file as uploaded for tests
inject = InjectRequestsParse(ctx).find(request=request)
if inject:
if inject.response:
response = inject.response
if inject.http_status:
# print("INJECT", inject, inject.http_status)
raise HttpException(
{"code": 500, "message": "some error"},
status_code=inject.http_status,
)
run_ctx = ctx["runs"].setdefault(run, default_ctx())
for c in ctx, run_ctx:
if extra:
file = extra.split("/")[-1]
else:
file = request.args.get("file", "UNKNOWN_STORAGE_FILE_PUT")
c["file_names"].append(file)
if request.method == "PUT":
for c in ctx, run_ctx:
c["file_bytes"].setdefault(file, 0)
c["file_bytes"][file] += request.content_length
if ART_EMU:
res = ART_EMU.storage(request=request, arti_id=ctx["latest_arti_id"])
return res
if file == "wandb_manifest.json":
if _id in ctx.get("artifacts_by_id"):
art = ctx["artifacts_by_id"][_id]
if "-validation_predictions" in art["artifactCollectionNames"][0]:
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"validation_predictions.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
}
},
}
if "wb_validation_data" in art["artifactCollectionNames"][0]:
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"validation_data.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
"media/tables/5aac4cea496fd061e813.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
},
}
if request.args.get("name") == "my-test_reference_download:latest":
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"StarWars3.wav": {
"digest": "a90eb05f7aef652b3bdd957c67b7213a",
"size": 81299,
"ref": "https://wandb-artifacts-refs-public-test.s3-us-west-2.amazonaws.com/StarWars3.wav",
},
"file1.txt": {
"digest": "0000====================",
"size": 81299,
},
},
}
elif (
_id == "bb8043da7d78ff168a695cff097897d2"
or _id == "ad4d74ac0e4167c6cf4aaad9d59b9b44"
):
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"t1.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
}
},
}
elif _id == "27c102831476c6ff7ce53c266c937612":
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"logged_table.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
}
},
}
elif _id == "b9a598178557aed1d89bd93ec0db989b":
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"logged_table_2.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
}
},
}
elif _id == "e6954815d2beb5841b3dabf7cf455c30":
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"logged_table.partitioned-table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
}
},
}
elif _id == "0eec13efd400546f58a4530de62ed07a":
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"jt.joined-table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
}
},
}
elif _id in [
"2d9a7e0aa8407f0730e19e5bc55c3a45",
"c541de19b18331a4a33b282fc9d42510",
"6f3d6ed5417d2955afbc73bff0ed1609",
"7d797e62834a7d72538529e91ed958e2",
"03d3e221fd4da6c5fccb1fbd75fe475e",
"464aa7e0d7c3f8230e3fe5f10464a2e6",
"8ef51aeabcfcd89b719822de64f6a8bf",
]:
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"validation_data.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
"media/tables/e14239fe.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
},
}
elif (
len(ctx.get("graphql", [])) >= 3
and ctx["graphql"][2].get("variables", {}).get("name", "") == "dummy:v0"
) or request.args.get("name") == "dummy:v0":
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"dataset.partitioned-table.json": {
"digest": "0aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
"parts/1.table.json": {
"digest": "1aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
"t.table.json": {
"digest": "2aaaaaaaaaaaaaaaaaaaaa==",
"size": 123,
},
},
}
elif _id == "e04169452d5584146eb7ebb405647cc8":
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"results_df.table.json": {
"digest": "0aaaaaaaaaaaaaaaaaaaaa==",
"size": 363,
},
},
}
else:
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"digits.h5": {
"digest": "TeSJ4xxXg0ohuL5xEdq2Ew==",
"size": 81299,
},
},
}
elif file == "wandb-metadata.json":
if ctx["run_script_type"] == "notebook":
code_path = "one_cell.ipynb"
elif ctx["run_script_type"] == "shell":
code_path = "test.sh"
elif ctx["run_script_type"] == "python":
code_path = "train.py"
elif ctx["run_script_type"] == "unknown":
code_path = "unknown.unk"
result = {
"docker": "test/docker",
"program": "train.py",
"codePath": code_path,
"args": ["--test", "foo"],
"git": ctx.get("git", {}),
}
if ctx["run_cuda_version"]:
result["cuda"] = ctx["run_cuda_version"]
return result
elif file == "requirements.txt":
return "numpy==1.19.5\n"
elif file == "diff.patch":
# TODO: make sure the patch is valid for windows as well,
# and un skip the test in test_cli.py
return r"""
diff --git a/patch.txt b/patch.txt
index 30d74d2..9a2c773 100644
--- a/patch.txt
+++ b/patch.txt
@@ -1 +1 @@
-test
\ No newline at end of file
+testing
\ No newline at end of file
"""
# emulate azure when we're receiving requests from them
if extra is not None:
return "", 201
return "", 200 | python | tests/pytest_tests/unit_tests_old/utils/mock_server.py | 1,791 | 2,058 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
499 | artifact_file | def artifact_file(entity, digest):
if ART_EMU:
return ART_EMU.file(entity=entity, digest=digest)
if entity == "entity" or entity == "mock_server_entity":
if (
digest == "d1a69a69a69a69a69a69a69a69a69a69"
): # "dataset.partitioned-table.json"
return (
json.dumps({"_type": "partitioned-table", "parts_path": "parts"}),
200,
)
elif digest == "d5a69a69a69a69a69a69a69a69a69a69": # "parts/1.table.json"
return (
json.dumps(
{
"_type": "table",
"column_types": {
"params": {
"type_map": {
"A": {
"params": {
"allowed_types": [
{"wb_type": "none"},
{"wb_type": "number"},
]
},
"wb_type": "union",
},
"B": {
"params": {
"allowed_types": [
{"wb_type": "none"},
{"wb_type": "number"},
]
},
"wb_type": "union",
},
"C": {
"params": {
"allowed_types": [
{"wb_type": "none"},
{"wb_type": "number"},
]
},
"wb_type": "union",
},
}
},
"wb_type": "dictionary",
},
"columns": ["A", "B", "C"],
"data": [[0, 0, 1]],
"ncols": 3,
"nrows": 1,
}
),
200,
)
elif digest == "d9a69a69a69a69a69a69a69a69a69a69": # "t.table.json"
return (
json.dumps(
{
"_type": "table",
"column_types": {
"params": {"type_map": {}},
"wb_type": "dictionary",
},
"columns": [],
"data": [],
"ncols": 0,
"nrows": 0,
}
),
200,
)
if digest == "dda69a69a69a69a69a69a69a69a69a69":
return (
json.dumps({"_type": "table-file", "columns": [], "data": []}),
200,
)
return "ARTIFACT %s" % digest, 200 | python | tests/pytest_tests/unit_tests_old/utils/mock_server.py | 2,061 | 2,143 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
500 | file_stream | def file_stream(entity, project, run):
body = request.get_json()
app.logger.info("file_stream post body: %s", body)
ctx = get_ctx()
run_ctx = get_run_ctx(run)
for c in ctx, run_ctx:
c["file_stream"] = c.get("file_stream", [])
c["file_stream"].append(body)
response = json.dumps({"exitcode": None, "limits": {}})
inject = InjectRequestsParse(ctx).find(request=request)
if inject:
if inject.response:
response = inject.response
if inject.http_status:
# print("INJECT", inject, inject.http_status)
raise HttpException("some error", status_code=inject.http_status)
return response | python | tests/pytest_tests/unit_tests_old/utils/mock_server.py | 2,147 | 2,165 | {
"name": "Git-abouvier/wandb",
"url": "https://github.com/Git-abouvier/wandb.git",
"license": "MIT",
"stars": 0,
"forks": 0
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.