Spaces:
Sleeping
Sleeping
Hotfix: avoid gr.JSON schema crash in Gradio
Browse files- __pycache__/app.cpython-311.pyc +0 -0
- app.py +4 -4
__pycache__/app.cpython-311.pyc
CHANGED
|
Binary files a/__pycache__/app.cpython-311.pyc and b/__pycache__/app.cpython-311.pyc differ
|
|
|
app.py
CHANGED
|
@@ -146,7 +146,7 @@ def load_selected_model(checkpoint_label):
|
|
| 146 |
}
|
| 147 |
status = f"Loaded `{experiment}` on `{device}` (`{cfg['model_type']}`)"
|
| 148 |
suggested_out = os.path.join("analysis", "outputs_ui", experiment)
|
| 149 |
-
return bundle, status, model_info, cfg["inference"]["num_steps"], suggested_out
|
| 150 |
|
| 151 |
|
| 152 |
def apply_preset(preset_name):
|
|
@@ -246,7 +246,7 @@ def generate_from_ui(
|
|
| 246 |
}
|
| 247 |
log_path = save_generation(model_bundle["experiment"], record)
|
| 248 |
status = f"Inference done. Saved: `{log_path}`"
|
| 249 |
-
return output_text, status, record
|
| 250 |
|
| 251 |
|
| 252 |
def _run_analysis_cmd(task, ckpt_path, output_dir, input_text="dharmo rakṣati rakṣitaḥ", phase="analyze"):
|
|
@@ -375,7 +375,7 @@ with gr.Blocks(title="Sanskrit Diffusion Client Demo", css=CUSTOM_CSS) as demo:
|
|
| 375 |
load_btn = gr.Button("Load Selected Model", variant="primary")
|
| 376 |
|
| 377 |
load_status = gr.Markdown("Select a model and load.")
|
| 378 |
-
model_info = gr.JSON(...)  # [line truncated in this capture; commit title "avoid gr.JSON schema crash" indicates the removed component was gr.JSON]
|
| 379 |
|
| 380 |
with gr.Tabs():
|
| 381 |
with gr.Tab("1) Task Runner"):
|
|
@@ -435,7 +435,7 @@ with gr.Blocks(title="Sanskrit Diffusion Client Demo", css=CUSTOM_CSS) as demo:
|
|
| 435 |
interactive=False,
|
| 436 |
)
|
| 437 |
run_status = gr.Markdown("")
|
| 438 |
-
run_record = gr.JSON(...)  # [line truncated in this capture; commit title "avoid gr.JSON schema crash" indicates the removed component was gr.JSON]
|
| 439 |
with gr.Column(scale=1, elem_classes=["panel"]):
|
| 440 |
preset = gr.Radio(["Manual", "Literal", "Balanced", "Creative"], value="Balanced", label="Preset")
|
| 441 |
temperature = gr.Slider(0.4, 1.2, value=0.70, step=0.05, label="Temperature")
|
|
|
|
| 146 |
}
|
| 147 |
status = f"Loaded `{experiment}` on `{device}` (`{cfg['model_type']}`)"
|
| 148 |
suggested_out = os.path.join("analysis", "outputs_ui", experiment)
|
| 149 |
+
return bundle, status, json.dumps(model_info, ensure_ascii=False, indent=2), cfg["inference"]["num_steps"], suggested_out
|
| 150 |
|
| 151 |
|
| 152 |
def apply_preset(preset_name):
|
|
|
|
| 246 |
}
|
| 247 |
log_path = save_generation(model_bundle["experiment"], record)
|
| 248 |
status = f"Inference done. Saved: `{log_path}`"
|
| 249 |
+
return output_text, status, json.dumps(record, ensure_ascii=False, indent=2)
|
| 250 |
|
| 251 |
|
| 252 |
def _run_analysis_cmd(task, ckpt_path, output_dir, input_text="dharmo rakṣati rakṣitaḥ", phase="analyze"):
|
|
|
|
| 375 |
load_btn = gr.Button("Load Selected Model", variant="primary")
|
| 376 |
|
| 377 |
load_status = gr.Markdown("Select a model and load.")
|
| 378 |
+
model_info = gr.Textbox(label="Loaded Model Details (JSON)", lines=12, interactive=False)
|
| 379 |
|
| 380 |
with gr.Tabs():
|
| 381 |
with gr.Tab("1) Task Runner"):
|
|
|
|
| 435 |
interactive=False,
|
| 436 |
)
|
| 437 |
run_status = gr.Markdown("")
|
| 438 |
+
run_record = gr.Textbox(label="Inference Metadata (JSON)", lines=12, interactive=False)
|
| 439 |
with gr.Column(scale=1, elem_classes=["panel"]):
|
| 440 |
preset = gr.Radio(["Manual", "Literal", "Balanced", "Creative"], value="Balanced", label="Preset")
|
| 441 |
temperature = gr.Slider(0.4, 1.2, value=0.70, step=0.05, label="Temperature")
|