Update app.py
app.py CHANGED
@@ -8,18 +8,17 @@ import json
 from huggingface_hub import InferenceClient
 import spaces
 
-
-
+def ai_fix_json(model_id, json_data):
+    # Initialize the InferenceClient with the chosen model
+    client = InferenceClient(model=model_id)
 
-@spaces.GPU(duration=120)
-def ai_fix_json(json_data):
     prompt = f"Fix the following JSON data and make it valid:\n\n{json_data}\n\nFixed JSON:"
 
     try:
         if torch.cuda.is_available():
             response = client.text_generation(prompt, max_new_tokens=1024)
             fixed_json = response[0]['generated_text'].split("Fixed JSON:")[-1].strip()
-            return fixed_json, "JSON fixed using AI on GPU"
+            return fixed_json, "JSON fixed using AI on GPU with model: " + model_id
         else:
             raise RuntimeError("GPU not available, falling back to CPU.")
 
@@ -28,11 +27,11 @@ def ai_fix_json(json_data):
         print(f"Falling back to CPU due to: {gpu_error}")
         response = client.text_generation(prompt, max_new_tokens=1024)
         fixed_json = response[0]['generated_text'].split("Fixed JSON:")[-1].strip()
-        return fixed_json, "JSON fixed using AI on CPU"
+        return fixed_json, "JSON fixed using AI on CPU with model: " + model_id
 
-def process_file(uploaded_file):
+def process_file(model_id, uploaded_file):
     json_data = uploaded_file  # This is already the content of the file as a string
-    cleaned_json, message = ai_fix_json(json_data)
+    cleaned_json, message = ai_fix_json(model_id, json_data)
 
     try:
         parsed_data = json.loads(cleaned_json)
@@ -43,10 +42,11 @@ def process_file(uploaded_file):
 
 iface = gr.Interface(
     fn=process_file,
-    inputs=gr.File(label="Upload your JSON file"),
+    inputs=[gr.Textbox(label="Model ID (e.g., gpt-3.5-turbo)", default="gpt-3.5-turbo"),
+            gr.File(label="Upload your JSON file")],
     outputs=[gr.JSON(label="Fixed JSON"), "text", gr.File(label="Download cleaned JSON file")],
-    title="AI-Powered JSON Cleaner with GPU/CPU Fallback",
-    description="Upload a JSON file to automatically fix, remove duplicates, and download the cleaned version using AI with GPU/CPU fallback."
+    title="AI-Powered JSON Cleaner with Model Selection",
+    description="Upload a JSON file to automatically fix, remove duplicates, and download the cleaned version using AI with GPU/CPU fallback. Select any model from the Hugging Face Hub."
 )
 
 if __name__ == "__main__":
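
For context on the refactor: the commit removes the @spaces.GPU(duration=120) decorator and instead constructs an InferenceClient inside ai_fix_json for whichever model the user selects. Below is a minimal standalone sketch of that per-call pattern; the model ID and sample input are illustrative assumptions, not values from the Space. One caveat worth flagging: InferenceClient.text_generation returns the generated text as a plain str by default, so the sketch splits off the "Fixed JSON:" marker directly rather than indexing response[0]['generated_text'] as the Space code does.

# Minimal sketch of the per-call client pattern this commit introduces.
# The model ID and sample payload below are illustrative assumptions,
# not values taken from the Space.
import json

from huggingface_hub import InferenceClient


def fix_json_once(model_id: str, json_data: str) -> str:
    """Ask a hosted model to repair a JSON string and return the fixed text."""
    client = InferenceClient(model=model_id)  # fresh client per call, as in the commit
    prompt = f"Fix the following JSON data and make it valid:\n\n{json_data}\n\nFixed JSON:"
    # text_generation returns the generated text as a str by default
    generated = client.text_generation(prompt, max_new_tokens=1024)
    return generated.split("Fixed JSON:")[-1].strip()


if __name__ == "__main__":
    broken = '{"a": 1, "b": 2,}'  # trailing comma makes this invalid JSON
    fixed = fix_json_once("mistralai/Mistral-7B-Instruct-v0.3", broken)
    print(json.loads(fixed))  # raises if the model's output still isn't valid JSON

One more version-dependent detail: recent Gradio releases spell the initial value of gr.Textbox as value= rather than default=, so the new inputs line may need adjusting depending on the Gradio version the Space pins.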