Oranblock committed on
Commit
4a5d194
·
verified ·
1 Parent(s): f5d6e87

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -13
app.py CHANGED
@@ -1,30 +1,34 @@
 
 
 
 
1
  import gradio as gr
2
- import json
3
  import torch
 
4
  from huggingface_hub import InferenceClient
5
- from huggingface_hub import spaces
6
 
7
# Initialize the InferenceClient for GPU and CPU.
# NOTE(review): both clients are constructed identically and talk to the same
# remote model, so the GPU/CPU split is in name only — the remote endpoint
# decides what hardware runs inference, not these objects. Confirm intent.
# NOTE(review): "gpt-3.5-turbo" is an OpenAI model name, not a Hugging Face
# Hub model id — verify this endpoint actually resolves.
client_gpu = InferenceClient(model="gpt-3.5-turbo")
client_cpu = InferenceClient(model="gpt-3.5-turbo")
10
 
11
# Function to attempt to fix JSON using the appropriate resource
def ai_fix_json(json_data):
    """Ask the remote text-generation endpoint to repair malformed JSON.

    Parameters
    ----------
    json_data : str
        The (possibly malformed) JSON text to repair.

    Returns
    -------
    tuple[str, str]
        (repaired JSON text, human-readable status message).
    """
    prompt = f"Fix the following JSON data and make it valid:\n\n{json_data}\n\nFixed JSON:"

    try:
        # Check if the space is running on GPU.
        # NOTE(review): client_gpu and client_cpu target the same remote
        # model, so this branch does not change where inference runs.
        if torch.cuda.is_available():
            # BUG FIX: InferenceClient.text_generation returns a plain str
            # by default (not a list of {'generated_text': ...} dicts), so
            # the old `response[0]['generated_text']` raised TypeError.
            response = client_gpu.text_generation(prompt, max_new_tokens=1024)
            fixed = response.split("Fixed JSON:")[-1].strip()
            return fixed, "JSON fixed using AI on GPU."
        else:
            raise RuntimeError("GPU not available, falling back to CPU.")
    except Exception as gpu_error:
        # Fall back to CPU if GPU processing fails
        print(f"Falling back to CPU due to: {gpu_error}")
        response = client_cpu.text_generation(prompt, max_new_tokens=1024)
        fixed = response.split("Fixed JSON:")[-1].strip()
        return fixed, "JSON fixed using AI on CPU."
 
28
 
29
  def process_file(uploaded_file):
30
  json_data = uploaded_file # This is already the content of the file as a string
@@ -41,8 +45,9 @@ iface = gr.Interface(
41
  fn=process_file,
42
  inputs=gr.File(label="Upload your JSON file"),
43
  outputs=[gr.JSON(label="Fixed JSON"), "text", gr.File(label="Download cleaned JSON file")],
44
- title="AI-Powered JSON Cleaner with Dynamic Resource Allocation",
45
  description="Upload a JSON file to automatically fix, remove duplicates, and download the cleaned version using AI with GPU/CPU fallback."
46
  )
47
 
48
- iface.launch()
 
 
1
+ import os
2
+ os.environ['CUDA_HOME'] = '/usr/local/cuda'
3
+ os.environ['PATH'] = os.environ['PATH'] + ':/usr/local/cuda/bin'
4
+
5
  import gradio as gr
 
6
  import torch
7
+ import json
8
  from huggingface_hub import InferenceClient
9
+ import spaces
10
 
11
# Initialize the InferenceClient shared by both the GPU attempt and the
# CPU fallback path in ai_fix_json.
# NOTE(review): "gpt-3.5-turbo" is an OpenAI model name, not a Hugging Face
# Hub model id — verify this endpoint actually resolves.
client = InferenceClient(model="gpt-3.5-turbo")
 
13
 
14
@spaces.GPU(duration=120)  # request ZeroGPU hardware for up to 120s per call
def ai_fix_json(json_data):
    """Ask the remote text-generation endpoint to repair malformed JSON.

    Parameters
    ----------
    json_data : str
        The (possibly malformed) JSON text to repair.

    Returns
    -------
    tuple[str, str]
        (repaired JSON text, human-readable status message).
    """
    prompt = f"Fix the following JSON data and make it valid:\n\n{json_data}\n\nFixed JSON:"

    try:
        # NOTE(review): the same `client` serves both branches, so this check
        # only changes the status message, not where inference runs.
        if torch.cuda.is_available():
            # BUG FIX: InferenceClient.text_generation returns a plain str
            # by default (not a list of {'generated_text': ...} dicts), so
            # the old `response[0]['generated_text']` raised TypeError.
            response = client.text_generation(prompt, max_new_tokens=1024)
            fixed_json = response.split("Fixed JSON:")[-1].strip()
            return fixed_json, "JSON fixed using AI on GPU."
        else:
            raise RuntimeError("GPU not available, falling back to CPU.")
    except Exception as gpu_error:
        # Fall back to CPU if GPU processing fails
        print(f"Falling back to CPU due to: {gpu_error}")
        response = client.text_generation(prompt, max_new_tokens=1024)
        fixed_json = response.split("Fixed JSON:")[-1].strip()
        return fixed_json, "JSON fixed using AI on CPU."
32
 
33
  def process_file(uploaded_file):
34
  json_data = uploaded_file # This is already the content of the file as a string
 
45
  fn=process_file,
46
  inputs=gr.File(label="Upload your JSON file"),
47
  outputs=[gr.JSON(label="Fixed JSON"), "text", gr.File(label="Download cleaned JSON file")],
48
+ title="AI-Powered JSON Cleaner with GPU Support",
49
  description="Upload a JSON file to automatically fix, remove duplicates, and download the cleaned version using AI with GPU/CPU fallback."
50
  )
51
 
52
# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    iface.launch()