Oranblock committed
Commit fd9a508 · verified · 1 Parent(s): aa85114

Update app.py

Files changed (1)
  1. app.py +19 -5
app.py CHANGED
@@ -1,13 +1,20 @@
 import os
-os.environ['CUDA_HOME'] = '/usr/local/cuda'
-os.environ['PATH'] = os.environ['PATH'] + ':/usr/local/cuda/bin'
-
 import gradio as gr
 import torch
 import json
 from huggingface_hub import InferenceClient
 import spaces
 
+# Ensure CUDA is configured correctly
+os.environ['CUDA_HOME'] = '/usr/local/cuda'
+os.environ['PATH'] = os.environ['PATH'] + ':/usr/local/cuda/bin'
+
+def initialize_zerogpu():
+    # Example function to initialize ZeroGPU, replace with actual initialization if needed
+    pass
+
+# Function to perform JSON correction using a chosen model
+@spaces.GPU(duration=120)  # Use GPU for this function, if available
 def ai_fix_json(model_id, json_data):
     # Initialize the InferenceClient with the chosen model
     client = InferenceClient(model=model_id)
@@ -15,10 +22,11 @@ def ai_fix_json(model_id, json_data):
     prompt = f"Fix the following JSON data and make it valid:\n\n{json_data}\n\nFixed JSON:"
 
     try:
+        # Check if GPU is available
         if torch.cuda.is_available():
             response = client.text_generation(prompt, max_new_tokens=1024)
             fixed_json = response[0]['generated_text'].split("Fixed JSON:")[-1].strip()
-            return fixed_json, "JSON fixed using AI on GPU with model: " + model_id
+            return fixed_json, f"JSON fixed using AI on GPU with model: {model_id}"
         else:
             raise RuntimeError("GPU not available, falling back to CPU.")
 
@@ -27,7 +35,7 @@ def ai_fix_json(model_id, json_data):
         print(f"Falling back to CPU due to: {gpu_error}")
         response = client.text_generation(prompt, max_new_tokens=1024)
         fixed_json = response[0]['generated_text'].split("Fixed JSON:")[-1].strip()
-        return fixed_json, "JSON fixed using AI on CPU with model: " + model_id
+        return fixed_json, f"JSON fixed using AI on CPU with model: {model_id}"
 
 def process_file(model_id, uploaded_file):
     json_data = uploaded_file  # This is already the content of the file as a string
@@ -52,4 +60,10 @@ iface = gr.Interface(
 )
 
 if __name__ == "__main__":
+    try:
+        initialize_zerogpu()
+        print("ZeroGPU initialized.")
+    except Exception as e:
+        print(f"ZeroGPU initialization failed: {e}. Falling back to CPU.")
+
     iface.launch()
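A note on the new @spaces.GPU decorator: on a ZeroGPU Space, a GPU is attached only while a decorated function runs, so no separate setup call is needed (the initialize_zerogpu() added above is an empty placeholder). Below is a minimal sketch of the usual ZeroGPU pattern with a locally loaded model; the model choice and function name are illustrative assumptions, not part of this commit.

# Minimal ZeroGPU sketch, assuming a Space on ZeroGPU hardware with the
# `spaces` package preinstalled; the model and names are illustrative.
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
model.to("cuda")  # on ZeroGPU this move is deferred until a decorated call runs

@spaces.GPU(duration=120)  # hold the GPU for up to ~120 s per call
def generate(prompt: str) -> str:
    # CUDA is available here for the duration of the decorated call
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output_ids = model.generate(**inputs, max_new_tokens=128)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)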
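Worth flagging for a follow-up commit: the parsing kept on both branches, response[0]['generated_text'], matches the transformers pipeline output shape, but huggingface_hub's InferenceClient.text_generation returns a plain string by default, so indexing [0] takes the first character of the reply. Note also that torch.cuda.is_available() reflects the local machine, while InferenceClient runs inference on a remote endpoint, so both branches end up issuing the same remote call. A sketch of the helper with the response handled as a string; the simplification is mine, not part of the commit:

from huggingface_hub import InferenceClient

def ai_fix_json(model_id: str, json_data: str):
    # InferenceClient sends the request to a remote inference endpoint,
    # so local GPU availability does not change where this call executes.
    client = InferenceClient(model=model_id)
    prompt = f"Fix the following JSON data and make it valid:\n\n{json_data}\n\nFixed JSON:"
    # text_generation returns a plain str by default (details=False)
    response = client.text_generation(prompt, max_new_tokens=1024)
    fixed_json = response.split("Fixed JSON:")[-1].strip()
    return fixed_json, f"JSON fixed remotely with model: {model_id}"

json.loads(fixed_json) could then be used to check that the result actually parses before returning it.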
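For context, both ai_fix_json and process_file return a (fixed_json, status) pair, so the gr.Interface block elided from this diff (old lines 34-51) must declare two output components. A hypothetical reconstruction is sketched below; every component choice is an assumption, since the actual block is not shown.

import gradio as gr

# Hypothetical wiring for the elided interface definition; the labels and
# component types are assumptions. Only the two-output shape is implied
# by the visible return statements.
iface = gr.Interface(
    fn=process_file,
    inputs=[
        gr.Textbox(label="Model ID"),
        gr.File(label="JSON file to fix"),
    ],
    outputs=[
        gr.Textbox(label="Fixed JSON"),
        gr.Textbox(label="Status"),
    ],
    title="AI JSON Fixer",
)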