Oranblock committed on
Commit
1574b63
·
verified ·
1 Parent(s): 0a2338b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -9
app.py CHANGED
@@ -13,13 +13,15 @@ os.environ['PATH'] = os.environ['PATH'] + ':/usr/local/cuda/bin'
13
  @spaces.GPU(duration=120) # Use GPU for this function, if available
14
  def ai_fix_json(model_id, json_data):
15
  client = InferenceClient(model=model_id)
16
-
17
  prompt = f"Fix the following JSON data and make it valid:\n\n{json_data}\n\nFixed JSON:"
18
 
19
  try:
20
  # Check if GPU is available
21
  if torch.cuda.is_available():
22
  response = client.text_generation(prompt, max_new_tokens=1024)
 
 
 
23
  fixed_json = response[0]['generated_text'].split("Fixed JSON:")[-1].strip()
24
  return fixed_json, f"JSON fixed using AI on GPU with model: {model_id}"
25
  else:
@@ -29,21 +31,27 @@ def ai_fix_json(model_id, json_data):
29
  print(f"Falling back to CPU due to: {gpu_error}")
30
  try:
31
  response = client.text_generation(prompt, max_new_tokens=1024)
 
 
 
32
  fixed_json = response[0]['generated_text'].split("Fixed JSON:")[-1].strip()
33
  return fixed_json, f"JSON fixed using AI on CPU with model: {model_id}"
34
  except Exception as e:
35
- return None, f"Failed to process with model {model_id}. Error: {str(e)}", None
36
 
37
  def process_file(model_id, uploaded_file):
38
- json_data = uploaded_file # This is already the content of the file as a string
39
  cleaned_json, message = ai_fix_json(model_id, json_data)
40
 
41
- try:
42
- parsed_data = json.loads(cleaned_json)
43
- pretty_json = json.dumps(parsed_data, indent=4)
44
- return pretty_json, message, pretty_json
45
- except json.JSONDecodeError as e:
46
- return None, f"Failed to fix JSON: {str(e)}", None
 
 
 
47
 
48
  # List of available models
49
  model_options = [
 
13
  @spaces.GPU(duration=120) # Use GPU for this function, if available
14
  def ai_fix_json(model_id, json_data):
15
  client = InferenceClient(model=model_id)
 
16
  prompt = f"Fix the following JSON data and make it valid:\n\n{json_data}\n\nFixed JSON:"
17
 
18
  try:
19
  # Check if GPU is available
20
  if torch.cuda.is_available():
21
  response = client.text_generation(prompt, max_new_tokens=1024)
22
+ # Handle cases where the response might be malformed or too short
23
+ if not response or 'generated_text' not in response[0]:
24
+ return None, f"Failed to process JSON with model {model_id}. Response was invalid."
25
  fixed_json = response[0]['generated_text'].split("Fixed JSON:")[-1].strip()
26
  return fixed_json, f"JSON fixed using AI on GPU with model: {model_id}"
27
  else:
 
31
  print(f"Falling back to CPU due to: {gpu_error}")
32
  try:
33
  response = client.text_generation(prompt, max_new_tokens=1024)
34
+ # Handle cases where the response might be malformed or too short
35
+ if not response or 'generated_text' not in response[0]:
36
+ return None, f"Failed to process JSON with model {model_id}. Response was invalid."
37
  fixed_json = response[0]['generated_text'].split("Fixed JSON:")[-1].strip()
38
  return fixed_json, f"JSON fixed using AI on CPU with model: {model_id}"
39
  except Exception as e:
40
+ return None, f"Failed to process with model {model_id}. Error: {str(e)}"
41
 
42
def process_file(model_id, uploaded_file):
    """Read an uploaded JSON file, run the AI fixer, and pretty-print the result.

    Args:
        model_id: Identifier of the inference model passed through to ai_fix_json.
        uploaded_file: File-like upload object; its bytes are decoded as UTF-8.

    Returns:
        A (pretty_json, status_message, pretty_json) 3-tuple. Both JSON slots
        are None when the AI step fails or its output is not parseable JSON.
    """
    # Decode the raw upload to text before handing it to the model.
    raw_text = uploaded_file.read().decode("utf-8")
    fixed_text, status = ai_fix_json(model_id, raw_text)

    if not fixed_text:
        # The AI step already produced an explanatory status message.
        return None, status, None

    try:
        # Round-trip through the parser to both validate and pretty-print.
        formatted = json.dumps(json.loads(fixed_text), indent=4)
    except json.JSONDecodeError as e:
        return None, f"Failed to fix JSON: {str(e)}", None
    return formatted, status, formatted
55
 
56
  # List of available models
57
  model_options = [