turtle170 committed on
Commit
4166b44
·
verified ·
1 Parent(s): 70cb62d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -14,7 +14,7 @@ from gradio_client import Client
14
  import hashlib
15
 
16
  # Backend processor connection
17
- BACKEND_URL = "https://turtle170-zeroengine-backend.hf.space"
18
  BACKEND_FALLBACK_URL = "turtle170/ZeroEngine-Backend" # Fallback to repo ID
19
  CONNECTION_TIMEOUT = 60 # seconds
20
  MAX_RETRIES = 3
@@ -65,7 +65,7 @@ class BackendProcessor:
65
  self.client = Client(url, **client_kwargs)
66
 
67
  # Test connection with a simple API call
68
- test_result = self.client.predict(api_name="/predict")
69
  response_time = time.time() - start_time
70
 
71
  if test_result:
@@ -116,7 +116,7 @@ class BackendProcessor:
116
  # Test tokenization
117
  try:
118
  start_time = time.time()
119
- result = self.client.predict(api_name="/predict")
120
  response_time = time.time() - start_time
121
  endpoints_status["tokenize"] = {"status": "ok", "response_time": response_time}
122
  except Exception as e:
@@ -125,7 +125,7 @@ class BackendProcessor:
125
  # Test cache stats
126
  try:
127
  start_time = time.time()
128
- result = self.client.predict(api_name="/predict_6")
129
  response_time = time.time() - start_time
130
  data = json.loads(result)
131
  if data.get("success"):
@@ -178,7 +178,7 @@ class BackendProcessor:
178
  return None
179
 
180
  start_time = time.time()
181
- result = self.client.predict(*args, api_name=api_name, **kwargs)
182
  response_time = time.time() - start_time
183
 
184
  self.response_times.append(response_time)
 
14
  import hashlib
15
 
16
  # Backend processor connection
17
+ BACKEND_URL = "https://turtle170-ZeroEngine-Backend.hf.space"
18
  BACKEND_FALLBACK_URL = "turtle170/ZeroEngine-Backend" # Fallback to repo ID
19
  CONNECTION_TIMEOUT = 60 # seconds
20
  MAX_RETRIES = 3
 
65
  self.client = Client(url, **client_kwargs)
66
 
67
  # Test connection with a simple API call
68
+ test_result = self.client.predict(api_name="//predict")
69
  response_time = time.time() - start_time
70
 
71
  if test_result:
 
116
  # Test tokenization
117
  try:
118
  start_time = time.time()
119
+ result = self.client.predict(api_name="//predict")
120
  response_time = time.time() - start_time
121
  endpoints_status["tokenize"] = {"status": "ok", "response_time": response_time}
122
  except Exception as e:
 
125
  # Test cache stats
126
  try:
127
  start_time = time.time()
128
+ result = self.client.predict(api_name="//predict_6")
129
  response_time = time.time() - start_time
130
  data = json.loads(result)
131
  if data.get("success"):
 
178
  return None
179
 
180
  start_time = time.time()
181
+ result = self.client.predict(*args, api_name=api_name.replace("/", "//"), **kwargs)
182
  response_time = time.time() - start_time
183
 
184
  self.response_times.append(response_time)