Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -6,19 +6,20 @@ import os
|
|
DEFAULT_MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.1"

# Hugging Face API token: prefer the Space-specific secret API_TOKEN_2 (kept
# first for backward compatibility), then fall back to the standard names —
# the original comment promised to "try both common environment variable
# names" but only API_TOKEN_2 was ever read.
HF_TOKEN = os.getenv("API_TOKEN_2") or os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Lazily initialized, module-level InferenceClient cache
# (populated by get_inference_client).
client = None
| 12 |
|
| 13 |
def get_inference_client(model_name):
    """Return the module-level InferenceClient for *model_name*.

    Reuses the cached client when it already targets the requested model,
    instead of rebuilding it on every call. Returns the client, or None if
    initialization fails (the failure is logged rather than raised so one
    bad model name cannot crash the app).
    """
    global client
    # Rebuild only when no client exists yet or the cached one targets a
    # different model.
    if client is None or getattr(client, "model", None) != model_name:
        try:
            # os.getenv already yields None when unset; the ternary keeps
            # the "no token" case explicit.
            client = InferenceClient(model=model_name, token=HF_TOKEN if HF_TOKEN else None)
            print(f"InferenceClient initialized for {model_name}. Token {'provided' if HF_TOKEN else 'not provided'}.")
        except Exception as e:  # boundary: log and signal failure to the caller
            print(f"Failed to initialize InferenceClient for {model_name}: {e}")
            return None
    return client
|
| 23 |
|
| 24 |
def evaluate_understanding(prompt, response):
|
|
@@ -67,7 +68,6 @@ def query_model_and_evaluate(user_prompt, model_name_to_use):
|
|
| 67 |
"do_sample": True,
|
| 68 |
"return_full_text": False
|
| 69 |
}
|
| 70 |
-
# Call the model
|
| 71 |
model_response_text = current_client.text_generation(formatted_prompt, **params)
|
| 72 |
if not model_response_text:
|
| 73 |
model_response_text = ""
|
|
|
|
DEFAULT_MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.1"

# Hugging Face API token: prefer the Space-specific secret API_TOKEN_2 (kept
# first for backward compatibility), then fall back to the standard names —
# the original comment promised to "try both common environment variable
# names" but only API_TOKEN_2 was ever read.
HF_TOKEN = os.getenv("API_TOKEN_2") or os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACEHUB_API_TOKEN")
print(f"HF_TOKEN is {'set' if HF_TOKEN else 'NOT set'}.")

# Lazily initialized, module-level InferenceClient cache
# (populated by get_inference_client).
client = None
|
| 13 |
|
| 14 |
def get_inference_client(model_name):
    """Return the cached module-level InferenceClient for *model_name*.

    A new client is built only when none exists yet or the cached one
    targets a different model. Returns None when initialization fails;
    in that case the stale cache is cleared so a later call cannot
    silently reuse a client bound to the wrong model.
    """
    global client
    if client is None or getattr(client, "model", None) != model_name:
        try:
            client = InferenceClient(model=model_name, token=HF_TOKEN if HF_TOKEN else None)
            print(f"InferenceClient initialized for {model_name}. Token {'provided' if HF_TOKEN else 'not provided'}.")
        except Exception as e:
            # Fix: the original left the previous model's client cached on
            # failure, so subsequent same-model calls could return a client
            # for the wrong backend. Drop it before signalling failure.
            client = None
            print(f"Failed to initialize InferenceClient for {model_name}: {e}")
            return None
    return client
|
| 24 |
|
| 25 |
def evaluate_understanding(prompt, response):
|
|
|
|
| 68 |
"do_sample": True,
|
| 69 |
"return_full_text": False
|
| 70 |
}
|
|
|
|
| 71 |
model_response_text = current_client.text_generation(formatted_prompt, **params)
|
| 72 |
if not model_response_text:
|
| 73 |
model_response_text = ""
|