Update utils.py
utils.py CHANGED
@@ -1,4 +1,4 @@
-
+from huggingface_hub import InferenceClient
 from dotenv import load_dotenv
 import os
 import pandas as pd
@@ -46,15 +46,9 @@ Answer (with explanation):
 
 # Use Hugging Face InferenceClient directly
 client = InferenceClient(token=os.environ.get("HUGGINGFACE_API_KEY"))
-response = client(model="google/gemma-2b-it", inputs=prompt)
-
-#
-
-if isinstance(response, list) and "generated_text" in response[0]:
-    answer = response[0]["generated_text"]
-elif isinstance(response, dict) and "generated_text" in response:
-    answer = response["generated_text"]
-else:
-    answer = str(response)
+response = client.text_generation(prompt, model="google/gemma-2b-it")
+
+# Extract the generated text from the response
+answer = response[0]["generated_text"] if isinstance(response, list) and "generated_text" in response[0] else str(response)
 
 return answer
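
For context, a minimal sketch of how the updated call is typically exercised with huggingface_hub's InferenceClient. The model name, the HUGGINGFACE_API_KEY variable, and the use of python-dotenv come from the diff; the ask_model wrapper, the example prompt, and the max_new_tokens value are hypothetical illustration, not part of the committed code.

# Minimal sketch, assuming huggingface_hub and python-dotenv are installed
# and HUGGINGFACE_API_KEY is available in the environment or a .env file.
import os

from dotenv import load_dotenv
from huggingface_hub import InferenceClient

load_dotenv()

def ask_model(prompt: str) -> str:
    # text_generation() takes the prompt as its first positional argument and
    # the model as a keyword; by default it returns the generated text as a string.
    client = InferenceClient(token=os.environ.get("HUGGINGFACE_API_KEY"))
    return client.text_generation(prompt, model="google/gemma-2b-it", max_new_tokens=256)

if __name__ == "__main__":
    print(ask_model("Question: What is 2 + 2?\nAnswer (with explanation):"))

Note that text_generation() returns a plain string with the default settings, so the defensive "generated_text" extraction in the diff falls through to the str(response) branch; it is kept only to tolerate other response shapes.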