import os
from huggingface_hub import InferenceClient
from dotenv import load_dotenv
# Import Logger
from .logging_utils import get_logger
# Module-level logger; tags all records emitted from this module.
logger = get_logger("llm_utils")
# Create HuggingFace Hub Link
# Load variables from a local .env file (if present) so the HF token
# does not have to be hard-coded or exported manually.
load_dotenv()
hf_token = os.getenv('HF_TOKEN')
if hf_token:
    logger.info("Retrieved HuggingFace Token")
else:
    # Missing token is non-fatal here; explain_results() degrades gracefully.
    logger.warning("HF_TOKEN not found. LLM features may fail.")
# Shared InferenceClient used for all chat-completion calls below.
# NOTE(review): constructed even when hf_token is None — calls will fail
# at request time, which explain_results() guards against.
client = InferenceClient(
    token=hf_token,
)
logger.info("Built Client")
def build_prompt(query: str, blog_post: str) -> str:
    """Build the LLM prompt explaining why *blog_post* was retrieved for *query*.

    Args:
        query: The user's original search query.
        blog_post: Text of the retrieved travel blog post (FAISS nearest neighbor).

    Returns:
        The filled-in prompt string, stripped of surrounding whitespace.
    """
    # Fix: "sentances" -> "sentences" in the instruction text sent to the model.
    prompt = f"""
You are an AI assistant helping explain why the returned post about a vacation spot was retrieved for the given query.
The results were generated through FAISS.
Given the following query: '{query}'
And the returned neighbor (blog post about travel): {blog_post}
Explain in clear, human terms why this vacation location is a good option for the user to consider given their request query.
Please reiterate the name of the location, being as specific as possible. The response should be a maximum of 8 sentences.
"""
    return prompt.strip()
def explain_results(query: str, post_text: str) -> str:
    """Generate a natural-language explanation for a retrieved travel post.

    Args:
        query: The user's original search query.
        post_text: Text of the retrieved blog post to explain.

    Returns:
        The model-generated explanation, or a fallback message when no
        API token is configured or the request fails.
    """
    prompt = build_prompt(query, post_text)
    logger.info("Built Prompt")
    # Guard clause: without a token the API call would fail anyway,
    # so skip the request entirely and return the fallback message.
    if not hf_token:
        logger.warning("Skipping LLM explanation: No token available")
        return "Explanation unavailable (No API Token)."
    try:
        completion = client.chat.completions.create(
            model="deepseek-ai/DeepSeek-V3.2",
            messages=[
                {
                    "role": "user",
                    "content": prompt
                }
            ],
        )
        # huggingface_hub response objects allow dict-style access on the
        # message; keep the original access pattern for compatibility.
        return completion.choices[0].message['content']
    except Exception as e:
        # logger.exception captures the full traceback for debugging;
        # lazy %-args avoid formatting when the level is disabled.
        logger.exception("LLM explanation generation failed: %s", e)
        return "Explanation unavailable due to an error."