mechark committed on
Commit
ead947b
·
1 Parent(s): 54d4035

refac: clearer code, fix output

Browse files
Files changed (3) hide show
  1. src/core/config.py +0 -17
  2. src/rag/llm.py +0 -19
  3. src/rag/pipeline.py +3 -1
src/core/config.py CHANGED
@@ -2,17 +2,6 @@ from pydantic_settings import BaseSettings
2
  import os
3
  from pathlib import Path
4
 
5
-
6
- def read_secret_from_file(secret_name: str) -> str:
7
- """Read a secret from HuggingFace Spaces secrets directory."""
8
- secrets_dir = Path("/run/secrets")
9
- secret_file = secrets_dir / secret_name
10
-
11
- if secret_file.exists():
12
- return secret_file.read_text().strip()
13
- return ""
14
-
15
-
16
  class Settings(BaseSettings):
17
  MODEL_NAME: str = "mistralai/Mistral-7B-Instruct-v0.2"
18
 
@@ -30,10 +19,4 @@ class Settings(BaseSettings):
30
  class Config:
31
  env_file = ".env"
32
 
33
-
34
- # Try to read HF_TOKEN from HuggingFace Spaces secrets first, then fall back to env
35
- hf_token_from_file = read_secret_from_file("HF_TOKEN")
36
- if hf_token_from_file:
37
- os.environ["HF_TOKEN"] = hf_token_from_file
38
-
39
  settings = Settings()
 
2
  import os
3
  from pathlib import Path
4
 
 
 
 
 
 
 
 
 
 
 
 
5
  class Settings(BaseSettings):
6
  MODEL_NAME: str = "mistralai/Mistral-7B-Instruct-v0.2"
7
 
 
19
  class Config:
20
  env_file = ".env"
21
 
 
 
 
 
 
 
22
  settings = Settings()
src/rag/llm.py CHANGED
@@ -1,22 +1,3 @@
1
- import os
2
- import logging
3
-
4
- # Check what the OS actually sees
5
- raw_token = os.getenv("HF_TOKEN")
6
-
7
- print("--- DEBUG START ---")
8
- if raw_token:
9
- print(f"OS found HF_TOKEN! Length: {len(raw_token)}")
10
- print(f"First 3 chars: {raw_token[:3]}...") # Safety check
11
- else:
12
- print("OS did NOT find HF_TOKEN. It is None/Empty.")
13
- # Print all keys available (masked) to see if there is a typo
14
- print("Available Env Vars:", [k for k in os.environ.keys() if 'TOKEN' in k])
15
- print("--- DEBUG END ---")
16
-
17
- from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
18
- from langchain_core.prompts import ChatPromptTemplate
19
-
20
  from src.prompts import SYSTEM_PROMPT
21
  from src.core.config import settings
22
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  from src.prompts import SYSTEM_PROMPT
2
  from src.core.config import settings
3
 
src/rag/pipeline.py CHANGED
@@ -54,7 +54,9 @@ def answer_question(question: str) -> tuple[str, list[dict]]:
54
  chain = get_chain()
55
 
56
  response = chain.invoke({"context": context, "question": question})
57
- return response, citations
 
 
58
  except Exception as e:
59
  logging.error(f"Error occurred while answering question: {e}")
60
  error_msg = f"Sorry, an error occurred while processing your request: {str(e)}"
 
54
  chain = get_chain()
55
 
56
  response = chain.invoke({"context": context, "question": question})
57
+ # Extract just the content from the response message
58
+ answer_text = response.content if hasattr(response, 'content') else str(response)
59
+ return answer_text, citations
60
  except Exception as e:
61
  logging.error(f"Error occurred while answering question: {e}")
62
  error_msg = f"Sorry, an error occurred while processing your request: {str(e)}"