Kushagra committed
Commit 555a055 · 1 Parent(s): 6e94f25

Hugging-Face Deployment

Files changed (3)
  1. app/config/config.json +0 -1
  2. app/core/models.py +2 -2
  3. main.py +10 -3
app/config/config.json CHANGED
@@ -1,6 +1,5 @@
 {
   "MODEL_NAME": "llama-3.3-70b-versatile",
-  "GROQ_KEY": "",
   "VALID_API_KEY": "2931609bd36ec1a45cb577b3b831dc711c76ae157b3c6250c564284c93b062ff",
 
   "LLM_CONFIG": {
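Removing GROQ_KEY from config.json means the Groq secret is now expected to come from the environment instead (on a Hugging Face Space, a repository secret exposed as an environment variable). A minimal startup check could look like the sketch below; the helper name is illustrative and not part of this commit.

import os

def require_groq_key() -> str:
    # Illustrative helper (not in the commit): fail fast when the secret is absent.
    key = os.getenv("GROQ_KEY")
    if not key:
        raise RuntimeError("GROQ_KEY is not set; add it as a Space secret or export it locally.")
    return key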
app/core/models.py CHANGED
@@ -80,9 +80,9 @@ async def llm_setup(config, url):
         temperature=f"{config.get('TEMPERATURE', 0)}",
         max_tokens=f"{config.get('MAX_TOKENS', 300)}",  # Increased token limit for JSON responses
         max_retries=f"{config.get('MAX_RETRIES', 3)}",
-        api_key=f"{config.get('GROQ_KEY')}",
+        api_key=f"{os.getenv('GROQ_KEY')}",
     )
-    logging.info(f"LLM initialized with model: {config.get('MODEL_NAME')}, api_key: {config.get('GROQ_KEY')}")
+    logging.info(f"LLM initialized with model: {config.get('MODEL_NAME')}, api_key: {os.getenv('GROQ_KEY')}")
 
     # Choose template based on whether we need structured JSON output
     prompt_template = prompt_template_description()
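With the key now read via os.getenv, the secret no longer needs to live in config.json, although the updated log line still prints the raw key. A minimal sketch of the same initialization with the key masked in logs; the class being constructed is not visible in this hunk, so ChatGroq from langchain_groq is an assumption based on the Groq key and llama model name, and the numeric settings are passed as numbers rather than f-strings:

import os
import logging
from langchain_groq import ChatGroq  # assumed client; not visible in this hunk

def build_llm(config: dict) -> ChatGroq:
    api_key = os.getenv("GROQ_KEY")
    llm = ChatGroq(
        model=config.get("MODEL_NAME", "llama-3.3-70b-versatile"),
        temperature=config.get("TEMPERATURE", 0),    # plain numbers instead of f-strings
        max_tokens=config.get("MAX_TOKENS", 300),
        max_retries=config.get("MAX_RETRIES", 3),
        api_key=api_key,
    )
    masked = f"...{api_key[-4:]}" if api_key else "<missing>"  # avoid logging the full secret
    logging.info(f"LLM initialized with model: {config.get('MODEL_NAME')}, api_key: {masked}")
    return llm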
main.py CHANGED
@@ -5,12 +5,14 @@ import json
 import hashlib
 
 # Import our existing pipeline components
-from contextlib import asynccontextmanager
-from fastapi import FastAPI, HTTPException, Depends, status
 from app.utils.util import verify_api_key
 from app.core.models import llm_response_generator
 from app.schema.schema import QuestionRequest, AnswerResponse
 
+from contextlib import asynccontextmanager
+from fastapi import FastAPI, HTTPException, Depends, status
+from fastapi.responses import FileResponse, RedirectResponse
+
 logging.basicConfig(format='%(asctime)s - %(levelname)s - Line: %(lineno)d - %(message)s',
                     datefmt='%Y-%m-%d %H:%M:%S',
                     level=logging.INFO)
@@ -18,6 +20,7 @@ logging.basicConfig(format='%(asctime)s - %(levelname)s - Line: %(lineno)d - %(m
 
 os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
+
 # Load config.json at startup
 @asynccontextmanager
 async def lifespan(app: FastAPI):
@@ -33,6 +36,10 @@ async def lifespan(app: FastAPI):
 # FastAPI app
 app = FastAPI(title="HackRx PDF RAG API", version="1.0.0", lifespan=lifespan)
 
+@app.get("/", include_in_schema=False)
+async def docs_redirect():
+    return RedirectResponse("/docs")
+
 @app.post("/api/v1/hackrx/run", response_model=AnswerResponse)
 async def process_questions(request: QuestionRequest, api_key: str = Depends(verify_api_key)):
     try:
@@ -43,7 +50,7 @@ async def process_questions(request: QuestionRequest, api_key: str = Depends(ver
     logging.info(f"Received {len(questions)} questions for processing. Documents URL: {url}")
 
     # Create cache directory if not exists
-    cache_dir = "redis"
+    cache_dir = "cache"
     os.makedirs(cache_dir, exist_ok=True)
     # Create a cache key from url and questions
     cache_key = hashlib.sha256((url + json.dumps(questions, sort_keys=True)).encode()).hexdigest()
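The cache directory rename ("redis" to "cache") feeds a simple file-based response cache keyed on the SHA-256 of the document URL plus the sorted question list. The hit/miss handling is outside this hunk; a minimal sketch consistent with the key construction shown above, where the helper name and the .json file layout are assumptions:

import os, json, hashlib

def lookup_cached_answers(url: str, questions: list, cache_dir: str = "cache"):
    # Mirrors the cache-key construction in the hunk above; the file layout is assumed.
    os.makedirs(cache_dir, exist_ok=True)
    cache_key = hashlib.sha256((url + json.dumps(questions, sort_keys=True)).encode()).hexdigest()
    path = os.path.join(cache_dir, f"{cache_key}.json")
    if os.path.exists(path):
        with open(path) as f:
            return json.load(f), path  # cache hit
    return None, path  # cache miss: caller runs the pipeline and writes the result to `path`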