marcweibel committed on
Commit
6f5c63d
·
verified ·
1 Parent(s): 55fb962

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -3
app.py CHANGED
@@ -1,7 +1,6 @@
1
- import os
2
  import logging
3
  from fastapi import FastAPI, HTTPException
4
- from fastapi.responses import StreamingResponse
5
  from pydantic import BaseModel
6
  from langchain_community.llms import Ollama
7
  from langchain.callbacks.manager import CallbackManager
@@ -11,6 +10,16 @@ logging.basicConfig(level=logging.INFO)
11
  logger = logging.getLogger(__name__)
12
 
13
  app = FastAPI()
 
 
 
 
 
 
 
 
 
 
14
  MODEL_NAME = 'tinyllama'
15
 
16
  def get_llm():
@@ -20,14 +29,36 @@ def get_llm():
20
  class Question(BaseModel):
21
  text: str
22
 
 
 
 
 
23
  @app.get("/")
24
  def read_root():
25
  return {"Hello": f"Welcome to {MODEL_NAME} FastAPI"}
26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  @app.on_event("startup")
28
  async def startup_event():
29
  logger.info(f"Starting up with model: {MODEL_NAME}")
30
 
31
  @app.on_event("shutdown")
32
  async def shutdown_event():
33
- logger.info("Shutting down")
 
 
 
1
  import logging
2
  from fastapi import FastAPI, HTTPException
3
+ from fastapi.middleware.cors import CORSMiddleware
4
  from pydantic import BaseModel
5
  from langchain_community.llms import Ollama
6
  from langchain.callbacks.manager import CallbackManager
 
10
  logger = logging.getLogger(__name__)
11
 
12
  app = FastAPI()
13
# Allow cross-origin requests (for browser-based JS clients).
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is a
# known CORS footgun — browsers refuse a literal wildcard origin on
# credentialed requests, so Starlette falls back to echoing the request's
# Origin header. Pin explicit origins before production — TODO confirm
# deployment requirements.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Adjust for production if needed
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
22
+
23
  MODEL_NAME = 'tinyllama'
24
 
25
  def get_llm():
 
29
class Question(BaseModel):
    """Request body carrying a bare question for the model."""

    # Raw question text forwarded to the LLM.
    text: str
31
 
32
class QueryWithContext(BaseModel):
    """Request body for /ask: a question plus the lecture notes to ground it."""

    # The user's question.
    question: str
    # Lecture notes (plain text) interpolated into the prompt as grounding.
    context: str
+
36
@app.get("/")
def read_root():
    """Landing endpoint: greet the caller and name the active model."""
    greeting = f"Welcome to {MODEL_NAME} FastAPI"
    return {"Hello": greeting}
39
 
40
@app.post("/ask")
def ask(query: QueryWithContext):
    """Answer a question grounded in caller-supplied lecture notes.

    Builds a RAG-style prompt from ``query.context`` and ``query.question``
    and invokes the configured LLM synchronously.

    Args:
        query: Question plus the lecture-note context to ground the answer.

    Returns:
        dict: ``{"answer": <model response>}``.

    Raises:
        HTTPException: 500 with the underlying error message when the LLM
            invocation fails.
    """
    prompt = f"""You are an expert in quantitative methods. Based on the following lecture notes, answer the user's question.

Lecture notes:
{query.context}

User's question:
{query.question}
"""
    llm = get_llm()
    try:
        response = llm.invoke(prompt)
    except Exception as e:
        # Log the full traceback server-side; only the message reaches the
        # client. Chain the cause so the original error is preserved.
        logger.exception("LLM invocation failed")
        raise HTTPException(status_code=500, detail=str(e)) from e
    return {"answer": response}
56
+
57
# NOTE(review): @app.on_event is deprecated in FastAPI >= 0.93 in favor of
# lifespan handlers — migrating would change module structure, so only flagged.
@app.on_event("startup")
async def startup_event():
    """Log the configured model name when the application starts."""
    # Lazy %-style args defer interpolation to the logging framework.
    logger.info("Starting up with model: %s", MODEL_NAME)
60
 
61
@app.on_event("shutdown")
async def shutdown_event():
    """Log a message as the application shuts down."""
    logger.info("Shutting down")
64
+