PranavReddy18 committed on
Commit
01312e2
·
verified ·
1 Parent(s): bfc642e

Upload 5 files

Browse files
Files changed (6) hide show
  1. .gitattributes +2 -0
  2. api.py +76 -0
  3. app.py +28 -0
  4. data/Attention.pdf +3 -0
  5. data/Pranav_Reddy.pdf +0 -0
  6. data/VT.pdf +3 -0
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ data/Attention.pdf filter=lfs diff=lfs merge=lfs -text
37
+ data/VT.pdf filter=lfs diff=lfs merge=lfs -text
api.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI
2
+ from langchain.document_loaders import DirectoryLoader, PyPDFLoader
3
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
4
+ from langchain.embeddings import HuggingFaceEmbeddings
5
+ from langchain.vectorstores import FAISS
6
+ from langchain_huggingface import HuggingFaceEndpoint
7
+ from langchain.chains.combine_documents import create_stuff_documents_chain
8
+ from langchain_core.prompts import ChatPromptTemplate
9
+ from langchain.chains import create_retrieval_chain
10
+ import os
11
+ import uvicorn
12
+
13
# Initialize the FastAPI application.
app = FastAPI(
    title="Vision Transformer Assistant",
    description="A FastAPI-powered AI assistant for deep learning.",
)

# Hugging Face API token; must be set in the environment (e.g. Space secrets).
HF_TOKEN = os.getenv("HF_TOKEN")  # Ensure your HF_TOKEN is set in Hugging Face Secrets

# Load every PDF under ./data/ into LangChain documents.
loader = DirectoryLoader("./data/", glob="*.pdf", loader_cls=PyPDFLoader)
docs = loader.load()

# Split documents into overlapping chunks so retrieval returns focused passages.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
texts = text_splitter.split_documents(docs)

# Build a FAISS vector store over BGE embeddings and expose it as a retriever.
db = FAISS.from_documents(
    documents=texts,
    embedding=HuggingFaceEmbeddings(model_name='BAAI/bge-base-en-v1.5'),
)
retriever = db.as_retriever()

# Remote LLM served through the Hugging Face Inference Endpoint.
repo_id = "mistralai/Mistral-7B-Instruct-v0.3"
llm = HuggingFaceEndpoint(repo_id=repo_id, token=HF_TOKEN, task="text-generation")

# Prompt template.
# FIX: the original template closed the context with a second opening
# `<context>` tag instead of `</context>`, and it contained no `{input}`
# placeholder — so the user's question (supplied below via
# retrieval_chain.invoke({'input': query})) never reached the LLM.
prompt_temp = ChatPromptTemplate.from_template("""
You are an AI assistant specializing in deep learning, specifically Vision Transformers.

<context>
{context}
</context>

### Instructions:
- Extract relevant information only from retrieved documents.
- Provide concise yet detailed responses.
- Use LaTeX for equations when necessary.
- Do not make up answers; respond with *'Information not available in retrieved documents.'* if needed.

Question: {input}
""")

# Retrieval-augmented generation chain: retrieve -> stuff docs into prompt -> LLM.
document_chain = create_stuff_documents_chain(llm, prompt_temp)
retrieval_chain = create_retrieval_chain(retriever, document_chain)
53
+
54
def get_response(query: str) -> str:
    """Run the retrieval chain on *query* and return the model's answer.

    The chain retrieves relevant chunks from the vector store, stuffs them
    into the prompt, and invokes the LLM; only the 'answer' field of the
    chain's output dict is returned.
    """
    return retrieval_chain.invoke({'input': query})['answer']
60
+
61
@app.get("/")
def home():
    """Health-check endpoint confirming the API is up."""
    payload = {"message": "Vision Transformer Assistant API is running πŸš€"}
    return payload
64
+
65
@app.get("/query")
def get_answer(query: str):
    """API endpoint: answer *query* via the retrieval chain, wrapped for JSON."""
    return {"answer": get_response(query)}
72
+
73
# Run FastAPI on Hugging Face Spaces (public)
if __name__ == "__main__":
    # Bind to all interfaces on port 8080 so the Space's proxy can reach the server.
    uvicorn.run(app, host="0.0.0.0", port=8080)
76
+
app.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import requests

# Hugging Face Spaces FastAPI Backend URL
FASTAPI_URL = "https://pranavreddy18-research.hf.space/query"  # Change to your actual Space URL

# --- Page chrome ---------------------------------------------------------
st.set_page_config(page_title="Vision Transformer Assistant", page_icon="πŸ€–")
st.title("Vision Transformer Assistant πŸ€–")
st.markdown("Ask anything about deep learning and Vision Transformers!")

# --- Question input ------------------------------------------------------
query = st.text_input("Enter your question:")

# Fire the request only when the button was clicked AND a question was typed.
if st.button("Get Answer") and query:
    with st.spinner("Fetching answer..."):
        try:
            response = requests.get(FASTAPI_URL, params={"query": query})
        except requests.exceptions.RequestException as e:
            # Network-level failure (DNS, timeout, refused connection, ...).
            st.error(f"⚠️ Failed to connect to backend: {e}")
        else:
            if response.status_code == 200:
                st.success("βœ… Answer:")
                st.write(response.json().get("answer", "No answer found."))
            else:
                st.error(f"⚠️ Error fetching answer. Status Code: {response.status_code}")
+
data/Attention.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bdfaa68d8984f0dc02beaca527b76f207d99b666d31d1da728ee0728182df697
3
+ size 2215244
data/Pranav_Reddy.pdf ADDED
Binary file (83.8 kB). View file
 
data/VT.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ce7b83971a14508ca711a27c875c9b6914c4f6767cf3150fb1ca6c07aa056d6
3
+ size 3743814