KOkeke94 commited on
Commit
d41aa32
·
1 Parent(s): 86cccfb

Final version: multi-agent stats assistant using RAG + OpenAI router

Browse files
Files changed (2) hide show
  1. app.py +3 -4
  2. requirements.txt +2 -0
app.py CHANGED
@@ -3,15 +3,14 @@ import gradio as gr
3
  import torch
4
  from langchain_community.document_loaders import PyPDFLoader
5
  from langchain.text_splitter import RecursiveCharacterTextSplitter
6
- from langchain_community.embeddings import HuggingFaceEmbeddings
7
  from langchain_community.vectorstores import FAISS
8
  from langchain.chains import RetrievalQA
9
- from langchain_community.llms import HuggingFacePipeline
10
  from langchain_openai import ChatOpenAI
11
  from transformers.pipelines import pipeline
12
 
13
  # ✅ Load writer model and wrap it for LangChain
14
- writer_model = pipeline("text2text-generation", model="BivinSadler/llama3-finetuned-Statistics")
15
  writer_llm = HuggingFacePipeline(pipeline=writer_model)
16
 
17
  # ✅ RAG Agent Builder
@@ -65,7 +64,7 @@ Answer:
65
 
66
  Write your response in 2–3 sentences. Avoid technical jargon.
67
  """
68
- result = writer_model(prompt, max_new_tokens=200, do_sample=False)
69
  return result[0]['generated_text']
70
 
71
  # ✅ Main logic
 
3
  import torch
4
  from langchain_community.document_loaders import PyPDFLoader
5
  from langchain.text_splitter import RecursiveCharacterTextSplitter
6
+ from langchain_huggingface import HuggingFacePipeline, HuggingFaceEmbeddings
7
  from langchain_community.vectorstores import FAISS
8
  from langchain.chains import RetrievalQA
 
9
  from langchain_openai import ChatOpenAI
10
  from transformers.pipelines import pipeline
11
 
12
  # ✅ Load writer model and wrap it for LangChain
13
+ writer_model = pipeline("text-generation", model="BivinSadler/llama3-finetuned-Statistics", return_full_text=False)
14
  writer_llm = HuggingFacePipeline(pipeline=writer_model)
15
 
16
  # ✅ RAG Agent Builder
 
64
 
65
  Write your response in 2–3 sentences. Avoid technical jargon.
66
  """
67
+ result = writer_model(prompt, max_new_tokens=200)
68
  return result[0]['generated_text']
69
 
70
  # ✅ Main logic
requirements.txt CHANGED
@@ -9,4 +9,6 @@ sentence-transformers
9
  gradio
10
  torch
11
  tiktoken
 
 
12
 
 
9
  gradio
10
  torch
11
  tiktoken
12
+ huggingface-hub
13
+ accelerate
14