praneeth dodedu committed on
Commit
eede28a
·
1 Parent(s): ab880a7
Files changed (1) hide show
  1. app.py +48 -10
app.py CHANGED
@@ -1,13 +1,11 @@
1
- import os
2
- os.system("pip uninstall -y gradio")
3
- os.system("pip install gradio==3.31.0")
4
- from sentence_transformers import SentenceTransformer, models
5
  from dotenv import load_dotenv
6
  from langchain.chains import RetrievalQA
7
  from langchain.embeddings import HuggingFaceEmbeddings
8
  from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
9
  from langchain.vectorstores import Chroma
10
  from langchain.llms import GPT4All, LlamaCpp
 
11
  import argparse
12
  from pathlib import Path
13
  import base64
@@ -24,11 +22,51 @@ model_n_ctx = os.environ.get('MODEL_N_CTX')
24
 
25
  from constants import CHROMA_SETTINGS
26
 
27
- # Declare Textbox globally
28
- txt = gr.Textbox(
29
- label="Type your query here:",
30
- placeholder="What would you like to learn today?"
31
- ).style(container=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
 
33
  def parse_arguments():
34
  parser = argparse.ArgumentParser(description='privateGPT: Ask questions to your documents without an internet connection, '
@@ -224,5 +262,5 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css, title="RyBOT") as demo:
224
  inputs=None,
225
  outputs=[txt]
226
  )
227
-
228
  demo.launch()
 
1
+ #!/usr/bin/env python3
 
 
 
2
  from dotenv import load_dotenv
3
  from langchain.chains import RetrievalQA
4
  from langchain.embeddings import HuggingFaceEmbeddings
5
  from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
6
  from langchain.vectorstores import Chroma
7
  from langchain.llms import GPT4All, LlamaCpp
8
+ import os
9
  import argparse
10
  from pathlib import Path
11
  import base64
 
22
 
23
  from constants import CHROMA_SETTINGS
24
 
25
+ def main():
26
+ # Parse the command line arguments
27
+ args = parse_arguments()
28
+ embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)
29
+ db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
30
+ retriever = db.as_retriever()
31
+ # activate/deactivate the streaming StdOut callback for LLMs
32
+ callbacks = [] if args.mute_stream else [StreamingStdOutCallbackHandler()]
33
+ # Prepare the LLM
34
+ '''match model_type:
35
+ case "LlamaCpp":
36
+ llm = LlamaCpp(model_path=model_path, n_ctx=model_n_ctx, callbacks=callbacks, verbose=False)
37
+ case "GPT4All":
38
+ llm = GPT4All(model=model_path, n_ctx=model_n_ctx, backend='gptj', callbacks=callbacks, verbose=False)
39
+ case _default:
40
+ print(f"Model {model_type} not supported!")
41
+ exit;'''
42
+ if model_type == "LlamaCpp":
43
+ llm = LlamaCpp(model_path=model_path, n_ctx=model_n_ctx, callbacks=callbacks, verbose=False)
44
+ elif model_type == "GPT4All":
45
+ llm = GPT4All(model=model_path, n_ctx=model_n_ctx, backend='gptj', callbacks=callbacks, verbose=False)
46
+ else:
47
+ print(f"Model {model_type} not supported!")
48
+ exit;
49
+ qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents= not args.hide_source)
50
+ # Interactive questions and answers
51
+ while True:
52
+ query = input("\nEnter a query: ")
53
+ if query == "exit":
54
+ break
55
+
56
+ # Get the answer from the chain
57
+ res = qa(query)
58
+ answer, docs = res['result'], [] if args.hide_source else res['source_documents']
59
+
60
+ # Print the result
61
+ print("\n\n> Question:")
62
+ print(query)
63
+ print("\n> Answer:")
64
+ print(answer)
65
+
66
+ # Print the relevant sources used for the answer
67
+ for document in docs:
68
+ print("\n> " + document.metadata["source"] + ":")
69
+ print(document.page_content)
70
 
71
  def parse_arguments():
72
  parser = argparse.ArgumentParser(description='privateGPT: Ask questions to your documents without an internet connection, '
 
262
  inputs=None,
263
  outputs=[txt]
264
  )
265
+
266
  demo.launch()