guerrillachicken committed on
Commit
d51b844
·
1 Parent(s): f2c72fe
Files changed (3) hide show
  1. README.md +3 -4
  2. app.py +74 -0
  3. requirements.txt +6 -0
README.md CHANGED
@@ -1,13 +1,12 @@
1
  ---
2
  title: Gurfagpt
3
- emoji: 🐢
4
  colorFrom: blue
5
- colorTo: yellow
6
  sdk: gradio
7
  sdk_version: 3.28.2
8
  app_file: app.py
9
  pinned: false
10
- license: gpl-3.0
11
  ---
12
 
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
  title: Gurfagpt
3
+ emoji: 🧠
4
  colorFrom: blue
5
+ colorTo: pink
6
  sdk: gradio
7
  sdk_version: 3.28.2
8
  app_file: app.py
9
  pinned: false
10
+ license: openrail++
11
  ---
12
 
 
app.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os

import gradio as gr

# Third-party RAG stack.  NOTE(review): Chroma, TextLoader, OpenAI,
# ConversationalRetrievalChain, PromptTemplate, CharacterTextSplitter and
# ChatVectorDBChain are imported but never used below — kept so nothing
# outside this view breaks, but they are candidates for removal.
from langchain import PromptTemplate
from langchain.chains import (
    ChatVectorDBChain,
    ConversationalRetrievalChain,
    RetrievalQAWithSourcesChain,
)
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma, FAISS

# Embedding model used to embed queries against the pre-built index.
# NOTE(review): presumably OPENAI_API_KEY is set as a Space secret — verify;
# OpenAIEmbeddings() raises at startup without it.
embeddings = OpenAIEmbeddings()

# Load the FAISS vector index shipped alongside the app.
db = FAISS.load_local("./faiss_index", embeddings)

# Fix: removed leftover notebook debugging that ran
#   db.similarity_search("Who is responsible for ...")
# at import time — it cost an OpenAI embedding call on every startup,
# discarded its result (bare `docs[1]` expression), and could raise
# IndexError when fewer than two documents matched.

# Conversation memory so follow-up questions keep chat context.
# output_key='answer' selects which chain output gets stored.
memory = ConversationBufferMemory(
    memory_key="chat_history", return_messages=True, output_key="answer"
)

# Retrieval QA chain over the FAISS retriever: "stuff" packs the retrieved
# documents into a single prompt; reduce_k_below_max_tokens trims the number
# of documents so the prompt stays within the model's context window.
chainqa = RetrievalQAWithSourcesChain.from_chain_type(
    ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", max_tokens=150),
    chain_type="stuff",
    reduce_k_below_max_tokens=True,
    memory=memory,
    retriever=db.as_retriever(),
)
41
+
42
def bambolini(query):
    """Answer *query* with the retrieval-QA chain.

    Returns the chain's output dict (answer plus sources); inputs such as
    chat history are excluded via return_only_outputs=True.
    """
    return chainqa({"question": query}, return_only_outputs=True)
# --- Gradio UI --------------------------------------------------------------

title = 'GURFAGPT'
description = """ Made with AI Lobs"""


with gr.Blocks() as demo:

    gr.Markdown(f'# {title}')
    gr.Markdown(f'### {description}')

    # Spacer between the header and the input box.
    gr.Markdown("")
    text_input = gr.Textbox(label="Question")

    with gr.Tab("Bambolini AI"):
        # Raw chain output (answer + sources dict) shown as JSON.
        bambo_output = gr.JSON()
        bambo_button = gr.Button("Ask Bambolini")
    # Disabled tabs below are placeholders for future assistants; as written
    # they would all route to the same bambolini() handler anyway.
    # with gr.Tab("Carabinieri AI"):
    #     cari_output = gr.JSON()
    #     cari_button = gr.Button("Ask Caribinieri")
    # with gr.Tab("Gurfa AI"):
    #     gurfa_output = gr.JSON()
    #     gurfa_button = gr.Button("Ask Gurfinha")

    bambo_button.click(bambolini, inputs=text_input, outputs=bambo_output)
    # cari_button.click(bambolini, inputs=text_input, outputs=cari_output)
    # gurfa_button.click(bambolini, inputs=text_input, outputs=gurfa_output)


# Fix: dropped share=True and debug=True.  Inside a Hugging Face Space the
# Space itself is the public URL, so share=True is unsupported there (Gradio
# only prints a warning), and debug=True merely blocks the main thread.
# A plain launch() is the documented way to serve a Space app.
demo.launch()
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ langchain
2
+ pypdf
3
+ openai
4
+ faiss-cpu
5
+ tiktoken
6
+ gradio