abdullahtahir committed on
Commit b43d116 · verified · 1 Parent(s): 08eaf78

Update app.py

Files changed (1)
  1. app.py +38 -62
app.py CHANGED
@@ -1,64 +1,40 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
-
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+ from langchain_community.vectorstores import FAISS
+ from langchain_community.document_loaders import TextLoader
+ from langchain_community.llms import HuggingFacePipeline
+ from langchain.chains import RetrievalQA
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.prompts import PromptTemplate
+ from transformers import AutoModelForSeq2SeqLM, pipeline, AutoTokenizer
+
+ # Load data
+ loader = TextLoader("about_me.txt")
+ docs = loader.load()
+
+ # Split documents
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=50)
+ split_docs = text_splitter.split_documents(docs)
+
+ # Embeddings and DB
+ embedding_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
+ db = FAISS.from_documents(split_docs, embedding_model)
+
+ # Load model
+ model_id = "google/flan-t5-large"
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
+ pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer, max_length=512, truncation=True)
+ llm = HuggingFacePipeline(pipeline=pipe)
+
+ # RetrievalQA chain
+ custom_prompt = PromptTemplate(template="Context: {context}\nQ: {question}\nA:", input_variables=["context", "question"])
+ qa_chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=db.as_retriever(search_kwargs={"k": 2}), chain_type_kwargs={"prompt": custom_prompt})
+
+ def ask_bot_alternative(question):
+     return qa_chain.invoke({"query": question})["result"]
+
+ # Gradio interface
+ iface = gr.Interface(fn=ask_bot_alternative, inputs="text", outputs="text", title="Portfolio Chatbot")
  if __name__ == "__main__":
-     demo.launch()
+     iface.launch()
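
For reference, a minimal sketch of how the rewritten app answers a question once the module has loaded (this assumes `about_me.txt` sits next to `app.py` so the module-level loading succeeds; the script name and question string are illustrative, not part of this commit):

# smoke_test.py -- hypothetical helper, not included in the commit.
# Importing app builds the FAISS index and the RetrievalQA chain but does
# not call iface.launch(), since launch is guarded by __main__.
from app import qa_chain

# RetrievalQA takes its input under the "query" key and returns a dict
# whose "result" field holds the generated answer, as in ask_bot_alternative.
answer = qa_chain.invoke({"query": "What projects have you worked on?"})["result"]
print(answer)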