{
"cells": [
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"import datetime\n",
"import gradio as gr\n",
"from dotenv import load_dotenv\n",
"from langchain.vectorstores import Chroma\n",
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.chains import RetrievalQA\n",
"from langchain.chains import ConversationalRetrievalChain\n",
"from langchain.memory import ConversationBufferMemory\n",
"\n",
"\n",
"import warnings\n",
"warnings.filterwarnings('ignore')"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# from langchain.memory import MemoryViewMemory\n"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"gpt-3.5-turbo-0301\n"
]
}
],
"source": [
"import datetime\n",
"current_date = datetime.datetime.now().date()\n",
"if current_date < datetime.date(2023, 9, 2):\n",
" llm_name = \"gpt-3.5-turbo-0301\"\n",
"else:\n",
" llm_name = \"gpt-3.5-turbo\"\n",
"print(llm_name)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"def chatWithNCAIR(question, history):\n",
" load_dotenv()\n",
"\n",
" persist_directory = 'docs/chroma/'\n",
" embedding = OpenAIEmbeddings()\n",
" vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)\n",
" llm = ChatOpenAI(model_name=llm_name, temperature=0)\n",
"\n",
" template = \"\"\"Use the following pieces of context to answer the question at the end. \n",
" If you don't know the answer, just say that you don't know, don't try to make up an answer. \n",
" Use three sentences maximum. Keep the answer as concise as possible. \n",
" Always say \"thank you for choosing NCAIR BOT!\" at the end of the answer. \n",
" {context}\n",
" Question: {question}\n",
" Helpful Answer:\"\"\"\n",
" QA_CHAIN_PROMPT = PromptTemplate(input_variables=[\"context\", \"question\"],template=template,)\n",
"\n",
" # Run chain\n",
" from langchain.chains import RetrievalQA\n",
" # question = \"Will interns go through the fabLab during the onboarding?\"\n",
" qa_chain = RetrievalQA.from_chain_type(llm,\n",
" retriever=vectordb.as_retriever(),\n",
" return_source_documents=True,\n",
" chain_type_kwargs={\"prompt\": QA_CHAIN_PROMPT})\n",
"\n",
" memory = ConversationBufferMemory(\n",
" memory_key=\"chat_history\",\n",
" return_messages=True\n",
" )\n",
" retriever=vectordb.as_retriever()\n",
" qa = ConversationalRetrievalChain.from_llm(\n",
" llm,\n",
" retriever=retriever,\n",
" memory=memory\n",
" )\n",
"\n",
"\n",
" result = qa({\"question\": question})\n",
" return result[\"answer\"]\n"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Yes, as an intern in NCAIR, you will first undergo the onboarding session in Fablab, PCB, shopbot, 3D printing, solid work, and then go through compulsory NADIT programs.'"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chatWithNCAIR(\"Will interns go through the fabLab during the onboarding?\",\"\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"# chatWithNCAIR(\"Is it compulsory?\",\"\")"
]
},
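{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A minimal sketch (added for illustration, not executed as part of the original run):\n",
"# chatWithNCAIR rebuilds its ConversationBufferMemory on every call, so a follow-up\n",
"# like \"Is it compulsory?\" cannot see the previous turn. Building the chain once and\n",
"# reusing it keeps the conversation history across Gradio messages. This assumes the\n",
"# same docs/chroma/ store, llm_name and imports from the cells above; the names below\n",
"# (persistent_qa, chatWithNCAIRPersistent) are illustrative, not from the original notebook.\n",
"load_dotenv()\n",
"persistent_vectordb = Chroma(persist_directory='docs/chroma/', embedding_function=OpenAIEmbeddings())\n",
"persistent_memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n",
"persistent_qa = ConversationalRetrievalChain.from_llm(\n",
"    ChatOpenAI(model_name=llm_name, temperature=0),\n",
"    retriever=persistent_vectordb.as_retriever(),\n",
"    memory=persistent_memory\n",
")\n",
"\n",
"def chatWithNCAIRPersistent(question, history):\n",
"    # history is ignored; persistent_memory already holds the running conversation\n",
"    return persistent_qa({\"question\": question})[\"answer\"]\n"
]
},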
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"# ! pip install --upgrade gradio"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running on local URL: http://127.0.0.1:7860\n",
"Running on public URL: https://0e3e0326bf1f8474c9.gradio.live\n",
"\n",
"This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"https://0e3e0326bf1f8474c9.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"demo = gr.ChatInterface(fn=chatWithNCAIR,\n",
" chatbot=gr.Chatbot(height=300, min_width=40),\n",
" textbox=gr.Textbox(placeholder=\"Ask me a question relating to NCAIR\"),\n",
" title=\"Chat with NCAIR💬\",\n",
" description=\"Ask NCAIR any question\",\n",
" theme=\"soft\",\n",
" cache_examples=True,\n",
" retry_btn=None,\n",
" undo_btn=\"Delete Previous\",\n",
" clear_btn=\"Clear\",)\n",
"\n",
"demo.launch(share=True)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}