Commit
·
606fdee
1
Parent(s):
8b59ecd
Upload 🗣️QuestionMyDoc📄_with_LangChain (Assignment Version) (1).ipynb
Browse files
🗣️QuestionMyDoc📄_with_LangChain (Assignment Version) (1).ipynb
ADDED
|
@@ -0,0 +1,714 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"metadata": {
|
| 6 |
+
"id": "kEKghJQ2pmYH"
|
| 7 |
+
},
|
| 8 |
+
"source": [
|
| 9 |
+
"### The Basics of LangChain\n",
|
| 10 |
+
"\n",
|
| 11 |
+
"In this notebook we'll explore exactly what LangChain is doing - and implement a straightforward example that lets us ask questions of a document!\n",
|
| 12 |
+
"\n",
|
| 13 |
+
"First things first, let's get our dependencies all set!"
|
| 14 |
+
]
|
| 15 |
+
},
|
| 16 |
+
{
|
| 17 |
+
"cell_type": "code",
|
| 18 |
+
"execution_count": 1,
|
| 19 |
+
"metadata": {
|
| 20 |
+
"colab": {
|
| 21 |
+
"base_uri": "https://localhost:8080/"
|
| 22 |
+
},
|
| 23 |
+
"id": "fXsYHTgvnCM2",
|
| 24 |
+
"outputId": "36fb461a-25aa-4ffe-c684-1ca6f8eb0904"
|
| 25 |
+
},
|
| 26 |
+
"outputs": [],
|
| 27 |
+
"source": [
|
| 28 |
+
"!pip install openai langchain python-dotenv -q"
|
| 29 |
+
]
|
| 30 |
+
},
|
| 31 |
+
{
|
| 32 |
+
"cell_type": "markdown",
|
| 33 |
+
"metadata": {
|
| 34 |
+
"id": "T0sLjfy8p3jf"
|
| 35 |
+
},
|
| 36 |
+
"source": [
|
| 37 |
+
"You'll need to have an OpenAI API key for this next part - see [this](https://www.onmsft.com/how-to/how-to-get-an-openai-api-key/) if you haven't already set one up!"
|
| 38 |
+
]
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"cell_type": "code",
|
| 42 |
+
"execution_count": 2,
|
| 43 |
+
"metadata": {},
|
| 44 |
+
"outputs": [],
|
| 45 |
+
"source": [
|
| 46 |
+
"!echo openai_api_key=\"YOUR_OPENAI_API_KEY_HERE\" > .env"
|
| 47 |
+
]
|
| 48 |
+
},
|
| 49 |
+
{
|
| 50 |
+
"cell_type": "code",
|
| 51 |
+
"execution_count": 3,
|
| 52 |
+
"metadata": {
|
| 53 |
+
"id": "0TTosnCHnGHG"
|
| 54 |
+
},
|
| 55 |
+
"outputs": [],
|
| 56 |
+
"source": [
|
| 57 |
+
"import os \n",
|
| 58 |
+
"import openai\n",
|
| 59 |
+
"from dotenv import load_dotenv\n",
|
| 60 |
+
"\n",
|
| 61 |
+
"load_dotenv(\".env\")\n",
|
| 62 |
+
"\n",
|
| 63 |
+
"openai.api_key = os.environ.get(\"openai_api_key\")"
|
| 64 |
+
]
|
| 65 |
+
},
|
| 66 |
+
{
|
| 67 |
+
"cell_type": "markdown",
|
| 68 |
+
"metadata": {
|
| 69 |
+
"id": "15M3Jx6SBXcO"
|
| 70 |
+
},
|
| 71 |
+
"source": [
|
| 72 |
+
"#### Helper Functions (run this cell)"
|
| 73 |
+
]
|
| 74 |
+
},
|
| 75 |
+
{
|
| 76 |
+
"cell_type": "code",
|
| 77 |
+
"execution_count": 4,
|
| 78 |
+
"metadata": {
|
| 79 |
+
"id": "k3SBzWBUpQ21"
|
| 80 |
+
},
|
| 81 |
+
"outputs": [],
|
| 82 |
+
"source": [
|
| 83 |
+
"from IPython.display import display, Markdown\n",
|
| 84 |
+
"\n",
|
| 85 |
+
"def disp_markdown(text: str) -> None:\n",
|
| 86 |
+
" display(Markdown(text))"
|
| 87 |
+
]
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"cell_type": "markdown",
|
| 91 |
+
"metadata": {
|
| 92 |
+
"id": "fU4LWrv-BayH"
|
| 93 |
+
},
|
| 94 |
+
"source": [
|
| 95 |
+
"### Our First LangChain ChatModel"
|
| 96 |
+
]
|
| 97 |
+
},
|
| 98 |
+
{
|
| 99 |
+
"cell_type": "markdown",
|
| 100 |
+
"metadata": {
|
| 101 |
+
"id": "p-M-VQhQOC1c"
|
| 102 |
+
},
|
| 103 |
+
"source": [
|
| 104 |
+
"\n",
|
| 105 |
+
"\n",
|
| 106 |
+
"---\n",
|
| 107 |
+
"\n",
|
| 108 |
+
"\n",
|
| 109 |
+
"<div class=\"warn\">Note: Information on OpenAI's <a href=https://openai.com/pricing>pricing</a> and <a href=https://openai.com/policies/usage-policies>usage policies.</a></div>\n",
|
| 110 |
+
"\n",
|
| 111 |
+
"\n",
|
| 112 |
+
"\n",
|
| 113 |
+
"---\n",
|
| 114 |
+
"\n"
|
| 115 |
+
]
|
| 116 |
+
},
|
| 117 |
+
{
|
| 118 |
+
"cell_type": "markdown",
|
| 119 |
+
"metadata": {
|
| 120 |
+
"id": "XVkfqk4NOFWS"
|
| 121 |
+
},
|
| 122 |
+
"source": [
|
| 123 |
+
"Now that we're set-up with OpenAI's API - we can begin making our first ChatModel!\n",
|
| 124 |
+
"\n",
|
| 125 |
+
"There's a few important things to consider when we're using LangChain's ChatModel that are outlined [here](https://python.langchain.com/docs/modules/model_io/models/chat/)\n",
|
| 126 |
+
"\n",
|
| 127 |
+
"Let's begin by initializing the model with OpenAI's `gpt-3.5-turbo` (ChatGPT) model.\n",
|
| 128 |
+
"\n",
|
| 129 |
+
"We're not going to be leveraging the [streaming](https://python.langchain.com/docs/modules/model_io/models/chat/how_to/streaming) capabilities in this Notebook - just the basics to get us started!"
|
| 130 |
+
]
|
| 131 |
+
},
|
| 132 |
+
{
|
| 133 |
+
"cell_type": "code",
|
| 134 |
+
"execution_count": 5,
|
| 135 |
+
"metadata": {
|
| 136 |
+
"id": "tNscLft_nxBb"
|
| 137 |
+
},
|
| 138 |
+
"outputs": [],
|
| 139 |
+
"source": [
|
| 140 |
+
"from langchain.chat_models import ChatOpenAI\n",
|
| 141 |
+
"from langchain.schema import HumanMessage\n",
|
| 142 |
+
"\n",
|
| 143 |
+
"chat_model = ChatOpenAI(model_name=\"gpt-3.5-turbo\", openai_api_key=os.environ.get(\"openai_api_key\"))"
|
| 144 |
+
]
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"cell_type": "markdown",
|
| 148 |
+
"metadata": {
|
| 149 |
+
"id": "vzGhlpwUPyU9"
|
| 150 |
+
},
|
| 151 |
+
"source": [
|
| 152 |
+
"If we look at the [Chat completions](https://platform.openai.com/docs/guides/chat) documentation for OpenAI's chat models - we'll see that there are a few specific fields we'll need to concern ourselves with:\n",
|
| 153 |
+
"\n",
|
| 154 |
+
"`role`\n",
|
| 155 |
+
"- This refers to one of three \"roles\" that interact with the model in specific ways.\n",
|
| 156 |
+
"- The `system` role is an optional role that can be used to guide the model toward a specific task. Examples of `system` messages might be: \n",
|
| 157 |
+
" - You are an expert in Python, please answer questions as though we were in a peer coding session.\n",
|
| 158 |
+
" - You are the world's leading expert in stamps.\n",
|
| 159 |
+
"\n",
|
| 160 |
+
" These messages help us \"prime\" the model to be more aligned with our desired task!\n",
|
| 161 |
+
"\n",
|
| 162 |
+
"- The `user` role represents, well, the user!\n",
|
| 163 |
+
"- The `assistant` role lets us act in the place of the model's outputs. We can (and will) leverage this for some few-shot prompt engineering!\n",
|
| 164 |
+
"\n",
|
| 165 |
+
"Each of these roles has a class in LangChain to make it nice and easy for us to use! \n",
|
| 166 |
+
"\n",
|
| 167 |
+
"Let's look at an example."
|
| 168 |
+
]
|
| 169 |
+
},
|
| 170 |
+
{
|
| 171 |
+
"cell_type": "code",
|
| 172 |
+
"execution_count": 6,
|
| 173 |
+
"metadata": {
|
| 174 |
+
"id": "dM7lciZtoPEp"
|
| 175 |
+
},
|
| 176 |
+
"outputs": [],
|
| 177 |
+
"source": [
|
| 178 |
+
"from langchain.schema import (\n",
|
| 179 |
+
" AIMessage, \n",
|
| 180 |
+
" HumanMessage,\n",
|
| 181 |
+
" SystemMessage\n",
|
| 182 |
+
")\n",
|
| 183 |
+
"\n",
|
| 184 |
+
"# The SystemMessage is associated with the system role\n",
|
| 185 |
+
"system_message = SystemMessage(content=\"You are a food critic.\")\n",
|
| 186 |
+
"\n",
|
| 187 |
+
"# The HumanMessage is associated with the user role\n",
|
| 188 |
+
"user_message = HumanMessage(content=\"Do you think Kraft Dinner constitues fine dining?\")\n",
|
| 189 |
+
"\n",
|
| 190 |
+
"# The AIMessage is associated with the assistant role\n",
|
| 191 |
+
"assistant_message = AIMessage(content=\"Egads! No, it most certainly does not!\")"
|
| 192 |
+
]
|
| 193 |
+
},
|
| 194 |
+
{
|
| 195 |
+
"cell_type": "markdown",
|
| 196 |
+
"metadata": {
|
| 197 |
+
"id": "dSx5HBgjSUvB"
|
| 198 |
+
},
|
| 199 |
+
"source": [
|
| 200 |
+
"Now that we have those messages set-up, let's send them to `gpt-3.5-turbo` with a new user message and see how it does!\n",
|
| 201 |
+
"\n",
|
| 202 |
+
"It's easy enough to do this - the ChatOpenAI model accepts a list of inputs!"
|
| 203 |
+
]
|
| 204 |
+
},
|
| 205 |
+
{
|
| 206 |
+
"cell_type": "code",
|
| 207 |
+
"execution_count": 7,
|
| 208 |
+
"metadata": {
|
| 209 |
+
"colab": {
|
| 210 |
+
"base_uri": "https://localhost:8080/"
|
| 211 |
+
},
|
| 212 |
+
"id": "LwDLOYOKSTpG",
|
| 213 |
+
"outputId": "94160cfc-c5b6-4825-8838-32e2476bcb73"
|
| 214 |
+
},
|
| 215 |
+
"outputs": [
|
| 216 |
+
{
|
| 217 |
+
"data": {
|
| 218 |
+
"text/plain": [
|
| 219 |
+
"AIMessage(content=\"Ah, Red Lobster. While it may be a step up from Kraft Dinner, I wouldn't classify it as fine dining either. Red Lobster is more of a casual dining chain that focuses on seafood. It's known for its affordable prices and popular menu items like their Cheddar Bay Biscuits and endless shrimp promotion. While it may be a fun and enjoyable dining experience, it doesn't quite reach the level of sophistication and refinement that is typically associated with fine dining establishments.\", additional_kwargs={}, example=False)"
|
| 220 |
+
]
|
| 221 |
+
},
|
| 222 |
+
"execution_count": 7,
|
| 223 |
+
"metadata": {},
|
| 224 |
+
"output_type": "execute_result"
|
| 225 |
+
}
|
| 226 |
+
],
|
| 227 |
+
"source": [
|
| 228 |
+
"second_user_message = HumanMessage(content=\"What about Red Lobster, surely that is fine dining!\")\n",
|
| 229 |
+
"\n",
|
| 230 |
+
"# create the list of prompts\n",
|
| 231 |
+
"list_of_prompts = [\n",
|
| 232 |
+
" system_message,\n",
|
| 233 |
+
" user_message,\n",
|
| 234 |
+
" assistant_message,\n",
|
| 235 |
+
" second_user_message\n",
|
| 236 |
+
"]\n",
|
| 237 |
+
"\n",
|
| 238 |
+
"# we can just call our chat_model on the list of prompts!\n",
|
| 239 |
+
"chat_model(list_of_prompts)"
|
| 240 |
+
]
|
| 241 |
+
},
|
| 242 |
+
{
|
| 243 |
+
"cell_type": "markdown",
|
| 244 |
+
"metadata": {
|
| 245 |
+
"id": "pZMYJDWXTkMq"
|
| 246 |
+
},
|
| 247 |
+
"source": [
|
| 248 |
+
"Great! That's in line with what we expected to see!"
|
| 249 |
+
]
|
| 250 |
+
},
|
| 251 |
+
{
|
| 252 |
+
"cell_type": "markdown",
|
| 253 |
+
"metadata": {
|
| 254 |
+
"id": "8DUNhabQUB8f"
|
| 255 |
+
},
|
| 256 |
+
"source": [
|
| 257 |
+
"### PromptTemplates\n",
|
| 258 |
+
"\n",
|
| 259 |
+
"Next stop, we'll discuss a few templates. This allows us to easily interact with our model by not having to redo work we've already completed!"
|
| 260 |
+
]
|
| 261 |
+
},
|
| 262 |
+
{
|
| 263 |
+
"cell_type": "code",
|
| 264 |
+
"execution_count": 8,
|
| 265 |
+
"metadata": {
|
| 266 |
+
"id": "74vpojywT0-4"
|
| 267 |
+
},
|
| 268 |
+
"outputs": [],
|
| 269 |
+
"source": [
|
| 270 |
+
"from langchain.prompts.chat import (\n",
|
| 271 |
+
" ChatPromptTemplate,\n",
|
| 272 |
+
" SystemMessagePromptTemplate,\n",
|
| 273 |
+
" HumanMessagePromptTemplate\n",
|
| 274 |
+
")\n",
|
| 275 |
+
"\n",
|
| 276 |
+
"# we can signify variables we want access to by wrapping them in {}\n",
|
| 277 |
+
"system_prompt_template = \"You are an expert in {SUBJECT}, and you're currently feeling {MOOD}\"\n",
|
| 278 |
+
"system_prompt_template = SystemMessagePromptTemplate.from_template(system_prompt_template)\n",
|
| 279 |
+
"\n",
|
| 280 |
+
"user_prompt_template = \"{CONTENT}\"\n",
|
| 281 |
+
"user_prompt_template = HumanMessagePromptTemplate.from_template(user_prompt_template)\n",
|
| 282 |
+
"\n",
|
| 283 |
+
"# put them together into a ChatPromptTemplate\n",
|
| 284 |
+
"chat_prompt = ChatPromptTemplate.from_messages([system_prompt_template, user_prompt_template])"
|
| 285 |
+
]
|
| 286 |
+
},
|
| 287 |
+
{
|
| 288 |
+
"cell_type": "markdown",
|
| 289 |
+
"metadata": {
|
| 290 |
+
"id": "a-nbEW-kV_na"
|
| 291 |
+
},
|
| 292 |
+
"source": [
|
| 293 |
+
"Now that we have our `chat_prompt` set-up with the templates - let's see how we can easily format them with our content!\n",
|
| 294 |
+
"\n",
|
| 295 |
+
"NOTE: `disp_markdown` is just a helper function to display the formatted markdown response."
|
| 296 |
+
]
|
| 297 |
+
},
|
| 298 |
+
{
|
| 299 |
+
"cell_type": "code",
|
| 300 |
+
"execution_count": 9,
|
| 301 |
+
"metadata": {
|
| 302 |
+
"colab": {
|
| 303 |
+
"base_uri": "https://localhost:8080/",
|
| 304 |
+
"height": 337
|
| 305 |
+
},
|
| 306 |
+
"id": "P4vd-W2FV7Xq",
|
| 307 |
+
"outputId": "2c4bda02-7ba3-4d72-f6d2-dddeed8c3385"
|
| 308 |
+
},
|
| 309 |
+
"outputs": [
|
| 310 |
+
{
|
| 311 |
+
"data": {
|
| 312 |
+
"text/markdown": [
|
| 313 |
+
"Hello! As an expert in cheeses, I can certainly help you with that. When it comes to the finest cheeses, there are a few that stand out:\n",
|
| 314 |
+
"\n",
|
| 315 |
+
"1. Parmigiano Reggiano: Known as the \"king of cheeses,\" Parmigiano Reggiano is a hard, aged Italian cheese. It has a rich, nutty flavor with a slightly grainy texture. It's commonly used for grating over pasta or salads.\n",
|
| 316 |
+
"\n",
|
| 317 |
+
"2. Gruyère: This Swiss cheese is well-known for its complex and nutty flavor. It has a firm, creamy texture, making it perfect for melting in dishes like quiches or fondue.\n",
|
| 318 |
+
"\n",
|
| 319 |
+
"3. Roquefort: A famous blue cheese from France, Roquefort is made from sheep's milk and aged in caves. It has a strong, tangy flavor with a creamy and crumbly texture. This cheese pairs well with fruits and honey.\n",
|
| 320 |
+
"\n",
|
| 321 |
+
"4. Brie de Meaux: A classic French cheese, Brie de Meaux is a soft, creamy cheese with a bloomy rind. It has a mild, buttery flavor that intensifies as it ripens. Brie is often enjoyed with crusty bread or fresh fruits.\n",
|
| 322 |
+
"\n",
|
| 323 |
+
"5. Manchego: Originating from Spain, Manchego is made from sheep's milk and has a firm, crumbly texture. It has a distinct, nutty flavor with hints of sweetness. Manchego is delicious on its own or paired with quince paste.\n",
|
| 324 |
+
"\n",
|
| 325 |
+
"Remember, these are just a few examples of the finest cheeses, and there are countless more varieties to explore. Enjoy your cheese journey!"
|
| 326 |
+
],
|
| 327 |
+
"text/plain": [
|
| 328 |
+
"<IPython.core.display.Markdown object>"
|
| 329 |
+
]
|
| 330 |
+
},
|
| 331 |
+
"metadata": {},
|
| 332 |
+
"output_type": "display_data"
|
| 333 |
+
}
|
| 334 |
+
],
|
| 335 |
+
"source": [
|
| 336 |
+
"# note the method `to_messages()`, that's what converts our formatted prompt into \n",
|
| 337 |
+
"formatted_chat_prompt = chat_prompt.format_prompt(SUBJECT=\"cheeses\", MOOD=\"quite tired\", CONTENT=\"Hi, what are the finest cheeses?\").to_messages()\n",
|
| 338 |
+
"\n",
|
| 339 |
+
"disp_markdown(chat_model(formatted_chat_prompt).content)"
|
| 340 |
+
]
|
| 341 |
+
},
|
| 342 |
+
{
|
| 343 |
+
"cell_type": "markdown",
|
| 344 |
+
"metadata": {
|
| 345 |
+
"id": "hHehNFjAXbU_"
|
| 346 |
+
},
|
| 347 |
+
"source": [
|
| 348 |
+
"### Putting the Chain in LangChain\n",
|
| 349 |
+
"\n",
|
| 350 |
+
"In essence, a chain is exactly as it sounds - it helps us chain actions together.\n",
|
| 351 |
+
"\n",
|
| 352 |
+
"Let's take a look at an example."
|
| 353 |
+
]
|
| 354 |
+
},
|
| 355 |
+
{
|
| 356 |
+
"cell_type": "code",
|
| 357 |
+
"execution_count": 10,
|
| 358 |
+
"metadata": {
|
| 359 |
+
"colab": {
|
| 360 |
+
"base_uri": "https://localhost:8080/",
|
| 361 |
+
"height": 163
|
| 362 |
+
},
|
| 363 |
+
"id": "lTzw4ZMoWX0X",
|
| 364 |
+
"outputId": "ed63bd6c-a682-4a95-fbc8-ad5fc678574f"
|
| 365 |
+
},
|
| 366 |
+
"outputs": [
|
| 367 |
+
{
|
| 368 |
+
"data": {
|
| 369 |
+
"text/markdown": [
|
| 370 |
+
"Oh, don't even get me started on the '67 Chevrolet Impala! It's an absolute classic and an iconic vehicle in the world of cars. With its sleek design, powerful engine options, and comfortable interior, it's no wonder why it's so highly regarded among car enthusiasts. Whether you're cruising down the highway or showing it off at a car show, the '67 Impala is sure to turn heads and make a statement. So yes, it is a fantastic vehicle. Now, excuse me while I go calm down."
|
| 371 |
+
],
|
| 372 |
+
"text/plain": [
|
| 373 |
+
"<IPython.core.display.Markdown object>"
|
| 374 |
+
]
|
| 375 |
+
},
|
| 376 |
+
"metadata": {},
|
| 377 |
+
"output_type": "display_data"
|
| 378 |
+
}
|
| 379 |
+
],
|
| 380 |
+
"source": [
|
| 381 |
+
"from langchain.chains import LLMChain\n",
|
| 382 |
+
"\n",
|
| 383 |
+
"chain = LLMChain(llm=chat_model, prompt=chat_prompt)\n",
|
| 384 |
+
"\n",
|
| 385 |
+
"disp_markdown(chain.run(SUBJECT=\"classic cars\", MOOD=\"angry\", CONTENT=\"Is the 67 Chevrolet Impala a good vehicle?\"))"
|
| 386 |
+
]
|
| 387 |
+
},
|
| 388 |
+
{
|
| 389 |
+
"cell_type": "markdown",
|
| 390 |
+
"metadata": {
|
| 391 |
+
"id": "Md5XYaAj_t51"
|
| 392 |
+
},
|
| 393 |
+
"source": [
|
| 394 |
+
"### Incorporate A Local Document\n",
|
| 395 |
+
"\n",
|
| 396 |
+
"Now that we've got our first chain running, let's talk about how we can leverage our own document!\n",
|
| 397 |
+
"\n",
|
| 398 |
+
"First off, we'll need a document!\n",
|
| 399 |
+
"\n",
|
| 400 |
+
"For this example, we'll be using Douglas Adams' The Hitchhiker's Guide to the Galaxy - though you can substitute this for any particular document, as long as it's in a text file."
|
| 401 |
+
]
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"cell_type": "code",
|
| 405 |
+
"execution_count": 11,
|
| 406 |
+
"metadata": {
|
| 407 |
+
"colab": {
|
| 408 |
+
"base_uri": "https://localhost:8080/"
|
| 409 |
+
},
|
| 410 |
+
"id": "l4SJNvP_KXk9",
|
| 411 |
+
"outputId": "d483de62-c84f-4394-f3f5-e07959622562"
|
| 412 |
+
},
|
| 413 |
+
"outputs": [
|
| 414 |
+
{
|
| 415 |
+
"name": "stdout",
|
| 416 |
+
"output_type": "stream",
|
| 417 |
+
"text": [
|
| 418 |
+
"--2023-09-08 15:57:43-- https://erki.lap.ee/failid/raamatud/guide1.txt\n",
|
| 419 |
+
"Resolving erki.lap.ee (erki.lap.ee)... 185.158.177.102\n",
|
| 420 |
+
"Connecting to erki.lap.ee (erki.lap.ee)|185.158.177.102|:443... connected.\n",
|
| 421 |
+
"HTTP request sent, awaiting response... 200 OK\n",
|
| 422 |
+
"Length: 291862 (285K) [text/plain]\n",
|
| 423 |
+
"Saving to: ‘guide1.txt’\n",
|
| 424 |
+
"\n",
|
| 425 |
+
"100%[======================================>] 291,862 455KB/s in 0.6s \n",
|
| 426 |
+
"\n",
|
| 427 |
+
"2023-09-08 15:57:44 (455 KB/s) - ‘guide1.txt’ saved [291862/291862]\n",
|
| 428 |
+
"\n"
|
| 429 |
+
]
|
| 430 |
+
}
|
| 431 |
+
],
|
| 432 |
+
"source": [
|
| 433 |
+
"!wget https://erki.lap.ee/failid/raamatud/guide1.txt"
|
| 434 |
+
]
|
| 435 |
+
},
|
| 436 |
+
{
|
| 437 |
+
"cell_type": "code",
|
| 438 |
+
"execution_count": 13,
|
| 439 |
+
"metadata": {
|
| 440 |
+
"id": "HX00sL92LATv"
|
| 441 |
+
},
|
| 442 |
+
"outputs": [],
|
| 443 |
+
"source": [
|
| 444 |
+
"with open(\"guide1.txt\") as f:\n",
|
| 445 |
+
" hitchhikersguide = f.read()"
|
| 446 |
+
]
|
| 447 |
+
},
|
| 448 |
+
{
|
| 449 |
+
"cell_type": "markdown",
|
| 450 |
+
"metadata": {
|
| 451 |
+
"id": "5PdfLcOlKcjH"
|
| 452 |
+
},
|
| 453 |
+
"source": [
|
| 454 |
+
"Next we'll want to split our text into appropriately sized chunks. \n",
|
| 455 |
+
"\n",
|
| 456 |
+
"We're going to be using the [CharacterTextSplitter](https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/character_text_splitter) from LangChain today.\n",
|
| 457 |
+
"\n",
|
| 458 |
+
"The size of these chunks will depend heavily on a number of factors relating to which LLM you're using, what the max context size is, and more. \n",
|
| 459 |
+
"\n",
|
| 460 |
+
"You can also choose to have the chunks overlap to avoid potentially missing any important information between chunks. As we're dealing with a novel - there's not a critical need to include overlap.\n",
|
| 461 |
+
"\n",
|
| 462 |
+
"We can also pass in the separator - this is what we'll try and separate the documents on. Be careful to understand your documents so you can be sure you use a valid separator!\n",
|
| 463 |
+
"\n",
|
| 464 |
+
"For now, we'll go with 1000 characters. "
|
| 465 |
+
]
|
| 466 |
+
},
|
| 467 |
+
{
|
| 468 |
+
"cell_type": "code",
|
| 469 |
+
"execution_count": 14,
|
| 470 |
+
"metadata": {
|
| 471 |
+
"id": "BSYNeLXPKZtn"
|
| 472 |
+
},
|
| 473 |
+
"outputs": [],
|
| 474 |
+
"source": [
|
| 475 |
+
"from langchain.text_splitter import CharacterTextSplitter\n",
|
| 476 |
+
"\n",
|
| 477 |
+
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0, separator = \"\\n\")\n",
|
| 478 |
+
"texts = text_splitter.split_text(hitchhikersguide)"
|
| 479 |
+
]
|
| 480 |
+
},
|
| 481 |
+
{
|
| 482 |
+
"cell_type": "code",
|
| 483 |
+
"execution_count": 15,
|
| 484 |
+
"metadata": {
|
| 485 |
+
"colab": {
|
| 486 |
+
"base_uri": "https://localhost:8080/"
|
| 487 |
+
},
|
| 488 |
+
"id": "z9w-svpbLq62",
|
| 489 |
+
"outputId": "623c31d3-4676-4dcf-bb41-24df47a7234e"
|
| 490 |
+
},
|
| 491 |
+
"outputs": [],
|
| 492 |
+
"source": [
|
| 493 |
+
"assert len(texts) == 293"
|
| 494 |
+
]
|
| 495 |
+
},
|
| 496 |
+
{
|
| 497 |
+
"cell_type": "markdown",
|
| 498 |
+
"metadata": {
|
| 499 |
+
"id": "dQCXLq-ML_aN"
|
| 500 |
+
},
|
| 501 |
+
"source": [
|
| 502 |
+
"Now that we've split our document into more manageable sized chunks. We'll need to embed those documents!\n",
|
| 503 |
+
"\n",
|
| 504 |
+
"For more information on embedding - please check out [this](https://platform.openai.com/docs/guides/embeddings) resource from OpenAI.\n",
|
| 505 |
+
"\n",
|
| 506 |
+
"In order to do this, we'll first need to select a method to embed - for this example we'll be using OpenAI's embedding - but you're free to use whatever you'd like. \n",
|
| 507 |
+
"\n",
|
| 508 |
+
"You just need to ensure you're using consistent embeddings as they don't play well with others."
|
| 509 |
+
]
|
| 510 |
+
},
|
| 511 |
+
{
|
| 512 |
+
"cell_type": "code",
|
| 513 |
+
"execution_count": 16,
|
| 514 |
+
"metadata": {
|
| 515 |
+
"id": "VigAmqxaMd5a"
|
| 516 |
+
},
|
| 517 |
+
"outputs": [],
|
| 518 |
+
"source": [
|
| 519 |
+
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
| 520 |
+
"\n",
|
| 521 |
+
"os.environ[\"OPENAI_API_KEY\"] = openai.api_key\n",
|
| 522 |
+
"\n",
|
| 523 |
+
"embeddings = OpenAIEmbeddings()"
|
| 524 |
+
]
|
| 525 |
+
},
|
| 526 |
+
{
|
| 527 |
+
"cell_type": "markdown",
|
| 528 |
+
"metadata": {
|
| 529 |
+
"id": "uEN_IgzqOBNs"
|
| 530 |
+
},
|
| 531 |
+
"source": [
|
| 532 |
+
"Now that we've set up how we want to embed our document - we'll need to embed it. \n",
|
| 533 |
+
"\n",
|
| 534 |
+
"For this week we'll be glossing over the technical details of this process - as we'll get more into next week.\n",
|
| 535 |
+
"\n",
|
| 536 |
+
"Just know that we're converting our text into an easily queryable format!\n",
|
| 537 |
+
"\n",
|
| 538 |
+
"We're going to leverage ChromaDB for this example, so we'll want to install that dependency. "
|
| 539 |
+
]
|
| 540 |
+
},
|
| 541 |
+
{
|
| 542 |
+
"cell_type": "code",
|
| 543 |
+
"execution_count": 17,
|
| 544 |
+
"metadata": {
|
| 545 |
+
"colab": {
|
| 546 |
+
"base_uri": "https://localhost:8080/"
|
| 547 |
+
},
|
| 548 |
+
"id": "Y-ZuzHPCOjLc",
|
| 549 |
+
"outputId": "17641e2c-4838-4785-ddb6-4b14b5b0b3b3"
|
| 550 |
+
},
|
| 551 |
+
"outputs": [],
|
| 552 |
+
"source": [
|
| 553 |
+
"!pip install chromadb==0.3.22 tiktoken -q "
|
| 554 |
+
]
|
| 555 |
+
},
|
| 556 |
+
{
|
| 557 |
+
"cell_type": "code",
|
| 558 |
+
"execution_count": 18,
|
| 559 |
+
"metadata": {
|
| 560 |
+
"id": "ql7jqj7TONDE"
|
| 561 |
+
},
|
| 562 |
+
"outputs": [
|
| 563 |
+
{
|
| 564 |
+
"name": "stderr",
|
| 565 |
+
"output_type": "stream",
|
| 566 |
+
"text": [
|
| 567 |
+
"Using embedded DuckDB without persistence: data will be transient\n"
|
| 568 |
+
]
|
| 569 |
+
}
|
| 570 |
+
],
|
| 571 |
+
"source": [
|
| 572 |
+
"from langchain.vectorstores import Chroma\n",
|
| 573 |
+
"\n",
|
| 574 |
+
"docsearch = Chroma.from_texts(texts, embeddings, metadatas=[{\"source\": str(i)} for i in range(len(texts))]).as_retriever()"
|
| 575 |
+
]
|
| 576 |
+
},
|
| 577 |
+
{
|
| 578 |
+
"cell_type": "markdown",
|
| 579 |
+
"metadata": {
|
| 580 |
+
"id": "kfn0R64lPb7n"
|
| 581 |
+
},
|
| 582 |
+
"source": [
|
| 583 |
+
"Now that we have our documents embedded we're free to query them with natural language! Let's see this in action!"
|
| 584 |
+
]
|
| 585 |
+
},
|
| 586 |
+
{
|
| 587 |
+
"cell_type": "code",
|
| 588 |
+
"execution_count": 19,
|
| 589 |
+
"metadata": {
|
| 590 |
+
"id": "ubZwxCHvQzsT"
|
| 591 |
+
},
|
| 592 |
+
"outputs": [],
|
| 593 |
+
"source": [
|
| 594 |
+
"query = \"What makes towels important?\"\n",
|
| 595 |
+
"docs = docsearch.get_relevant_documents(query)"
|
| 596 |
+
]
|
| 597 |
+
},
|
| 598 |
+
{
|
| 599 |
+
"cell_type": "code",
|
| 600 |
+
"execution_count": 20,
|
| 601 |
+
"metadata": {
|
| 602 |
+
"colab": {
|
| 603 |
+
"base_uri": "https://localhost:8080/"
|
| 604 |
+
},
|
| 605 |
+
"id": "w4M08F78Q3i3",
|
| 606 |
+
"outputId": "5364b9ec-b8ce-423a-cbd7-3baabf705bc4"
|
| 607 |
+
},
|
| 608 |
+
"outputs": [
|
| 609 |
+
{
|
| 610 |
+
"data": {
|
| 611 |
+
"text/plain": [
|
| 612 |
+
"Document(page_content=\"value - you can wrap it around you for warmth as you bound across\\nthe cold moons of Jaglan Beta; you can lie on it on the brilliant\\nmarble-sanded beaches of Santraginus V, inhaling the heady sea\\nvapours; you can sleep under it beneath the stars which shine so\\nredly on the desert world of Kakrafoon; use it to sail a mini\\nraft down the slow heavy river Moth; wet it for use in hand-to-\\nhand-combat; wrap it round your head to ward off noxious fumes or\\nto avoid the gaze of the Ravenous Bugblatter Beast of Traal (a\\nmindboggingly stupid animal, it assumes that if you can't see it,\\nit can't see you - daft as a bush, but very ravenous); you can\\nwave your towel in emergencies as a distress signal, and of\\ncourse dry yourself off with it if it still seems to be clean\\nenough.\\n \\nMore importantly, a towel has immense psychological value. For\\nsome reason, if a strag (strag: non-hitch hiker) discovers that a\\nhitch hiker has his towel with him, he will automatically assume\", metadata={'source': '36'})"
|
| 613 |
+
]
|
| 614 |
+
},
|
| 615 |
+
"execution_count": 20,
|
| 616 |
+
"metadata": {},
|
| 617 |
+
"output_type": "execute_result"
|
| 618 |
+
}
|
| 619 |
+
],
|
| 620 |
+
"source": [
|
| 621 |
+
"docs[0]"
|
| 622 |
+
]
|
| 623 |
+
},
|
| 624 |
+
{
|
| 625 |
+
"cell_type": "markdown",
|
| 626 |
+
"metadata": {
|
| 627 |
+
"id": "-8W9ZmNaRRBX"
|
| 628 |
+
},
|
| 629 |
+
"source": [
|
| 630 |
+
"Finally, we're able to combine what we've done so far into a chain!\n",
|
| 631 |
+
"\n",
|
| 632 |
+
"We're going to leverage the `load_qa_chain` to quickly integrate our queryable documents with an LLM.\n",
|
| 633 |
+
"\n",
|
| 634 |
+
"There are 4 major methods of building this chain, they can be found [here](https://docs.langchain.com/docs/components/chains/index_related_chains)!\n",
|
| 635 |
+
"\n",
|
| 636 |
+
"For this example we'll be using the `stuff` chain type."
|
| 637 |
+
]
|
| 638 |
+
},
|
| 639 |
+
{
|
| 640 |
+
"cell_type": "code",
|
| 641 |
+
"execution_count": 21,
|
| 642 |
+
"metadata": {
|
| 643 |
+
"colab": {
|
| 644 |
+
"base_uri": "https://localhost:8080/",
|
| 645 |
+
"height": 87
|
| 646 |
+
},
|
| 647 |
+
"id": "S7vAWKiFSVj_",
|
| 648 |
+
"outputId": "2f44bc25-ea96-4c0f-9cc9-8f69c73d913e"
|
| 649 |
+
},
|
| 650 |
+
"outputs": [
|
| 651 |
+
{
|
| 652 |
+
"data": {
|
| 653 |
+
"text/plain": [
|
| 654 |
+
"' Towels have immense psychological value. For some reason, if a strag discovers that a hitch hiker has his towel with him, he will automatically assume that he is also in possession of a toothbrush, face flannel, soap, tin of biscuits, flask, compass, map, ball of string, gnat spray, wet weather gear, space suit etc., etc. Furthermore, the strag will then happily lend the hitch hiker any of these or a dozen other items that the hitch hiker might accidentally have \"lost\". What the strag will think is that any man who can hitch the length and breadth of the galaxy, rough it, slum it, struggle against terrible odds, win through, and still knows where his towel is is clearly a man to be reckoned with.'"
|
| 655 |
+
]
|
| 656 |
+
},
|
| 657 |
+
"execution_count": 21,
|
| 658 |
+
"metadata": {},
|
| 659 |
+
"output_type": "execute_result"
|
| 660 |
+
}
|
| 661 |
+
],
|
| 662 |
+
"source": [
|
| 663 |
+
"from langchain.chains.question_answering import load_qa_chain\n",
|
| 664 |
+
"from langchain.llms import OpenAI\n",
|
| 665 |
+
"\n",
|
| 666 |
+
"chain = load_qa_chain(OpenAI(temperature=0), chain_type=\"stuff\")\n",
|
| 667 |
+
"query = \"What makes towels important?\"\n",
|
| 668 |
+
"chain.run(input_documents=docs, question=query)"
|
| 669 |
+
]
|
| 670 |
+
},
|
| 671 |
+
{
|
| 672 |
+
"cell_type": "markdown",
|
| 673 |
+
"metadata": {
|
| 674 |
+
"id": "fMxm7pdwUs5K"
|
| 675 |
+
},
|
| 676 |
+
"source": [
|
| 677 |
+
"Now that we have this set-up, we'll want to package it into an app and pass it to a Hugging Face Space!\n",
|
| 678 |
+
"\n",
|
| 679 |
+
"You can find instruction on how to do that in the GitHub Repository!"
|
| 680 |
+
]
|
| 681 |
+
},
|
| 682 |
+
{
|
| 683 |
+
"cell_type": "code",
|
| 684 |
+
"execution_count": null,
|
| 685 |
+
"metadata": {},
|
| 686 |
+
"outputs": [],
|
| 687 |
+
"source": []
|
| 688 |
+
}
|
| 689 |
+
],
|
| 690 |
+
"metadata": {
|
| 691 |
+
"colab": {
|
| 692 |
+
"provenance": []
|
| 693 |
+
},
|
| 694 |
+
"kernelspec": {
|
| 695 |
+
"display_name": "conda_pytorch_p310",
|
| 696 |
+
"language": "python",
|
| 697 |
+
"name": "conda_pytorch_p310"
|
| 698 |
+
},
|
| 699 |
+
"language_info": {
|
| 700 |
+
"codemirror_mode": {
|
| 701 |
+
"name": "ipython",
|
| 702 |
+
"version": 3
|
| 703 |
+
},
|
| 704 |
+
"file_extension": ".py",
|
| 705 |
+
"mimetype": "text/x-python",
|
| 706 |
+
"name": "python",
|
| 707 |
+
"nbconvert_exporter": "python",
|
| 708 |
+
"pygments_lexer": "ipython3",
|
| 709 |
+
"version": "3.10.12"
|
| 710 |
+
}
|
| 711 |
+
},
|
| 712 |
+
"nbformat": 4,
|
| 713 |
+
"nbformat_minor": 1
|
| 714 |
+
}
|