freddyaboulton HF Staff committed on
Commit
20d1294
·
verified ·
1 Parent(s): 6f16b1f

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. README.md +1 -1
  2. requirements.txt +2 -1
  3. run.ipynb +1 -1
  4. run.py +8 -6
README.md CHANGED
@@ -5,7 +5,7 @@ emoji: 🔥
5
  colorFrom: indigo
6
  colorTo: indigo
7
  sdk: gradio
8
- sdk_version: 6.4.0
9
  app_file: run.py
10
  pinned: false
11
  hf_oauth: true
 
5
  colorFrom: indigo
6
  colorTo: indigo
7
  sdk: gradio
8
+ sdk_version: 6.5.0
9
  app_file: run.py
10
  pinned: false
11
  hf_oauth: true
requirements.txt CHANGED
@@ -1,2 +1,3 @@
1
  langchain
2
- langchain-openai
 
 
1
  langchain
2
+ langchain-openai
3
+ pytz
run.ipynb CHANGED
@@ -1 +1 @@
1
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: llm_langchain"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio langchain langchain-openai"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# This is a simple general-purpose chatbot built on top of LangChain and Gradio.\n", "# Before running this, make sure you have exported your OpenAI API key as an environment variable:\n", "# export OPENAI_API_KEY=\"your-openai-api-key\"\n", "\n", "from langchain_openai import ChatOpenAI # type: ignore\n", "from langchain.schema import AIMessage, HumanMessage # type: ignore\n", "import gradio as gr\n", "\n", "model = ChatOpenAI(model=\"gpt-4o-mini\")\n", "\n", "def predict(message, history):\n", " history_langchain_format = []\n", " for msg in history:\n", " if msg['role'] == \"user\":\n", " history_langchain_format.append(HumanMessage(content=msg['content']))\n", " elif msg['role'] == \"assistant\":\n", " history_langchain_format.append(AIMessage(content=msg['content']))\n", " history_langchain_format.append(HumanMessage(content=message))\n", " gpt_response = model.invoke(history_langchain_format)\n", " return gpt_response.content\n", "\n", "demo = gr.ChatInterface(\n", " predict,\n", " api_name=\"chat\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: llm_langchain"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio langchain langchain-openai pytz "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# This is a simple general-purpose chatbot built on top of LangChain and Gradio.\n", "# Before running this, make sure you have exported your OpenAI API key as an environment variable:\n", "# export OPENAI_API_KEY=\"your-openai-api-key\"\n", "\n", "import gradio as gr\n", "from langchain.messages import AIMessage, HumanMessage # type: ignore\n", "from langchain_openai import ChatOpenAI # type: ignore\n", "\n", "model = ChatOpenAI(model=\"gpt-4o-mini\")\n", "\n", "\n", "def predict(message, history):\n", " history_langchain_format = []\n", " for msg in history:\n", " if msg[\"role\"] == \"user\":\n", " history_langchain_format.append(HumanMessage(content=msg[\"content\"]))\n", " elif msg[\"role\"] == \"assistant\":\n", " history_langchain_format.append(AIMessage(content=msg[\"content\"]))\n", " history_langchain_format.append(HumanMessage(content=message))\n", " gpt_response = model.invoke(history_langchain_format)\n", " return gpt_response.content\n", "\n", "\n", "demo = gr.ChatInterface(\n", " predict,\n", " api_name=\"chat\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
run.py CHANGED
@@ -2,23 +2,25 @@
2
  # Before running this, make sure you have exported your OpenAI API key as an environment variable:
3
  # export OPENAI_API_KEY="your-openai-api-key"
4
 
5
- from langchain_openai import ChatOpenAI # type: ignore
6
- from langchain.schema import AIMessage, HumanMessage # type: ignore
7
  import gradio as gr
 
 
8
 
9
  model = ChatOpenAI(model="gpt-4o-mini")
10
 
 
11
  def predict(message, history):
12
  history_langchain_format = []
13
  for msg in history:
14
- if msg['role'] == "user":
15
- history_langchain_format.append(HumanMessage(content=msg['content']))
16
- elif msg['role'] == "assistant":
17
- history_langchain_format.append(AIMessage(content=msg['content']))
18
  history_langchain_format.append(HumanMessage(content=message))
19
  gpt_response = model.invoke(history_langchain_format)
20
  return gpt_response.content
21
 
 
22
  demo = gr.ChatInterface(
23
  predict,
24
  api_name="chat",
 
2
  # Before running this, make sure you have exported your OpenAI API key as an environment variable:
3
  # export OPENAI_API_KEY="your-openai-api-key"
4
 
 
 
5
  import gradio as gr
6
+ from langchain.messages import AIMessage, HumanMessage # type: ignore
7
+ from langchain_openai import ChatOpenAI # type: ignore
8
 
9
  model = ChatOpenAI(model="gpt-4o-mini")
10
 
11
+
12
  def predict(message, history):
13
  history_langchain_format = []
14
  for msg in history:
15
+ if msg["role"] == "user":
16
+ history_langchain_format.append(HumanMessage(content=msg["content"]))
17
+ elif msg["role"] == "assistant":
18
+ history_langchain_format.append(AIMessage(content=msg["content"]))
19
  history_langchain_format.append(HumanMessage(content=message))
20
  gpt_response = model.invoke(history_langchain_format)
21
  return gpt_response.content
22
 
23
+
24
  demo = gr.ChatInterface(
25
  predict,
26
  api_name="chat",