Chirag committed on
Commit
a947ce2
·
1 Parent(s): 32f1f44
.ipynb_checkpoints/app-checkpoint.ipynb ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "6797ba2e-ef94-42d6-9e64-a1559b507173",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\singl\\anaconda3\\envs\\pshcy\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "import gradio as gr\n",
    "from huggingface_hub import InferenceClient\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f50cbd8a-777f-440c-8b7f-8eca9e611e12",
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference\n",
    "\"\"\"\n",
    "client = InferenceClient(\"HuggingFaceH4/zephyr-7b-beta\")\n",
    "\n",
    "\n",
    "def respond(\n",
    "    message,\n",
    "    history: list[tuple[str, str]],\n",
    "    system_message,\n",
    "    max_tokens,\n",
    "    temperature,\n",
    "    top_p,\n",
    "):\n",
    "    messages = [{\"role\": \"system\", \"content\": system_message}]\n",
    "\n",
    "    for val in history:\n",
    "        if val[0]:\n",
    "            messages.append({\"role\": \"user\", \"content\": val[0]})\n",
    "        if val[1]:\n",
    "            messages.append({\"role\": \"assistant\", \"content\": val[1]})\n",
    "\n",
    "    messages.append({\"role\": \"user\", \"content\": message})\n",
    "\n",
    "    response = \"\"\n",
    "\n",
    "    # Use a distinct loop name so the `message` parameter is not shadowed.\n",
    "    for chunk in client.chat_completion(\n",
    "        messages,\n",
    "        max_tokens=max_tokens,\n",
    "        stream=True,\n",
    "        temperature=temperature,\n",
    "        top_p=top_p,\n",
    "    ):\n",
    "        token = chunk.choices[0].delta.content\n",
    "\n",
    "        # The final streamed chunk may carry content=None; guard the concat.\n",
    "        if token:\n",
    "            response += token\n",
    "        yield response\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface\n",
    "\"\"\"\n",
    "demo = gr.ChatInterface(\n",
    "    respond,\n",
    "    additional_inputs=[\n",
    "        gr.Textbox(value=\"You are a friendly Chatbot.\", label=\"System message\"),\n",
    "        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label=\"Max new tokens\"),\n",
    "        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label=\"Temperature\"),\n",
    "        gr.Slider(\n",
    "            minimum=0.1,\n",
    "            maximum=1.0,\n",
    "            value=0.95,\n",
    "            step=0.05,\n",
    "            label=\"Top-p (nucleus sampling)\",\n",
    "        ),\n",
    "    ],\n",
    ")\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    demo.launch()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python (Psych)",
   "language": "python",
   "name": "pshcy"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
.ipynb_checkpoints/app-checkpoint.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from huggingface_hub import InferenceClient

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream an assistant reply for the Gradio ChatInterface.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list[tuple[str, str]]
        Prior (user, assistant) turns; empty entries are skipped.
    system_message : str
        System prompt placed first in the conversation.
    max_tokens, temperature, top_p
        Sampling controls forwarded to the inference endpoint.

    Yields
    ------
    str
        The accumulated response so far, growing as tokens stream in.
    """
    messages = [{"role": "system", "content": system_message}]

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # NOTE: the original looped with `for message in ...`, shadowing the
    # user's `message` parameter; a distinct loop name avoids that.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content

        # Bug fix: the final streamed chunk can carry delta.content == None,
        # which made `response += token` raise TypeError. Guard the concat;
        # still yield so the UI cadence matches the stream.
        if token:
            response += token
        yield response


"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)


if __name__ == "__main__":
    demo.launch()
.ipynb_checkpoints/requirements-checkpoint.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ huggingface_hub==0.25.2
app.ipynb ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "6797ba2e-ef94-42d6-9e64-a1559b507173",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\singl\\anaconda3\\envs\\pshcy\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "import gradio as gr\n",
    "from huggingface_hub import InferenceClient\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f50cbd8a-777f-440c-8b7f-8eca9e611e12",
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference\n",
    "\"\"\"\n",
    "client = InferenceClient(\"HuggingFaceH4/zephyr-7b-beta\")\n",
    "\n",
    "\n",
    "def respond(\n",
    "    message,\n",
    "    history: list[tuple[str, str]],\n",
    "    system_message,\n",
    "    max_tokens,\n",
    "    temperature,\n",
    "    top_p,\n",
    "):\n",
    "    messages = [{\"role\": \"system\", \"content\": system_message}]\n",
    "\n",
    "    for val in history:\n",
    "        if val[0]:\n",
    "            messages.append({\"role\": \"user\", \"content\": val[0]})\n",
    "        if val[1]:\n",
    "            messages.append({\"role\": \"assistant\", \"content\": val[1]})\n",
    "\n",
    "    messages.append({\"role\": \"user\", \"content\": message})\n",
    "\n",
    "    response = \"\"\n",
    "\n",
    "    # Use a distinct loop name so the `message` parameter is not shadowed.\n",
    "    for chunk in client.chat_completion(\n",
    "        messages,\n",
    "        max_tokens=max_tokens,\n",
    "        stream=True,\n",
    "        temperature=temperature,\n",
    "        top_p=top_p,\n",
    "    ):\n",
    "        token = chunk.choices[0].delta.content\n",
    "\n",
    "        # The final streamed chunk may carry content=None; guard the concat.\n",
    "        if token:\n",
    "            response += token\n",
    "        yield response\n",
    "\n",
    "\n",
    "\"\"\"\n",
    "For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface\n",
    "\"\"\"\n",
    "demo = gr.ChatInterface(\n",
    "    respond,\n",
    "    additional_inputs=[\n",
    "        gr.Textbox(value=\"You are a friendly Chatbot.\", label=\"System message\"),\n",
    "        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label=\"Max new tokens\"),\n",
    "        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label=\"Temperature\"),\n",
    "        gr.Slider(\n",
    "            minimum=0.1,\n",
    "            maximum=1.0,\n",
    "            value=0.95,\n",
    "            step=0.05,\n",
    "            label=\"Top-p (nucleus sampling)\",\n",
    "        ),\n",
    "    ],\n",
    ")\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    demo.launch()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python (Psych)",
   "language": "python",
   "name": "pshcy"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}