Tiitta commited on
Commit
3f2ef4a
·
verified ·
1 Parent(s): dbe3591

Delete gradiotest.ipynb

Browse files
Files changed (1) hide show
  1. gradiotest.ipynb +0 -199
gradiotest.ipynb DELETED
@@ -1,199 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "code",
5
- "execution_count": 25,
6
- "metadata": {},
7
- "outputs": [
8
- {
9
- "name": "stderr",
10
- "output_type": "stream",
11
- "text": [
12
- "C:\\Users\\tiitu\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.11_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python311\\site-packages\\transformers\\tokenization_utils_base.py:1601: FutureWarning: `clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This behavior will be depracted in transformers v4.45, and will be then set to `False` by default. For more details check this issue: https://github.com/huggingface/transformers/issues/31884\n",
13
- " warnings.warn(\n"
14
- ]
15
- },
16
- {
17
- "data": {
18
- "text/plain": [
19
- "GPT2LMHeadModel(\n",
20
- " (transformer): GPT2Model(\n",
21
- " (wte): Embedding(50257, 768)\n",
22
- " (wpe): Embedding(1024, 768)\n",
23
- " (drop): Dropout(p=0.1, inplace=False)\n",
24
- " (h): ModuleList(\n",
25
- " (0-11): 12 x GPT2Block(\n",
26
- " (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
27
- " (attn): GPT2SdpaAttention(\n",
28
- " (c_attn): Conv1D()\n",
29
- " (c_proj): Conv1D()\n",
30
- " (attn_dropout): Dropout(p=0.1, inplace=False)\n",
31
- " (resid_dropout): Dropout(p=0.1, inplace=False)\n",
32
- " )\n",
33
- " (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
34
- " (mlp): GPT2MLP(\n",
35
- " (c_fc): Conv1D()\n",
36
- " (c_proj): Conv1D()\n",
37
- " (act): NewGELUActivation()\n",
38
- " (dropout): Dropout(p=0.1, inplace=False)\n",
39
- " )\n",
40
- " )\n",
41
- " )\n",
42
- " (ln_f): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
43
- " )\n",
44
- " (lm_head): Linear(in_features=768, out_features=50257, bias=False)\n",
45
- ")"
46
- ]
47
- },
48
- "execution_count": 25,
49
- "metadata": {},
50
- "output_type": "execute_result"
51
- }
52
- ],
53
- "source": [
54
- "# Load model directly\n",
55
- "from transformers import AutoTokenizer, AutoModelForCausalLM\n",
56
- "\n",
57
- "tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n",
58
- "model = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n",
59
- "\n",
60
- "# Move the model to GPU if available\n",
61
- "import torch\n",
62
- "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
63
- "model.to(device)"
64
- ]
65
- },
66
- {
67
- "cell_type": "code",
68
- "execution_count": 26,
69
- "metadata": {},
70
- "outputs": [],
71
- "source": [
72
- "def generate_text(prompt, max_length=50, temperature=1.0):\n",
73
- " # Tokenize the input prompt\n",
74
- " inputs = tokenizer.encode(prompt, return_tensors=\"pt\").to(device)\n",
75
- " \n",
76
- " # Generate text using the model\n",
77
- " outputs = model.generate(\n",
78
- " inputs, \n",
79
- " max_length=max_length, \n",
80
- " temperature=temperature, \n",
81
- " num_return_sequences=1, \n",
82
- " no_repeat_ngram_size=2,\n",
83
- " do_sample=True\n",
84
- " )\n",
85
- " \n",
86
- " # Decode the generated text\n",
87
- " generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)\n",
88
- " return generated_text"
89
- ]
90
- },
91
- {
92
- "cell_type": "code",
93
- "execution_count": 27,
94
- "metadata": {},
95
- "outputs": [
96
- {
97
- "name": "stderr",
98
- "output_type": "stream",
99
- "text": [
100
- "C:\\Users\\tiitu\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.11_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python311\\site-packages\\gradio\\utils.py:1002: UserWarning: Expected 3 arguments for function <function generate_interface at 0x000002920F269940>, received 2.\n",
101
- " warnings.warn(\n",
102
- "C:\\Users\\tiitu\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.11_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python311\\site-packages\\gradio\\utils.py:1006: UserWarning: Expected at least 3 arguments for function <function generate_interface at 0x000002920F269940>, received 2.\n",
103
- " warnings.warn(\n"
104
- ]
105
- },
106
- {
107
- "name": "stdout",
108
- "output_type": "stream",
109
- "text": [
110
- "Running on local URL: http://127.0.0.1:7864\n",
111
- "\n",
112
- "To create a public link, set `share=True` in `launch()`.\n"
113
- ]
114
- },
115
- {
116
- "data": {
117
- "text/html": [
118
- "<div><iframe src=\"http://127.0.0.1:7864/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
119
- ],
120
- "text/plain": [
121
- "<IPython.core.display.HTML object>"
122
- ]
123
- },
124
- "metadata": {},
125
- "output_type": "display_data"
126
- },
127
- {
128
- "data": {
129
- "text/plain": []
130
- },
131
- "execution_count": 27,
132
- "metadata": {},
133
- "output_type": "execute_result"
134
- },
135
- {
136
- "name": "stderr",
137
- "output_type": "stream",
138
- "text": [
139
- "C:\\Users\\tiitu\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.11_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python311\\site-packages\\gradio\\analytics.py:106: UserWarning: IMPORTANT: You are using gradio version 4.44.0, however version 5.0.1 is available, please upgrade. \n",
140
- "--------\n",
141
- " warnings.warn(\n",
142
- "C:\\Users\\tiitu\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.11_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python311\\site-packages\\gradio\\helpers.py:978: UserWarning: Unexpected argument. Filling with None.\n",
143
- " warnings.warn(\"Unexpected argument. Filling with None.\")\n",
144
- "The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
145
- "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
146
- "The attention mask is not set and cannot be inferred from input because pad token is same as eos token. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
147
- "C:\\Users\\tiitu\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.11_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python311\\site-packages\\transformers\\models\\gpt2\\modeling_gpt2.py:544: UserWarning: 1Torch was not compiled with flash attention. (Triggered internally at C:\\actions-runner\\_work\\pytorch\\pytorch\\builder\\windows\\pytorch\\aten\\src\\ATen\\native\\transformers\\cuda\\sdp_utils.cpp:555.)\n",
148
- " attn_output = torch.nn.functional.scaled_dot_product_attention(\n",
149
- "The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
150
- "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n",
151
- "The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
152
- "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
153
- ]
154
- }
155
- ],
156
- "source": [
157
- "import gradio as gr\n",
158
- "\n",
159
- "# Create the Gradio interface\n",
160
- "def generate_interface(prompt, max_length, temperature):\n",
161
- " return generate_text(prompt, max_length=max_length, temperature=temperature)\n",
162
- "\n",
163
- "interface = gr.Interface(\n",
164
- " fn=generate_interface,\n",
165
- " inputs=[\n",
166
- "        gr.Textbox(lines=2, placeholder=\"Enter your question / prompt\", label=\"Prompt\"),\n",
167
- " gr.Slider(10, 100, value=50, label=\"Max Length\")\n",
168
- " ],\n",
169
- " outputs=\"text\",\n",
170
- " title=\"Network helper\",\n",
171
- "    description=\"Network helper is here to help you with your network-related questions. Just enter your question and we will try to help you with the answer.\"\n",
172
- ")\n",
173
- "\n",
174
- "interface.launch()"
175
- ]
176
- }
177
- ],
178
- "metadata": {
179
- "kernelspec": {
180
- "display_name": "Python 3",
181
- "language": "python",
182
- "name": "python3"
183
- },
184
- "language_info": {
185
- "codemirror_mode": {
186
- "name": "ipython",
187
- "version": 3
188
- },
189
- "file_extension": ".py",
190
- "mimetype": "text/x-python",
191
- "name": "python",
192
- "nbconvert_exporter": "python",
193
- "pygments_lexer": "ipython3",
194
- "version": "3.11.9"
195
- }
196
- },
197
- "nbformat": 4,
198
- "nbformat_minor": 2
199
- }