vidu8 committed
Commit 4a99d8f · verified · 1 Parent(s): a25f88f

Update app.py

Files changed (1)
  1. app.py +50 -59
app.py CHANGED
@@ -1,64 +1,55 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
+from datasets import load_dataset
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, Trainer, TrainingArguments
+
+# Load your dataset
+dataset = load_dataset("vidu8/ch01")
+
+# Load tokenizer and model
+model_name = "t5-small"  # lightweight and fast
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+# Prepare dataset
+def preprocess(example):
+    inputs = "chat: " + example["input_text"]
+    targets = example["target_text"]
+    model_inputs = tokenizer(inputs, max_length=128, truncation=True)
+    labels = tokenizer(targets, max_length=128, truncation=True)
+    model_inputs["labels"] = labels["input_ids"]
+    return model_inputs
+
+train_dataset = dataset["train"].map(preprocess, batched=False)
+
+# Load model
+model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+
+# Set training arguments
+training_args = TrainingArguments(
+    output_dir="./results",
+    num_train_epochs=3,
+    per_device_train_batch_size=4,
+    logging_steps=10,
+    save_steps=100,
+    save_total_limit=1,
+    evaluation_strategy="no",
+)
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
+# Define Trainer
+trainer = Trainer(
+    model=model,
+    args=training_args,
+    train_dataset=train_dataset,
 )
 
+# Train
+trainer.train()
+
+# Gradio interface
+def chat(input_text):
+    inputs = tokenizer("chat: " + input_text, return_tensors="pt")
+    outputs = model.generate(**inputs, max_length=50)
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return response
 
-if __name__ == "__main__":
-    demo.launch()
+iface = gr.Interface(fn=chat, inputs="text", outputs="text", title="Simple Chatbot")
+iface.launch()
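
Note on the new training script: `preprocess` tokenizes each example without padding, and the `Trainer` is built without a data collator, so with `per_device_train_batch_size=4` the default collator will fail to stack batches of unequal-length sequences. A minimal sketch of one fix, assuming the `vidu8/ch01` dataset really exposes `input_text`/`target_text` columns as the script expects, is to pass `DataCollatorForSeq2Seq`:

    from transformers import DataCollatorForSeq2Seq

    # Pads input_ids/attention_mask to the longest sequence in each batch and
    # pads labels with -100 so padded positions are ignored by the loss.
    collator = DataCollatorForSeq2Seq(tokenizer, model=model)

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        data_collator=collator,
    )

Two smaller caveats: recent `transformers` releases rename `evaluation_strategy` to `eval_strategy`, so the `TrainingArguments` call may warn or error depending on the installed version, and because `trainer.train()` runs at import time, the Space retrains t5-small from scratch on every restart; saving the fine-tuned model with `trainer.save_model()` and reloading it when present would avoid that.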