VajraGlobal committed on
Commit
0fdb645
·
verified ·
1 Parent(s): 4d88ab7

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +98 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr

import torch
from threading import Thread
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TextIteratorStreamer,
    pipeline,
)

# Hugging Face model id for Microsoft's phi-2 model.
checkpoint = "microsoft/phi-2"

# Fetch tokenizer and model weights; run on CPU in full fp32 precision.
tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    checkpoint,
    torch_dtype=torch.float32,
    device_map="cpu",
    trust_remote_code=True,
)

# Reusable text-generation pipeline built on the loaded model/tokenizer.
phi2 = pipeline(
    "text-generation",
    tokenizer=tokenizer,
    model=model,
    pad_token_id=tokenizer.eos_token_id,
    eos_token_id=tokenizer.eos_token_id,
    device_map="cpu",
)
# Chat handler: builds a dialogue prompt from history, streams the model's
# reply token-by-token, and yields progressively longer cleaned responses.
def generate(message, chat_history, max_new_tokens):
    """Generate a streamed assistant reply with the phi-2 pipeline.

    Parameters
    ----------
    message : str
        The user's newest message.
    chat_history : list[tuple[str, str]]
        Prior (user, assistant) exchanges, oldest first.
    max_new_tokens : int
        Upper bound on tokens generated for this reply.

    Yields
    ------
    str
        The reply accumulated so far, stripped of any role labels the
        model may have echoed back.
    """
    instruction = "You are a helpful assistant to 'User'. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'."
    final_prompt = f"Instruction: {instruction}\n"

    for sent, received in chat_history:
        final_prompt += "User: " + sent + "\n"
        final_prompt += "Assistant: " + received + "\n"

    final_prompt += "User: " + message + "\n"
    final_prompt += "Output:"

    # Streams decoded tokens out of the generation thread; the generous
    # timeout covers the slow first-token latency on CPU.
    streamer = TextIteratorStreamer(
        tokenizer=tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=300.0
    )
    # daemon=True so a hung or still-running generation thread cannot
    # block interpreter shutdown if the server is stopped mid-stream.
    thread = Thread(
        target=phi2,
        kwargs={
            "text_inputs": final_prompt,
            "max_new_tokens": max_new_tokens,
            "streamer": streamer,
        },
        daemon=True,
    )
    thread.start()

    generated_text = ""
    for word in streamer:
        generated_text += word
        yield _clean_response(generated_text)
    # The streamer is exhausted only after generation finishes, so the
    # join here reaps the worker without blocking the stream.
    thread.join()


# Strip role labels that the model sometimes hallucinates into its reply.
def _clean_response(text):
    response = text.strip()
    # Drop anything after a fabricated "User:" turn.
    if "User:" in response:
        response = response.split("User:")[0].strip()
    # Keep only the text after an echoed "Assistant:" label.
    if "Assistant:" in response:
        response = response.split("Assistant:")[1].strip()
    return response
# Gradio UI: markdown header, a token-count slider, and the chat widget.
with gr.Blocks() as demo:
    gr.Markdown(
        """
# Phi-2 Chatbot Demo
This chatbot was created using Microsoft's 2.7 billion parameter [phi-2](https://huggingface.co/microsoft/phi-2) Transformer model.

In order to reduce the response time on this hardware, `max_new_tokens` has been set to `21` in the text generation pipeline. With this default configuration, it takes approximately `60 seconds` for the response to start being generated, and streamed one word at a time. Use the slider below to increase or decrease the length of the generated text.
"""
    )

    # Extra input routed into generate() as its max_new_tokens argument.
    tokens_slider = gr.Slider(
        minimum=8,
        maximum=128,
        value=21,
        label="Maximum new tokens",
        info="A larger `max_new_tokens` parameter value gives you longer text responses but at the cost of a slower response time.",
    )

    chatbot = gr.ChatInterface(
        fn=generate,
        additional_inputs=[tokens_slider],
        stop_btn=None,
        examples=[["Who is Leonhard Euler?"]],
    )

demo.queue().launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
torch
transformers
accelerate
einops