Bahareh Kavousi nejad committed on
Commit
d8e0ea6
·
1 Parent(s): 9e10d4b

From the beginning :_)

Browse files
Files changed (2) hide show
  1. app.py +16 -52
  2. requirements.txt +1 -7
app.py CHANGED
@@ -1,58 +1,22 @@
1
  import gradio as gr
2
- from transformers import AutoTokenizer, AutoModelForCausalLM
3
 
4
# Name of the GPTQ-quantized UniNER checkpoint served by this app.
MODEL_NAME = "SebastianSchramm/UniNER-7B-type-GPTQ-4bit-128g-actorder_True"

# Load the tokenizer and the causal LM once at import time; device_map="auto"
# lets accelerate place the (quantized) weights on the available hardware.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, legacy=False)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, device_map="auto")
13
 
14
def chatbot_response(user_input, chat_history=None):
    """Append the user's message to the conversation, generate a model
    reply, and return the updated history.

    Parameters
    ----------
    user_input : str
        The message typed by the user.
    chat_history : list[tuple[str, str]] | None
        Accumulated ``(speaker, text)`` pairs; a fresh list is created
        when ``None``.

    Returns
    -------
    tuple[list, list]
        The same updated history twice: one copy feeds the Chatbot
        display, the other updates the gr.State holding the conversation.
    """
    # Bug fix: the original used a mutable default (`chat_history=[]`),
    # which is shared across calls and leaks conversation between users.
    if chat_history is None:
        chat_history = []

    chat_history.append(("User", user_input))

    # Flatten the whole conversation into a single prompt string.
    prompt = " ".join(f"{speaker}: {text}" for speaker, text in chat_history)

    # Generate a response with the module-level tokenizer/model.
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(inputs.input_ids, max_length=200, num_return_sequences=1)
    bot_response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    chat_history.append(("Bot", bot_response))

    return chat_history, chat_history
32
 
33
# ---- Gradio UI -------------------------------------------------------------
with gr.Blocks() as interface:
    gr.Markdown("### Chat with Your LLM")

    # Conversation state shared between callbacks, plus the widgets.
    chat_history = gr.State([])
    chat_display = gr.Chatbot()
    user_input = gr.Textbox(
        show_label=False,
        placeholder="Type your message and press Enter"
    )
    send_button = gr.Button("Send")

    # The button click and pressing Enter both run the same handler,
    # so register them in one loop (click first, matching the original).
    for register in (send_button.click, user_input.submit):
        register(
            chatbot_response,
            inputs=[user_input, chat_history],
            outputs=[chat_display, chat_history],
        )

# Launch the Gradio app
interface.launch()
 
1
  import gradio as gr
2
+ from transformers import pipeline
3
 
4
# Text-generation pipeline backing the chat handler (loaded once at startup).
MODEL_ID = "EleutherAI/gpt-neo-1.3B"
chatbot = pipeline("text-generation", model=MODEL_ID)
 
 
 
 
 
 
 
6
 
7
def chat(input_text):
    """Return the model's continuation of *input_text*.

    Runs the module-level ``chatbot`` text-generation pipeline with
    sampling enabled and a 100-token length cap, and returns the
    generated text of the first (only) candidate.
    """
    generations = chatbot(input_text, max_length=100, do_sample=True)
    first = generations[0]
    return first["generated_text"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
# ---- Gradio interface ------------------------------------------------------
# Plain text in, plain text out, wired to the chat() handler.
interface_config = {
    "fn": chat,
    "inputs": "text",
    "outputs": "text",
    "title": "Chat with an LLM",
    "description": "Type your message and chat with the LLM!",
}
interface = gr.Interface(**interface_config)

# Launch the app
interface.launch()
requirements.txt CHANGED
@@ -1,8 +1,2 @@
1
- flask
2
- gradio
3
  transformers
4
- torch
5
- torchvision
6
- torchaudio
7
- accelerate>=0.26.0
8
- optimum
 
 
 
1
  transformers
2
+ gradio
3
+ torch