Samuel4677 committed (verified)
Commit ea0182e · 1 Parent(s): b0d1b16

Update app.py

Files changed (1): app.py (+39, -52)
app.py CHANGED
@@ -1,64 +1,51 @@
- import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
-
- if __name__ == "__main__":
-     demo.launch()
+ import warnings
+ warnings.filterwarnings("ignore")
+
+ import logging
+ logging.getLogger("streamlit").setLevel(logging.ERROR)
+
+ import streamlit as st
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+
+ @st.cache_resource
+ def load_model():
+     model_name = "radlab/polish-gpt2-small-v2"
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModelForCausalLM.from_pretrained(model_name)
+     return tokenizer, model
+
+ tokenizer, model = load_model()
+
+ st.set_page_config(page_title="Polski Chatbot AI", page_icon="🤖")
+ st.title("🤖 Polski Chatbot AI")
+ st.caption("Model: radlab/polish-gpt2-small-v2")
+
+ if "history" not in st.session_state:
+     st.session_state.history = ""
+
+ user_input = st.text_input("Wpisz wiadomość:", "")
+
+ if st.button("Wyślij") and user_input.strip() != "":
+     st.session_state.history += f"Użytkownik: {user_input}\nAI:"
+
+     input_ids = tokenizer.encode(st.session_state.history, return_tensors="pt", truncation=True, max_length=1024)
+     output = model.generate(
+         input_ids,
+         max_length=input_ids.shape[1] + 80,
+         pad_token_id=tokenizer.eos_token_id,
+         do_sample=True,
+         top_k=50,
+         top_p=0.95,
+         temperature=0.7
+     )
+     output_text = tokenizer.decode(output[0], skip_special_tokens=True)
+
+     model_reply = output_text[len(st.session_state.history):].split("Użytkownik:")[0].strip()
+     st.session_state.history += f" {model_reply}\n"
+
+ st.subheader("🗨️ Historia rozmów")
+ st.text_area("📖", st.session_state.history.strip(), height=300)
+
+ if st.button("🧹 Wyczyść historię"):
+     st.session_state.history = ""
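
Reviewer note: the character-offset slice in the new code (`output_text[len(st.session_state.history):]`) only works while `tokenizer.decode()` reproduces the prompt text exactly; once the prompt is truncated at 1024 tokens that assumption breaks and the extracted reply can come out garbled. Below is a minimal standalone sketch, not part of the commit, of the more robust token-position slice, reusing the commit's model and generation settings. The `history` value is illustrative; "Użytkownik:"/"AI:" are the Polish "User:"/"AI:" turn markers the app builds its history with.

# Sketch (not from the commit): extract the reply by slicing the output
# tensor at the prompt's token length instead of slicing the decoded
# string at the prompt's character length.
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "radlab/polish-gpt2-small-v2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Illustrative history; the app accumulates these turn markers per message.
history = "Użytkownik: Cześć, jak się masz?\nAI:"
input_ids = tokenizer.encode(history, return_tensors="pt",
                             truncation=True, max_length=1024)

output = model.generate(
    input_ids,
    max_length=input_ids.shape[1] + 80,  # same budget as the commit: 80 new tokens
    pad_token_id=tokenizer.eos_token_id,
    do_sample=True,
    top_k=50,
    top_p=0.95,
    temperature=0.7,
)

# generate() returns prompt + continuation for causal LMs, so everything
# after input_ids.shape[1] is newly generated, regardless of truncation.
gen_tokens = output[0][input_ids.shape[1]:]
model_reply = tokenizer.decode(gen_tokens, skip_special_tokens=True)
# Cut at the next user marker in case the model starts inventing turns.
model_reply = model_reply.split("Użytkownik:")[0].strip()
print(model_reply)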