Samuel4677 committed on
Commit
2751646
·
verified ·
1 Parent(s): f2373c7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -41
app.py CHANGED
@@ -1,45 +1,36 @@
1
- # app.py
2
  import gradio as gr
3
- from transformers import AutoTokenizer, AutoModelForCausalLM
4
- import torch
5
 
6
- # Szybszy i sensowny model
7
- model_name = "tiiuae/falcon-rw-1b"
8
- tokenizer = AutoTokenizer.from_pretrained(model_name)
9
- model = AutoModelForCausalLM.from_pretrained(model_name)
10
 
11
- def generate_response(user_input, history):
12
- # Kontrola historii, ostatnie 3 interakcje
13
- short_history = history[-3:] if history else []
 
 
 
 
 
 
 
14
 
15
- # Budowanie promptu
16
- prompt = ""
17
- for human, bot in short_history:
18
- prompt += f"User: {human}\nAI: {bot}\n"
19
- prompt += f"User: {user_input}\nAI:"
20
-
21
- input_ids = tokenizer.encode(prompt, return_tensors="pt", truncation=True, max_length=1024)
22
- output = model.generate(
23
- input_ids,
24
- max_new_tokens=80,
25
- do_sample=True,
26
- top_k=50,
27
- top_p=0.95,
28
- temperature=0.7,
29
- pad_token_id=tokenizer.eos_token_id
30
- )
31
-
32
- output_text = tokenizer.decode(output[0], skip_special_tokens=True)
33
- reply = output_text[len(prompt):].split("User:")[0].strip()
34
- history.append((user_input, reply))
35
- return reply, history
36
-
37
- iface = gr.Interface(
38
- fn=generate_response,
39
- inputs=[gr.Textbox(label="Twoje pytanie"), gr.State([])],
40
- outputs=[gr.Textbox(label="Odpowiedź AI"), gr.State([])],
41
- title="🤖 Polski Chatbot AI",
42
- description="Chatbot działający na bazie modelu Falcon-RW-1B. Zadaj pytanie, a AI postara się odpowiedzieć mądrze i szybko."
43
- )
44
-
45
- iface.launch()
 
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
3
 
4
+ model_id = "tiiuae/falcon-rw-1b" # Szybszy i sensowniejszy model
 
 
 
5
 
6
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
7
+ model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
8
+ generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
9
+
10
+ chat_history = []
11
+
12
+ def chat(user_input):
13
+ global chat_history
14
+ chat_history.append(f"User: {user_input}")
15
+ prompt = "\n".join(chat_history) + "\nAI:"
16
 
17
+ response = generator(prompt, max_new_tokens=80, do_sample=True, top_k=40, top_p=0.9, temperature=0.7)[0]["generated_text"]
18
+ reply = response[len(prompt):].strip().split("\nUser:")[0]
19
+
20
+ chat_history.append(f"AI: {reply}")
21
+ return reply
22
+
23
+ with gr.Blocks() as demo:
24
+ gr.Markdown("## 🤖 Polski Chatbot AI – Szybki i Inteligentny")
25
+ chatbox = gr.Chatbot()
26
+ msg = gr.Textbox(label="Twoja wiadomość")
27
+ send_btn = gr.Button("Wyślij")
28
+
29
+ def respond(user_input, history=[]):
30
+ reply = chat(user_input)
31
+ history.append((user_input, reply))
32
+ return history, ""
33
+
34
+ send_btn.click(respond, inputs=[msg, chatbox], outputs=[chatbox, msg])
35
+
36
+ demo.launch()