Drbrain0620 committed on
Commit
03fde68
·
verified ·
1 Parent(s): 4a610fa

Upload folder using huggingface_hub

Browse files
Files changed (2) hide show
  1. .py +7 -0
  2. speech_to_text.py +31 -0
.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
def echo(message, history):
    """Chat handler for the echo bot: reply with the user's message verbatim.

    Args:
        message: The latest user message.
        history: Prior conversation turns (unused; required by the
            gr.ChatInterface callback signature).

    Returns:
        The incoming message, unchanged.
    """
    reply = message
    return reply
5
+
6
# Build the chat UI around the echo handler and start the local server.
demo = gr.ChatInterface(
    fn=echo,
    type="messages",
    examples=["hello", "hola", "merhaba"],
    title="Echo Bot",
)
demo.launch()
speech_to_text.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoModelForCausalLM, AutoTokenizer
2
+ import torch
3
+ import gradio as gr
4
+
5
# Hugging Face model id. NOTE(review): this repo is gated — presumably the
# runtime has HF credentials configured; confirm before deploying.
model_name = "mistralai/Mistral-7B-Instruct-v0.1"

# Load tokenizer and model once at import time; half-precision weights with
# automatic device placement (GPU if available).
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")

# Module-level conversation state shared by every caller of chat(); it grows
# without bound for the lifetime of the process.
chat_history = [{"role": "system", "content": "You are a helpful assistant."}]
11
+
12
def chat(user_input, history=None):
    """Generate a model reply to *user_input* and record both turns.

    Args:
        user_input: The latest user message.
        history: Conversation history supplied by gr.ChatInterface (ignored;
            the module-level ``chat_history`` is the source of truth).
            Accepting it with a default fixes the TypeError the original
            one-argument signature caused — ChatInterface always calls
            ``fn(message, history)``.

    Returns:
        The newly generated reply text (without the prompt echoed back).
    """
    # Record the user turn in the shared history.
    chat_history.append({"role": "user", "content": user_input})

    # Join the history into a single prompt string. Tokenizing one string
    # avoids batching, so no pad token is needed — the Mistral tokenizer ships
    # without one, and the original ``padding=True`` call would fail on it.
    # Use model.device rather than a hardcoded "cuda" check, since
    # device_map="auto" decides placement at load time.
    prompt = "\n".join(message["content"] for message in chat_history)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # max_new_tokens bounds the reply itself; the original max_length=200
    # counted prompt tokens too, so a long history could truncate the reply
    # to nothing.
    outputs = model.generate(**inputs, max_new_tokens=200)

    # Decode only the newly generated tokens — decoding outputs[0] in full
    # would echo the entire prompt back to the user.
    prompt_len = inputs["input_ids"].shape[1]
    bot_reply = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

    # Record the assistant turn so later calls see the full conversation.
    chat_history.append({"role": "assistant", "content": bot_reply})

    return bot_reply
29
+
30
# Wire the chat handler into a Gradio chat UI and expose a public share link.
# The title was mojibake in the original (UTF-8 Korean mis-decoded as Thai);
# restored to the intended Korean text.
demo = gr.ChatInterface(fn=chat, type='messages', title='이우진의 챗봇')
demo.launch(share=True)