kawasumi committed on
Commit
49dde48
·
verified ·
1 Parent(s): 2517087

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +71 -0
app.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import subprocess
import sys

# llama-cpp-python ships no prebuilt wheel on the default index for every
# platform, so a plain `pip install` in the Space image can be missing it.
# Fall back to installing the CPU wheel from the project's own wheel index.
try:
    import llama_cpp
except ImportError:
    print("Installing pre-built llama-cpp-python...")
    subprocess.check_call([
        sys.executable, "-m", "pip", "install",
        "llama-cpp-python",
        "--extra-index-url", "https://abetlen.github.io/llama-cpp-python/whl/cpu",
    ])
    # Re-import immediately so a broken install fails fast here, with the
    # pip output still visible, instead of later at `from llama_cpp import Llama`.
    import llama_cpp
import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download

# Model configuration: GGUF quantized checkpoint hosted on the Hugging Face Hub.
model_id = "kawasumi/Tema_Q-R-4B-GGUF"
model_file = "Tema_Q-R-4B-Q4_K_M.gguf"

# Maximum number of characters accepted per user message
# (enforced in chat_response before the model is invoked).
MAX_INPUT_CHARS = 300
print("Downloading model...")
# Download the GGUF file from the Hub (reuses the local HF cache if present).
model_path = hf_hub_download(repo_id=model_id, filename=model_file)

print(f"Loading model from {model_path}...")
llm = Llama(
    model_path=model_path,
    n_ctx=1024,      # context window size in tokens
    n_threads=2,     # presumably sized for a small CPU Space tier — TODO confirm
    use_mmap=False,  # load the weights fully into RAM instead of memory-mapping
    n_batch=128,     # prompt-processing batch size
)
print("Model loaded.")
def chat_response(message, history):
    """Stream a model reply for *message*.

    Yields the accumulated partial response after every streamed chunk so
    gr.ChatInterface can render it incrementally. *history* is part of the
    ChatInterface callback contract but is not used (single-turn prompting).
    """
    # Guard clause: reject over-long input before touching the model.
    if len(message) > MAX_INPUT_CHARS:
        yield f"入力が長すぎます。{MAX_INPUT_CHARS}文字以内で入力してください。(現在 {len(message)} 文字)"
        return

    # Single-turn chat template using <start_of_turn>/<end_of_turn> markers.
    prompt = f"<start_of_turn>user\n{message}<end_of_turn>\n<start_of_turn>model\n"

    try:
        stream = llm(
            prompt,
            max_tokens=512,
            stop=["<end_of_turn>", "user"],
            stream=True,
        )
        partial = ""
        for piece in stream:
            partial += piece["choices"][0]["text"]
            yield partial
    except Exception as err:
        # Boundary handler: surface any inference failure to the chat UI
        # instead of crashing the Gradio worker.
        yield f"エラーが発生しました: {str(err)}"
# Build the chat UI. The original comment claimed the input-length limit was
# noted in the description, but no description was actually passed to
# ChatInterface — add one so the UI documents the limit that
# chat_response() enforces.
demo = gr.ChatInterface(
    fn=chat_response,
    title="Tema_Q-R-4B Chat",
    description=f"入力は{MAX_INPUT_CHARS}文字以内でお願いします。",
)
if __name__ == "__main__":
    # Listen on all interfaces, port 7860 — presumably the port the hosting
    # environment (Hugging Face Spaces) expects; confirm before changing.
    demo.launch(server_name="0.0.0.0", server_port=7860)