yuna1126 committed on
Commit
9da5836
·
verified ·
1 Parent(s): 9b2b533

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -0
app.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os  # NOTE(review): unused in this file — presumably left over; confirm before removing
import subprocess
import sys

# Bootstrap: if llama-cpp-python is missing from the environment, install a
# pre-built CPU wheel at startup (avoids a slow from-source compile on a
# Hugging Face Space).
try:
    import llama_cpp
except ImportError:
    print("Installing pre-built llama-cpp-python...")
    subprocess.check_call([
        sys.executable, "-m", "pip", "install",
        "llama-cpp-python",
        # Index hosting pre-built CPU-only wheels for llama-cpp-python.
        "--extra-index-url", "https://abetlen.github.io/llama-cpp-python/whl/cpu"
    ])

import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download
18
+
19
# Model configuration
model_id = "kawasumi/Tema_Q-R4.2-GGUF"
model_file = "Tema_Q-R4.2-Q2_K.gguf"

print("Downloading model...")
# hf_hub_download fetches the GGUF file (or reuses the local cache) and
# returns its path on disk.
model_path = hf_hub_download(repo_id=model_id, filename=model_file)

print(f"Loading model from {model_path}...")
# Settings tightened to save memory
llm = Llama(
    model_path=model_path,
    n_ctx=1024,      # small context window keeps the KV cache small
    n_threads=2,     # modest thread count for a shared CPU host
    use_mmap=False,  # read the weights into RAM instead of memory-mapping
    n_batch=128,     # smaller prompt batch lowers peak memory
)
print("Model loaded.")
36
+
37
def chat_response(message, history):
    """Stream a model reply to *message*, conditioning on prior turns.

    Args:
        message: The user's latest input text.
        history: Previous turns supplied by gr.ChatInterface — either a
            list of (user, assistant) pairs or a list of
            {"role": ..., "content": ...} dicts depending on the Gradio
            message format in use; both are handled.

    Yields:
        The accumulated response text after each streamed token, so the
        UI updates incrementally.
    """
    # Rebuild the whole conversation in Gemma-style turn markers. The
    # original built the prompt from `message` alone, silently dropping
    # `history` and losing all multi-turn context.
    parts = []
    for turn in history:
        if isinstance(turn, dict):  # "messages" format
            role = "model" if turn.get("role") == "assistant" else "user"
            parts.append(f"<start_of_turn>{role}\n{turn.get('content', '')}<end_of_turn>\n")
        else:  # (user, assistant) pair format
            user_msg, bot_msg = turn
            if user_msg:
                parts.append(f"<start_of_turn>user\n{user_msg}<end_of_turn>\n")
            if bot_msg:
                parts.append(f"<start_of_turn>model\n{bot_msg}<end_of_turn>\n")
    parts.append(f"<start_of_turn>user\n{message}<end_of_turn>\n<start_of_turn>model\n")
    prompt = "".join(parts)

    output = llm(
        prompt,
        max_tokens=512,
        # NOTE(review): stopping on the bare word "user" also truncates any
        # answer that merely contains it — consider "<start_of_turn>user".
        stop=["<end_of_turn>", "user"],
        stream=True,
    )

    # Accumulate streamed fragments and yield the running text.
    response = ""
    for chunk in output:
        response += chunk["choices"][0]["text"]
        yield response
53
+
54
# Minimal chat UI wired to the streaming generator above; gr.ChatInterface
# handles the conversation state and passes (message, history) through.
demo = gr.ChatInterface(
    chat_response,
    title="Tema_Q-R4.2 Chat",
)

if __name__ == "__main__":
    # Bind on all interfaces; 7860 is the conventional HF Spaces port.
    demo.launch(server_name="0.0.0.0", server_port=7860)