# SakuraLLM / app.py — dlxj — "优化日志输出" (commit 08196b6)
# NOTE(review): the four lines above were a GitHub page header pasted into the
# source; the bare token `08196b6` is a Python syntax error, so the header is
# commented out to make the file importable again.
# ["ls", "-al", "/app"]
# 成功执行命令
# ["lsof", "-i:8080"]
# 这个命令测试 llama 后端启动成功没有
# & "E:\huggingface_echodict\SakuraLLM\llama\llama-server.exe" -m "E:\huggingface_echodict\SakuraLLM\Sakura-14B-Qwen2beta-v0.9.2_IQ4_XS.gguf" -t 8 -c 4096 -ngl 999 --repeat-penalty 1.2 --temp 0 --top-k 10 --top-p 0.1 -a "Sakura-14B-Qwen2beta-v0.9.2_IQ4_XS" --port 8080
# ["nohup", "/app/llama.cpp/llama-server", "-m", "/app/sakura-14b-qwen2.5-v1.0-q6k.gguf", "-t", "8", "-c", "4096", "-ngl", "999", "--repeat-penalty", "1.2", "--temp", "0", "--top-k", "10", "--top-p", "0.1", "-a", "sakura-14b-qwen2.5-v1.0-q6k", "--port", "8080", ">", "/app/output", "&"]
# uvicorn "app:app" --host "0.0.0.0" --port 7860
# see /root/huggingface/NLPP_Audio/sakura_trs.py
# llama.cpp/llama-server -m /mnt/y/ai/sakura-14b-qwen2.5-v1.0-q6k.gguf -t 8 -c 4096 -ngl 999 --repeat-penalty 1.2 --temp 0 --top-k 10 --top-p 0.1 -a sakura-14b-qwen2.5-v1.0-q6k --port 8080
from fastapi import FastAPI
import json
def jsonparse(s):
    """Parse JSON text into Python objects.

    Uses strict=False so literal control characters (e.g. raw newlines)
    inside JSON strings are accepted instead of raising.
    """
    return json.loads(s, strict=False)
def jsonstring(d):
    """Serialize a Python object to a JSON string.

    ensure_ascii=False keeps CJK characters readable instead of
    escaping them to \\uXXXX sequences.
    """
    return json.dumps(d, ensure_ascii=False)
import gradio as gr
# Process-wide guard flag: flipped to True by exce_cmd() on its first call so
# the llama-server subprocess is launched at most once per process.
initQ = False
def run_sakurallm():
    """Launch the local llama-server process that serves the Sakura model.

    The server is started as a background subprocess; its stdout/stderr are
    relayed line-by-line to this process's stdout on daemon threads (prefixed
    "[LLAMA] " / "[LLAMA_ERR] ") so the child never blocks on a full pipe.

    Returns:
        The subprocess.Popen handle, so callers can monitor or terminate the
        server.  (Fix: the original discarded the handle; existing callers
        that ignore the return value are unaffected.)
    """
    import subprocess
    import threading

    def run_command(command):
        # text=True already implies universal newline handling, so the
        # redundant universal_newlines flag was dropped.
        process = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,   # decode output as text rather than bytes
            bufsize=1,   # line-buffered so log lines appear promptly
        )

        def log_stream(stream, prefix):
            # Forward every line of the child's output, tagged by origin.
            for line in iter(stream.readline, ""):
                print(f"{prefix}{line}", end="")
            stream.close()

        # Drain both pipes on daemon threads: keeps the main thread free and
        # prevents the child from stalling once an OS pipe buffer fills up.
        for stream, prefix in ((process.stdout, "[LLAMA] "),
                               (process.stderr, "[LLAMA_ERR] ")):
            threading.Thread(target=log_stream, args=(stream, prefix),
                             daemon=True).start()
        return process

    # NOTE(review): Windows-specific absolute paths; the Docker notes at the
    # top of this file use /app/llama.cpp/llama-server instead — confirm which
    # environment this deployment targets.
    command = [
        r'E:\huggingface_echodict\SakuraLLM\llama\llama-server.exe',
        '-m', r"E:\huggingface_echodict\SakuraLLM\Sakura-14B-Qwen2beta-v0.9.2_IQ4_XS.gguf",
        '-t', '8',
        '-c', '4096',
        '-ngl', '999',
        '--repeat-penalty', '1.2',
        '--temp', '0',
        '--top-k', '10',
        '--top-p', '0.1',
        '-a', 'Sakura-14B-Qwen2beta-v0.9.2_IQ4_XS',
        '--port', '8080',
    ]
    print("Starting SakuraLLM server...")
    return run_command(command)
# if returncode != 0:
# print(f"Error occurred: {stderr}")
# run_sakurallm()
def tranlate(text, dic=None):
    """Stream a Japanese -> Simplified-Chinese translation from the local
    Sakura model served by llama-server (OpenAI-compatible API on :8080).

    NOTE(review): the misspelled name "tranlate" is kept deliberately — the
    Gradio handler in this file calls it by that name.

    Args:
        text: Japanese source text.  Falls back to a built-in sample when falsy.
        dic:  Optional glossary, a list of {"src", "dst", "info"?} dicts.
              Falls back to a built-in sample glossary when falsy.

    Yields:
        [content, finish_reason, error] triples, one per streamed chunk:
          - content:       next text fragment (None on the final chunk);
          - finish_reason: set on the terminating chunk, else None;
          - error:         the caught exception when the request failed
                           (then content and finish_reason are both None).
    """
    from openai import OpenAI
    # api_key is arbitrary — llama-server ignores it; base_url targets the
    # server's OpenAI-compatible /v1 endpoint.
    # If auth were enabled: base_url = f"http://{{user:pass}}@host:port/v1"
    api_base_url = "http://localhost:8080/v1"
    client = OpenAI(api_key="114514", base_url=api_base_url)
    # Built-in sample passage used when no text is supplied.  (The original's
    # first, immediately-overwritten sample literal was dead code and removed.)
    input_text = """
○○○○○○○○
行くぞ、▲高嶺**▲!
高嶺
はい!
よっ。
あっ。
げ、失敗。悪い、●が取りにいくから!
姉ヶ崎
はい、ボール。
▲姉ヶ崎*▲? サンキュ。
どういたしまして。ここの部って男女で練習するんだ?
ウチの部、そこらへん適当なんだよ。
ふぅん。それにしてもキレイな子だよね……。
▲高嶺**▲? うん、たしかに。
あんな子がチームメイトなんだ……。
ん?
ううん、なんにも。じゃあね、がんばって。
"""
    # Built-in sample glossary used when no dic is supplied.
    gpt_dict = [
        {
            "src": "周",
            "dst": "周",
            "info": "名字,男孩",
        },
        {
            "src": "真昼",
            "dst": "真昼",
            "info": "名字,女孩",
        },
    ]
    if text:
        input_text = text
    if dic:
        # Bug fix: the original did `gpt_dict = gpt_dict` (a no-op), so a
        # caller-supplied glossary was silently ignored.
        gpt_dict = dic
    # Render the glossary in the "src->dst #info" format the v0.10 prompt expects.
    gpt_dict_text_list = []
    for gpt in gpt_dict:
        src = gpt['src']
        dst = gpt['dst']
        info = gpt.get('info')
        if info:
            single = f"{src}->{dst} #{info}"
        else:
            single = f"{src}->{dst}"
        gpt_dict_text_list.append(single)
    gpt_dict_raw_text = "\n".join(gpt_dict_text_list)
    # v0.10-style user prompt; the glossary section may legitimately be empty.
    # Bug fix: the original immediately rebuilt query_v010 WITHOUT the
    # glossary, so the rendered glossary never reached the model.
    query_v010 = "据以下术语表(可以为空):\n" + gpt_dict_raw_text + "\n\n" + "将下面的日文文本根据上述术语表的对应关系和备注翻译成中文:" + input_text
    # Extra sampling knobs forwarded to llama-server as query parameters.
    extra_query = {
        'do_sample': True,
        'num_beams': 1,
        'repetition_penalty': 1.0,
    }
    try:
        for output in client.chat.completions.create(
            model="sukinishiro",
            # v0.10 system prompt (the older v0.9 prompt had no glossary support).
            messages=[
                {
                    "role": "system",
                    "content": "你是一个轻小说翻译模型,可以流畅通顺地使用给定的术语表以日本轻小说的风格将日文翻译成简体中文,并联系上下文正确使用人称代词,注意不要混淆使役态和被动态的主语和宾语,不要擅自添加原文中没有的代词,也不要擅自增加或减少换行。"
                },
                {
                    "role": "user",
                    "content": f"{query_v010}"
                }
            ],
            temperature=0,
            top_p=0.1,
            max_tokens=32768,
            frequency_penalty=1.2,
            seed=-1,
            extra_query=extra_query,
            stream=True,
            # stop / n / logit_bias / response_format / tools / tool_choice /
            # user are not supported by this backend.
        ):
            # Relay each streamed chunk; the caller decides how to display it.
            yield [output.choices[0].delta.content, output.choices[0].finish_reason, None]
    except Exception as e:
        # Surface the failure to the caller instead of killing the stream.
        print('####### error: ', e)
        yield [None, None, e]
import gradio as gr
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
# FastAPI application that hosts the Gradio UI (mounted under __main__ below).
app = FastAPI()
# Wide-open CORS: accept any origin, method and header.
# NOTE(review): allow_credentials=True combined with allow_origins=["*"] is
# rejected by browsers per the CORS spec — confirm whether credentials are
# actually needed here.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
def greet(text):
    """Return a "Hello, <text>!" greeting string."""
    return "Hello, {}!".format(text)
def exce_cmd(cmd):
    """Run a command (no shell) and return its captured output.

    On first use, also boots the local SakuraLLM llama-server (guarded by the
    module-level initQ flag).

    SECURITY: this executes arbitrary commands supplied through the web UI;
    it is only acceptable inside a throwaway sandbox/container such as the
    HF Space this file targets.

    Args:
        cmd: JSON-encoded argv list, e.g. '["ls", "-al", "/app"]'.

    Returns:
        dict with "stdout" and "stderr" keys holding the decoded output.
    """
    global initQ
    if not initQ:
        # Lazily start the model server exactly once per process.
        initQ = True
        run_sakurallm()
    import subprocess
    argv = jsonparse(cmd)
    process = subprocess.Popen(
        argv,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout, stderr = process.communicate()
    # Fix: errors="replace" — command output is not guaranteed to be valid
    # UTF-8, and a UnicodeDecodeError here would 500 the request.
    return {
        "stdout": stdout.decode("utf-8", errors="replace"),
        "stderr": stderr.decode("utf-8", errors="replace"),
    }
def create_gradio():
    """Build the Gradio UI: a command-runner panel plus a translation panel."""

    def _stream_translation(text):
        # Relay streamed fragments from tranlate() into the output textbox,
        # yielding the accumulated translation after every chunk.
        print('require tranlate now...')
        accumulated = ''
        for content, finish_reason, err in tranlate(text=text):
            if err:
                # Show the error text in the UI and stop streaming.
                yield str(err)
                break
            if finish_reason:
                print("\nfinish reason is", finish_reason)
            elif content:
                print(content, end="")
                accumulated += content
            yield accumulated

    with gr.Blocks() as demo:
        gr.Markdown("""Enter Any command\nexample: ["ls", "-al", "/app"]""")
        cmd_box = gr.Textbox(label="""["lsof", "-i:8080"]""")
        run_button = gr.Button("Exeute")
        cmd_output = gr.Textbox(label="")
        jp_box = gr.Textbox(value="それと一番大事なもの", label="input", lines=3)
        ch_box = gr.Textbox(label="output", lines=3)
        translate_button = gr.Button("translate")
        run_button.click(fn=exce_cmd, inputs=cmd_box, outputs=cmd_output)
        translate_button.click(_stream_translation, jp_box, ch_box)
    return demo
if __name__ == '__main__':
    import uvicorn
    # Mount the Gradio UI at the FastAPI root, then serve on all interfaces.
    app = gr.mount_gradio_app(app, create_gradio(), path="/")
    uvicorn.run(app, port=7860, host="0.0.0.0")
# from fastapi import FastAPI
# import uvicorn
# import gradio as gr
# app = FastAPI()
# app = FastAPI()
# io = gr.Interface(lambda x: "Hello, " + x + "!", "textbox", "textbox")
# with gr.Blocks() as demo:
# def exce_cmd(cmd):
# import subprocess
# cmd = jsonparse(cmd)
# process = subprocess.Popen(
# cmd, # ["free", "-g"], # ["lsof", "-i:8080"],
# #['nohup', '/app/llama.cpp/llama-server', '-m', '/app/sakura-14b-qwen2.5-v1.0-q6k.gguf', '-t', '8', '-c', '4096', '-ngl', '999', '--repeat-penalty', '1.2', '--temp', '0', '--top-k', '10', '--top-p', '0.1', '-a', 'sakura-14b-qwen2.5-v1.0-q6k', '--port', '8080', '>', '/app/output', '&'],
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE
# )
# stdout, stderr = process.communicate()
# stdout_decoded = stdout.decode("utf-8")
# stderr_decoded = stderr.decode("utf-8")
# return { "stdout":stdout_decoded, "stderr": stderr_decoded }
# out = exce_cmd(jsonstring(["lsof", "-i:7860"]))
# # gr.Interface(exce_cmd, "textbox", "textbox")
# demo.load(lambda x: "Hello, " + x + "!", "textbox", "textbox")
# app = gr.mount_gradio_app(app, demo, "/")
# # app = gr.mount_gradio_app(app, share.main(), path="/share/")
# # app = gr.mount_gradio_app(app, main.main(), path="/")
# if __name__ == '__main__':
# uvicorn.run(app, port=7860, host="0.0.0.0")
# demo.launch(server_name='0.0.0.0', share=False, inbrowser=False)
# io = gr.Interface(lambda x: "Hello, " + x + "!", "textbox", "textbox")
# gradio_app = gr.routes.App.create_app(io)
# app.mount("/gradio", gradio_app)
# @app.get("/")
# def greet_json():
# global initQ
# if not initQ:
# initQ = True
# # run_sakurallm()
# import subprocess
# process = subprocess.Popen(
# ['tail', '-n', '10', '/app/output'], # ["free", "-g"], # ["lsof", "-i:8080"],
# #['nohup', '/app/llama.cpp/llama-server', '-m', '/app/sakura-14b-qwen2.5-v1.0-q6k.gguf', '-t', '8', '-c', '4096', '-ngl', '999', '--repeat-penalty', '1.2', '--temp', '0', '--top-k', '10', '--top-p', '0.1', '-a', 'sakura-14b-qwen2.5-v1.0-q6k', '--port', '8080', '>', '/app/output', '&'],
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE
# )
# stdout, stderr = process.communicate()
# stdout_decoded = stdout.decode("utf-8")
# stderr_decoded = stderr.decode("utf-8")
# return {
# "Hello": "World!",
# "stdout": stdout_decoded,
# "stderr": stderr_decoded,
# }