dlxj commited on
Commit ·
f9cdae0
1
Parent(s): b2c4ccd
add gradio app.py
Browse files- .vscode/settings.json +5 -0
- app.py +411 -0
- readme.txt +9 -0
- requirements.txt +5 -0
.vscode/settings.json
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"python-envs.defaultEnvManager": "ms-python.python:conda",
|
| 3 |
+
"python-envs.defaultPackageManager": "ms-python.python:conda",
|
| 4 |
+
"python-envs.pythonProjects": []
|
| 5 |
+
}
|
app.py
ADDED
|
@@ -0,0 +1,411 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
|
| 3 |
+
# ["ls", "-al", "/app"]
|
| 4 |
+
# 成功执行命令
|
| 5 |
+
|
| 6 |
+
# ["lsof", "-i:8080"]
|
| 7 |
+
# 这个命令测试 llama 后端启动成功没有
|
| 8 |
+
|
| 9 |
+
# & "E:\huggingface_echodict\SakuraLLM\llama\llama-server.exe" -m "E:\huggingface_echodict\SakuraLLM\Sakura-14B-Qwen2beta-v0.9.2_IQ4_XS.gguf" -t 8 -c 4096 -ngl 999 --repeat-penalty 1.2 --temp 0 --top-k 10 --top-p 0.1 -a "Sakura-14B-Qwen2beta-v0.9.2_IQ4_XS" --port 8080
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# ["nohup", "/app/llama.cpp/llama-server", "-m", "/app/sakura-14b-qwen2.5-v1.0-q6k.gguf", "-t", "8", "-c", "4096", "-ngl", "999", "--repeat-penalty", "1.2", "--temp", "0", "--top-k", "10", "--top-p", "0.1", "-a", "sakura-14b-qwen2.5-v1.0-q6k", "--port", "8080", ">", "/app/output", "&"]
|
| 13 |
+
|
| 14 |
+
# uvicorn "app:app" --host "0.0.0.0" --port 7860
|
| 15 |
+
|
| 16 |
+
# see /root/huggingface/NLPP_Audio/sakura_trs.py
|
| 17 |
+
|
| 18 |
+
# llama.cpp/llama-server -m /mnt/y/ai/sakura-14b-qwen2.5-v1.0-q6k.gguf -t 8 -c 4096 -ngl 999 --repeat-penalty 1.2 --temp 0 --top-k 10 --top-p 0.1 -a sakura-14b-qwen2.5-v1.0-q6k --port 8080
|
| 19 |
+
|
| 20 |
+
from fastapi import FastAPI
|
| 21 |
+
|
| 22 |
+
import json
|
| 23 |
+
def jsonparse(s):
    """Parse a JSON string, tolerating raw control characters inside strings."""
    return json.loads(s, strict=False)
|
| 25 |
+
def jsonstring(d):
    """Serialize *d* to JSON, keeping non-ASCII characters human-readable."""
    return json.dumps(d, ensure_ascii=False)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
import gradio as gr
|
| 30 |
+
|
| 31 |
+
initQ = False
|
| 32 |
+
|
| 33 |
+
def run_sakurallm():
    """Launch the llama.cpp server that backs the Sakura translation model.

    Returns the ``subprocess.Popen`` handle so the caller can monitor or
    terminate the server (the original discarded it, leaving no way to
    manage the child process).

    NOTE(review): paths are hard-coded to a Windows development machine;
    this only works there — confirm before deploying elsewhere.
    """
    import subprocess

    def run_command(command):
        # BUG FIX: the original opened stdout/stderr as PIPEs but never
        # read them, so the server would eventually block once the OS
        # pipe buffer filled.  Discard output instead and return the
        # process handle.
        return subprocess.Popen(
            command,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )

    # llama.cpp server flags: model path, 8 threads, 4096 context, full GPU
    # offload, greedy-ish sampling, alias, and port 8080 (matches the
    # base_url used by tranlate()).
    command = [
        r'E:\huggingface_echodict\SakuraLLM\llama\llama-server.exe',
        '-m', r"E:\huggingface_echodict\SakuraLLM\Sakura-14B-Qwen2beta-v0.9.2_IQ4_XS.gguf",
        '-t', '8',
        '-c', '4096',
        '-ngl', '999',
        '--repeat-penalty', '1.2',
        '--temp', '0',
        '--top-k', '10',
        '--top-p', '0.1',
        '-a', 'Sakura-14B-Qwen2beta-v0.9.2_IQ4_XS',
        '--port', '8080',
    ]
    return run_command(command)
|
| 87 |
+
|
| 88 |
+
# run_sakurallm()
|
| 89 |
+
|
| 90 |
+
def tranlate(text, dic=None):
    """Translate Japanese *text* into Chinese via the local llama.cpp server.

    Generator yielding ``[content, finish_reason, error]`` triples as the
    completion streams in; on failure yields a single ``[None, None, exc]``.

    Parameters:
        text: the Japanese source text; when falsy, a built-in demo text
            is used instead.
        dic: optional glossary — a list of
            ``{"src": ..., "dst": ..., "info": ...}`` dicts.

    BUG FIXES vs. the original:
      * ``if dic: gpt_dict = gpt_dict`` was a no-op, so the caller's
        glossary was silently ignored; it is now actually used.
      * the glossary prompt text was built and then immediately thrown
        away by a second ``query_v010`` assignment without it; the
        glossary is now included in the prompt as intended.
    """
    from openai import OpenAI

    # api_key is irrelevant for llama.cpp's OpenAI-compatible endpoint;
    # base_url points at the local server started by run_sakurallm().
    api_base_url = "http://localhost:8080/v1"
    client = OpenAI(api_key="114514", base_url=api_base_url)

    # Demo input used when no text is supplied by the caller.
    input_text = """
○○○○○○○○
行くぞ、▲高嶺**▲!
高嶺
はい!
よっ。
あっ。
げ、失敗。悪い、●が取りにいくから!
姉ヶ崎
はい、ボール。
▲姉ヶ崎*▲? サンキュ。
どういたしまして。ここの部って男女で練習するんだ?
ウチの部、そこらへん適当なんだよ。
ふぅん。それにしてもキレイな子だよね……。
▲高嶺**▲? うん、たしかに。
あんな子がチームメイトなんだ……。
ん?
ううん、なんにも。じゃあね、がんばって。
"""

    # Default demo glossary (character names).
    gpt_dict = [
        {
            "src": "周",
            "dst": "周",
            "info": "名字,男孩",
        },
        {
            "src": "真昼",
            "dst": "真昼",
            "info": "名字,女孩",
        },
    ]

    if text:
        input_text = text
    if dic:
        gpt_dict = dic  # BUG FIX: was ``gpt_dict = gpt_dict`` (no-op)

    # Render the glossary as "src->dst #info" lines for the v0.10 prompt.
    gpt_dict_text_list = []
    for gpt in gpt_dict:
        src = gpt['src']
        dst = gpt['dst']
        info = gpt.get('info')
        if info:
            gpt_dict_text_list.append(f"{src}->{dst} #{info}")
        else:
            gpt_dict_text_list.append(f"{src}->{dst}")

    gpt_dict_raw_text = "\n".join(gpt_dict_text_list)
    # v0.10版本的user prompt,需要填写术语表,也可以留空。
    # BUG FIX: the original rebuilt query_v010 without the glossary text.
    query_v010 = ("据以下术语表(可以为空):\n" + gpt_dict_raw_text + "\n\n"
                  + "将下面的日文文本根据上述术语表的对应关系和备注翻译成中文:" + input_text)

    extra_query = {
        'do_sample': True,
        'num_beams': 1,
        'repetition_penalty': 1.0,
    }

    try:
        for output in client.chat.completions.create(
            model="sukinishiro",
            ############# v0.10 prompt #############
            messages=[
                {
                    "role": "system",
                    "content": "你是一个轻小说翻译模型,可以流畅通顺地使用给定的术语表以日本轻小说的风格将日文翻译成简体中文,并联系上下文正确使用人称代词,注意不要混淆使役态和被动态的主语和宾语,不要擅自添加原文中没有的代词,也不要擅自增加或减少换行。"
                },
                {
                    "role": "user",
                    "content": f"{query_v010}"
                }
            ],
            temperature=0,
            top_p=0.1,
            max_tokens=32768,
            frequency_penalty=1.2,
            seed=-1,
            extra_query=extra_query,
            stream=True,
        ):
            # Each streamed chunk carries either delta content or, on the
            # final chunk, a finish_reason.
            yield [output.choices[0].delta.content, output.choices[0].finish_reason, None]

    except Exception as e:
        # Surface the error to the caller instead of crashing the UI.
        print('####### error: ', e)
        yield [None, None, e]
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
import gradio as gr
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware


# FastAPI application that hosts the Gradio UI.  CORS is wide open so the
# endpoints are reachable from any browser origin.
app = FastAPI()

_cors_options = dict(
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
app.add_middleware(CORSMiddleware, **_cors_options)
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def greet(text):
    """Return a friendly greeting for *text*."""
    return "Hello, " + text + "!"
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def exce_cmd(cmd):
    """Execute a JSON-encoded argv list and return its captured output.

    *cmd* is a JSON string such as ``["ls", "-al", "/app"]``.  On the first
    call this also starts the SakuraLLM backend server as a side effect
    (guarded by the module-level ``initQ`` flag).

    Returns ``{"stdout": ..., "stderr": ...}``.

    SECURITY NOTE(review): this endpoint runs arbitrary commands supplied
    by the web UI.  Acceptable for a private debugging space, dangerous
    anywhere publicly reachable.
    """
    global initQ

    # Lazily start the llama.cpp backend exactly once.
    if not initQ:
        initQ = True
        run_sakurallm()

    import subprocess

    argv = jsonparse(cmd)

    # subprocess.run with an argv list (shell=False) replaces the manual
    # Popen/communicate dance.  BUG FIX: the original bare .decode("utf-8")
    # raised UnicodeDecodeError on non-UTF-8 tool output; errors="replace"
    # keeps the endpoint alive.
    result = subprocess.run(argv, capture_output=True)

    return {
        "stdout": result.stdout.decode("utf-8", errors="replace"),
        "stderr": result.stderr.decode("utf-8", errors="replace"),
    }
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
def create_gradio():
    """Build the Gradio UI: a debug command runner plus a translation panel.

    Returns the ``gr.Blocks`` demo, which the caller mounts on the
    FastAPI app.
    """

    with gr.Blocks() as demo:
        # --- debug command runner ---
        gr.Markdown("""Enter Any command\nexample: ["ls", "-al", "/app"]""")

        textbox_cmd = gr.Textbox(label="""["lsof", "-i:8080"]""")

        # BUG FIX: button label typo ("Exeute" -> "Execute").
        button_summit = gr.Button("Execute")

        output = gr.Textbox(label="")

        # --- translation panel ---
        textbox_jp = gr.Textbox(value="それと一番大事なもの", label="input", lines=3)
        textbox_ch = gr.Textbox(label="output", lines=3)

        button_trans = gr.Button("translate")

        button_summit.click(fn=exce_cmd, inputs=textbox_cmd, outputs=output)

        def trans(text):
            """Stream the translation into the output box, accumulating text."""
            print('require tranlate now...')
            result = ''
            for content, finish_reason, err in tranlate(text=text):
                if err:
                    # Show the error in the output box and stop streaming.
                    yield str(err)
                    break
                if finish_reason:
                    print("\nfinish reason is", finish_reason)
                elif content:
                    print(content, end="")
                    result += content
                yield result

        button_trans.click(trans, textbox_jp, textbox_ch)

    return demo
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
if __name__ == '__main__':
    import uvicorn

    # Mount the Gradio UI at the root of the FastAPI app and serve it on
    # the standard Hugging Face Spaces port.
    demo = create_gradio()
    app = gr.mount_gradio_app(app, demo, path="/")
    uvicorn.run(app, port=7860, host="0.0.0.0")
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
# from fastapi import FastAPI
|
| 330 |
+
# import uvicorn
|
| 331 |
+
# import gradio as gr
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
# app = FastAPI()
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
# app = FastAPI()
|
| 339 |
+
|
| 340 |
+
# io = gr.Interface(lambda x: "Hello, " + x + "!", "textbox", "textbox")
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
# with gr.Blocks() as demo:
|
| 344 |
+
# def exce_cmd(cmd):
|
| 345 |
+
# import subprocess
|
| 346 |
+
|
| 347 |
+
# cmd = jsonparse(cmd)
|
| 348 |
+
|
| 349 |
+
# process = subprocess.Popen(
|
| 350 |
+
# cmd, # ["free", "-g"], # ["lsof", "-i:8080"],
|
| 351 |
+
# #['nohup', '/app/llama.cpp/llama-server', '-m', '/app/sakura-14b-qwen2.5-v1.0-q6k.gguf', '-t', '8', '-c', '4096', '-ngl', '999', '--repeat-penalty', '1.2', '--temp', '0', '--top-k', '10', '--top-p', '0.1', '-a', 'sakura-14b-qwen2.5-v1.0-q6k', '--port', '8080', '>', '/app/output', '&'],
|
| 352 |
+
# stdout=subprocess.PIPE,
|
| 353 |
+
# stderr=subprocess.PIPE
|
| 354 |
+
# )
|
| 355 |
+
# stdout, stderr = process.communicate()
|
| 356 |
+
|
| 357 |
+
# stdout_decoded = stdout.decode("utf-8")
|
| 358 |
+
# stderr_decoded = stderr.decode("utf-8")
|
| 359 |
+
|
| 360 |
+
# return { "stdout":stdout_decoded, "stderr": stderr_decoded }
|
| 361 |
+
|
| 362 |
+
# out = exce_cmd(jsonstring(["lsof", "-i:7860"]))
|
| 363 |
+
|
| 364 |
+
# # gr.Interface(exce_cmd, "textbox", "textbox")
|
| 365 |
+
|
| 366 |
+
# demo.load(lambda x: "Hello, " + x + "!", "textbox", "textbox")
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
# app = gr.mount_gradio_app(app, demo, "/")
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
# # app = gr.mount_gradio_app(app, share.main(), path="/share/")
|
| 373 |
+
# # app = gr.mount_gradio_app(app, main.main(), path="/")
|
| 374 |
+
|
| 375 |
+
# if __name__ == '__main__':
|
| 376 |
+
# uvicorn.run(app, port=7860, host="0.0.0.0")
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
# demo.launch(server_name='0.0.0.0', share=False, inbrowser=False)
|
| 380 |
+
|
| 381 |
+
# io = gr.Interface(lambda x: "Hello, " + x + "!", "textbox", "textbox")
|
| 382 |
+
# gradio_app = gr.routes.App.create_app(io)
|
| 383 |
+
|
| 384 |
+
# app.mount("/gradio", gradio_app)
|
| 385 |
+
|
| 386 |
+
# @app.get("/")
|
| 387 |
+
# def greet_json():
|
| 388 |
+
# global initQ
|
| 389 |
+
# if not initQ:
|
| 390 |
+
# initQ = True
|
| 391 |
+
# # run_sakurallm()
|
| 392 |
+
|
| 393 |
+
# import subprocess
|
| 394 |
+
|
| 395 |
+
# process = subprocess.Popen(
|
| 396 |
+
# ['tail', '-n', '10', '/app/output'], # ["free", "-g"], # ["lsof", "-i:8080"],
|
| 397 |
+
# #['nohup', '/app/llama.cpp/llama-server', '-m', '/app/sakura-14b-qwen2.5-v1.0-q6k.gguf', '-t', '8', '-c', '4096', '-ngl', '999', '--repeat-penalty', '1.2', '--temp', '0', '--top-k', '10', '--top-p', '0.1', '-a', 'sakura-14b-qwen2.5-v1.0-q6k', '--port', '8080', '>', '/app/output', '&'],
|
| 398 |
+
# stdout=subprocess.PIPE,
|
| 399 |
+
# stderr=subprocess.PIPE
|
| 400 |
+
# )
|
| 401 |
+
# stdout, stderr = process.communicate()
|
| 402 |
+
|
| 403 |
+
# stdout_decoded = stdout.decode("utf-8")
|
| 404 |
+
# stderr_decoded = stderr.decode("utf-8")
|
| 405 |
+
|
| 406 |
+
# return {
|
| 407 |
+
# "Hello": "World!",
|
| 408 |
+
# "stdout": stdout_decoded,
|
| 409 |
+
# "stderr": stderr_decoded,
|
| 410 |
+
# }
|
| 411 |
+
|
readme.txt
CHANGED
|
@@ -1,3 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
see echodict/README.md -> LiveABC互動日语 -> 轻小说翻译
|
| 2 |
|
| 3 |
see Y:/ai/Galgame_Dataset
|
|
|
|
| 1 |
+
|
| 2 |
+
see huggingface spaces Sakurallm_server_254_6006
|
| 3 |
+
|
| 4 |
+
conda create -n SakuraLLM python==3.10 pip
|
| 5 |
+
|
| 6 |
+
pip install -r .\requirements.txt
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
|
| 10 |
see echodict/README.md -> LiveABC互動日语 -> 轻小说翻译
|
| 11 |
|
| 12 |
see Y:/ai/Galgame_Dataset
|
requirements.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi
|
| 2 |
+
gradio
|
| 3 |
+
openai
|
| 4 |
+
proxynt==1.1.28
|
| 5 |
+
uvicorn[standard]
|