Update app.py
Browse files
app.py
CHANGED
|
@@ -6,7 +6,7 @@ import os
|
|
| 6 |
import json
|
| 7 |
import subprocess
|
| 8 |
import sys
|
| 9 |
-
from llama_cpp import Llama
|
| 10 |
from llama_cpp_agent import LlamaCppAgent
|
| 11 |
from llama_cpp_agent import MessagesFormatterType
|
| 12 |
from llama_cpp_agent.providers import LlamaCppPythonProvider
|
|
@@ -61,11 +61,13 @@ llm = None
|
|
| 61 |
llm_model = None
|
| 62 |
|
| 63 |
def trans(text):
    """Debug helper: tokenize a Japanese sample string with the global llama.cpp model.

    Fix: the previous version contained a stray, unterminated ``print(``
    statement, which is a SyntaxError — it has been removed.

    NOTE(review): ``text`` is overwritten with a hardcoded sample below, so the
    argument is currently ignored — presumably a debugging leftover; confirm
    before shipping.

    :param text: source text to translate (currently ignored, see note).
    :return: the (overwritten) text string.
    """
    text = "こんにちは"
    # <2ja> is the target-language tag expected by the translation model.
    input_text = f"<2ja>{text}".encode('utf-8')
    tokens = llm.tokenize(input_text)
    print("Tokens:", tokens)
    initial_tokens = [llm.llama_model_decoder_start_token()]
    print("Initial Tokens:", initial_tokens)
    return text
|
|
|
|
| 6 |
import json
|
| 7 |
import subprocess
|
| 8 |
import sys
|
| 9 |
+
from llama_cpp import Llama,llama_model_decoder_start_token
|
| 10 |
from llama_cpp_agent import LlamaCppAgent
|
| 11 |
from llama_cpp_agent import MessagesFormatterType
|
| 12 |
from llama_cpp_agent.providers import LlamaCppPythonProvider
|
|
|
|
| 61 |
llm_model = None
|
| 62 |
|
| 63 |
def trans(text):
    """Tokenize *text* for Japanese translation with the module-level llama.cpp model.

    Fixes relative to the original:
    - ``text`` was clobbered with a hardcoded ``"こんにちは"``, so the function
      ignored its argument; it now uses the caller's input.
    - ``llm.llama_get_model()`` was called twice (once only to print); the
      handle is now fetched once and reused.
    - an explicit error is raised when the global ``llm`` has not been loaded
      (module level declares ``llm = None``), instead of an AttributeError.

    :param text: source text; prefixed with the ``<2ja>`` target-language tag.
    :return: the input text unchanged (tokenization results are only printed —
             this is a debug helper).
    :raises RuntimeError: if the global ``llm`` model has not been initialised.
    """
    if llm is None:
        raise RuntimeError("llm model is not loaded")
    # Fetch the underlying C model handle once and reuse it below.
    model = llm.llama_get_model()
    print(model)
    input_text = f"<2ja>{text}".encode('utf-8')
    tokens = llm.tokenize(input_text)
    print("Tokens:", tokens)
    # Low-level llama.cpp API call — takes the raw model handle, not the
    # high-level Llama wrapper.
    print(llama_model_decoder_start_token(model))
    # NOTE(review): Llama instances may not expose this method directly; the
    # free function above is the low-level equivalent — confirm which one the
    # installed llama-cpp-python version supports.
    initial_tokens = [llm.llama_model_decoder_start_token()]
    print("Initial Tokens:", initial_tokens)
    return text
|