Update app.py
Browse files
app.py
CHANGED
|
@@ -210,7 +210,7 @@ def trans(text):
|
|
| 210 |
return None
|
| 211 |
|
| 212 |
# Attach a language tag to the text and convert it to bytes
|
| 213 |
-
input_text =
|
| 214 |
|
| 215 |
# Tokenize
|
| 216 |
tokens = llm.tokenize(input_text)
|
|
@@ -284,8 +284,8 @@ def respond(
|
|
| 284 |
repeat_penalty: float,
|
| 285 |
):
|
| 286 |
llama = Llama("models/madlad400-3b-mt-q8_0.gguf")
|
| 287 |
-
#tokens = llama.tokenize(
|
| 288 |
-
tokens = llama.tokenize(
|
| 289 |
llama.encode(tokens)
|
| 290 |
tokens = [llama.decoder_start_token()]
|
| 291 |
for token in llama.generate(tokens, top_k=40, top_p=0.95, temp=1, repeat_penalty=1.0):
|
|
|
|
| 210 |
return None
|
| 211 |
|
| 212 |
# Attach a language tag to the text and convert it to bytes
|
| 213 |
+
input_text = f"<2ja>{text}"
|
| 214 |
|
| 215 |
# Tokenize
|
| 216 |
tokens = llm.tokenize(input_text)
|
|
|
|
| 284 |
repeat_penalty: float,
|
| 285 |
):
|
| 286 |
llama = Llama("models/madlad400-3b-mt-q8_0.gguf")
|
| 287 |
+
#tokens = llama.tokenize(f"<2ja>{message}")#
|
| 288 |
+
tokens = llama.tokenize(f"<2ja>{message}".encode("utf-8"))
|
| 289 |
llama.encode(tokens)
|
| 290 |
tokens = [llama.decoder_start_token()]
|
| 291 |
for token in llama.generate(tokens, top_k=40, top_p=0.95, temp=1, repeat_penalty=1.0):
|