Commit b299ae3 · committed by drbenjamin
Parent(s): 8091023
Message: added files
pages/🦙_Alpaca.py CHANGED (+36 -6)
@@ -8,10 +8,11 @@
 #### Loading needed Python libraries
 import streamlit as st
 #from llamacpypy import Llama
-import llamacpp
-from llama_cpp import Llama
 import os
 import subprocess
+#import llamacpp
+#from llama_cpp import Llama
+import sys
 
 
 
@@ -134,7 +135,7 @@ def llama(
 
 
 
-### Python Wrapper (functions above
+### Python Wrapper (functions above)
 #text = []
 #for token in llama(prompt = 'What is your purpose?', repeat_penalty = 1.05, skip_prompt = False, interactive = False):
 #  print(token, end = '', flush = True)
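The commented loop in this hunk consumes a generator llama() defined earlier in the file; its body is not part of this diff. As a hypothetical sketch only (the binary path and CLI flags are assumptions about a local llama.cpp build, not code from this repository), such a streaming wrapper could look like:

import subprocess

def llama_stream(prompt, repeat_penalty = 1.05,
                 binary = "./llama.cpp/main",
                 model_path = "models/7B/ggml-model-q4_0.bin"):
    # Hypothetical: launch the llama.cpp CLI and yield its output as produced.
    process = subprocess.Popen(
        [binary, "-m", model_path, "-p", prompt,
         "--repeat-penalty", str(repeat_penalty)],
        stdout = subprocess.PIPE, text = True, bufsize = 1)
    for line in process.stdout:
        yield line  # stream incrementally instead of waiting for the process to exit
    process.wait()

# Usage mirroring the commented loop above:
# for token in llama_stream(prompt = 'What is your purpose?'):
#     print(token, end = '', flush = True)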
@@ -161,11 +162,40 @@ def llama(
 #model = llamacpp.PyLLAMA(model_path, params)
 #text = model.predict("Hello, I'm a llama.", 10)
 #st.write(text)
+#params = llamacpp.gpt_params('./models/7B/ggml-model-q4_0.bin', # model
+#                             512,  # ctx_size
+#                             100,  # n_predict
+#                             40,   # top_k
+#                             0.95, # top_p
+#                             0.85, # temp
+#                             1.30, # repeat_penalty
+#                             -1,   # seed
+#                             8,    # threads
+#                             64,   # repeat_last_n
+#                             8,    # batch_size
+#)
+#model = llamacpp.PyLLAMA(params)
+#model.add_bos()  # Adds "beginning of string" token
+#model.update_input("A llama is a")
+#model.print_startup_stats()
+#model.prepare_context()
+
+#model.ingest_all_pending_input(True)
+#while not model.is_finished():
+#    text, is_finished = model.infer_text()
+#    print(text, end="")
+
+#    if is_finished:
+#        break
+
+# Flush stdout
+#sys.stdout.flush()
+#model.print_end_stats()
 
 
 
 ### Llama cpp
-llm = Llama(model_path="models/7B/ggml-model-q4_0.bin")
-output = llm("Q: Name the planets in the solar system? A: ", max_tokens = 32, stop = ["Q:", "\n"], echo = True)
-st.write(output)
+#llm = Llama(model_path="models/7B/ggml-model-q4_0.bin")
+#output = llm("Q: Name the planets in the solar system? A: ", max_tokens = 32, stop = ["Q:", "\n"], echo = True)
+#st.write(output)
 
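If the llamacpp block above were re-enabled, its flow runs in three stages: build gpt_params and a PyLLAMA model, seed the context with add_bos() and update_input(), then ingest_all_pending_input() and loop on infer_text() until is_finished(). The closing sys.stdout.flush() is what the new import sys in the first hunk supports.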
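For reference, a minimal sketch of the llama_cpp path this commit comments out, assuming the llama-cpp-python package is installed and a quantized model exists at the path used above; the st.cache_resource wrapper is an addition for Streamlit reruns, not code from the original file:

import streamlit as st
from llama_cpp import Llama

@st.cache_resource  # load the model once per session, not on every script rerun
def load_llm(path = "models/7B/ggml-model-q4_0.bin"):
    return Llama(model_path = path)

llm = load_llm()
output = llm("Q: Name the planets in the solar system? A: ",
             max_tokens = 32,      # cap the completion length
             stop = ["Q:", "\n"],  # stop at the next question or newline
             echo = True)          # include the prompt in the result
st.write(output["choices"][0]["text"])  # completion arrives in an OpenAI-style dict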