Spaces:
Sleeping
Sleeping
Update run.py
Browse files
run.py
CHANGED
|
@@ -59,7 +59,7 @@ def format_prompt0(message, history):
|
|
| 59 |
def format_prompt(message, history, system=None, RAGAddon=None, system2=None, zeichenlimit=None,historylimit=4, removeHTML=False):
|
| 60 |
if zeichenlimit is None: zeichenlimit=1000000000 # :-)
|
| 61 |
startOfString="<s>" #<s> [INST] U1 [/INST] A1</s> [INST] U2 [/INST] A2</s>
|
| 62 |
-
template0=" [INST]{system} [/INST]</s>"
|
| 63 |
template1=" [INST] {message} [/INST]"
|
| 64 |
template2=" {response}</s>"
|
| 65 |
prompt = ""
|
|
@@ -217,6 +217,7 @@ def multimodalResponse(message, history, dropdown, hfToken, request: gr.Request)
|
|
| 217 |
print(str(client.list_collections()))
|
| 218 |
x=collection.get(include=[])["ids"]
|
| 219 |
context=collection.query(query_texts=[query], n_results=1)
|
|
|
|
| 220 |
gr.Info("Kontext:\n"+str(context))
|
| 221 |
generate_kwargs = dict(
|
| 222 |
temperature=float(0.9),
|
|
@@ -226,9 +227,9 @@ def multimodalResponse(message, history, dropdown, hfToken, request: gr.Request)
|
|
| 226 |
do_sample=True,
|
| 227 |
seed=42,
|
| 228 |
)
|
| 229 |
-
system="Mit Blick auf das folgende Gespräch und den relevanten Kontext, antworte auf die aktuelle Frage des Nutzers."+\
|
| 230 |
-
"Antworte ausschließlich auf Basis der Informationen im Kontext.\n\nKontext\n\n"+\
|
| 231 |
-
str(context)
|
| 232 |
#"Given the following conversation, relevant context, and a follow up question, "+\
|
| 233 |
#"reply with an answer to the current question the user is asking. "+\
|
| 234 |
#"Return only your response to the question given the above information "+\
|
|
|
|
| 59 |
def format_prompt(message, history, system=None, RAGAddon=None, system2=None, zeichenlimit=None,historylimit=4, removeHTML=False):
|
| 60 |
if zeichenlimit is None: zeichenlimit=1000000000 # :-)
|
| 61 |
startOfString="<s>" #<s> [INST] U1 [/INST] A1</s> [INST] U2 [/INST] A2</s>
|
| 62 |
+
template0=" [INST] {system} [/INST]</s>"
|
| 63 |
template1=" [INST] {message} [/INST]"
|
| 64 |
template2=" {response}</s>"
|
| 65 |
prompt = ""
|
|
|
|
| 217 |
print(str(client.list_collections()))
|
| 218 |
x=collection.get(include=[])["ids"]
|
| 219 |
context=collection.query(query_texts=[query], n_results=1)
|
| 220 |
+
context=["<context "+str(i)+"> "+str(c)+"</context "+str(i)+">" for i,c in enumerate(context["documents"][0])]
|
| 221 |
gr.Info("Kontext:\n"+str(context))
|
| 222 |
generate_kwargs = dict(
|
| 223 |
temperature=float(0.9),
|
|
|
|
| 227 |
do_sample=True,
|
| 228 |
seed=42,
|
| 229 |
)
|
| 230 |
+
system="Mit Blick auf das folgende Gespräch und den relevanten Kontext, antworte auf die aktuelle Frage des Nutzers. "+\
|
| 231 |
+
"Antworte ausschließlich auf Basis der Informationen im Kontext.\n\nKontext:\n\n"+\
|
| 232 |
+
str("\n\n".join(context))
|
| 233 |
#"Given the following conversation, relevant context, and a follow up question, "+\
|
| 234 |
#"reply with an answer to the current question the user is asking. "+\
|
| 235 |
#"Return only your response to the question given the above information "+\
|