Spaces:
Sleeping
Sleeping
Rob Learsch committed on
Commit ·
6e3f9a7
1
Parent(s): ad01535
Update app.py
Browse files
app.py
CHANGED
|
@@ -116,50 +116,47 @@ def chat_with_musician(user_input, history, artist):
|
|
| 116 |
history = []
|
| 117 |
previous_artist = artist_history[-1]
|
| 118 |
if artist != previous_artist:
|
| 119 |
-
# Reset history if the artist changes
|
| 120 |
history.clear()
|
|
|
|
|
|
|
| 121 |
messages = []
|
|
|
|
|
|
|
|
|
|
| 122 |
|
| 123 |
-
|
| 124 |
-
messages.append(dict)
|
| 125 |
-
# Add the latest user message
|
| 126 |
messages.append({"role": "user", "content": system_message + "\n\n" + user_input})
|
|
|
|
| 127 |
try:
|
| 128 |
response = client.chat_completion(
|
| 129 |
messages=messages,
|
| 130 |
-
#model="google/gemma-2-2b-it",
|
| 131 |
-
#model="openai/gpt-oss-120b",
|
| 132 |
-
#test without specifying model
|
| 133 |
max_tokens=256,
|
| 134 |
temperature=0.75,
|
| 135 |
-
#top_p=0.9
|
| 136 |
)
|
| 137 |
-
gemma_response= response["choices"][0]["message"]["content"]
|
| 138 |
except Exception as e:
|
| 139 |
-
|
| 140 |
-
|
| 141 |
lyric_response = artist_response(gemma_response, artist)
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
except Exception as e:
|
| 156 |
-
return f"Error: {str(e)}"
|
| 157 |
lyric_response = artist_response(gemma_response, artist)
|
| 158 |
|
| 159 |
-
|
| 160 |
-
history.append(
|
| 161 |
-
artist_history.append(artist)
|
| 162 |
-
artist_history[:] = artist_history[-10:]
|
| 163 |
return lyric_response
|
| 164 |
|
| 165 |
def cosine_similarity_int8(query, embeddings):
|
|
|
|
| 116 |
history = []
|
| 117 |
previous_artist = artist_history[-1]
|
| 118 |
if artist != previous_artist:
|
|
|
|
| 119 |
history.clear()
|
| 120 |
+
|
| 121 |
+
# Convert Gradio history tuples to HF message dicts
|
| 122 |
messages = []
|
| 123 |
+
for user_msg, bot_msg in history[-5:]: # last 5 exchanges
|
| 124 |
+
messages.append({"role": "user", "content": user_msg})
|
| 125 |
+
messages.append({"role": "assistant", "content": bot_msg})
|
| 126 |
|
| 127 |
+
# Add current user message
|
|
|
|
|
|
|
| 128 |
messages.append({"role": "user", "content": system_message + "\n\n" + user_input})
|
| 129 |
+
|
| 130 |
try:
|
| 131 |
response = client.chat_completion(
|
| 132 |
messages=messages,
|
|
|
|
|
|
|
|
|
|
| 133 |
max_tokens=256,
|
| 134 |
temperature=0.75,
|
|
|
|
| 135 |
)
|
| 136 |
+
gemma_response = response["choices"][0]["message"]["content"]
|
| 137 |
except Exception as e:
|
| 138 |
+
gemma_response = f"Error: {str(e)}"
|
| 139 |
+
|
| 140 |
lyric_response = artist_response(gemma_response, artist)
|
| 141 |
+
|
| 142 |
+
# Check for repeated response logic (optional)
|
| 143 |
+
if len(messages) > 1 and lyric_response == messages[-2]["content"]:
|
| 144 |
+
messages[-1] = {"role": "user", "content": system_message_repeated + "\n\n" + user_input}
|
| 145 |
+
try:
|
| 146 |
+
response = client.chat_completion(
|
| 147 |
+
messages=messages,
|
| 148 |
+
max_tokens=256,
|
| 149 |
+
temperature=0.75,
|
| 150 |
+
)
|
| 151 |
+
gemma_response = response["choices"][0]["message"]["content"]
|
| 152 |
+
except Exception as e:
|
| 153 |
+
gemma_response = f"Error: {str(e)}"
|
|
|
|
|
|
|
| 154 |
lyric_response = artist_response(gemma_response, artist)
|
| 155 |
|
| 156 |
+
# Append new exchange to Gradio history format
|
| 157 |
+
history.append((user_input, lyric_response))
|
| 158 |
+
artist_history.append(artist)
|
| 159 |
+
artist_history[:] = artist_history[-10:]
|
| 160 |
return lyric_response
|
| 161 |
|
| 162 |
def cosine_similarity_int8(query, embeddings):
|