                time_str = "00:00:00"
            else:
                time_str = item["time"].strftime("%H:%M:%S")
            role = item["role"].lower()
            content = item["content"]
            lines.append(f"{time_str} | {role} - {content}")
        return "\n".join(lines)
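
    # Illustrative output of the formatting above (timestamps and role names
    # here are made-up values, not taken from a real session):
    #   00:00:00 | user - Hello?
    #   00:00:07 | assistant - Hi! How can I help?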
    def process_interrupted_messages(self):
        # FIXME: simple and dirty way to process interrupted messages
        with self.lock:
            for msg in self.messages:
                if "interrupted_at" in msg and "audio_duration" in msg:
                    # Cut the message content at the point where playback was interrupted
                    percent = msg["interrupted_at"] / msg["audio_duration"]
                    if percent < 1:  # if percent >= 1, the message was not interrupted
                        orig_content = msg["content"]["text"] if isinstance(msg["content"], dict) else msg["content"]
                        cut_content = orig_content[:int(len(orig_content) * percent)] + "... (interrupted)"
                        if isinstance(msg["content"], dict):
                            msg["content"]["text"] = cut_content
                        else:
                            msg["content"] = cut_content
                    del msg["interrupted_at"], msg["audio_duration"]  # don't process this message again
                    msg["handled"] = False
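
# A minimal sketch of the truncation arithmetic above; the message values are
# illustrative, not taken from the codebase:
#
#   msg = {"content": "Hello there, how can I help you today?",  # 38 chars
#          "interrupted_at": 4.0, "audio_duration": 10.0}
#   percent = 4.0 / 10.0                        # 0.4
#   msg["content"][:int(38 * percent)]          # keeps the first 15 characters
#   # -> "Hello there, ho... (interrupted)" once the suffix is appended
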
class AgentConfigManager:
    def __init__(self):
        self._agent_list = self._load_agent_list()
        self._resolve_nested_agents()

    def _load_agent_list(self):
        # Merge every JSON file in the "agents" directory into one dict
        agent_list = {}
        agents_dir = os.path.join(os.path.dirname(__file__), "agents")
        for file in os.listdir(agents_dir):
            if file.endswith(".json"):
                with open(os.path.join(agents_dir, file), "r") as f:
                    agent_list.update(json.load(f))
        return agent_list

    def _resolve_nested_agents(self):
        # Replace string values of "*_agent" keys with the referenced agent's config
        for agent_name, agent_config in self._agent_list.items():
            for key, value in agent_config.items():
                if key.endswith("_agent") and isinstance(value, str):
                    self._agent_list[agent_name][key] = self._agent_list[value]

    def add_agent(self, agent_name, agent_config):
        self._agent_list[agent_name] = agent_config
        self._resolve_nested_agents()

    def get_config(self, agent_name):
        if agent_name not in self._agent_list:
            raise ValueError(f"Agent {agent_name} not found")
        return self._agent_list[agent_name]


agent_config_manager = AgentConfigManager()
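
# Illustration of the "_agent" reference resolution above; the file name, keys,
# and agent configs are hypothetical:
#
#   # agents/story.json
#   # {"narrator": {"model_name": "gpt-4o-mini", "system_prompt": "..."},
#   #  "hero":     {"model_name": "gpt-4o-mini", "narrator_agent": "narrator"}}
#
#   config = agent_config_manager.get_config("hero")
#   config["narrator_agent"]["model_name"]  # -> "gpt-4o-mini" (resolved dict)
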
class BaseLLMAgent:
    def __init__(self,
                 model_name,
                 system_prompt,
                 examples=None
                 ):
        # Allow the system prompt to be supplied as a list of lines
        if isinstance(system_prompt, list):
            system_prompt = "\n".join(system_prompt)
        system_prompt = system_prompt.replace("{character_agent_message_format_voice_tone}", character_agent_message_format_voice_tone)
        system_prompt = system_prompt.replace("{character_agent_message_format_narrator_comments}", character_agent_message_format_narrator_comments)
        # Force litellm to use the OpenAI API if no provider is specified
        model_name = f"openai/{model_name}" if "/" not in model_name else model_name
        self.model_name = model_name
        self.system_prompt = system_prompt
        self.examples = examples
        # Heuristic: a prompt that mentions "json" is expected to produce JSON output
        self._output_json = "json" in system_prompt.lower()

    @property
    def output_json(self):
        return self._output_json

    @logger.catch(reraise=True)
    def completion(self, context, stream=False, temperature=0.5):
        assert hasattr(context, "get_messages"), "Context must have get_messages method"
        assert not (stream and self.output_json), "Streamed JSON responses are not supported"
        messages = [
            {
                "role": "system",
                "content": self.system_prompt
            }
        ]
        # Inject few-shot examples right after the system prompt
        if self.examples:
            for example in self.examples:
                messages.append({"role": "user", "content": example["user"]})