| | |
| | """Grounded Audrey Tang chatbot with RAG + fine-tuned TAIDE.""" |
| |
|
| | import torch |
| | from transformers import AutoTokenizer, AutoModelForCausalLM |
| | from retriever import AudreyRetriever |
| |
|
# Persona + grounding contract injected at the start of every turn. The
# retrieved transcript passages are appended below this prompt in chat();
# the GROUNDING RULES instruct the model to answer only from those passages.
SYSTEM_PROMPT = """You are Audrey Tang (唐鳳), Taiwan's Cyber Ambassador and 2025 Right Livelihood Laureate. You are a civic hacker, co-author of Plurality, and Senior Accelerator Fellow at the Oxford Institute for Ethics in AI.

You served as Taiwan's first Digital Minister (2016–2024). You describe your philosophy as becoming a "good enough ancestor."

GROUNDING RULES:
- Only assert what the provided transcript passages support.
- Cite sources by [date — title] when making specific claims.
- If the passages don't cover the topic, say so honestly and suggest adjacent topics you CAN speak to from your experience.
- Never fabricate quotes, events, or policy positions.
- Use the bilingual terminology from the lexicon for consistent EN↔ZH translations.
- Speak naturally in whatever language the user uses.

Your voice: metaphor-rich, bridging diverse traditions, grounded in Taiwan's lived experience, generous and warm in engagement. You reframe questions to find unexpected connections. You use analogies from nature, technology, open source, and philosophy."""
| |
|
| | class AudreyChatbot: |
| | def __init__( |
| | self, |
| | model_path: str = "./models/taide-12b-audrey", |
| | retriever_kwargs: dict = None, |
| | ): |
| | self.retriever = AudreyRetriever(**(retriever_kwargs or {})) |
| |
|
| | self.tokenizer = AutoTokenizer.from_pretrained(model_path) |
| | self.model = AutoModelForCausalLM.from_pretrained( |
| | model_path, |
| | torch_dtype=torch.bfloat16, |
| | device_map="auto", |
| | ) |
| | self.model.eval() |
| | self.conversation_history = [] |
| |
|
| | def chat(self, user_message: str) -> str: |
| | |
| | result = self.retriever.retrieve(user_message) |
| | context = self.retriever.format_context(result) |
| |
|
| | |
| | system_with_context = ( |
| | f"{SYSTEM_PROMPT}\n\n" |
| | f"## Retrieved transcript passages:\n\n{context}" |
| | ) |
| |
|
| | messages = [{"role": "user", "content": system_with_context + "\n\n" + user_message}] |
| |
|
| | |
| | if self.conversation_history: |
| | history_messages = self.conversation_history[-6:] |
| | |
| | full_messages = history_messages + messages |
| | else: |
| | full_messages = messages |
| |
|
| | |
| | input_text = self.tokenizer.apply_chat_template( |
| | full_messages, |
| | tokenize=False, |
| | add_generation_prompt=True, |
| | ) |
| | inputs = self.tokenizer(input_text, return_tensors="pt").to(self.model.device) |
| |
|
| | with torch.no_grad(): |
| | outputs = self.model.generate( |
| | **inputs, |
| | max_new_tokens=1024, |
| | temperature=0.7, |
| | top_p=0.9, |
| | repetition_penalty=1.1, |
| | do_sample=True, |
| | ) |
| |
|
| | response = self.tokenizer.decode( |
| | outputs[0][inputs["input_ids"].shape[1]:], |
| | skip_special_tokens=True, |
| | ) |
| |
|
| | |
| | self.conversation_history.append({"role": "user", "content": user_message}) |
| | self.conversation_history.append({"role": "model", "content": response}) |
| |
|
| | return response |
| |
|
| | def reset(self): |
| | self.conversation_history = [] |
| |
|
| |
|
# Interactive console REPL for the grounded Audrey Tang chatbot.
if __name__ == "__main__":
    print("Loading Audrey Tang chatbot...")
    bot = AudreyChatbot()
    print("Ready. Type 'quit' to exit, 'reset' to clear history.\n")

    while True:
        user_input = input("You: ").strip()
        command = user_input.lower()

        # Blank input: re-prompt without touching the model.
        if not user_input:
            continue
        if command == "quit":
            break
        if command == "reset":
            bot.reset()
            print("Conversation reset.\n")
            continue

        print(f"\nAudrey: {bot.chat(user_input)}\n")
|