from typing import List, Literal, Optional, Tuple

from .groq_client import groq_chat
from .utils import get_answer
def build_prompt(
    question: str,
    mode: Literal["cot", "base"] = "base",
    exampler: Optional[List[Tuple[str, str]]] = None,
    zero_shot: bool = False,
) -> str:
    """Build the prompt string sent to the model for *question*.

    Parameters
    ----------
    question:
        The question to ask.
    mode:
        ``"cot"`` for chain-of-thought prompting, ``"base"`` for a plain
        question prompt.
    exampler:
        Optional few-shot ``(question, answer)`` pairs; used only when
        ``mode == "cot"`` and ``zero_shot`` is False.
    zero_shot:
        When True in ``"cot"`` mode, skip the few-shot examples.

    Returns
    -------
    str
        The formatted prompt.
    """
    # Few-shot CoT: prepend each (q, a) example before the target question.
    # NOTE(review): examples are concatenated with no separator between an
    # answer and the next "Q:" — preserved as-is; confirm this is intended.
    if mode == "cot" and not zero_shot:
        parts = [f"Q: {q}\nA:{a}" for q, a in (exampler or [])]
        parts.append(f"Q: {question}\nA:")
        return "".join(parts)
    # Base mode and zero-shot CoT both use the bare question prompt.
    return f"Q: {question}\nA:"
def generate_answer(
    question: str,
    model_id: str = "llama3-8b-8192",
    temperature: float = 0.5,
    max_tokens: int = 200,
    mode: Literal["cot", "base"] = "base",
    exampler: Optional[List[Tuple[str, str]]] = None,
    zero_shot: bool = False,
):
    """Answer *question* by querying the Groq chat API.

    Builds a prompt via :func:`build_prompt` (optionally chain-of-thought
    and/or few-shot), sends it to the model, then extracts the final answer
    from the raw completion with ``get_answer``.

    Parameters
    ----------
    question:
        The question to answer.
    model_id:
        Groq model identifier passed through to ``groq_chat``.
    temperature:
        Sampling temperature passed through to ``groq_chat``.
    max_tokens:
        Completion-length cap passed through to ``groq_chat``.
    mode:
        ``"cot"`` for chain-of-thought prompting, ``"base"`` for plain.
    exampler:
        Optional few-shot ``(question, answer)`` pairs for CoT mode.
    zero_shot:
        When True in ``"cot"`` mode, skip the few-shot examples.

    Returns
    -------
    tuple
        ``(reasoning, last_line)`` — the full model completion and the
        answer extracted from it by ``get_answer`` (presumably the final
        line of the reasoning; confirm against ``utils.get_answer``).
    """
    prompt = build_prompt(question, mode, exampler, zero_shot)
    reasoning = groq_chat(prompt, model_id, temperature, max_tokens)
    # get_answer distills the short final answer out of the full completion.
    last_line = get_answer(reasoning)
    return reasoning, last_line