| | """MedGenesis – OpenAI async helpers (summary + QA). |
| | |
| | Changes vs. legacy version |
| | ~~~~~~~~~~~~~~~~~~~~~~~~~~ |
| | * Centralised **`_client()`** getter with singleton cache (avoids TLS overhead). |
| | * Exponential‑back‑off retry (2×, 4×) for transient 5xx. |
| | * Supports model override (`model="gpt-4o-mini"`, etc.). |
| | * Allows temperature & max_tokens tuning via kwargs. |
| | * Returns *str* (content) directly; orchestrator wraps if needed. |
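
Usage sketch (assumes ``OPENAI_API_KEY`` is exported; skipped under doctest)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
>>> import asyncio
>>> asyncio.run(ai_summarize("TP53 mutations in glioma cohorts"))  # doctest: +SKIP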
| | """ |
from __future__ import annotations

import asyncio
import functools
import os

import openai

openai.api_key = os.getenv("OPENAI_API_KEY")
if not openai.api_key:
    raise RuntimeError("OPENAI_API_KEY not set in environment")


@functools.lru_cache(maxsize=1)
def _client() -> openai.AsyncOpenAI:
    """Return a singleton ``AsyncOpenAI`` client, cached for the process lifetime."""
    return openai.AsyncOpenAI(api_key=openai.api_key)


async def _chat(
    messages: list[dict[str, str]],
    *,
    model: str,
    max_tokens: int,
    temperature: float = 0.2,
    retries: int = 3,
) -> str:
    """Send a chat-completion request, retrying transient failures with exponential back-off."""
    delay = 2
    for attempt in range(retries):
        try:
            resp = await _client().chat.completions.create(
                model=model,
                messages=messages,
                max_tokens=max_tokens,
                temperature=temperature,
            )
            return resp.choices[0].message.content.strip()
        except openai.OpenAIError:
            if attempt == retries - 1:
                break  # retries exhausted; fall through to the placeholder
            await asyncio.sleep(delay)  # back-off schedule: 2 s, 4 s, ...
            delay *= 2
    return "[OpenAI request failed]"


async def ai_summarize(text: str, *, prompt: str | None = None, model: str = "gpt-4o", max_tokens: int = 350) -> str:
    """LLM summariser tuned for biomedical search blobs."""
    if not prompt:
        prompt = (
            "Summarize the following biomedical search results. Highlight key findings, "
            "significant genes/drugs/trials, and suggest future research directions."
        )
    system = {"role": "system", "content": "You are an expert biomedical research assistant."}
    user = {"role": "user", "content": f"{prompt}\n\n{text}"}
    return await _chat([system, user], model=model, max_tokens=max_tokens)


async def ai_qa(question: str, *, context: str = "", model: str = "gpt-4o", max_tokens: int = 350) -> str:
    """One‑shot QA against the provided *context*."""
    system = {"role": "system", "content": "You are an advanced biomedical research agent."}
    user = {
        "role": "user",
        "content": f"Answer the question using the given context.\n\nQuestion: {question}\nContext: {context}",
    }
    return await _chat([system, user], model=model, max_tokens=max_tokens)
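

# Minimal smoke test; an illustrative sketch, not part of the MedGenesis API.
# Assumes OPENAI_API_KEY is exported and network access is available; the
# sample text below is invented for demonstration only.
if __name__ == "__main__":
    sample = "BRCA1 frameshift variants were enriched in early-onset cohorts."
    print(asyncio.run(ai_summarize(sample, max_tokens=120)))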