# ============================================================================
# BioPacific service config. Single source of truth for ports, GPU assignment,
# model paths, memory budgets, and paper-search agent options.
# ./start_service.sh reads this file on every invocation; edit and re-run.
#
# Paths are resolved relative to the repository root (the directory that
# contains this file). Absolute paths are accepted too.
#
# Service port layout:
#   embedding (Qwen3-Embedding-8B) : 7770
#   llm (Qwen3.5-9B)               : 7771
#   qdrant (vector DB)             : 7772
#   agent (paper agent HTTP)       : 7773
# ============================================================================
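# Smoke test after ./start_service.sh (a sketch, not read by the script):
# vLLM exposes a /health endpoint and Qdrant exposes /readyz; the agent's
# health path below is an assumption, so adjust it to whatever the agent
# actually serves.
#
#   curl -s http://localhost:7770/health # embedding server (vLLM)
#   curl -s http://localhost:7771/health # LLM server (vLLM)
#   curl -s http://localhost:7772/readyz # Qdrant
#   curl -s http://localhost:7773/health # paper agent (assumed path)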
# ----------------------------------------------------------------------------
# Global settings
# ----------------------------------------------------------------------------
env_name: biopacific # conda env name to auto-detect for python / vllm
service_host: 0.0.0.0 # vLLM + Qdrant HTTP listen address
# ----------------------------------------------------------------------------
# Qdrant vector database
# ----------------------------------------------------------------------------
qdrant:
  binary: pipeline/s4-embedding/qdrant/qdrant # path to the qdrant executable
  storage_path: pipeline/s4-embedding/vectors # on-disk location of indexed vectors
  port: 7772 # HTTP port
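  # A quick way to confirm Qdrant is up and see what it stores (these are
  # standard Qdrant REST routes; the `papers` collection is created by stage 4):
  #
  #   curl -s http://localhost:7772/collections
  #   curl -s http://localhost:7772/collections/papers # status and point count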
# ----------------------------------------------------------------------------
# Qwen3-Embedding-8B (embedding model, vLLM)
#
# Turns text into numeric vectors (embeddings) for search. Clients call this
# server on `port` below; only embeddings are served (no chat here).
# ----------------------------------------------------------------------------
embedding:
  served_name: Qwen3-Embedding-8B
  model_dir: models/Qwen3-Embedding-8B
  # TCP port this embedding HTTP server listens on (must not collide with the
  # other service ports listed at the top of this file).
  port: 7770
  # Which GPU(s) to use: "0" = first GPU. Use "0,1" only if you run multi-GPU
  # tensor parallel and your start script expects several devices.
  gpu: "0"
  # Fraction of GPU VRAM vLLM may reserve (0.0–1.0). Lower it if you run out of memory.
  gpu_memory_utilization: 0.8
  # Max input length in tokens for one embedding request; raise only if you need
  # longer texts and your GPU has enough memory.
  max_model_len: 32767
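  # Sketch of an embedding request (vLLM serves the OpenAI-compatible
  # /v1/embeddings route; the model name clients send is `served_name` above):
  #
  #   curl -s http://localhost:7770/v1/embeddings \
  #     -H "Content-Type: application/json" \
  #     -d '{"model": "Qwen3-Embedding-8B", "input": "CRISPR base editing in yeast"}'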
# ----------------------------------------------------------------------------
# Qwen3.5-9B (chat / completions LLM, vLLM)
#
# One model for parsing queries, refining snippets, and writing answers.
# Served through vLLM's OpenAI-compatible HTTP API. Tune the sampling
# settings below for more or less randomness in replies.
# ----------------------------------------------------------------------------
llm:
  served_name: llm
  model_dir: models/Qwen3.5-9B
  # TCP port for this LLM HTTP server (different from embedding and Qdrant).
  port: 7771
  # GPU index for this server, e.g. "1" = second GPU. Use a free GPU not used by embedding.
  gpu: "1"
  # How much of that GPU's VRAM vLLM may use; reduce if you hit out-of-memory errors.
  gpu_memory_utilization: 0.8
  # Randomness: higher = more creative / less repeatable; lower = steadier answers.
  temperature: 0.6
  # Nucleus sampling (0–1): sample only from the smallest set of tokens whose
  # cumulative probability reaches top_p. Near 1 = broader; lower = more focused.
  top_p: 0.9
  # Hard cap on how many new tokens the model may generate in one reply (not input length).
  max_tokens: 4096
  # Max context window (prompt + history + new text) in tokens; required by ./start_service.sh.
  max_model_len: 65536
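  # Sketch of a chat request using the sampling defaults above (vLLM's
  # OpenAI-compatible /v1/chat/completions route; "llm" is `served_name`):
  #
  #   curl -s http://localhost:7771/v1/chat/completions \
  #     -H "Content-Type: application/json" \
  #     -d '{"model": "llm",
  #          "messages": [{"role": "user", "content": "Summarize: ..."}],
  #          "temperature": 0.6, "top_p": 0.9, "max_tokens": 4096}'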
pipeline:
  # Stage 1: download PubMed metadata and abstracts, grouped by journal.
  s1_index:
    start_year: 2000 # First publication year to include.
    end_year: current # Last year to include; "current" means this calendar year.
    max_retries: 3 # Retry a whole journal this many times if the indexing run fails.
    retry_wait_seconds: 60 # Wait this many seconds between those journal-level retries.
    ncbi_api_key: "67f60f7a3eb8692d7a03168cb753b4271808" # Optional PubMed API key; allows faster requests.
    output_dir: pipeline/s1-index # Where one JSON file per journal is written.
    journals: # Output name -> journal ISSN used in PubMed.
      nature: "0028-0836"
      nature_communications: "2041-1723"
      pnas: "0027-8424"
      acs_synthetic_biology: "2161-5063"
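    # For reference, the kind of PubMed E-utilities query stage 1 issues per
    # journal can be reproduced by hand ([is] = ISSN field, [dp] = publication
    # date; the end year 2025 stands in for "current" here, and the exact
    # query the code builds may differ):
    #
    #   curl -s 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=%220028-0836%22%5Bis%5D+AND+2000%3A2025%5Bdp%5D&retmax=0'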
  # Stage 2: keep only papers relevant to the topic below.
  s2_filter:
    input_dir: pipeline/s1-index # Reads the per-journal JSON files from s1_index.
    output_dir: pipeline/s2-filter # Writes filtered results here.
    keyword: "synthetic biology" # Topic used to decide whether a paper is relevant.
    batch_size: 16 # Max concurrent model API calls during filtering.
    api_endpoint: "https://api.fireworks.ai/inference/v1" # OpenAI-compatible API base URL.
    api_key: "utKx5V7ovmjcL62G2j0sbSC8JDAnRGkmZf35IRBBAMs5Ofse" # API key for that service.
    model: "accounts/fireworks/models/qwen3-vl-30b-a3b-instruct" # Model used for yes/no relevance checks.
    direct_copy_journals: # Journals to keep without LLM filtering.
      - acs_synthetic_biology
    filter_journals: # Journals that should go through the LLM filter.
      - nature
      - nature_communications
      - pnas
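    # Sketch of a single relevance check against the endpoint above (the
    # prompt wording is illustrative, not the exact prompt the filter uses):
    #
    #   curl -s "$api_endpoint/chat/completions" \
    #     -H "Authorization: Bearer $api_key" \
    #     -H "Content-Type: application/json" \
    #     -d '{"model": "accounts/fireworks/models/qwen3-vl-30b-a3b-instruct",
    #          "messages": [{"role": "user",
    #            "content": "Is this paper about synthetic biology? Answer yes or no.\nTitle: ..."}]}'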
  # Stage 3: fetch full-text sections when possible; otherwise keep abstracts only.
  s3_fetch:
    input_dir: pipeline/s2-filter # Reads relevant-paper JSON files from s2_filter.
    output_dir: pipeline/s3-fetch # Writes fetched/enriched paper JSON files here.
    fetch_interval: 1 # Minimum seconds per paper; helps avoid hitting sites too fast.
    checkpoint_interval: 50 # Save resume progress after this many processed papers.
    timeout: 90 # Per-paper timeout in seconds for DOI lookup, HTTP fetch, and browser fallback.
    abstract_only_journals: # Journals that should skip full-text fetching and stay abstract-only.
      - acs_synthetic_biology
    fetch_journals: # Journals where DOI/full-text fetching should run.
      - nature
      - nature_communications
      - pnas
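    # Rough runtime math: fetch_interval is a per-paper floor, so 10,000
    # papers take at least 10,000 s (~2.8 h), and each paper that exhausts
    # its timeout costs up to 90 s instead of ~1 s.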
  # Stage 4: turn fetched papers into vectors and store them in Qdrant.
  s4_embedding:
    input_dir: pipeline/s3-fetch # Reads fetched journal JSON files from s3_fetch.
    papers_dir: pipeline/s4-embedding/papers # Stores one normalized JSON file per paper (PMID.json).
    manifest_path: pipeline/s4-embedding/paper-embedding-manifest.json # Local state for incremental re-indexing.
    collection: papers # Qdrant collection name to create/update.
    batch_size: 32 # Papers per embedding + Qdrant upsert batch.
    timeout: 300 # Timeout in seconds for embedding API and Qdrant requests.
    embedding_model: Qwen3-Embedding-8B # Model name sent to the embeddings server.
    preprocess_limit: null # Optional cap for preprocessing this run; null means no limit.
    index_limit: null # Optional cap for indexing new/changed papers this run; null means no limit.
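    # After a run, stored points can be spot-checked with Qdrant's standard
    # scroll route (returns a few raw points with their payloads):
    #
    #   curl -s http://localhost:7772/collections/papers/points/scroll \
    #     -H "Content-Type: application/json" \
    #     -d '{"limit": 3, "with_payload": true}'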
  # Stage 5: chat service that searches papers and answers with paper-grounded summaries.
  s5_agent:
    enhanced_query: true # true = parse the question into filters + search text; false = plain vector search only.
    top_k: 5 # Default number of papers retrieved for each user question.
    collection: papers # Qdrant collection the agent searches.
    timeout_seconds: 30.0 # HTTP timeout for retrieval and LLM calls inside the agent.
    history_turns: 5 # Recent chat turns kept per session. Raising this slows responses and can cause errors if the context exceeds the model's max tokens.
    service_port: 7773 # Port for the paper-agent REST API.
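    # Hypothetical client call, for illustration only: the route name and
    # payload fields here are assumptions, so check the agent code for the
    # real REST schema.
    #
    #   curl -s http://localhost:7773/chat \
    #     -H "Content-Type: application/json" \
    #     -d '{"question": "Recent advances in genetic circuit design?", "top_k": 5}'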