export type ModelOption = {
  id: string;
  label: string;
  description: string;
  gpuRequired?: boolean;
  /** Adapter-specific options passed via SIE's `options` field. */
  options?: Record<string, unknown>;
};
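
// Illustrative sketch, not part of the original file: one plausible way a caller
// could fold a model's adapter-specific `options` into a SIE request body. The
// payload shape (`model` / `options` keys) is an assumption here, not a
// documented SIE contract.
export function buildSiePayload(
  model: ModelOption,
  extra?: Record<string, unknown>,
): Record<string, unknown> {
  return {
    model: model.id,
    // Caller-supplied overrides win over the per-model defaults declared below.
    options: { ...model.options, ...extra },
  };
}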

export const RECOGNITION_MODELS: ModelOption[] = [
  {
    id: "lightonai/LightOnOCR-2-1B",
    label: "LightOnOCR-2-1B (default)",
    description: "Pixtral encoder + Qwen3 decoder, 2.1B. Strong Markdown output across dense layouts. ~4 GB to download on first call.",
  },
  {
    id: "PaddlePaddle/PaddleOCR-VL-1.5",
    label: "PaddleOCR-VL-1.5 (GPU image)",
    description: "Paddle's VLM-OCR, 1.5B. Six task modes. Available on the CUDA image (compose.gpu.yml).",
    options: { task: "ocr" },
    gpuRequired: true,
  },
  {
    id: "zai-org/GLM-OCR",
    label: "GLM-OCR (GPU only)",
    description: "CogViT + GLM-0.5B decoder, 9B in bfloat16. Premium quality, needs ~18 GB VRAM (compose.gpu.yml).",
    gpuRequired: true,
  },
];

export const STRUCTURED_MODELS: ModelOption[] = [
  {
    id: "naver-clova-ix/donut-base-finetuned-cord-v2",
    label: "Donut on CORD (receipts)",
    description: "Fine-tuned for the CORD receipt schema. Pixels in, nested JSON out.",
  },
  {
    id: "naver-clova-ix/donut-base-finetuned-docvqa",
    label: "Donut on DocVQA",
    description: "Same Donut architecture, fine-tuned for visual question answering. Returns text answers.",
  },
  {
    id: "naver-clova-ix/donut-base-finetuned-rvlcdip",
    label: "Donut on RVL-CDIP (doc classification)",
    description: "Same Donut architecture, fine-tuned for document-type classification across 16 classes (invoice, receipt, form, ...).",
  },
];

export const NER_MODELS: ModelOption[] = [
  {
    id: "urchade/gliner_multi-v2.1",
    label: "GLiNER multi (multilingual)",
    description: "280M, zero-shot NER, 100+ languages. Good default.",
  },
  {
    id: "urchade/gliner_large-v2.1",
    label: "GLiNER large (English)",
    description: "440M, English-focused, higher quality on English text.",
  },
  {
    id: "urchade/gliner_multi_pii-v1",
    label: "GLiNER multi PII",
    description: "GLiNER fine-tuned for PII extraction. Good for redaction-style pipelines on documents.",
  },
  {
    id: "numind/NuNER_Zero",
    label: "NuNER Zero",
    description: "NuMind's zero-shot NER. Different architecture from GLiNER; useful for comparing zero-shot NER families on the same input text.",
  },
];

export const config = {
  sieUrl: process.env.SIE_URL ?? "http://localhost:8080",
  sieApiKey: process.env.SIE_API_KEY,

  defaults: {
    recognition: RECOGNITION_MODELS[0].id,
    structured: STRUCTURED_MODELS[0].id,
    ner: NER_MODELS[0].id,
  },

  paths: {
    samples: "data/samples/index.json",
    sampleDir: "data/samples",
  },

  port: Number(process.env.PORT ?? 3032),
} as const;
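
// Illustrative sketch, not part of the original file: how `config` and the
// default recognition model might be combined into a request. The
// `/v1/recognize` path and the request body fields are hypothetical
// placeholders; the actual SIE routes are not specified in this module.
export async function recognizeSample(imageBase64: string): Promise<unknown> {
  const res = await fetch(`${config.sieUrl}/v1/recognize`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      // Only attach auth when an API key is configured.
      ...(config.sieApiKey ? { Authorization: `Bearer ${config.sieApiKey}` } : {}),
    },
    body: JSON.stringify({
      model: config.defaults.recognition,
      image: imageBase64,
    }),
  });
  if (!res.ok) throw new Error(`SIE request failed: ${res.status}`);
  return res.json();
}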