File size: 1,589 Bytes
b06ee4d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
import { createContext } from "react";

// Module-level counter backing createMessageId; resets to 0 on page reload.
let nextMessageId = 0;

/**
 * Returns a monotonically increasing message id, unique within this
 * JS session: 0, 1, 2, … (not persisted across reloads).
 */
export function createMessageId(): number {
  const id = nextMessageId;
  nextMessageId = id + 1;
  return id;
}

/** One entry in the chat transcript. */
export interface ChatMessage {
  /** Session-unique id (see `createMessageId` above). */
  id: number;
  /** Who produced the message. */
  role: "user" | "assistant" | "system";
  /** The message text. */
  content: string;
  /** Optional reasoning text — presumably only set for thinking-capable models; confirm against the provider. */
  reasoning?: string;
}

/**
 * Model-loading state, discriminated on `state`:
 * - `idle`    — nothing loaded or in flight.
 * - `loading` — in progress, with optional progress value and status message.
 * - `ready`   — load completed.
 * - `error`   — load failed; `error` holds the failure description.
 */
export type LoadingStatus =
  | { state: "idle" }
  | { state: "loading"; progress?: number; message?: string }
  | { state: "ready" }
  | { state: "error"; error: string };

export type ReasoningEffort = "low" | "medium" | "high";

/** Describes one selectable model. */
export interface ModelConfig {
  /** Model identifier, e.g. a Hugging Face repo id (see AVAILABLE_MODELS). */
  id: string;
  /** Human-readable name shown in the UI. */
  label: string;
  /** Whether this is a "thinking" model variant — presumably gates ChatMessage.reasoning; confirm in the provider. */
  thinking: boolean;
}

/**
 * Models the user can choose between. All are ONNX-community builds of
 * 7B Olmo-Hybrid variants; only the "Think" variant has `thinking: true`.
 */
export const AVAILABLE_MODELS: ModelConfig[] = [
  {
    id: "onnx-community/Olmo-Hybrid-Instruct-SFT-7B-ONNX",
    label: "Instruct SFT",
    thinking: false,
  },
  {
    id: "onnx-community/Olmo-Hybrid-Instruct-DPO-7B-ONNX",
    label: "Instruct DPO",
    thinking: false,
  },
  {
    id: "onnx-community/Olmo-Hybrid-Think-SFT-7B-ONNX",
    label: "Think SFT",
    thinking: true,
  },
];

/**
 * Shape of the value supplied through {@link LLMContext}: the full chat
 * state plus the actions the UI can invoke. The provider implementation
 * lives elsewhere — semantics below are inferred from names where noted.
 */
export interface LLMContextValue {
  /** Current model-loading state. */
  status: LoadingStatus;
  /** Chat transcript, oldest first — presumably; confirm ordering in the provider. */
  messages: ChatMessage[];
  /** True while a response is being generated. */
  isGenerating: boolean;
  /** Generation throughput — presumably tokens per second; confirm in the provider. */
  tps: number;
  /** Currently selected reasoning effort. */
  reasoningEffort: ReasoningEffort;
  /** Changes the reasoning effort. */
  setReasoningEffort: (effort: ReasoningEffort) => void;
  /** Model currently selected in the UI (may differ from the loaded one). */
  selectedModel: ModelConfig;
  /** Changes the selected model. */
  setSelectedModel: (model: ModelConfig) => void;
  /** Id of the model actually loaded, or null if none is loaded yet. */
  loadedModelId: string | null;
  /** Starts loading the selected model. */
  loadModel: () => void;
  /** Sends a user message with the given text. */
  send: (text: string) => void;
  /** Stops the in-flight generation. */
  stop: () => void;
  /** Clears the transcript. */
  clearChat: () => void;
  /** Replaces the content of the message at `index` — NOTE(review): index into `messages`, not a message id; confirm. */
  editMessage: (index: number, newContent: string) => void;
  /** Re-runs generation for the message at `index`. */
  retryMessage: (index: number) => void;
}

export const LLMContext = createContext<LLMContextValue | null>(null);