// frontend/types.ts
/** A single chat message. `image` is an optional attachment (for models with `supportsImages`). */
export interface Message {
  role: 'system' | 'user' | 'assistant';
  content: string;
  image?: string;
}
/** A stored conversation with its message history. */
export interface Chat {
  id: string;
  title: string;
  messages: Message[];
  createdAt: number; // numeric timestamp (e.g. Date.now())
  updatedAt: number;
  modelName: string;
}
/** Metadata for a selectable model. */
export interface ModelConfig {
  name: string; // display name shown in the UI
  path: string; // Hugging Face repo id
  supportsImages: boolean;
}
export interface GenerationParams {
  temperature: number;
  maxTokens: number;
  topP: number;
  topK: number;
}
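
// Example defaults for GenerationParams. These values are a sketch, not taken
// from the original repo; tune them per model.
export const DEFAULT_GENERATION_PARAMS: GenerationParams = {
  temperature: 0.7,
  maxTokens: 1024,
  topP: 0.9,
  topK: 40
};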
/**
 * Available models for local inference.
 *
 * ⚠️ IMPORTANT: Your Turkish models are PEFT/LoRA adapters, not full models!
 * As configured, they fall back to the BASE models, without the Turkish
 * fine-tuning.
 *
 * To use your Turkish models:
 * 1. Merge the LoRA adapters into the base model (Python)
 * 2. Export the merged model to ONNX format
 * 3. Upload to HuggingFace or host locally
 * 4. Update the model paths here
 *
 * See PEFT_TO_ONNX_GUIDE.md for complete instructions; a sketch of steps 1
 * and 2 appears after the MODELS list below.
 *
 * Current behavior:
 * - Chan-Y/TurkishReasoner-* → uses the base Gemma/Qwen/Llama weights (NO Turkish training)
 * - The base models will respond, but WITHOUT your Turkish fine-tuning
 * - Merge and export to ONNX to get Turkish-tuned responses
 */
export const MODELS: ModelConfig[] = [
  {
    name: "Gemma 3 1B Turkish Reasoning",
    path: "Chan-Y/TurkishReasoner-Gemma3-1B",
    supportsImages: false
  },
  {
    name: "Gemma 3 12B Turkish (Supports Images)",
    path: "Chan-Y/TurkishReasoner-Gemma3-12B",
    supportsImages: true
  },
  {
    name: "Qwen 2.5 3B Turkish Reasoning",
    path: "Chan-Y/TurkishReasoner-Qwen2.5-3B",
    supportsImages: false
  },
  {
    name: "Llama 3.1 8B Turkish Reasoning",
    path: "Chan-Y/TurkishReasoner-Llama3.1-8B",
    supportsImages: false
  }
];
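
/*
 * Sketch of steps 1 and 2 from the note above (Python, run offline; assumes
 * the `transformers`, `peft`, and `optimum` packages). The base-model id
 * shown is an assumption: check the adapter's own config for the real one.
 *
 *   from transformers import AutoModelForCausalLM
 *   from peft import PeftModel
 *
 *   base = AutoModelForCausalLM.from_pretrained("google/gemma-3-1b-it")  # assumed base
 *   model = PeftModel.from_pretrained(base, "Chan-Y/TurkishReasoner-Gemma3-1B")
 *   merged = model.merge_and_unload()  # fold the LoRA weights into the base
 *   merged.save_pretrained("merged-gemma3-1b-tr")
 *
 * Then export with Optimum's CLI:
 *
 *   optimum-cli export onnx --model merged-gemma3-1b-tr ./onnx-gemma3-1b-tr
 */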
// Default Turkish system prompt, kept verbatim since the app targets Turkish
// output. English translation: "You are an assistant that answers users'
// requests in Turkish, and you have been given a problem. Think about the
// problem and show your work. Place your work between <start_working_out> and
// <end_working_out>. Then, place your solution between <SOLUTION> and
// </SOLUTION>. Please use ONLY Turkish."
export const DEFAULT_SYSTEM_PROMPT = `Sen kullanıcıların isteklerine Türkçe cevap veren bir asistansın ve sana bir problem verildi.
Problem hakkında düşün ve çalışmanı göster.
Çalışmanı <start_working_out> ve <end_working_out> arasına yerleştir.
Sonra, çözümünü <SOLUTION> ve </SOLUTION> arasına yerleştir.
Lütfen SADECE Türkçe kullan.`;
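
// Example usage: a minimal sketch (this helper is illustrative, not part of
// the original repo). Seeds a new Chat with the default system prompt.
export function createChat(model: ModelConfig, title = "New chat"): Chat {
  const now = Date.now();
  return {
    id: crypto.randomUUID(), // assumes Web Crypto is available (browsers, Node 19+)
    title,
    messages: [{ role: 'system', content: DEFAULT_SYSTEM_PROMPT }],
    createdAt: now,
    updatedAt: now,
    modelName: model.name
  };
}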