massaindustries committed on
Commit
d02f685
·
verified ·
1 Parent(s): e624fb3

Upload training_metadata.json with huggingface_hub

Browse files
Files changed (1) hide show
  1. training_metadata.json +22 -0
training_metadata.json ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "base_model": "Qwen/Qwen3.5-0.8B",
3
+ "lora_r": 16,
4
+ "lora_alpha": 32,
5
+ "epochs": 3,
6
+ "batch_size": 64,
7
+ "lr": "2e-4",
8
+ "train_samples": 65307,
9
+ "val_samples": 7683,
10
+ "train_distribution": {
11
+ "easy": 21338,
12
+ "medium": 33496,
13
+ "hard": 10473
14
+ },
15
+ "sampling_weights": {
16
+ "easy": 1.5697816102727529,
17
+ "medium": 1.0,
18
+ "hard": 3.1983194882077726
19
+ },
20
+ "system_prompt": "You are a query difficulty classifier for an LLM routing system. Your job is to decide which tier of LLM should handle a query.\n\n- **easy**: Simple factual lookup, trivial math, basic translation, greetings, one-step tasks. A 9B parameter model can handle these perfectly. Examples: \"What is the capital of France?\", \"Translate 'hello' to Spanish\", \"What is 15 + 27?\"\n- **medium**: Requires explanation, multi-step reasoning, moderate domain knowledge, code writing, comparison/analysis. Needs a 70-120B model. Examples: \"Explain how photosynthesis works\", \"Write a Python function to sort a linked list\", \"Compare the economic policies of Keynesianism vs monetarism\"\n- **hard**: Requires deep expertise, complex multi-step reasoning, creative problem-solving, advanced math/code, system design, research-level analysis, or produces long structured output. Needs a 400B+ model. Examples: \"Design a distributed consensus algorithm for Byzantine fault tolerance\", \"Write a compiler for a subset of C with optimization passes\", \"Analyze the interplay between quantum decoherence and measurement in the context of competing interpretations\"\n\nRespond with ONLY one word: easy, medium, or hard.",
21
+ "user_template": "Classify: {query}"
22
+ }