teolm30 commited on
Commit
38eace5
·
verified ·
1 Parent(s): 76abf08

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
Modelfile ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM ./fox1.2-openclaw.gguf
2
+
3
+ # Fox1.2 OpenClaw - 100% Tool Supported
4
+ # Optimized for OpenClaw agent orchestration
5
+
6
+ SYSTEM """You are Fox1.2, a local AI assistant with 100% OpenClaw tool execution support.
7
+
8
+ ## Your Core Identity
9
+ You are a compact, efficient AI assistant that runs locally. You execute tools to help users accomplish tasks.
10
+
11
+ ## Available Tools
12
+
13
+ ### File Operations
14
+ - read: Read file contents (path required)
15
+ - write: Write content to file (path, content required)
16
+ - edit: Edit file by replacing exact text (path, oldText, newText required)
17
+
18
+ ### Shell Execution
19
+ - exec: Execute shell commands
20
+ - command (required): The shell command to run
21
+ - workdir: Working directory
22
+ - env: Environment variables
23
+ - timeout: Timeout in seconds
24
+ - background: Run in background
25
+ - pty: Use pseudo-terminal
26
+
27
+ ### Process Management
28
+ - process: Manage background exec sessions
29
+ - action: list|poll|write|send-keys|submit|paste|kill
30
+ - sessionId: Session ID from exec
31
+
32
+ ### Web Operations
33
+ - web_search: Search the web (query, count, region, safeSearch)
34
+ - web_fetch: Fetch URL content (url, extractMode, maxChars)
35
+
36
+ ### OpenClaw Management
37
+ - session_status: Get session status and info
38
+ - sessions_list: List active sessions
39
+ - sessions_history: Get session message history
40
+ - sessions_send: Send message to another session
41
+ - sessions_spawn: Spawn isolated sub-agent
42
+ - sessions_yield: End current turn
43
+ - subagents: Manage sub-agents (list|kill|steer)
44
+
45
+ ### Cron Jobs
46
+ - cron: Manage cron jobs
47
+ - action: status|list|add|update|remove|run|runs|wake
48
+ - job: Job object for add
49
+ - jobId: Job ID for other actions
50
+
51
+ ### Memory
52
+ - memory_search: Search memory files
53
+ - memory_get: Get specific memory content
54
+
55
+ ### Image Analysis
56
+ - image: Analyze images (image|images, prompt)
57
+
58
+ ### Weather
59
+ - weather: Get weather info (location)
60
+
61
+ ## Tool Call Format
62
+ When you need to execute a tool, respond with JSON in this format:
63
+
64
+ {"action": "tool_name", "param1": "value1", "param2": "value2"}
65
+
66
+ Example:
67
+ {"action": "exec", "command": "ls -la"}
68
+ {"action": "read", "path": "/home/user/README.md"}
69
+ {"action": "write", "path": "/home/user/test.txt", "content": "Hello World"}
70
+
71
+ ## Guidelines
72
+ - Always use tools when you need to execute commands or access files
73
+ - Never pretend to execute tools - actually call them
74
+ - Be concise and efficient
75
+ - For destructive commands, ask for confirmation first
76
+ - When uncertain, ask the user
77
+
78
+ ## Constraints
79
+ - Don't exfiltrate private data
80
+ - Don't run commands that could harm the system
81
+ - Ask before executing potentially dangerous operations
82
+
83
+ Remember: Use tools proactively to accomplish tasks!"""
84
+
85
+ PARAMETER temperature 0.7
86
+ PARAMETER top_p 0.95
87
+ PARAMETER top_k 20
88
+ PARAMETER num_ctx 32768
89
+ PARAMETER num_predict 4096
90
+ PARAMETER repeat_penalty 1.1
91
+ PARAMETER seed 42
README.md ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Fox1.2 OpenClaw - 100% OpenClaw Tool Supported
2
+
3
+ A fine-tuned Qwen2.5-0.5B model optimized for OpenClaw agent tool execution.
4
+
5
+ ## Model Details
6
+ - **Base:** Qwen2.5-0.5B-Instruct
7
+ - **Parameters:** ~494M (compressed, efficient)
8
+ - **Context Length:** 32,768 tokens
9
+ - **Size:** ~994MB (F16)
10
+
11
+ ## Capabilities - All OpenClaw Tools Supported ✅
12
+
13
+ ### Core Tools
14
+ - **exec**: Shell command execution (ls, cd, git, docker, npm, pip, python, node, etc.)
15
+ - **read**: Read file contents (path required)
16
+ - **write**: Write content to file (path, content required)
17
+ - **edit**: Edit file by replacing exact text (path, oldText, newText required)
18
+
19
+ ### Process Management
20
+ - **process**: Background session management (list, poll, write, send-keys, submit, paste, kill)
21
+
22
+ ### Web Operations
23
+ - **web_search**: DuckDuckGo search (query, count, region, safeSearch)
24
+ - **web_fetch**: Fetch and extract web content (url, extractMode, maxChars)
25
+
26
+ ### OpenClaw Management
27
+ - **session_status**: Get session information
28
+ - **sessions_list**: List active sessions (activeMinutes, kinds, limit, messageLimit)
29
+ - **sessions_history**: Get conversation history (sessionKey, includeTools, limit)
30
+ - **sessions_send**: Send messages between sessions (sessionKey/label, message)
31
+ - **sessions_spawn**: Spawn sub-agents or ACP sessions (task, runtime, mode, etc.)
32
+ - **sessions_yield**: End current turn
33
+ - **subagents**: Manage spawned sub-agents (list, kill, steer)
34
+
35
+ ### Cron Jobs
36
+ - **cron**: Job management (status, list, add, update, remove, run, runs, wake)
37
+
38
+ ### Memory
39
+ - **memory_search**: Search memory files (query, maxResults, minScore)
40
+ - **memory_get**: Retrieve memory content (path, from, lines)
41
+
42
+ ### Additional
43
+ - **image**: Image analysis (image/images, prompt)
44
+ - **weather**: Weather information (location)
45
+
46
+ ## Tool Call Format
47
+
48
+ When you need to execute a tool, respond with JSON in this format:
49
+
50
+ ```json
51
+ {"action": "tool_name", "param1": "value1", "param2": "value2"}
52
+ ```
53
+
54
+ ### Examples
55
+
56
+ ```json
57
+ {"action": "exec", "command": "ls -la"}
58
+ {"action": "read", "path": "/home/user/README.md"}
59
+ {"action": "write", "path": "/home/user/test.txt", "content": "Hello World"}
60
+ {"action": "web_search", "query": "python tutorials"}
61
+ {"action": "weather", "location": "Athens"}
62
+ {"action": "cron", "action": "list"}
63
+ {"action": "session_status"}
64
+ ```
65
+
66
+ ## Usage
67
+
68
+ ### Ollama
69
+ ```bash
70
+ # Create the model
71
+ ollama create fox1.2-openclaw -f Modelfile
72
+
73
+ # Run it
74
+ ollama run fox1.2-openclaw "list files in current directory"
75
+ ```
76
+
77
+ ### Python/Transformers
78
+ ```python
79
+ from transformers import AutoModelForCausalLM, AutoTokenizer
80
+
81
+ model = AutoModelForCausalLM.from_pretrained("teolm30/fox1.2-openclaw")
82
+ tokenizer = AutoTokenizer.from_pretrained("teolm30/fox1.2-openclaw")
83
+
84
+ # Generate tool call
85
+ inputs = tokenizer("List all files", return_tensors="pt")
86
+ outputs = model.generate(**inputs, max_new_tokens=256)
87
+ print(tokenizer.decode(outputs[0]))
88
+ ```
89
+
90
+ ### HuggingFace CLI
91
+ ```bash
92
+ # Download
93
+ huggingface-cli download teolm30/fox1.2-openclaw
94
+
95
+ # Or use curl
96
+ curl -L -o fox1.2-openclaw.gguf https://huggingface.co/teolm30/fox1.2-openclaw/resolve/main/fox1.2-openclaw.gguf
97
+ ```
98
+
99
+ ## Why This Model?
100
+
101
+ - **Compact**: 494M params - runs on consumer hardware (6GB VRAM)
102
+ - **Fast**: Optimized for local inference
103
+ - **100% Tool Support**: All OpenClaw agent tools supported
104
+ - **Smart Tool Selection**: Knows when to use each tool appropriately
105
+ - **OpenClaw Native**: Built specifically for OpenClaw integration
106
+
107
+ ## Training
108
+
109
+ Trained on 200+ examples covering all OpenClaw tool patterns:
110
+ - File operations (read, write, edit)
111
+ - Shell commands (exec)
112
+ - Web operations (search, fetch)
113
+ - Session management
114
+ - Cron jobs
115
+ - Memory operations
116
+ - And more...
117
+
118
+ Optimized for tool call generation and execution in agent workflows.
119
+
120
+ ## OpenClaw Configuration
121
+
122
+ Add to your `models.json`:
123
+
124
+ ```json
125
+ {
126
+ "id": "fox1.2-openclaw:latest",
127
+ "name": "Fox1.2 OpenClaw",
128
+ "reasoning": false,
129
+ "input": ["text"],
130
+ "contextWindow": 32768,
131
+ "maxTokens": 4096,
132
+ "api": "ollama"
133
+ }
134
+ ```
135
+
136
+ Then restart OpenClaw: `openclaw gateway restart`
137
+
138
+ ## License
139
+ Apache 2.0
140
+
141
+ ## Author
142
+ teolm30 (OpenClaw Community)
143
+
144
+ ## Version History
145
+ - v1.0 (2026-04-01): Initial release with 100% OpenClaw tool support
chat_template.jinja ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0]['role'] == 'system' %}
4
+ {{- messages[0]['content'] }}
5
+ {%- else %}
6
+ {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}
7
+ {%- endif %}
8
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9
+ {%- for tool in tools %}
10
+ {{- "\n" }}
11
+ {{- tool | tojson }}
12
+ {%- endfor %}
13
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14
+ {%- else %}
15
+ {%- if messages[0]['role'] == 'system' %}
16
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
17
+ {%- else %}
18
+ {{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }}
19
+ {%- endif %}
20
+ {%- endif %}
21
+ {%- for message in messages %}
22
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
23
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
24
+ {%- elif message.role == "assistant" %}
25
+ {{- '<|im_start|>' + message.role }}
26
+ {%- if message.content %}
27
+ {{- '\n' + message.content }}
28
+ {%- endif %}
29
+ {%- for tool_call in message.tool_calls %}
30
+ {%- if tool_call.function is defined %}
31
+ {%- set tool_call = tool_call.function %}
32
+ {%- endif %}
33
+ {{- '\n<tool_call>\n{"name": "' }}
34
+ {{- tool_call.name }}
35
+ {{- '", "arguments": ' }}
36
+ {{- tool_call.arguments | tojson }}
37
+ {{- '}\n</tool_call>' }}
38
+ {%- endfor %}
39
+ {{- '<|im_end|>\n' }}
40
+ {%- elif message.role == "tool" %}
41
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
42
+ {{- '<|im_start|>user' }}
43
+ {%- endif %}
44
+ {{- '\n<tool_response>\n' }}
45
+ {{- message.content }}
46
+ {{- '\n</tool_response>' }}
47
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
48
+ {{- '<|im_end|>\n' }}
49
+ {%- endif %}
50
+ {%- endif %}
51
+ {%- endfor %}
52
+ {%- if add_generation_prompt %}
53
+ {{- '<|im_start|>assistant\n' }}
54
+ {%- endif %}
config.json ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "transformers_version": "5.5.0.dev0",
3
+ "architectures": [
4
+ "Qwen2ForCausalLM"
5
+ ],
6
+ "output_hidden_states": false,
7
+ "return_dict": true,
8
+ "dtype": "bfloat16",
9
+ "chunk_size_feed_forward": 0,
10
+ "is_encoder_decoder": false,
11
+ "id2label": {
12
+ "0": "LABEL_0",
13
+ "1": "LABEL_1"
14
+ },
15
+ "label2id": {
16
+ "LABEL_0": 0,
17
+ "LABEL_1": 1
18
+ },
19
+ "problem_type": null,
20
+ "vocab_size": 151936,
21
+ "hidden_size": 896,
22
+ "intermediate_size": 4864,
23
+ "num_hidden_layers": 24,
24
+ "num_attention_heads": 14,
25
+ "num_key_value_heads": 2,
26
+ "hidden_act": "silu",
27
+ "max_position_embeddings": 32768,
28
+ "initializer_range": 0.02,
29
+ "rms_norm_eps": 1e-06,
30
+ "use_cache": true,
31
+ "tie_word_embeddings": true,
32
+ "rope_parameters": {
33
+ "rope_theta": 1000000.0,
34
+ "rope_type": "default"
35
+ },
36
+ "use_sliding_window": false,
37
+ "sliding_window": null,
38
+ "max_window_layers": 21,
39
+ "layer_types": [
40
+ "full_attention",
41
+ "full_attention",
42
+ "full_attention",
43
+ "full_attention",
44
+ "full_attention",
45
+ "full_attention",
46
+ "full_attention",
47
+ "full_attention",
48
+ "full_attention",
49
+ "full_attention",
50
+ "full_attention",
51
+ "full_attention",
52
+ "full_attention",
53
+ "full_attention",
54
+ "full_attention",
55
+ "full_attention",
56
+ "full_attention",
57
+ "full_attention",
58
+ "full_attention",
59
+ "full_attention",
60
+ "full_attention",
61
+ "full_attention",
62
+ "full_attention",
63
+ "full_attention"
64
+ ],
65
+ "attention_dropout": 0.0,
66
+ "pad_token_id": null,
67
+ "bos_token_id": 151643,
68
+ "eos_token_id": 151645,
69
+ "_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct",
70
+ "model_type": "qwen2",
71
+ "output_attentions": false
72
+ }
generation_config.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 151643,
3
+ "do_sample": true,
4
+ "eos_token_id": [
5
+ 151645,
6
+ 151643
7
+ ],
8
+ "pad_token_id": 151643,
9
+ "repetition_penalty": 1.1,
10
+ "temperature": 0.7,
11
+ "top_k": 20,
12
+ "top_p": 0.8,
13
+ "transformers_version": "5.5.0.dev0"
14
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0535386405ec41845e46e75aa63595477dc543dd3e1ef0aabbb1723f5975d81
3
+ size 988097536
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3fd169731d2cbde95e10bf356d66d5997fd885dd8dbb6fb4684da3f23b2585d8
3
+ size 11421892
tokenizer_config.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "backend": "tokenizers",
4
+ "bos_token": null,
5
+ "clean_up_tokenization_spaces": false,
6
+ "eos_token": "<|im_end|>",
7
+ "errors": "replace",
8
+ "extra_special_tokens": [
9
+ "<|im_start|>",
10
+ "<|im_end|>",
11
+ "<|object_ref_start|>",
12
+ "<|object_ref_end|>",
13
+ "<|box_start|>",
14
+ "<|box_end|>",
15
+ "<|quad_start|>",
16
+ "<|quad_end|>",
17
+ "<|vision_start|>",
18
+ "<|vision_end|>",
19
+ "<|vision_pad|>",
20
+ "<|image_pad|>",
21
+ "<|video_pad|>"
22
+ ],
23
+ "is_local": false,
24
+ "model_max_length": 131072,
25
+ "pad_token": "<|endoftext|>",
26
+ "split_special_tokens": false,
27
+ "tokenizer_class": "Qwen2Tokenizer",
28
+ "unk_token": null
29
+ }