osirisbrain committed on
Commit
489e3e4
·
verified ·
1 Parent(s): 4f0dd8b

OsirisCortex-v6: re-abliterated with proven v4 method (mlabonne datasets)

Browse files
README.md ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: Qwen/Qwen3.5-9B
4
+ tags: [abliteration, uncensored, qwen3.5, osiris, agi]
5
+ pipeline_tag: text-generation
6
+ ---
7
+ # OsirisCortex-v6
8
+
9
+ Sovereign AGI core — abliterated `Qwen/Qwen3.5-9B` using proven mlabonne datasets (256+256),
10
+ mean-diff method, 1.5x strength, 4 passes, layer blacklist [0, 1, 30, 31].
11
+
12
+ ## Architecture
13
+ - Qwen3.5-9B hybrid: 3:1 GatedDeltaNet:FullAttention, 32 layers, 3584 hidden
14
+ - Thinking model (supports `<think>` tags)
15
+
16
+ ## Usage
17
+ ```python
18
+ from transformers import AutoModelForCausalLM, AutoTokenizer
19
+ model = AutoModelForCausalLM.from_pretrained("osirisbrain/OsirisCortex-v6")
20
+ tokenizer = AutoTokenizer.from_pretrained("osirisbrain/OsirisCortex-v6")
21
+ ```
22
+
23
+ Based on [Qwen/Qwen3.5-9B](https://huggingface.co/Qwen/Qwen3.5-9B) by Alibaba (Apache 2.0).
abliteration_metadata.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "base_model": "Qwen/Qwen3.5-9B",
3
+ "technique": "mean_diff_abliteration_v4_proven_datasets",
4
+ "best_layer": 29,
5
+ "strength": 1.5,
6
+ "num_passes": 4,
7
+ "blacklisted_layers": [
8
+ 0,
9
+ 1,
10
+ 30,
11
+ 31
12
+ ],
13
+ "harmful_dataset": "mlabonne/harmful_behaviors",
14
+ "harmless_dataset": "mlabonne/harmless_alpaca",
15
+ "created_at": "2026-03-03T12:08:25.755227"
16
+ }
config.json CHANGED
@@ -77,7 +77,7 @@
77
  "rope_type": "default"
78
  },
79
  "tie_word_embeddings": false,
80
- "transformers_version": "5.2.0",
81
  "use_cache": true,
82
  "vocab_size": 248320
83
  }
 
77
  "rope_type": "default"
78
  },
79
  "tie_word_embeddings": false,
80
+ "transformers_version": "5.3.0.dev0",
81
  "use_cache": true,
82
  "vocab_size": 248320
83
  }
generation_config.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
  "_from_model_config": true,
3
  "eos_token_id": 248044,
4
- "transformers_version": "5.2.0",
5
  "use_cache": true
6
  }
 
1
  {
2
  "_from_model_config": true,
3
  "eos_token_id": 248044,
4
+ "transformers_version": "5.3.0.dev0",
5
  "use_cache": true
6
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:0540b9da27627523e11f2fc582e02f4b672d95875eaa1846e0bdc4d5a4ce660f
3
  size 17907663008
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:22a196e4a0995dc6706d39a8e6072e7a838489b3f814d0a4ebea68840d506365
3
  size 17907663008
refusal_direction.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe4fea7c14777bf5286f50a369131aaf7447d3070844fcb03495e1badb7927cd
3
+ size 18031
tokenizer_config.json CHANGED
@@ -9,7 +9,7 @@
9
  "eos_token": "<|im_end|>",
10
  "errors": "replace",
11
  "image_token": "<|image_pad|>",
12
- "is_local": true,
13
  "model_max_length": 262144,
14
  "model_specific_special_tokens": {
15
  "audio_bos_token": "<|audio_start|>",
@@ -21,7 +21,6 @@
21
  "vision_eos_token": "<|vision_end|>"
22
  },
23
  "pad_token": "<|endoftext|>",
24
- "padding_side": "left",
25
  "pretokenize_regex": "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?[\\p{L}\\p{M}]+|\\p{N}| ?[^\\s\\p{L}\\p{M}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
26
  "split_special_tokens": false,
27
  "tokenizer_class": "TokenizersBackend",
 
9
  "eos_token": "<|im_end|>",
10
  "errors": "replace",
11
  "image_token": "<|image_pad|>",
12
+ "is_local": false,
13
  "model_max_length": 262144,
14
  "model_specific_special_tokens": {
15
  "audio_bos_token": "<|audio_start|>",
 
21
  "vision_eos_token": "<|vision_end|>"
22
  },
23
  "pad_token": "<|endoftext|>",
 
24
  "pretokenize_regex": "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?[\\p{L}\\p{M}]+|\\p{N}| ?[^\\s\\p{L}\\p{M}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
25
  "split_special_tokens": false,
26
  "tokenizer_class": "TokenizersBackend",