SheatsToTheWind committed on
Commit 7389710 · 0 Parent(s)

🧠 Add complete launch kit with protected core and demo

Files changed (6)
  1. .gitignore +4 -0
  2. LICENSE.txt +13 -0
  3. Perceive_MemoryCore_SDK_OnePager.pdf +0 -0
  4. README.md +28 -0
  5. app.py +28 -0
  6. phaseformer.py +38 -0
.gitignore ADDED
@@ -0,0 +1,4 @@
+ perceive/
+ perceive_protected_sdk.zip
+ *.csv
+ *.ipynb_checkpoints/
LICENSE.txt ADDED
@@ -0,0 +1,13 @@
+ Perceive AI PROTECTED LICENSE
+
+ Copyright (c) 2025 Perceive AI LLC
+
+ This software is licensed, not sold. Redistribution, reverse engineering,
+ resale, sublicensing, or modification of any kind without written
+ permission from Perceive AI LLC is strictly prohibited.
+
+ Commercial use is allowed only with a valid license purchased under
+ the Developer, Pro, or Enterprise tiers. See pricing.md or contact
+ sales@perceive-ai.com for more information.
+
+ All rights reserved.
Perceive_MemoryCore_SDK_OnePager.pdf ADDED
Binary file (2.15 kB)
README.md ADDED
@@ -0,0 +1,28 @@
+ ---
+ title: Perceive PhaseFormer Demo
+ emoji: 🧠
+ colorFrom: blue
+ colorTo: orange
+ sdk: gradio
+ app_file: app.py
+ license: mit
+ pinned: false
+ tags:
+ - transformer
+ - memory
+ - decay
+ - reasoning
+ ---
+
+ [![Hugging Face](https://img.shields.io/badge/view_on-HuggingFace-orange?logo=huggingface)](https://huggingface.co/PerceiveAI/perceive-phaseformer)
+
+ # Perceive PhaseFormer + MemoryCore
+
+ A time-aware neural core with two architectures:
+ - `PhaseFormer` (MLP-based)
+ - `PhaseFormerTransformerLayer` (attention + exponential decay + phase gating)
+
+ An optional PerceiveFlow system provides time-decayed memory tracking.
+
+ Contact: sales@perceive-ai.com
+ A license is required for commercial use.
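The `decay` tag refers to the exponential time gate used throughout this kit: contributions are scaled by e^(-λ·t), so signals fade as the elapsed time t grows (λ is `decay_rate` in `phaseformer.py`, default 0.1). A minimal illustration; the sampled t values are arbitrary:

```python
import math

decay_rate = 0.1  # lambda, the default in PhaseFormerTransformerLayer
for t in [0.0, 5.0, 10.0]:
    # gate strength: 1.00 at t=0, ~0.61 at t=5, ~0.37 at t=10
    print(f"t={t:4.1f}  gate={math.exp(-decay_rate * t):.2f}")
```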
app.py ADDED
@@ -0,0 +1,28 @@
+ import gradio as gr
+ import torch
+ from phaseformer import PhaseFormerWrapper
+
+ def run_phaseformer(mode, seq_len, batch_size, input_dim, t):
+     model = PhaseFormerWrapper(mode=mode, input_dim=int(input_dim))
+     x = torch.randn(int(seq_len), int(batch_size), int(input_dim))
+     out = model(x, t)
+     return f"Output shape: {tuple(out.shape)}"
+
+ iface = gr.Interface(
+     fn=run_phaseformer,
+     inputs=[
+         gr.Radio(["mlp", "transformer"], label="Select Model"),
+         gr.Slider(1, 128, value=10, step=1, label="Sequence Length"),
+         gr.Slider(1, 64, value=2, step=1, label="Batch Size"),
+         gr.Slider(1, 512, value=64, step=1, label="Input Dimension"),
+         gr.Slider(0.0, 10.0, value=5.0, step=0.1, label="Time Step (t)")
+     ],
+     outputs="text",
+     title="🧠 Perceive PhaseFormer Demo",
+     description="""
+     Choose a model mode and input specs; the demo runs a forward pass and reports the output shape.
+     """
+ )
+
+ if __name__ == "__main__":
+     iface.launch()
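Note that `app.py` imports `PhaseFormerWrapper`, but this commit's `phaseformer.py` defines only `PhaseFormerTransformerLayer`; the wrapper presumably lives in the protected `perceive/` package that `.gitignore` excludes. A minimal stand-in sketch so the demo runs end to end, assuming the wrapper simply dispatches on `mode`; the MLP branch is a guess, not the shipped `PhaseFormer`:

```python
import math
import torch.nn as nn

from phaseformer import PhaseFormerTransformerLayer

class PhaseFormerWrapper(nn.Module):
    """Hypothetical stand-in for the protected SDK class of the same name."""
    def __init__(self, mode="transformer", input_dim=64, decay_rate=0.1):
        super().__init__()
        self.mode = mode
        self.decay_rate = decay_rate
        if mode == "transformer":
            # nhead=1 so any input_dim from the demo slider divides evenly.
            self.core = PhaseFormerTransformerLayer(d_model=input_dim, nhead=1)
        else:
            # Guessed MLP variant: a plain two-layer network, decay-gated below.
            self.core = nn.Sequential(
                nn.Linear(input_dim, 2 * input_dim),
                nn.ReLU(),
                nn.Linear(2 * input_dim, input_dim),
            )

    def forward(self, x, t: float):
        if self.mode == "transformer":
            return self.core(x, t)
        # Apply the same e^(-lambda * t) gate to the MLP path.
        return math.exp(-self.decay_rate * t) * self.core(x)
```

With a definition like this in place, `python app.py` launches the Gradio demo for both modes.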
phaseformer.py ADDED
@@ -0,0 +1,38 @@
+ import math
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class PhaseFormerTransformerLayer(nn.Module):
+     """
+     Transformer layer with phase-based temporal gating applied
+     to the attention and feed-forward residual paths.
+
+     Args:
+         d_model (int): Input/output dimension.
+         nhead (int): Number of attention heads.
+         dim_feedforward (int): FFN hidden layer size.
+         dropout (float): Dropout probability.
+         decay_rate (float): Decay coefficient lambda.
+     """
+     def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, decay_rate=0.1):
+         super().__init__()
+         self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
+         self.linear1 = nn.Linear(d_model, dim_feedforward)
+         self.dropout = nn.Dropout(dropout)
+         self.linear2 = nn.Linear(dim_feedforward, d_model)
+         self.norm1 = nn.LayerNorm(d_model)
+         self.norm2 = nn.LayerNorm(d_model)
+         self.decay_rate = decay_rate
+         self.phase_proj = nn.Linear(d_model, d_model)
+
+     def forward(self, src, t: float):
+         D_t = math.exp(-self.decay_rate * t)   # scalar decay gate e^(-lambda * t)
+         phase = self.phase_proj(src)
+         g = D_t * torch.sin(phase)             # elementwise phase gate
+
+         attn_out, _ = self.self_attn(src, src, src)
+         src2 = self.norm1(src + g * attn_out)  # gated attention residual
+
+         ff = self.linear2(self.dropout(F.relu(self.linear1(src2))))
+         return self.norm2(src2 + g * ff)       # gated feed-forward residual
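A quick sanity check for the layer above; tensors follow `app.py`'s (seq_len, batch, d_model) convention, and the d_model, nhead, and t values here are arbitrary:

```python
import torch
from phaseformer import PhaseFormerTransformerLayer

layer = PhaseFormerTransformerLayer(d_model=64, nhead=4)
x = torch.randn(10, 2, 64)              # (seq_len, batch, d_model), as in app.py

out_early = layer(x, t=0.0)             # decay gate e^(-0.1*0)  = 1.00 (full strength)
out_late = layer(x, t=10.0)             # decay gate e^(-0.1*10) ≈ 0.37 (weaker residual updates)
print(out_early.shape, out_late.shape)  # torch.Size([10, 2, 64]) both times
```

As t grows, the gate g shrinks toward zero and the layer approaches an identity-plus-LayerNorm mapping, which is the "time-decayed memory" behavior the README describes.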