vishal-1344 committed
Commit afcfa78 · verified · 1 Parent(s): 6ba1ba5

Delete sci

sci/__init__.py DELETED
@@ -1,45 +0,0 @@
-"""
-SCI: Surgical Cognitive Interpreter
-Metacognitive control for signal dynamics.
-
-This package is structured to keep the top-level import lightweight:
-- `import sci` does NOT import torch or heavy submodules immediately.
-- Actual components are imported lazily when accessed.
-
-Author: Vishal Joshua Meesala
-"""
-
-from importlib import import_module
-from typing import Any
-
-__all__ = [
-    "SCIController",
-    "compute_sp",
-    "Interpreter",
-    "Decomposition",
-    "ReliabilityWeighting",
-]
-
-
-def __getattr__(name: str) -> Any:
-    """
-    Lazy attribute access so that:
-
-        import sci
-        sci.SCIController
-
-    does not import torch until the attribute is actually used.
-    """
-    if name == "SCIController":
-        return import_module("sci.controller").SCIController
-    if name == "compute_sp":
-        return import_module("sci.sp").compute_sp
-    if name == "Interpreter":
-        return import_module("sci.interpreter").Interpreter
-    if name == "Decomposition":
-        return import_module("sci.decomposition").Decomposition
-    if name == "ReliabilityWeighting":
-        return import_module("sci.reliability").ReliabilityWeighting
-
-    raise AttributeError(f"module 'sci' has no attribute {name!r}")
-
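The lazy `__getattr__` above is what kept `import sci` cheap. A minimal sketch of the behavior it describes, assuming the package layout in this diff (the `sys.modules` check is illustrative only):

import sys

import sci

# Importing the package alone should not pull in torch yet.
assert "torch" not in sys.modules

# First attribute access triggers import_module("sci.controller"),
# which imports torch as a side effect.
_ = sci.SCIController
assert "torch" in sys.modules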
sci/__pycache__/__init__.cpython-312.pyc DELETED
Binary file (1.6 kB)
 
sci/__pycache__/controller.cpython-312.pyc DELETED
Binary file (3.32 kB)
 
sci/config.py DELETED
@@ -1,22 +0,0 @@
-"""Placeholder config module for SCI.
-
-This file can be extended to expose default configuration
-objects or helper loaders for YAML config files in `configs/`.
-"""
-
-from pathlib import Path
-
-DEFAULTS = {
-    "feature_dim": 128,
-    "num_markers": 8,
-    "num_classes": 10,
-}
-
-def load_yaml(path: str):
-    try:
-        import yaml
-    except Exception:
-        raise RuntimeError("PyYAML is required to load config files")
-    p = Path(path)
-    with p.open("r", encoding="utf-8") as f:
-        return yaml.safe_load(f)
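For reference, the deleted helpers would have been used roughly like this; the `configs/sci.yaml` path is a hypothetical example, not a file in this commit:

from sci.config import DEFAULTS, load_yaml

cfg = dict(DEFAULTS)                       # start from the built-in defaults
cfg.update(load_yaml("configs/sci.yaml"))  # overlay user values from YAML
print(cfg["feature_dim"], cfg["num_markers"], cfg["num_classes"])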
sci/controller.py DELETED
@@ -1,75 +0,0 @@
-import torch
-from torch import nn
-
-
-class SCIController(nn.Module):
-    """
-    Minimal SCI closed-loop controller.
-
-    It monitors a scalar interpretive state SP, compares it
-    to a target SP*, and performs a projected gradient-style
-    update on the interpreter parameters Θ based on ΔSP.
-
-    This is a simplified, minimal prototype to show the core idea.
-    """
-
-    def __init__(
-        self,
-        interpreter: nn.Module,
-        sp_target: float = 0.90,
-        eta: float = 0.01,
-        gamma: float = 0.10,
-        trust_region: float = 0.1,
-    ):
-        super().__init__()
-        self.interpreter = interpreter
-        self.sp_target = sp_target
-        self.eta = eta
-        self.gamma = gamma
-        self.trust_region = trust_region
-
-    @torch.no_grad()
-    def _project(self, theta: torch.Tensor, theta_old: torch.Tensor) -> torch.Tensor:
-        """Simple trust-region projection on parameter vector."""
-        delta = theta - theta_old
-        norm = delta.norm()
-        if norm > self.trust_region:
-            return theta_old + self.trust_region * delta / (norm + 1e-9)
-        return theta
-
-    def forward(self, x: torch.Tensor):
-        """
-        Run a single SCI control step.
-
-        Args:
-            x: input features (batch_size, feature_dim)
-
-        Returns:
-            pred: raw predictions (logits)
-            sp: scalar SP estimate (float tensor)
-            d_sp: SP* - SP
-            interpreter: the (possibly) updated interpreter module
-        """
-        sp, pred = self.interpreter.compute(x)
-        d_sp = self.sp_target - sp
-
-        # No-op zone: if |ΔSP| is small, do not update
-        if torch.abs(d_sp) < self.gamma:
-            return pred, sp, d_sp, self.interpreter
-
-        # Collect old parameters as a flat vector
-        theta_old = self.interpreter.parameters_vector().detach()
-
-        # Compute gradient of SP wrt parameters
-        grad = self.interpreter.grad_sp(x)
-
-        # Basic controller update: Θ_new = Θ_old + η * ΔSP * ∇Θ SP
-        theta_new = theta_old + self.eta * d_sp * grad
-
-        # Trust-region projection
-        theta_new = self._project(theta_new, theta_old)
-
-        # Push updated parameters back into the interpreter
-        self.interpreter.update_parameters(theta_new)
-
-        return pred, sp, d_sp, self.interpreter
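A minimal sketch of one closed-loop step with the two deleted classes, assuming the `Interpreter` defined later in this diff; the batch size and random input are arbitrary:

import torch

from sci.controller import SCIController
from sci.interpreter import Interpreter

interpreter = Interpreter(feature_dim=128, num_markers=8, num_classes=10)
controller = SCIController(interpreter, sp_target=0.90, eta=0.01, gamma=0.10)

x = torch.randn(32, 128)                     # (batch_size, feature_dim)
pred, sp, d_sp, interpreter = controller(x)  # one SP-driven control step
print(f"SP={sp.item():.3f}, dSP={d_sp.item():+.3f}")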
sci/decomposition.py DELETED
@@ -1,20 +0,0 @@
-import torch
-
-
-class Decomposition:
-    """
-    Placeholder semantic decomposition Π.
-
-    In the full SCI framework, this would include:
-    - Rhythmic features (FFT/STFT, wavelets, etc.)
-    - Trend features (detrending, SSA, etc.)
-    - Spatial / cross-modal features
-    Here we expose a simple identity mapping for now.
-    """
-
-    def __init__(self):
-        pass
-
-    def __call__(self, x: torch.Tensor) -> torch.Tensor:
-        # TODO: replace with real decomposition (e.g., STFT/wavelets)
-        return x
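As a hedged sketch of the direction the TODO points at, a rhythmic branch of Π could be built on `torch.stft`; the window length and magnitude-only readout here are assumptions, not part of this commit:

import torch


def stft_decomposition(x: torch.Tensor, n_fft: int = 64) -> torch.Tensor:
    """Map raw signals (batch_size, signal_len) to magnitude spectrogram features."""
    window = torch.hann_window(n_fft, device=x.device)
    spec = torch.stft(x, n_fft=n_fft, window=window, return_complex=True)
    return spec.abs().flatten(start_dim=1)  # (batch_size, freq_bins * frames)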
sci/interpreter.py DELETED
@@ -1,73 +0,0 @@
-import torch
-from torch import nn
-from .sp import compute_sp
-
-
-class Interpreter(nn.Module):
-    """
-    A lightweight SCI interpreter.
-
-    - Encodes input features into a hidden representation
-    - Emits marker logits (for SP)
-    - Emits task logits (for classification)
-
-    This is a minimal prototype; in practice you would replace
-    the feature encoder with a CNN/Transformer/etc.
-    """
-
-    def __init__(self, feature_dim: int = 128, num_markers: int = 8, num_classes: int = 10):
-        super().__init__()
-        self.encoder = nn.Linear(feature_dim, feature_dim)
-        self.marker_head = nn.Linear(feature_dim, num_markers)
-        self.classifier = nn.Linear(feature_dim, num_classes)
-
-    def encode(self, x: torch.Tensor) -> torch.Tensor:
-        h = torch.relu(self.encoder(x))
-        return h
-
-    def compute(self, x: torch.Tensor):
-        """
-        Compute SP and predictions for a batch of inputs.
-
-        Args:
-            x: tensor of shape (batch_size, feature_dim)
-
-        Returns:
-            sp_mean: scalar SP value (mean over batch)
-            logits: tensor of shape (batch_size, num_classes)
-        """
-        h = self.encode(x)
-        marker_logits = self.marker_head(h)
-        sp = compute_sp(marker_logits)  # (batch_size,)
-        logits = self.classifier(h)
-        return sp.mean(), logits
-
-    def grad_sp(self, x: torch.Tensor) -> torch.Tensor:
-        """
-        Compute gradient of SP wrt parameters as a flat vector.
-        NOTE: This assumes gradients have been zeroed before calling.
-        """
-        self.zero_grad()
-        sp, _ = self.compute(x)
-        sp.backward()
-        grads = []
-        for p in self.parameters():
-            if p.grad is not None:
-                grads.append(p.grad.view(-1))
-        if not grads:
-            return torch.zeros(0)
-        return torch.cat(grads).detach()
-
-    @torch.no_grad()
-    def parameters_vector(self) -> torch.Tensor:
-        """Flatten all parameters into a single vector."""
-        return torch.cat([p.data.view(-1) for p in self.parameters()])
-
-    @torch.no_grad()
-    def update_parameters(self, new_theta: torch.Tensor) -> None:
-        """Load a flat parameter vector back into the module parameters."""
-        offset = 0
-        for p in self.parameters():
-            n = p.numel()
-            p.data.copy_(new_theta[offset : offset + n].view_as(p))
-            offset += n
sci/reliability.py DELETED
@@ -1,18 +0,0 @@
-import torch
-
-
-class ReliabilityWeighting:
-    """
-    Placeholder reliability weighting.
-
-    In the full SCI framework this would:
-    - Estimate SNR, persistence, coherence for each feature
-    - Convert them to reliability scores z_f
-    - Normalize via a softmax to obtain weights w_f
-
-    For now, we return the input unchanged.
-    """
-
-    def __call__(self, features: torch.Tensor) -> torch.Tensor:
-        # TODO: implement reliability-based weighting
-        return features
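A hedged sketch of the weighting the docstring describes: per-feature reliability scores z_f softmax-normalized into weights w_f. The inverse-variance SNR proxy is an assumption for illustration; this commit only shipped the identity:

import torch


def reliability_weights(features: torch.Tensor) -> torch.Tensor:
    """Reweight (batch_size, num_features) columns by a crude reliability score."""
    z = 1.0 / (features.var(dim=0) + 1e-6)  # assumed SNR proxy: inverse variance z_f
    w = torch.softmax(z, dim=0)             # normalize scores into weights w_f
    return features * w                     # broadcast weights over the batch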
sci/sp.py DELETED
@@ -1,24 +0,0 @@
-import torch
-import torch.nn.functional as F
-
-
-def compute_sp(marker_logits: torch.Tensor) -> torch.Tensor:
-    """
-    Compute an entropy-based Surgical Precision (SP) score.
-
-    SP = 1 - H(q) / log(K), where:
-    - q = softmax(marker_logits)
-    - H(q) is Shannon entropy over markers
-    - K is the number of markers
-
-    Args:
-        marker_logits: tensor of shape (..., K)
-
-    Returns:
-        SP: tensor of shape (...,) with values in [0, 1].
-    """
-    q = F.softmax(marker_logits, dim=-1)
-    k = q.shape[-1]
-    entropy = -torch.sum(q * torch.log(q + 1e-9), dim=-1)
-    sp = 1.0 - entropy / torch.log(torch.tensor(float(k), device=marker_logits.device))
-    return sp
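A worked example of the formula: a peaked marker distribution has low entropy and scores near 1, a uniform one scores near 0 (values are approximate because of the 1e-9 stabilizer):

import torch

from sci.sp import compute_sp

peaked = torch.tensor([[10.0, 0.0, 0.0, 0.0]])  # low entropy -> SP close to 1
uniform = torch.zeros(1, 4)                     # maximum entropy -> SP close to 0
print(compute_sp(peaked))   # tensor([~0.999])
print(compute_sp(uniform))  # tensor([~0.000])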
sci/utils.py DELETED
@@ -1,7 +0,0 @@
-import torch
-from torch import nn
-
-
-def flatten_params(model: nn.Module) -> torch.Tensor:
-    """Flatten all parameters of a model into a single vector."""
-    return torch.cat([p.data.view(-1) for p in model.parameters()])
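For completeness, a minimal usage sketch of the deleted helper on a toy module:

import torch
from torch import nn

from sci.utils import flatten_params

model = nn.Linear(4, 2)
theta = flatten_params(model)
assert theta.numel() == 4 * 2 + 2  # weight entries plus bias entries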