han-xudong committed (verified)
Commit d19006f · 1 Parent(s): 20e66f0

Upload folder using huggingface_hub
Files changed (7)
  1. .gitattributes +35 -35
  2. README.md +87 -1
  3. __init__.py +1 -0
  4. config.json +16 -0
  5. model.onnx +3 -0
  6. modeling.py +54 -0
  7. modify_safetensors.py +32 -0
.gitattributes CHANGED
@@ -1,35 +1,35 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,89 @@
  ---
- license: mit
+ license: bsd-3-clause
+ pipeline_tag: robotics
+ tags:
+ - necknet
+ - prosoro
+ - omnineck
+ - multimodal
+ - onnx
+ - pytorch
+ library_name: transformers
+ datasets:
+ - han-xudong/prosoro-100k
  ---
+
+ # Model Card for NeckNet
+
+ ## Table of Contents
+
+ - [Model Card for NeckNet](#model-card-for-necknet)
+ - [Table of Contents](#table-of-contents)
+ - [Model Description](#model-description)
+ - [Intended Use](#intended-use)
+ - [Training Data](#training-data)
+ - [Citation](#citation)
+
+ ## Model Description
+
+ NeckNet is a multilayer perceptron (MLP) model designed for the OmniNeck. It predicts both the 6D force and the 3D shape (mesh nodes) from the 6D motion of the ball.
+
+ Try it out on the [Spaces demo](https://huggingface.co/spaces/han-xudong/necknet-demo).
+
+ - Developers: Xudong Han, Tianyu Wu, Fang Wan, and Chaoyang Song
+ - Model type: MLP
+ - License: BSD-3-Clause
+
+ ## Intended Use
+
+ This model is intended for researchers and developers working in robotics and tactile sensing. It can enhance robotic systems by providing accurate force and shape predictions from tactile data.
+
+ To load the model:
+
+ ```python
+ import torch
+ from transformers import AutoModel
+
+ model = AutoModel.from_pretrained("han-xudong/necknet", trust_remote_code=True)
+ x = torch.zeros((1, 6))  # Example input: batch size of 1, 6D motion
+ output = model(x)  # List with one tensor per output head
+ ```
+
+ Or to load the ONNX version:
+
+ ```python
+ # Example code to load the ONNX model
+ import onnxruntime as ort
+ import numpy as np
+ from huggingface_hub import hf_hub_download
+
+ onnx_model_path = hf_hub_download("han-xudong/necknet", filename="model.onnx")
+ ort_session = ort.InferenceSession(onnx_model_path)
+
+ # Example input: batch size of 1, 6D motion
+ x = np.zeros((1, 6), dtype=np.float32)
+ output = ort_session.run(None, {"motion": x})
+ ```
+
+ ## Training Data
+
+ The model was trained on the [NeckNet-100K](https://huggingface.co/datasets/han-xudong/necknet-100k) dataset, which contains motion, force, and shape data generated by finite element simulations.
+
+ ## Citation
+
+ If you use this model in your research, please cite the following paper:
+
+ ```bibtex
+ @article{liu2024proprioceptive,
+   title={Proprioceptive learning with soft polyhedral networks},
+   author={Liu, Xiaobo and Han, Xudong and Hong, Wei and Wan, Fang and Song, Chaoyang},
+   journal={The International Journal of Robotics Research},
+   volume={43},
+   number={12},
+   pages={1916--1935},
+   year={2024},
+   publisher={SAGE Publications Sage UK: London, England},
+   doi={10.1177/02783649241238765}
+ }
+ ```
+
+ [arXiv:2308.08538](https://arxiv.org/abs/2308.08538)
__init__.py ADDED
@@ -0,0 +1 @@
+ from .modeling import NeckNet, NeckNetConfig
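The re-export makes `NeckNet` and `NeckNetConfig` importable as a package, which is what the `auto_map` entries in `config.json` rely on when the model is loaded with `trust_remote_code=True` (see the README above). For purely local experiments the same classes could also be registered with the Auto factories by hand; the following is only a sketch, assuming `modeling.py` is importable from the working directory, and is not part of this repository:

```python
# Local-registration sketch (not part of this repo); assumes modeling.py is importable.
from transformers import AutoConfig, AutoModel
from modeling import NeckNet, NeckNetConfig

AutoConfig.register("necknet", NeckNetConfig)  # map model_type -> config class
AutoModel.register(NeckNetConfig, NeckNet)     # map config class -> model class

config = AutoConfig.for_model("necknet")       # built from NeckNetConfig's defaults
model = AutoModel.from_config(config)          # randomly initialized NeckNet instance
```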
config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "_name_or_path": "asRobotics/necknet",
+   "architectures": ["NeckNet"],
+   "model_type": "necknet",
+   "x_dim": [6],
+   "y_dim": [6, 2931],
+   "h1_dim": [100, 1000],
+   "h2_dim": [100, 1000],
+   "torch_dtype": "float32",
+   "layer_norm": false,
+   "use_activation": "relu",
+   "auto_map": {
+     "AutoConfig": "modeling.NeckNetConfig",
+     "AutoModel": "modeling.NeckNet"
+   }
+ }
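The dimension lists describe two parallel MLP heads that share the 6D input: per the README, the 6-dimensional head predicts force and the 2931-dimensional head predicts the mesh-node shape. As a rough illustration (not code from the repository), the layer shapes and parameter counts implied by these values can be tabulated directly:

```python
# Illustration only: layer shapes implied by config.json for the two heads in modeling.py.
x_dim, y_dim = [6], [6, 2931]
h1_dim, h2_dim = [100, 1000], [100, 1000]

for i, name in enumerate(["force", "shape"]):   # head names inferred from the README
    layers = [(x_dim[0], h1_dim[i]), (h1_dim[i], h2_dim[i]), (h2_dim[i], y_dim[i])]
    params = sum(m * n + n for m, n in layers)  # weights + biases of each nn.Linear
    print(name, layers, f"{params:,} parameters")
```

The resulting total of roughly 3.95 M float32 parameters is in line with the ~16 MB `model.onnx` file below.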
model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dac52a5d58c8a32b2d5ed96bbf72fb223517201c74dcdd14eed5aa5ea584f161
+ size 16036212
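`model.onnx` is stored as a Git LFS pointer (about 16 MB). The repository does not include the export script; a hypothetical way to regenerate such a file from the PyTorch model is sketched below. The input name `"motion"` matches the README's onnxruntime example, while the output names and dynamic axes are assumptions:

```python
# Hypothetical ONNX export sketch; not the upstream export script.
import torch
from modeling import NeckNet, NeckNetConfig

model = NeckNet(NeckNetConfig(x_dim=[6], y_dim=[6, 2931], h1_dim=[100, 1000], h2_dim=[100, 1000]))
model.eval()

torch.onnx.export(
    model,
    (torch.zeros((1, 6)),),                 # example 6D motion input
    "model.onnx",
    input_names=["motion"],                 # matches the README's onnxruntime example
    output_names=["force", "shape"],        # assumed names, not specified in the repo
    dynamic_axes={"motion": {0: "batch"}},  # allow variable batch size
)
```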
modeling.py ADDED
@@ -0,0 +1,54 @@
+ import torch
+ import torch.nn as nn
+ from transformers import PreTrainedModel, PretrainedConfig
+
+
+ class NeckNetConfig(PretrainedConfig):
+     model_type = "necknet"
+
+     def __init__(
+         self,
+         x_dim=[6],
+         y_dim=[6, 1800],
+         h1_dim=[100, 1000],
+         h2_dim=[100, 1000],
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+         self.x_dim = x_dim
+         self.y_dim = y_dim
+         self.h1_dim = h1_dim
+         self.h2_dim = h2_dim
+
+
+ class NeckNet(PreTrainedModel):
+     config_class = NeckNetConfig
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.x_dim = config.x_dim
+         self.y_dim = config.y_dim
+         self.h1_dim = config.h1_dim
+         self.h2_dim = config.h2_dim
+
+         # Define the model architecture: one three-layer MLP estimator per output head
+         self.model = nn.ModuleDict()
+         for i in range(len(self.y_dim)):
+             self.model[f"estimator_{i}"] = nn.Sequential(
+                 nn.Linear(self.x_dim[0], self.h1_dim[i]),
+                 nn.ReLU(),
+                 nn.Linear(self.h1_dim[i], self.h2_dim[i]),
+                 nn.ReLU(),
+                 nn.Linear(self.h2_dim[i], self.y_dim[i]),
+             )
+
+         # Initialize weights
+         self.post_init()
+
+     def forward(self, x):
+         # Run every estimator on the shared input and collect one output tensor per head
+         outputs = []
+         for i in range(len(self.y_dim)):
+             estimator = self.model[f"estimator_{i}"]
+             y = estimator(x)
+             outputs.append(y)
+         return outputs
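`forward()` runs every estimator on the same 6D input and returns one tensor per entry in `y_dim`. A minimal forward-pass sketch with the dimensions from `config.json` (randomly initialized weights, no checkpoint loaded):

```python
# Minimal forward-pass sketch using the classes above; weights are randomly initialized.
import torch
from modeling import NeckNet, NeckNetConfig

config = NeckNetConfig(x_dim=[6], y_dim=[6, 2931], h1_dim=[100, 1000], h2_dim=[100, 1000])
model = NeckNet(config).eval()

with torch.no_grad():
    outputs = model(torch.zeros((1, 6)))  # list with one tensor per entry in y_dim
print([o.shape for o in outputs])         # [torch.Size([1, 6]), torch.Size([1, 2931])]
```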
modify_safetensors.py ADDED
@@ -0,0 +1,32 @@
+ from safetensors import safe_open
+ from safetensors.torch import save_file
+ import torch
+
+ # ==========================================================
+ # 1. Load the original safetensors file
+ # ==========================================================
+ input_path = "model.safetensors"
+ output_path = "model_fixed.safetensors"
+
+ new_state_dict = {}
+
+ with safe_open(input_path, framework="pt", device="cpu") as f:
+     for key in f.keys():
+         tensor = f.get_tensor(key)
+         # Add the "model." prefix if it is not already present
+         new_key = key if key.startswith("model.") else f"model.{key}"
+         new_state_dict[new_key] = tensor
+
+ metadata = {
+     "format": "pt",
+ }
+
+ # ==========================================================
+ # 2. Save to a new safetensors file
+ # ==========================================================
+ save_file(new_state_dict, output_path, metadata=metadata)
+ print(f"✅ Saved updated safetensors to {output_path}")
+ print(f"✅ Total tensors: {len(new_state_dict)}")
+
+ # Optional: verify one sample
+ first_key = list(new_state_dict.keys())[0]
+ print("Example tensor key:", first_key)
+ print("Shape:", new_state_dict[first_key].shape)
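The script's only job is to prefix every tensor key with `model.` so the checkpoint keys line up with the `self.model` ModuleDict in `NeckNet`. A round-trip check could look like the sketch below, assuming `model_fixed.safetensors` and `modeling.py` are in the working directory:

```python
# Round-trip check sketch (assumes model_fixed.safetensors and modeling.py are local).
from safetensors.torch import load_file
from modeling import NeckNet, NeckNetConfig

state_dict = load_file("model_fixed.safetensors")
model = NeckNet(NeckNetConfig(x_dim=[6], y_dim=[6, 2931], h1_dim=[100, 1000], h2_dim=[100, 1000]))

missing, unexpected = model.load_state_dict(state_dict, strict=False)
print("missing keys:", missing)
print("unexpected keys:", unexpected)
```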