soury committed on
Commit
b283fa8
·
1 Parent(s): 61403cb

tutorial to upload boamps data

Browse files
README.md CHANGED
@@ -1,3 +1,43 @@
1
  ---
2
  license: apache-2.0
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: apache-2.0
3
  ---
4
+
5
+ # Guide: How to share your data on the BoAmps repository
6
+
7
+ This guide explains step by step how to share BoAmps format reports on this public Hugging Face repository.
8
+
9
+ ## Table of Contents
10
+ - [Guide: How to share your data on the BoAmps repository](#guide-how-to-share-your-data-on-the-boamps-repository)
11
+ - [Table of Contents](#table-of-contents)
12
+ - [Prerequisites](#prerequisites)
13
+ - [Method 1: Hugging Face Web Interface](#method-1-hugging-face-web-interface)
14
+ - [Method 2: Git (Command Line)](#method-2-git-command-line)
15
+
16
+ ## Prerequisites
17
+
18
+ Before starting, make sure you have:
19
+ - A Hugging Face account
20
+ - The files you want to upload
21
+
22
+ ## Method 1: Hugging Face Web Interface
23
+
24
+ 1. Log in to Hugging Face
25
+ 2. Go to [the boamps dataset](https://huggingface.co/datasets/boavizta/open_data_boamps)
26
+ 3. Navigate to the files: Click on "Files and versions" then on the "data" folder
27
+ ![Access to files](screenshots/01-access-repository.png)
28
+ ![Access to data folder](screenshots/02-access-data-files.png)
29
+ 4. Click on "Contribute" then "Upload files"
30
+ ![Contribute](screenshots/03-upload-files.png)
31
+
32
+ 5. Drop your files in BoAmps format (please name them clearly) and give a name to the PR (e.g. 10 reports on image classification). You can add an extended description but this is optional.
33
+ 6. At the bottom of the page, click on "Open a Pull Request".
34
+ 7. You should see your PR created in "Community" > "Pull request". Now just wait for our team to validate your PR, thank you very much for your participation and your commitment to more frugal AI, in full transparency!
35
+ ![Consult](screenshots/04-consult-PR.png)
36
+
37
+
38
+ ## Method 2: Git (Command Line)
39
+
40
+ 1. Clone the repository
41
+ 2. Create a branch
42
+ 3. Add your files
43
+ 4. Create a PR
data/energy-report-llm-inference-&.json ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "header": {
3
+ "licensing": "Creative Commons 4.0",
4
+ "formatVersion": "0.1",
5
+ "reportId": "2f4643f7-68b5-4fb6-21f0-b5dcda04897d",
6
+ "reportDatetime": "2025-02-26 16:57:00",
7
+ "reportStatus": "draft",
8
+ "publisher": {
9
+ "name": "sopra steria",
10
+ "confidentialityLevel": "public"
11
+ }
12
+ },
13
+ "task": {
14
+ "taskStage": "inference",
15
+ "taskFamily": "chatbot",
16
+ "nbRequest": 1,
17
+ "algorithms": [
18
+ {
19
+ "algorithmType": "llm",
20
+ "foundationModelName": "llama2-13b",
21
+ "foundationModelUri": "https://huggingface.co/meta-llama/Llama-2-13b-hf",
22
+ "framework": "vllm",
23
+ "parametersNumber": 13,
24
+ "quantization": "q16"
25
+ }
26
+ ],
27
+ "dataset": [
28
+ {
29
+ "dataUsage": "input",
30
+ "dataType": "token",
31
+ "dataQuantity": 11
32
+ },
33
+ {
34
+ "dataUsage": "output",
35
+ "dataType": "token",
36
+ "dataQuantity": 828
37
+ }
38
+ ],
39
+ "estimatedAccuracy": "veryGood"
40
+ },
41
+ "measures": [
42
+ {
43
+ "measurementMethod": "codecarbon",
44
+ "version": "2.5.0",
45
+ "cpuTrackingMode": "constant",
46
+ "gpuTrackingMode": "nvml",
47
+ "powerConsumption": 0.00267074,
48
+ "measurementDuration": 19.09390426,
49
+ "measurementDateTime": "2024-09-30 09:09:40"
50
+ }
51
+ ],
52
+ "system": {
53
+ "os": "linux"
54
+ },
55
+ "software": {
56
+ "language": "python",
57
+ "version": "3.10.12"
58
+ },
59
+ "infrastructure": {
60
+ "infraType": "publicCloud",
61
+ "cloudProvider": "ovh",
62
+ "components": [
63
+ {
64
+ "componentName": "Intel(R) Xeon(R) Gold 6226R CPU @ 2.90GHz",
65
+ "componentType": "cpu",
66
+ "nbComponent": 30,
67
+ "manufacturer": "Intel",
68
+ "family": "Xeon",
69
+ "series": "Gold 6226R"
70
+ },
71
+ {
72
+ "componentName": "2 x Tesla V100S-PCIE-32GB",
73
+ "componentType": "gpu",
74
+ "nbComponent": 2,
75
+ "memorySize": 32,
76
+ "manufacturer": "Tesla",
77
+ "family": "V100"
78
+ },
79
+ {
80
+ "componentType": "ram",
81
+ "nbComponent": 1,
82
+ "memorySize": 86
83
+ }
84
+ ]
85
+ },
86
+ "environment": {
87
+ "country": "france",
88
+ "powerSupplierType": "public"
89
+ },
90
+ "quality": "high"
91
+ }
screenshots/01-access-repository.png ADDED

Git LFS Details

  • SHA256: 9c990aa49dc6acf57d65976c6f0512b5831f45de321c60c3a8c2a278fbcc20e3
  • Pointer size: 131 Bytes
  • Size of remote file: 112 kB
screenshots/02-access-data-files.png ADDED

Git LFS Details

  • SHA256: 6d7a77fbf070c6cb1db219af2e51dde7652bc71594ae27bf8cc9fd246cf8c1ab
  • Pointer size: 131 Bytes
  • Size of remote file: 129 kB
screenshots/03-upload-files.png ADDED

Git LFS Details

  • SHA256: a4a38b4db32131a2f6cf7d54ed7f3c395d42ac33a061dc88b381eca83b72cc1c
  • Pointer size: 131 Bytes
  • Size of remote file: 121 kB
screenshots/04-consult-PR.png ADDED

Git LFS Details

  • SHA256: bcaf6c0101a2d0b943169a433f4e90693a51dae05611485e95b3f1d12ad1da7d
  • Pointer size: 131 Bytes
  • Size of remote file: 152 kB