GrishaKushnir committed on
Commit
f29deec
·
verified ·
1 Parent(s): 92a1d15

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "word_embedding_dimension": 768,
3
+ "pooling_mode_cls_token": false,
4
+ "pooling_mode_mean_tokens": true,
5
+ "pooling_mode_max_tokens": false,
6
+ "pooling_mode_mean_sqrt_len_tokens": false,
7
+ "pooling_mode_weightedmean_tokens": false,
8
+ "pooling_mode_lasttoken": false,
9
+ "include_prompt": true
10
+ }
2_Router/anchor_0_Dense/config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "in_features": 768,
3
+ "out_features": 1024,
4
+ "bias": true,
5
+ "activation_function": "torch.nn.modules.activation.Tanh"
6
+ }
2_Router/anchor_0_Dense/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8caaef08b8292a16478db1d41b423527ce8eec94d62a16fbfbb0eeb4a8ff868f
3
+ size 3149984
2_Router/positive_0_Dense/config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "in_features": 768,
3
+ "out_features": 1024,
4
+ "bias": true,
5
+ "activation_function": "torch.nn.modules.activation.Tanh"
6
+ }
2_Router/positive_0_Dense/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b3fa8b97f9410fcd8cb59b8f3f438c4f9e691d87a1fe8c0a7b8fa4bb7fe6c8c
3
+ size 3149984
2_Router/router_config.json ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "types": {
3
+ "anchor_0_Dense": "sentence_transformers.models.Dense.Dense",
4
+ "positive_0_Dense": "sentence_transformers.models.Dense.Dense"
5
+ },
6
+ "structure": {
7
+ "anchor": [
8
+ "anchor_0_Dense"
9
+ ],
10
+ "positive": [
11
+ "positive_0_Dense"
12
+ ]
13
+ },
14
+ "parameters": {
15
+ "default_route": "anchor",
16
+ "allow_empty_key": true
17
+ }
18
+ }
README.md ADDED
@@ -0,0 +1,558 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: sentence-transformers/paraphrase-multilingual-mpnet-base-v2
3
+ language:
4
+ - en
5
+ - es
6
+ - de
7
+ - zh
8
+ - mul
9
+ - multilingual
10
+ library_name: sentence-transformers
11
+ license: mit
12
+ pipeline_tag: feature-extraction
13
+ tags:
14
+ - sentence-transformers
15
+ - sentence-similarity
16
+ - feature-extraction
17
+ - generated_from_trainer
18
+ - dataset_size:21123868
19
+ - loss:CachedMultipleNegativesRankingLoss
20
+ widget:
21
+ - source_sentence: 系统管理员技术员——TS/SCI级别并拥有多项式验证
22
+ sentences:
23
+ - support development of annual budget, create a financial report, report analysis
24
+ results, Microsoft Access, accounting, use presentation software, interpret financial
25
+ statements, synthesise financial information, develop vaccines, handle financial
26
+ overviews of the store, produce statistical financial records, develop financial
27
+ statistics reports, explain accounting records, financial analysis, SAP R3, represent
28
+ the company, examine budgets, prepare presentation material, use spreadsheets
29
+ software, forecast account metrics, meet deadlines, prepare financial projections,
30
+ manage budgets, exercise self-control, financial statements
31
+ - ensure cross-department cooperation, establish customer rapport, improve business
32
+ processes, manage technical security systems, handle incidents, maintain ICT system,
33
+ explain characteristics of computer peripheral equipment, gather technical information,
34
+ collaborate in company's daily operations , apply change management, maintain
35
+ technical equipment, communicate with customers, solve technical problems, perform
36
+ ICT troubleshooting, use ICT equipment in maintenance activities, manage major
37
+ incidents, build business relationships, computer engineering, perform software
38
+ recovery testing, identify process improvements, maintain relationship with customers,
39
+ carry out project activities, collaborate in the development of marketing strategies,
40
+ computer technology, technical terminology
41
+ - utilise machine learning, cloud technologies, develop predictive models, assess
42
+ sportive performance, formulate findings , principles of artificial intelligence,
43
+ perform business research, communicate with stakeholders, computer engineering,
44
+ build predictive models, computer science, develop automated software tests, analyse
45
+ business objectives, Agile development, cloud monitoring and reporting, provide
46
+ written content, obtain relevant licenses, design prototypes, machine learning,
47
+ e-learning software infrastructure, analyse education system, disseminate results
48
+ to the scientific community, learning technologies, ML (computer programming),
49
+ task algorithmisation
50
+ - source_sentence: 安全运营官
51
+ sentences:
52
+ - deliver outstanding service, manage carriers, direct customers to merchandise,
53
+ improve customer interaction, manage time, support managers, assist customers,
54
+ process customer orders, manage customer service, satisfy customers, guarantee
55
+ customer satisfaction, respond to customers' inquiries
56
+ - manage several projects, implement operational business plans, identify improvement
57
+ actions, develop strategy to solve problems, manage website, carry out project
58
+ activities, follow reporting procedures, supervise site maintenance, adjust priorities,
59
+ schedule shifts, conduct public presentations, motivate others, manage operational
60
+ budgets, report to the team leader, encourage teams for continuous improvement,
61
+ lead the sustainability reporting process, implement sustainable procurement,
62
+ show an exemplary leading role in an organisation, manage manufacturing facilities,
63
+ develop training programmes, develop production line, supply chain management,
64
+ leadership principles, lead a team, coaching techniques
65
+ - provide emergency supplies, provide first aid, liaise with security authorities,
66
+ apply medical first aid in case of emergency, regulate traffic, train security
67
+ officers, maintain physical fitness, provide protective escort, ensure public
68
+ safety and security, ensure inspections of facilities, work in inclement conditions,
69
+ follow procedures in the event of an alarm, set safety and security standards,
70
+ comply with the principles of self-defence, present reports, maintain facility
71
+ security systems, conduct security screenings, types of evaluation , monitor security
72
+ measures, office equipment, escort pedestrians across streets, advise on security
73
+ staff selection, wear appropriate protective gear, work in outdoor conditions,
74
+ assist emergency services
75
+ - source_sentence: Empleado de control de COVID
76
+ sentences:
77
+ - maintain records of clients' prescriptions, assist people in contaminated areas,
78
+ label samples, maintain museum records, apply social distancing protocols, collect
79
+ biological samples from patients, infection control, label medical laboratory
80
+ samples, disinfect surfaces, maintain customer records, ensure health and safety
81
+ of staff, personal protective equipment, remove contaminated materials, store
82
+ contaminated materials, prepare prescription labels, use personal protection equipment
83
+ - promote organisational communication, provide legal advice, human resource management,
84
+ company policies, perform customer management, business processes, ensure compliance
85
+ with legal requirements, develop communications strategies, enforce company values,
86
+ develop outreach training plans, use consulting techniques, develop employment
87
+ policies, human resources department processes, personnel management, identify
88
+ training needs, participate in health personnel training, health and safety in
89
+ the workplace, lead police investigations, ensure compliance with policies, prepare
90
+ compliance documents, perform internal investigations, develop employee retention
91
+ programs, develop corporate training programmes, customer relationship management,
92
+ manage localisation
93
+ - perform escalation procedure, imprint visionary aspirations into the business
94
+ management, observe confidentiality, impart business plans to collaborators, lead
95
+ a team, human resources department processes, respect confidentiality obligations,
96
+ hire human resources, manage commercial risks, develop business plans, communicate
97
+ with stakeholders, maintain relationship with customers, manage several projects,
98
+ provide improvement strategies, manage technical security systems, knowledge management,
99
+ risk management, develop program ideas, perform project management, project management,
100
+ cope with uncertainty, address identified risks, provide performance feedback,
101
+ information confidentiality, track key performance indicators
102
+ - source_sentence: Aerie - Brand Ambassador (Sales Associate) - US
103
+ sentences:
104
+ - lay bricks, provide first aid, enforce park rules, conflict management, give swimming
105
+ lessons, assist in performing physical exercises, perform park safety inspections,
106
+ assist in the movement of heavy loads, lead a team, first aid, supervise pool
107
+ activities, swim, coach staff for running the performance, show an exemplary leading
108
+ role in an organisation, teach public speaking principles, collaborate with coaching
109
+ team, supervise work, calculate stairs rise and run, calculate compensation payments,
110
+ manage a team, information confidentiality
111
+ - react to events in time-critical environments, operate in a specific field of
112
+ nursing care, clinical science, promote healthy fitness environment, lead others,
113
+ comply with legislation related to health care, maintain a safe, hygienic and
114
+ secure working environment, provide healthcare services to patients in specialised
115
+ medicine, write English, conduct physical examinations, leadership principles,
116
+ use clinical assessment techniques, apply context specific clinical competences,
117
+ conduct health related research, conceptualise healthcare user’s needs, assessment
118
+ processes, communicate in healthcare, provide professional care in nursing, nursing
119
+ science, promote health and safety, implement policy in healthcare practices,
120
+ engage with stakeholders, identify problems, respond to changing situations in
121
+ health care, perform resource planning
122
+ - ensure the privacy of guests, provide customised products, company policies, exude
123
+ enthusiasm during the action sessions, provide customer guidance on product selection,
124
+ collect briefing regarding products, perform multiple tasks at the same time,
125
+ create solutions to problems, respond to visitor complaints
126
+ - source_sentence: 医师——危重症护理——重症监护专家——项目医务总监
127
+ sentences:
128
+ - handle incidents, provide technical documentation, coordinate operational activities,
129
+ ensure information security, work in teams, manage manufacturing documentation,
130
+ project configuration management, operate call distribution system, maintain computer
131
+ hardware, apply change management, manage aircraft support systems, perform escalation
132
+ procedure, manage production changeovers, maintenance operations, call-centre
133
+ technologies, manage service contracts in the drilling industry, encourage teambuilding,
134
+ manage major incidents, resolve equipment malfunctions, work independently, think
135
+ analytically, manage maintenance operations, maintain plan for continuity of operations
136
+ - develop recycling programs, receive actors' resumes, work in cold environments,
137
+ perform cleaning duties, operate floor cleaning equipment, operate forklift
138
+ - perform technical tasks with great care, supervise medical residents, manage a
139
+ multidisciplinary team involved in patient care, administrative tasks in a medical
140
+ environment, demonstrate technical skills during neurological surgery, apply problem
141
+ solving in social service, intensive care medicine, provide comprehensive care
142
+ for patients with surgical conditions, work in teams, solve problems
143
+ co2_eq_emissions:
144
+ emissions: 717.3535184611766
145
+ energy_consumed: 1.9440474755045436
146
+ source: codecarbon
147
+ training_type: fine-tuning
148
+ on_cloud: true
149
+ cpu_model: Intel(R) Xeon(R) CPU @ 2.20GHz
150
+ ram_total_size: 83.47684860229492
151
+ hours_used: 5.34
152
+ hardware_used: 1 x NVIDIA A100-SXM4-40GB
153
+ ---
154
+
155
+ # SentenceTransformer based on sentence-transformers/paraphrase-multilingual-mpnet-base-v2
156
+
157
+ This is a [sentence-transformers](https://www.SBERT.net) model specifically trained for job title matching and similarity. It's finetuned from [sentence-transformers/paraphrase-multilingual-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-mpnet-base-v2) on a large dataset of job titles and their associated skills/requirements across multiple languages. The model maps English, Spanish, German and Chinese job titles and descriptions to a 1024-dimensional dense vector space and can be used for semantic job title matching, job similarity search, and related HR/recruitment tasks.
158
+
159
+ The model was presented in the paper [Multilingual JobBERT for Cross-Lingual Job Title Matching](https://huggingface.co/papers/2507.21609).
160
+
161
+ ## Model Details
162
+
163
+ ### Model Description
164
+ - **Model Type:** Sentence Transformer
165
+ - **Base model:** [sentence-transformers/paraphrase-multilingual-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-mpnet-base-v2) <!-- at revision 84fccfe766bcfd679e39efefe4ebf45af190ad2d -->
166
+ - **Maximum Sequence Length:** 64 tokens
167
+ - **Output Dimensionality:** 1024 dimensions
168
+ - **Similarity Function:** Cosine Similarity
169
+ - **Training Dataset:** 4 x 5.2M high-quality job title - skills pairs in English, Spanish, German and Chinese
170
+
171
+ ### Model Sources
172
+
173
+ - **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
174
+ - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
175
+ - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
176
+
177
+ ### Full Model Architecture
178
+
179
+ ```
180
+ SentenceTransformer(
181
+ (0): Transformer({'max_seq_length': 64, 'do_lower_case': False}) with Transformer model: XLMRobertaModel
182
+ (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
183
+ (2): Asym(
184
+ (anchor-0): Dense({'in_features': 768, 'out_features': 1024, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
185
+ (positive-0): Dense({'in_features': 768, 'out_features': 1024, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
186
+ )
187
+ )
188
+ ```
189
+
190
+ ## Usage
191
+
192
+ ### Direct Usage (Sentence Transformers)
193
+
194
+ First install the Sentence Transformers library:
195
+
196
+ ```bash
197
+ pip install -U sentence-transformers
198
+ ```
199
+
200
+ Then you can load and use the model with the following code:
201
+ ```python
202
+ import torch
203
+ import numpy as np
204
+ from tqdm.auto import tqdm
205
+ from sentence_transformers import SentenceTransformer
206
+ from sentence_transformers.util import batch_to_device, cos_sim
207
+
208
+ # Load the model
209
+ model = SentenceTransformer("TechWolf/JobBERT-v3")
210
+
211
+ def encode_batch(jobbert_model, texts):
212
+ features = jobbert_model.tokenize(texts)
213
+ features = batch_to_device(features, jobbert_model.device)
214
+ features["text_keys"] = ["anchor"]
215
+ with torch.no_grad():
216
+ out_features = jobbert_model.forward(features)
217
+ return out_features["sentence_embedding"].cpu().numpy()
218
+
219
+ def encode(jobbert_model, texts, batch_size: int = 8):
220
+ # Sort texts by length and keep track of original indices
221
+ sorted_indices = np.argsort([len(text) for text in texts])
222
+ sorted_texts = [texts[i] for i in sorted_indices]
223
+
224
+ embeddings = []
225
+
226
+ # Encode in batches
227
+ for i in tqdm(range(0, len(sorted_texts), batch_size)):
228
+ batch = sorted_texts[i:i+batch_size]
229
+ embeddings.append(encode_batch(jobbert_model, batch))
230
+
231
+ # Concatenate embeddings and reorder to original indices
232
+ sorted_embeddings = np.concatenate(embeddings)
233
+ original_order = np.argsort(sorted_indices)
234
+ return sorted_embeddings[original_order]
235
+
236
+ # Example usage
237
+ job_titles = [
238
+ 'Software Engineer',
239
+ '高级软件开发人员', # senior software developer
240
+ 'Produktmanager', # product manager
241
+ 'Científica de datos' # data scientist
242
+ ]
243
+
244
+ # Get embeddings
245
+ embeddings = encode(model, job_titles)
246
+
247
+ # Calculate cosine similarity matrix
248
+ similarities = cos_sim(embeddings, embeddings)
249
+ print(similarities)
250
+ ```
251
+
252
+ The output will be a similarity matrix where each value represents the cosine similarity between two job titles:
253
+
254
+ ```
255
+ tensor([[1.0000, 0.8087, 0.4673, 0.5669],
256
+ [0.8087, 1.0000, 0.4428, 0.4968],
257
+ [0.4673, 0.4428, 1.0000, 0.4292],
258
+ [0.5669, 0.4968, 0.4292, 1.0000]])
259
+ ```
260
+
261
+
262
+ <!--
263
+ ### Direct Usage (Transformers)
264
+
265
+ <details><summary>Click to see the direct usage in Transformers</summary>
266
+
267
+ </details>
268
+ -->
269
+
270
+ <!--
271
+ ### Downstream Usage (Sentence Transformers)
272
+
273
+ You can finetune this model on your own dataset.
274
+
275
+ <details><summary>Click to expand</summary>
276
+
277
+ </details>
278
+ -->
279
+
280
+ <!--
281
+ ### Out-of-Scope Use
282
+
283
+ *List how the model may foreseeably be misused and address what users ought not to do with the model.*
284
+ -->
285
+
286
+ <!--
287
+ ## Bias, Risks and Limitations
288
+
289
+ *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
290
+ -->
291
+
292
+ <!--
293
+ ### Recommendations
294
+
295
+ *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
296
+ -->
297
+
298
+ ## Training Details
299
+
300
+ ### Training Dataset
301
+
302
+ * Size: 21,123,868 training samples
303
+ * Columns: <code>anchor</code> and <code>positive</code>
304
+ * Approximate statistics based on the first 1000 samples:
305
+ | | anchor | positive |
306
+ |:--------|:----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|
307
+ | type | string | string |
308
+ | details | <ul><li>min: 4 tokens</li><li>mean: 10.56 tokens</li><li>max: 38 tokens</li></ul> | <ul><li>min: 19 tokens</li><li>mean: 61.08 tokens</li><li>max: 64 tokens</li></ul> |
309
+ * Samples:
310
+ | anchor | positive |
311
+ |:-----------------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
312
+ | <code>通信与培训专员</code> | <code>deliver online training, liaise with educational support staff, interact with an audience, construct individual learning plans, lead a team, develop corporate training programmes, learning technologies, communication, identify with the company's goals, address an audience, learning management systems, use presentation software, motivate others, provide learning support, engage with stakeholders, identify skills gaps, meet expectations of target audience, develop training programmes</code> |
313
+ | <code>Associate Infrastructure Engineer</code> | <code>create solutions to problems, design user interface, cloud technologies, use databases, automate cloud tasks, keep up-to-date to computer trends, work in teams, use object-oriented programming, keep updated on innovations in various business fields, design principles, Angular, adapt to changing situations, JavaScript, Agile development, manage stable, Swift (computer programming), keep up-to-date to design industry trends, monitor technology trends, web programming, provide mentorship, advise on efficiency improvements, adapt to change, JavaScript Framework, database management systems, stimulate creative processes</code> |
314
+ | <code>客户顾问/出纳</code> | <code>customer service, handle financial transactions, adapt to changing situations, have computer literacy, manage cash desk, attend to detail, provide customer guidance on product selection, perform multiple tasks at the same time, carry out financial transactions, provide membership service, manage accounts, adapt to change, identify customer's needs, solve problems</code> |
315
+ * Loss: [<code>CachedMultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cachedmultiplenegativesrankingloss) with these parameters:
316
+ ```json
317
+ {
318
+ "scale": 20.0,
319
+ "similarity_fct": "cos_sim",
320
+ "mini_batch_size": 512
321
+ }
322
+ ```
323
+
324
+ ### Training Hyperparameters
325
+ #### Non-Default Hyperparameters
326
+
327
+ - `overwrite_output_dir`: True
328
+ - `per_device_train_batch_size`: 2048
329
+ - `per_device_eval_batch_size`: 2048
330
+ - `num_train_epochs`: 1
331
+ - `fp16`: True
332
+
333
+ #### All Hyperparameters
334
+ <details><summary>Click to expand</summary>
335
+
336
+ - `overwrite_output_dir`: True
337
+ - `do_predict`: False
338
+ - `eval_strategy`: no
339
+ - `prediction_loss_only`: True
340
+ - `per_device_train_batch_size`: 2048
341
+ - `per_device_eval_batch_size`: 2048
342
+ - `per_gpu_train_batch_size`: None
343
+ - `per_gpu_eval_batch_size`: None
344
+ - `gradient_accumulation_steps`: 1
345
+ - `eval_accumulation_steps`: None
346
+ - `torch_empty_cache_steps`: None
347
+ - `learning_rate`: 5e-05
348
+ - `weight_decay`: 0.0
349
+ - `adam_beta1`: 0.9
350
+ - `adam_beta2`: 0.999
351
+ - `adam_epsilon`: 1e-08
352
+ - `max_grad_norm`: 1.0
353
+ - `num_train_epochs`: 1
354
+ - `max_steps`: -1
355
+ - `lr_scheduler_type`: linear
356
+ - `lr_scheduler_kwargs`: {}
357
+ - `warmup_ratio`: 0.0
358
+ - `warmup_steps`: 0
359
+ - `log_level`: passive
360
+ - `log_level_replica`: warning
361
+ - `log_on_each_node`: True
362
+ - `logging_nan_inf_filter`: True
363
+ - `save_safetensors`: True
364
+ - `save_on_each_node`: False
365
+ - `save_only_model`: False
366
+ - `restore_callback_states_from_checkpoint`: False
367
+ - `no_cuda`: False
368
+ - `use_cpu`: False
369
+ - `use_mps_device`: False
370
+ - `seed`: 42
371
+ - `data_seed`: None
372
+ - `jit_mode_eval`: False
373
+ - `use_ipex`: False
374
+ - `bf16`: False
375
+ - `fp16`: True
376
+ - `fp16_opt_level`: O1
377
+ - `half_precision_backend`: auto
378
+ - `bf16_full_eval`: False
379
+ - `fp16_full_eval`: False
380
+ - `tf32`: None
381
+ - `local_rank`: 0
382
+ - `ddp_backend`: None
383
+ - `tpu_num_cores`: None
384
+ - `tpu_metrics_debug`: False
385
+ - `debug`: []
386
+ - `dataloader_drop_last`: False
387
+ - `dataloader_num_workers`: 0
388
+ - `dataloader_prefetch_factor`: None
389
+ - `past_index`: -1
390
+ - `disable_tqdm`: False
391
+ - `remove_unused_columns`: True
392
+ - `label_names`: None
393
+ - `load_best_model_at_end`: False
394
+ - `ignore_data_skip`: False
395
+ - `fsdp`: []
396
+ - `fsdp_min_num_params`: 0
397
+ - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
398
+ - `fsdp_transformer_layer_cls_to_wrap`: None
399
+ - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
400
+ - `deepspeed`: None
401
+ - `label_smoothing_factor`: 0.0
402
+ - `optim`: adamw_torch
403
+ - `optim_args`: None
404
+ - `adafactor`: False
405
+ - `group_by_length`: False
406
+ - `length_column_name`: length
407
+ - `ddp_find_unused_parameters`: None
408
+ - `ddp_bucket_cap_mb`: None
409
+ - `ddp_broadcast_buffers`: False
410
+ - `dataloader_pin_memory`: True
411
+ - `dataloader_persistent_workers`: False
412
+ - `skip_memory_metrics`: True
413
+ - `use_legacy_prediction_loop`: False
414
+ - `push_to_hub`: False
415
+ - `resume_from_checkpoint`: None
416
+ - `hub_model_id`: None
417
+ - `hub_strategy`: every_save
418
+ - `hub_private_repo`: None
419
+ - `hub_always_push`: False
420
+ - `gradient_checkpointing`: False
421
+ - `gradient_checkpointing_kwargs`: None
422
+ - `include_inputs_for_metrics`: False
423
+ - `include_for_metrics`: []
424
+ - `eval_do_concat_batches`: True
425
+ - `fp16_backend`: auto
426
+ - `push_to_hub_model_id`: None
427
+ - `push_to_hub_organization`: None
428
+ - `mp_parameters`:
429
+ - `auto_find_batch_size`: False
430
+ - `full_determinism`: False
431
+ - `torchdynamo`: None
432
+ - `ray_scope`: last
433
+ - `ddp_timeout`: 1800
434
+ - `torch_compile`: False
435
+ - `torch_compile_backend`: None
436
+ - `torch_compile_mode`: None
437
+ - `dispatch_batches`: None
438
+ - `split_batches`: None
439
+ - `include_tokens_per_second`: False
440
+ - `include_num_input_tokens_seen`: False
441
+ - `neftune_noise_alpha`: None
442
+ - `optim_target_modules`: None
443
+ - `batch_eval_metrics`: False
444
+ - `eval_on_start`: False
445
+ - `use_liger_kernel`: False
446
+ - `eval_use_gather_object`: False
447
+ - `average_tokens_across_devices`: False
448
+ - `prompts`: None
449
+ - `batch_sampler`: batch_sampler
450
+ - `multi_dataset_batch_sampler`: proportional
451
+
452
+ </details>
453
+
454
+ ### Training Logs
455
+ | Epoch | Step | Training Loss |
456
+ |:------:|:-----:|:-------------:|
457
+ | 0.0485 | 500 | 3.89 |
458
+ | 0.0969 | 1000 | 3.373 |
459
+ | 0.1454 | 1500 | 3.1715 |
460
+ | 0.1939 | 2000 | 3.0414 |
461
+ | 0.2424 | 2500 | 2.9462 |
462
+ | 0.2908 | 3000 | 2.8691 |
463
+ | 0.3393 | 3500 | 2.8048 |
464
+ | 0.3878 | 4000 | 2.7501 |
465
+ | 0.4363 | 4500 | 2.7026 |
466
+ | 0.4847 | 5000 | 2.6601 |
467
+ | 0.5332 | 5500 | 2.6247 |
468
+ | 0.5817 | 6000 | 2.5951 |
469
+ | 0.6302 | 6500 | 2.5692 |
470
+ | 0.6786 | 7000 | 2.5447 |
471
+ | 0.7271 | 7500 | 2.5221 |
472
+ | 0.7756 | 8000 | 2.5026 |
473
+ | 0.8240 | 8500 | 2.4912 |
474
+ | 0.8725 | 9000 | 2.4732 |
475
+ | 0.9210 | 9500 | 2.4608 |
476
+ | 0.9695 | 10000 | 2.4548 |
477
+
478
+
479
+ ### Environmental Impact
480
+ Carbon emissions were measured using [CodeCarbon](https://github.com/mlco2/codecarbon).
481
+ - **Energy Consumed**: 1.944 kWh
482
+ - **Carbon Emitted**: 0.717 kg of CO2
483
+ - **Hours Used**: 5.34 hours
484
+
485
+ ### Training Hardware
486
+ - **On Cloud**: Yes
487
+ - **GPU Model**: 1 x NVIDIA A100-SXM4-40GB
488
+ - **CPU Model**: Intel(R) Xeon(R) CPU @ 2.20GHz
489
+ - **RAM Size**: 83.48 GB
490
+
491
+ ### Framework Versions
492
+ - Python: 3.10.16
493
+ - Sentence Transformers: 4.1.0
494
+ - Transformers: 4.48.3
495
+ - PyTorch: 2.6.0+cu126
496
+ - Accelerate: 1.3.0
497
+ - Datasets: 3.5.1
498
+ - Tokenizers: 0.21.0
499
+
500
+ ## Citation
501
+
502
+ ### BibTeX
503
+
504
+ #### JobBERT-v3 Paper
505
+ ```bibtex
506
+ @misc{decorte2025multilingualjobbertcrosslingualjob,
507
+ title={Multilingual JobBERT for Cross-Lingual Job Title Matching},
508
+ author={Jens-Joris Decorte and Matthias De Lange and Jeroen Van Hautte},
509
+ year={2025},
510
+ eprint={2507.21609},
511
+ archivePrefix={arXiv},
512
+ primaryClass={cs.CL},
513
+ url={https://arxiv.org/abs/2507.21609},
514
+ }
515
+ ```
516
+
517
+ #### Sentence Transformers
518
+ ```bibtex
519
+ @inproceedings{reimers-2019-sentence-bert,
520
+ title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
521
+ author = "Reimers, Nils and Gurevych, Iryna",
522
+ booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
523
+ month = "11",
524
+ year = "2019",
525
+ publisher = "Association for Computational Linguistics",
526
+ url = "https://arxiv.org/abs/1908.10084",
527
+ }
528
+ ```
529
+
530
+ #### CachedMultipleNegativesRankingLoss
531
+ ```bibtex
532
+ @misc{gao2021scaling,
533
+ title={Scaling Deep Contrastive Learning Batch Size under Memory Limited Setup},
534
+ author={Luyu Gao and Yunyi Zhang and Jiawei Han and Jamie Callan},
535
+ year={2021},
536
+ eprint={2101.06983},
537
+ archivePrefix={arXiv},
538
+ primaryClass={cs.LG}
539
+ }
540
+ ```
541
+
542
+ <!--
543
+ ## Glossary
544
+
545
+ *Clearly define terms in order to be accessible across audiences.*
546
+ -->
547
+
548
+ <!--
549
+ ## Model Card Authors
550
+
551
+ *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
552
+ -->
553
+
554
+ <!--
555
+ ## Model Card Contact
556
+
557
+ *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
558
+ -->
config.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "XLMRobertaModel"
4
+ ],
5
+ "attention_probs_dropout_prob": 0.1,
6
+ "bos_token_id": 0,
7
+ "classifier_dropout": null,
8
+ "eos_token_id": 2,
9
+ "gradient_checkpointing": false,
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 768,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 3072,
15
+ "layer_norm_eps": 1e-05,
16
+ "max_position_embeddings": 514,
17
+ "model_type": "xlm-roberta",
18
+ "num_attention_heads": 12,
19
+ "num_hidden_layers": 12,
20
+ "output_past": true,
21
+ "pad_token_id": 1,
22
+ "position_embedding_type": "absolute",
23
+ "torch_dtype": "float32",
24
+ "transformers_version": "4.53.3",
25
+ "type_vocab_size": 1,
26
+ "use_cache": true,
27
+ "vocab_size": 250002
28
+ }
config_sentence_transformers.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "__version__": {
3
+ "sentence_transformers": "5.1.1",
4
+ "transformers": "4.53.3",
5
+ "pytorch": "2.8.0"
6
+ },
7
+ "prompts": {
8
+ "query": "",
9
+ "document": ""
10
+ },
11
+ "default_prompt_name": null,
12
+ "similarity_fn_name": "cosine",
13
+ "model_type": "SentenceTransformer"
14
+ }
modules.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "idx": 0,
4
+ "name": "0",
5
+ "path": "",
6
+ "type": "sentence_transformers.models.Transformer"
7
+ },
8
+ {
9
+ "idx": 1,
10
+ "name": "1",
11
+ "path": "1_Pooling",
12
+ "type": "sentence_transformers.models.Pooling"
13
+ },
14
+ {
15
+ "idx": 2,
16
+ "name": "2",
17
+ "path": "2_Router",
18
+ "type": "sentence_transformers.models.Router"
19
+ }
20
+ ]
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e9ad2581afd112c00a9ae3e5d1d730ef1b57417a7fbcd8ed2c5c34bc1c85577
3
+ size 1110092472
onnx/model_O1.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c9ac98181b8768291d1c1e8bf7f2b104691fb57dc137de5430c59b3cffffca64
3
+ size 1110002716
onnx/model_O2.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cbb67caaae4e734f1ddf207dfc58bd43b7543630c1f2476863067f65d0eea737
3
+ size 1109848298
onnx/model_O3.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c6b614f811418ac4890e308581a65f9aefd7f8eb32286ed013ee0d4b97929dc
3
+ size 1109848153
onnx/model_O4.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79287c58aec4d5fa630f9f29e7773a06c61dbba525e41a8deec5d480b68087f5
3
+ size 554948959
onnx/model_qint8_arm64.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3b2c2e5601e16ad3e046e05d5a509d94fb7edaff227d5ee553d370ac476c545
3
+ size 278752386
onnx/model_qint8_avx512.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3b2c2e5601e16ad3e046e05d5a509d94fb7edaff227d5ee553d370ac476c545
3
+ size 278752386
onnx/model_qint8_avx512_vnni.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3b2c2e5601e16ad3e046e05d5a509d94fb7edaff227d5ee553d370ac476c545
3
+ size 278752386
onnx/model_quint8_avx2.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b0b0074ce38713e94d14aeeb3161d3894fd6a986bf95ecb7bd9f449a57aeefa9
3
+ size 278752385
openvino/openvino_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0a18aeb1297470a1ce6bc8aaf07e1a66d0fffd7a849e3b09da51796a56968bc8
3
+ size 1109816496
openvino/openvino_model.xml ADDED
The diff for this file is too large to render. See raw diff
 
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "max_seq_length": 64,
3
+ "do_lower_case": false
4
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "cls_token": {
10
+ "content": "<s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "eos_token": {
17
+ "content": "</s>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "mask_token": {
24
+ "content": "<mask>",
25
+ "lstrip": true,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "pad_token": {
31
+ "content": "<pad>",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ },
37
+ "sep_token": {
38
+ "content": "</s>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false
43
+ },
44
+ "unk_token": {
45
+ "content": "<unk>",
46
+ "lstrip": false,
47
+ "normalized": false,
48
+ "rstrip": false,
49
+ "single_word": false
50
+ }
51
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc5c1151948923156f20bcafd54fd796705d693f8d7b56c83aec49d651f6d602
3
+ size 17082986
tokenizer_config.json ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<s>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "<pad>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "</s>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "<unk>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "250001": {
36
+ "content": "<mask>",
37
+ "lstrip": true,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "bos_token": "<s>",
45
+ "clean_up_tokenization_spaces": false,
46
+ "cls_token": "<s>",
47
+ "eos_token": "</s>",
48
+ "extra_special_tokens": {},
49
+ "mask_token": "<mask>",
50
+ "max_length": 128,
51
+ "model_max_length": 64,
52
+ "pad_to_multiple_of": null,
53
+ "pad_token": "<pad>",
54
+ "pad_token_type_id": 0,
55
+ "padding_side": "right",
56
+ "sep_token": "</s>",
57
+ "stride": 0,
58
+ "tokenizer_class": "XLMRobertaTokenizerFast",
59
+ "truncation_side": "right",
60
+ "truncation_strategy": "longest_first",
61
+ "unk_token": "<unk>"
62
+ }