[WIP] Upload folder using huggingface_hub (multi-commit 060de48e91a3cb5255e66404ec7c1e3fae97e7b5496b82d24f9500a2953d9279)
#1 opened by orionweller
- README.md +0 -80
- adapter_config.json +0 -34
- adapter_model.safetensors +0 -3
- special_tokens_map.json +0 -24
- tokenizer.json +0 -0
- tokenizer.model +0 -3
- tokenizer_config.json +0 -42
- training_args.bin +0 -3
README.md
DELETED
@@ -1,80 +0,0 @@

---
base_model: meta-llama/Llama-2-7b-hf
library_name: peft
---

# Reproduced RepLLaMA

This is a reproduced version of the RepLLaMA model. See [this thread](https://github.com/texttron/tevatron/issues/129) for details of the reproduction process, which differs from the original version.

# Other Links

| Binary | Description |
|:-------|:------------|
| [samaya-ai/promptriever-llama2-7b-v1](https://huggingface.co/samaya-ai/promptriever-llama2-7b-v1) | A Promptriever bi-encoder model based on LLaMA 2 (7B parameters). |
| [samaya-ai/promptriever-llama3.1-8b-instruct-v1](https://huggingface.co/samaya-ai/promptriever-llama3.1-8b-instruct-v1) | A Promptriever bi-encoder model based on LLaMA 3.1 Instruct (8B parameters). |
| [samaya-ai/promptriever-llama3.1-8b-v1](https://huggingface.co/samaya-ai/promptriever-llama3.1-8b-v1) | A Promptriever bi-encoder model based on LLaMA 3.1 (8B parameters). |
| [samaya-ai/promptriever-mistral-v0.1-7b-v1](https://huggingface.co/samaya-ai/promptriever-mistral-v0.1-7b-v1) | A Promptriever bi-encoder model based on Mistral v0.1 (7B parameters). |
| [samaya-ai/RepLLaMA-reproduced](https://huggingface.co/samaya-ai/RepLLaMA-reproduced) | A reproduction of the RepLLaMA model (no instructions). A bi-encoder based on LLaMA 2, trained on the [tevatron/msmarco-passage-aug](https://huggingface.co/datasets/Tevatron/msmarco-passage-aug) dataset. |
| [samaya-ai/msmarco-w-instructions](https://huggingface.co/samaya-ai/msmarco-w-instructions) | A dataset of MS MARCO with added instructions and instruction-negatives, used for training the above models. |

# Usage

You can use this model with the RepLLaMA example code in [tevatron](https://github.com/texttron/tevatron) or with mteb:

```python
import mteb

model = mteb.get_model("samaya-ai/RepLLaMA-reproduced")
tasks = mteb.get_tasks(tasks=["NFCorpus"], languages=["eng"])
evaluation = mteb.MTEB(tasks=tasks)
evaluation.run(model, batch_size=16)
```
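Alternatively, the LoRA adapter can be loaded directly with transformers and peft. This is a minimal sketch, assuming last-token (EOS) pooling with L2 normalization and the "query: "/"passage: " prefixes from the training command below; the `encode` helper is illustrative, not part of the repo:

```python
import torch
from peft import PeftModel
from transformers import AutoModel, AutoTokenizer

# Load the Llama-2 base model and apply this repo's LoRA adapter
# (requires access to meta-llama/Llama-2-7b-hf).
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
base = AutoModel.from_pretrained("meta-llama/Llama-2-7b-hf", torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base, "samaya-ai/RepLLaMA-reproduced").eval()

def encode(text: str) -> torch.Tensor:
    # Training used --append_eos_token, --pooling eos, and --normalize:
    # append </s>, take the hidden state at the final (EOS) token, L2-normalize.
    ids = tokenizer(text, return_tensors="pt").input_ids
    ids = torch.cat([ids, torch.tensor([[tokenizer.eos_token_id]])], dim=-1)
    with torch.no_grad():
        hidden = model(input_ids=ids).last_hidden_state
    return torch.nn.functional.normalize(hidden[0, -1], dim=-1)

# Queries and passages get the prefixes used in training.
q = encode("query: what is a bi-encoder?")
p = encode("passage: A bi-encoder embeds queries and documents independently.")
print((q @ p).item())  # embeddings are unit-normalized, so this is cosine similarity
```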
This reproduction was trained with the Tevatron codebase (commit 9bb8381) using the following command:

```bash
#!/bin/bash
deepspeed --include localhost:0,1,2,3 --master_port 60000 --module tevatron.retriever.driver.train \
  --deepspeed deepspeed/ds_zero3_config.json \
  --output_dir retriever-llama2-4gpu \
  --model_name_or_path meta-llama/Llama-2-7b-hf \
  --lora \
  --lora_r 32 \
  --lora_target_modules q_proj,k_proj,v_proj,o_proj,down_proj,up_proj,gate_proj \
  --save_steps 200 \
  --dataset_name Tevatron/msmarco-passage-aug \
  --query_prefix "query: " \
  --passage_prefix "passage: " \
  --bf16 \
  --pooling eos \
  --append_eos_token \
  --normalize \
  --temperature 0.01 \
  --per_device_train_batch_size 8 \
  --gradient_checkpointing \
  --train_group_size 16 \
  --learning_rate 1e-4 \
  --query_max_len 32 \
  --passage_max_len 196 \
  --num_train_epochs 1 \
  --logging_steps 10 \
  --overwrite_output_dir \
  --warmup_steps 100 \
  --gradient_accumulation_steps 4
```

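For reference, the effective batch size here is 8 per device × 4 GPUs × 4 gradient-accumulation steps = 128 queries per optimizer step, and with `--train_group_size 16` each query is contrasted against a group of 16 passages.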
# Citation

For citation, please also see the [original RepLLaMA paper](https://arxiv.org/abs/2310.08319) and feel free to cite Promptriever as well:

```bibtex
@article{weller2024promptriever,
    title={Promptriever: Instruction-Trained Retrievers Can Be Prompted Like Language Models},
    author={Orion Weller and Benjamin Van Durme and Dawn Lawrie and Ashwin Paranjape and Yuhao Zhang and Jack Hessel},
    year={2024},
    eprint={2409.11136},
    archivePrefix={arXiv},
    primaryClass={cs.IR},
    url={https://arxiv.org/abs/2409.11136},
}
```
adapter_config.json
DELETED
@@ -1,34 +0,0 @@

{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "meta-llama/Llama-2-7b-hf",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 64,
  "lora_dropout": 0.1,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 32,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "v_proj",
    "up_proj",
    "down_proj",
    "o_proj",
    "q_proj",
    "gate_proj",
    "k_proj"
  ],
  "task_type": "FEATURE_EXTRACTION",
  "use_dora": false,
  "use_rslora": false
}
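This adapter config matches the `--lora_r 32` and `--lora_target_modules` flags in the README's training command (note `lora_alpha` = 64, i.e. a scaling factor alpha/r of 2). As a sketch, the equivalent peft config would look roughly like:

```python
from peft import LoraConfig, TaskType

# Mirrors adapter_config.json: rank-32 LoRA with alpha 64 and dropout 0.1,
# applied to every attention and MLP projection of the Llama-2 decoder.
lora_config = LoraConfig(
    task_type=TaskType.FEATURE_EXTRACTION,
    r=32,
    lora_alpha=64,
    lora_dropout=0.1,
    bias="none",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "down_proj", "up_proj", "gate_proj",
    ],
)
```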
adapter_model.safetensors
DELETED
@@ -1,3 +0,0 @@

version https://git-lfs.github.com/spec/v1
oid sha256:427e98cbebcc7c87284cd361c7f3573515d5f8d070019f9ef52a1209dec2e7c6
size 159965640
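The ~160 MB size is consistent with the adapter config above: rank-32 LoRA on all seven attention and MLP projections of a 7B Llama-2 model comes to roughly 80M adapter parameters, or about 160 MB at 2 bytes per parameter in 16-bit precision (a back-of-the-envelope check, not an exact count).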
special_tokens_map.json
DELETED
@@ -1,24 +0,0 @@

{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "</s>",
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
DELETED
The diff for this file is too large to render; see the raw diff.
tokenizer.model
DELETED
@@ -1,3 +0,0 @@

version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723
tokenizer_config.json
DELETED
@@ -1,42 +0,0 @@

{
  "add_bos_token": true,
  "add_eos_token": false,
  "add_prefix_space": null,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": false,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "</s>",
  "padding_side": "right",
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
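Note that `add_eos_token` is false here, while the model was trained with `--append_eos_token` and EOS pooling, so callers have to append `</s>` themselves when encoding (as the loading sketch above does). A quick illustrative check, assuming the tokenizer files are available in the repo:

```python
from transformers import AutoTokenizer

# The tokenizer adds <s> but not </s>; append the EOS id manually to match training.
tokenizer = AutoTokenizer.from_pretrained("samaya-ai/RepLLaMA-reproduced")
ids = tokenizer("query: hello world").input_ids
ids.append(tokenizer.eos_token_id)
print(tokenizer.convert_ids_to_tokens(ids))  # ['<s>', ..., '</s>']
```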
training_args.bin
DELETED
@@ -1,3 +0,0 @@

version https://git-lfs.github.com/spec/v1
oid sha256:bbfc3e46a521a56d396f4b7986a8974b72cfad3e69454bf7377bd9cfdb699663
size 7736