Add new SentenceTransformer model
- .gitattributes +1 -0
- 1_Pooling/config.json +10 -0
- README.md +577 -0
- config.json +67 -0
- config_sentence_transformers.json +16 -0
- configuration_xlm_roberta.py +130 -0
- custom_st.py +229 -0
- model.safetensors +3 -0
- modules.json +23 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +51 -0
- tokenizer.json +3 -0
- tokenizer_config.json +62 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json ADDED
@@ -0,0 +1,10 @@
{
  "word_embedding_dimension": 1024,
  "pooling_mode_cls_token": false,
  "pooling_mode_mean_tokens": true,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false,
  "pooling_mode_weightedmean_tokens": false,
  "pooling_mode_lasttoken": false,
  "include_prompt": true
}
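This pooling config selects attention-mask mean pooling over the 1024-dimensional token embeddings. As a minimal sketch of what that computes (an illustration of the configured mode, not the `sentence_transformers.models.Pooling` source):

```python
import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # token_embeddings: (batch, seq_len, 1024); attention_mask: (batch, seq_len) of 0/1.
    mask = attention_mask.unsqueeze(-1).to(token_embeddings.dtype)
    summed = (token_embeddings * mask).sum(dim=1)   # sum over real (non-padding) tokens
    counts = mask.sum(dim=1).clamp(min=1e-9)        # number of real tokens per sequence
    return summed / counts                          # (batch, 1024) mean-pooled embeddings
```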
README.md ADDED
@@ -0,0 +1,577 @@
---
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:199321
- loss:CachedInfonce
widget:
- source_sentence: What organization is the person excited about donating to
  sentences:
  - The superiority of Balcomy, next to Crail, Fife, in 1394 was possessed by Nicholas de Hay, and on 15 January that year it passed to David Lindsay of Carnbie. indicating that George Lauder only held Balcomy by hereditary feu.
  - However, Robertson pulled out of the bout citing injury and was replaced by Tim Means. He lost the back and forth fight via submission in the third round. Sullivan was expected to face Marcio Alexandre Jr. on July 12, 2015, at The Ultimate Fighter 21 Finale. However, Alexandre pulled out of the fight during the week leading up to the event citing a rib injury and was replaced by promotional newcomer Dominic Waters. Sullivan won the one-sided fight via unanimous decision. Sullivan faced Alexander Yakovlev at UFC on Fox 18 on January 30, 2016. He lost the fight via knockout in the first round.
  - I am super excited about donating to the ASPCA, I really wish I had the financial means to be part of their monthly donation club. In my last post I mentioned some charms I made for some friends, one a loyal customer at my shop.
- source_sentence: What are the four most famous wildlife sanctuaries in Rajasthan
  sentences:
  - There are two sets of mushroom dies in a set with several plants/fungi dies and I have been finding myself drawn to mushroom images lately so I tackled those! Each mushroom is 6 pieces!
  - Copernicia is a genus of palms native to South America and the Greater Antilles. Of the known species and nothospecies (hybrids), 22 of the 27 are endemic to Cuba. They are fan palms (Arecaceae tribe Corypheae), with the leaves with a bare petiole terminating in a rounded fan of numerous leaflets.
  - The four most famous and visited wildlife sanctuaries of Rajasthan are Ranthambore National Park, Desert National Park, Sariska Wildlife Sanctuary and Bharatpur Bird Sanctuary. These natural preserves are world renowned for their stable and growing tiger population, which, until recently, was dwindling alarmingly. The sanctuaries are thronged by various wildlife enthusiasts and photographers who want to catch this majestic animal in their cameras.
- source_sentence: What were the political ideologies of the individuals involved in the inoculation debate in Norfolk
  sentences:
  - Once we typed in the site information and pressed Enter, the page loaded and we were directed to confirm that we were at least 21 years of age. Now, we come to the ListCrawler homepage.
  - Interestingly, pro and anti-inoculation stances also seemed to be defined by political ideology. Dr. Archibald Campbell along with a group of Norfolk gentlemen decided to employ inoculator Dr. John Dalgleish to inoculate them and their families. Dr. Dalgleish published an article in the Virginia Gazette in support of inoculation, declaring smallpox an epidemic since ships coming from the West Indies constantly brought infected people to the area. The public of Norfolk blamed this epidemic on inoculation rather than the ships, and opposed inoculation altogether. Dr. Campbell was a Loyalist, and was the only Loyalist physician in Norfolk. While leaders of anti-inoculation groups, Maximillian Calvert and Paul Loyal, were Patriots, along with every other physician in Norfolk, who opposed inoculation as well.
  - "Incumbent Democratic Governor Pat Morris Neff won re-election to a second term, defeating Republican candidate William Hawley Atwell in a landslide. Democratic primary \n\nGovernor Neff won in the Democratic party against three different challengers relatively comfortably, and narrowly avoided a runoff."
- source_sentence: What is the Brazyn Roller and what makes it effective
  sentences:
  - Rolling with the Brazyn Roller. This is a killer roller with great "knobs" to get a little bit deeper that you can also take with you.
  - (born July 26, 1974) is a Japanese musical lyricist and keyboardist under Giza Studio label. She is a former member of the Japanese pop band Garnet Crow, primarily as the lead songwriter and keyboardist.
  - Poux is a French surname.
- source_sentence: What items are present in the described setting along with the policy on pets
  sentences:
  - In 1839 Sergeant John Adams and Dr. John Conolly were impressed by the work of Hill, and introduced the method into their Hanwell Asylum, by then the largest in the country. Hill's system was adapted, since Conolly was unable to supervise each attendant as closely as Hill had done.
  - There is also a grandfather clock and two oriental lions on grey marbled pedestals. Pets are allowed (Charges may be applicable)
  - 'Investigators stated that Philoumenos appeared to have been trying to protect his face with his hands when a blow to his face or head severed one finger on each hand. Raby escaped the scene of the crime undetected. Raby was subsequently found to have acted alone, "without any connection to a religious or political entity." An investigation launched by the Israeli police initially failed to identify the killer. Raby was arrested on 17 November 1982 as he again attempted enter the Monastery at Jacob''s Well illicitly by climbing over a wall; he was carrying hand grenades. Raby supplied the police with accurate details of his earlier, previously unsolved, crimes. These were the murder of Philoumenos; a March 1979 murder of a Jewish gynecologist in Tel-Aviv; the murder of the family of a woman in Lod, Israel in April 1979 who claimed to have clairvoyant powers; and an assault on a nun at the Jacob''s Well holy site in April 1982. The nun was seriously wounded in the attack. Both she and the gynecologist were attacked by axe, according to prosecutors. Raby, a newly religious Jew, was described as unwashed, dressed in worn-out clothing, and audibly muttered passages of scripture in a strange manner. Psychiatric evaluations found that he was mentally incompetent to stand trial; he was committed to a mental hospital; details of his subsequent whereabouts are restricted by privacy regulations. At a court hearing after his arrest, an Israeli prosecutor told the court that Raby was convinced that the monastery was the site of the ancient Jewish Temple, and that he made an attempt on the life of the nun "in response to a divine command." Erroneous accounts

    Initial accounts depicted the murder as an anti-Christian hate attack carried out by a group of Jewish settlers, the result being what Maariv described as "a wave of hatred" in Greece. Reports indicating that "radical Jews" had tortured Philoumenos and "cut off the fingers of his hand" before killing him had appeared in Greek newspapers. Maariv also quoted an official in the Greek Orthodox Patriarchate in Jerusalem asserting that "the murder was carried out by radical religious Jews" claiming that "the Well does not belong to Christians but to Jews". In a 2017 article in the journal Israel Studies, researchers David Gurevich and Yisca Harani found that false accounts blaming the slaying on "settlers" and "Zionist extremists" persisted even after the arrest of the assailant and his confinement in a mental institution, and that there were "patterns of ritual murder accusation in the popular narrative." The same theme was echoed in parts of the Eastern Orthodox community and by some secular sources, including Blackwell''s Dictionary of Eastern Christianity, the Encyclopedia of the Israeli-Palestinian Conflict, The Spectator and Times Literary Supplement, as well as Wikipedia. Gurevich and Harani contended that a 1989 account of the murder, published in Orthodox America, a publication of the Russian Orthodox Church Outside Russia, became the basis of an anti-Semitic ritual murder narrative, according to which a group of anti-Christianity Jews first harassed Philoumenos and destroyed Christian holy objects at the monastery, then murdered him. Veneration

    In 2009 the Greek Orthodox Patriarchate of Jerusalem recognised him as a holy martyr of the Eastern Orthodox Church, thirty years after his "martyrdom". The "careful" wording of the pronouncement of the Jerusalem Patriarchate that canonized Philoumenos makes no mention of murderer''s faith or ethnicity; he is described as a "vile man" a "heterodox fanatic visitor" and, inaccurately, as an individual who "with an axe, opened a deep cut across his forehead, cut off the fingers of his right hand, and upon escaping threw a grenade which ended the Father''s life."'
pipeline_tag: sentence-similarity
library_name: sentence-transformers
---

# SentenceTransformer

This is a [sentence-transformers](https://www.SBERT.net) model. It maps sentences and paragraphs to a 1024-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

## Model Details

### Model Description
- **Model Type:** Sentence Transformer
<!-- - **Base model:** [Unknown](https://huggingface.co/unknown) -->
- **Maximum Sequence Length:** 8192 tokens
- **Output Dimensionality:** 1024 dimensions
- **Similarity Function:** Cosine Similarity
<!-- - **Training Dataset:** Unknown -->
<!-- - **Language:** Unknown -->
<!-- - **License:** Unknown -->

### Model Sources

- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
### Full Model Architecture

```
SentenceTransformer(
  (transformer): Transformer(
    (auto_model): XLMRobertaLoRA(
      (roberta): XLMRobertaModel(
        (embeddings): XLMRobertaEmbeddings(
          (word_embeddings): ParametrizedEmbedding(
            250002, 1024, padding_idx=1
            (parametrizations): ModuleDict(
              (weight): ParametrizationList(
                (0): LoRAParametrization()
              )
            )
          )
          (token_type_embeddings): ParametrizedEmbedding(
            1, 1024
            (parametrizations): ModuleDict(
              (weight): ParametrizationList(
                (0): LoRAParametrization()
              )
            )
          )
        )
        (emb_drop): Dropout(p=0.1, inplace=False)
        (emb_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
        (encoder): XLMRobertaEncoder(
          (layers): ModuleList(
            (0-23): 24 x Block(
              (mixer): MHA(
                (rotary_emb): RotaryEmbedding()
                (Wqkv): ParametrizedLinearResidual(
                  in_features=1024, out_features=3072, bias=True
                  (parametrizations): ModuleDict(
                    (weight): ParametrizationList(
                      (0): LoRAParametrization()
                    )
                  )
                )
                (inner_attn): FlashSelfAttention(
                  (drop): Dropout(p=0.1, inplace=False)
                )
                (inner_cross_attn): FlashCrossAttention(
                  (drop): Dropout(p=0.1, inplace=False)
                )
                (out_proj): ParametrizedLinear(
                  in_features=1024, out_features=1024, bias=True
                  (parametrizations): ModuleDict(
                    (weight): ParametrizationList(
                      (0): LoRAParametrization()
                    )
                  )
                )
              )
              (dropout1): Dropout(p=0.1, inplace=False)
              (drop_path1): StochasticDepth(p=0.0, mode=row)
              (norm1): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (mlp): Mlp(
                (fc1): ParametrizedLinear(
                  in_features=1024, out_features=4096, bias=True
                  (parametrizations): ModuleDict(
                    (weight): ParametrizationList(
                      (0): LoRAParametrization()
                    )
                  )
                )
                (fc2): ParametrizedLinear(
                  in_features=4096, out_features=1024, bias=True
                  (parametrizations): ModuleDict(
                    (weight): ParametrizationList(
                      (0): LoRAParametrization()
                    )
                  )
                )
              )
              (dropout2): Dropout(p=0.1, inplace=False)
              (drop_path2): StochasticDepth(p=0.0, mode=row)
              (norm2): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
            )
          )
        )
        (pooler): XLMRobertaPooler(
          (dense): ParametrizedLinear(
            in_features=1024, out_features=1024, bias=True
            (parametrizations): ModuleDict(
              (weight): ParametrizationList(
                (0): LoRAParametrization()
              )
            )
          )
          (activation): Tanh()
        )
      )
    )
  )
  (pooler): Pooling({'word_embedding_dimension': 1024, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
  (normalizer): Normalize()
)
```
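Every `ParametrizedEmbedding`/`ParametrizedLinear` above wraps its weight in a `LoRAParametrization`, which is how the five task adapters from `config.json` are realized (the actual module dispatches per task via an adapter mask; see `custom_st.py` below). As a minimal sketch of the idea only — not this repo's implementation — a LoRA parametrization rewrites a weight as W + (alpha/rank)·B·A:

```python
import torch
from torch import nn
from torch.nn.utils import parametrize

class LoRAParametrization(nn.Module):
    """Sketch: reparametrize W as W + (alpha / rank) * B @ A."""
    def __init__(self, fan_out: int, fan_in: int, rank: int = 4, alpha: float = 1.0):
        super().__init__()
        self.A = nn.Parameter(torch.randn(rank, fan_in) * 0.01)
        self.B = nn.Parameter(torch.zeros(fan_out, rank))  # zero init: no-op at start
        self.scale = alpha / rank

    def forward(self, weight: torch.Tensor) -> torch.Tensor:
        return weight + self.scale * (self.B @ self.A)

layer = nn.Linear(1024, 1024)
parametrize.register_parametrization(
    layer, "weight", LoRAParametrization(1024, 1024, rank=4, alpha=1.0)
)
# `layer` now prints as ParametrizedLinear(... (parametrizations): ModuleDict(...)),
# matching the repr pattern in the architecture dump above.
```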

## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.
```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("Jrinky/final_stage1")
# Run inference
sentences = [
    'What items are present in the described setting along with the policy on pets',
    'There is also a grandfather clock and two oriental lions on grey marbled pedestals. Pets are allowed (Charges may be applicable)',
    'Investigators stated that Philoumenos appeared to have been trying to protect his face with his hands when a blow to his face or head severed one finger on each hand. Raby escaped the scene of the crime undetected. Raby was subsequently found to have acted alone, "without any connection to a religious or political entity." An investigation launched by the Israeli police initially failed to identify the killer. Raby was arrested on 17 November 1982 as he again attempted enter the Monastery at Jacob\'s Well illicitly by climbing over a wall; he was carrying hand grenades. Raby supplied the police with accurate details of his earlier, previously unsolved, crimes. These were the murder of Philoumenos; a March 1979 murder of a Jewish gynecologist in Tel-Aviv; the murder of the family of a woman in Lod, Israel in April 1979 who claimed to have clairvoyant powers; and an assault on a nun at the Jacob\'s Well holy site in April 1982. The nun was seriously wounded in the attack. Both she and the gynecologist were attacked by axe, according to prosecutors. Raby, a newly religious Jew, was described as unwashed, dressed in worn-out clothing, and audibly muttered passages of scripture in a strange manner. Psychiatric evaluations found that he was mentally incompetent to stand trial; he was committed to a mental hospital; details of his subsequent whereabouts are restricted by privacy regulations. At a court hearing after his arrest, an Israeli prosecutor told the court that Raby was convinced that the monastery was the site of the ancient Jewish Temple, and that he made an attempt on the life of the nun "in response to a divine command." Erroneous accounts\nInitial accounts depicted the murder as an anti-Christian hate attack carried out by a group of Jewish settlers, the result being what Maariv described as "a wave of hatred" in Greece. Reports indicating that "radical Jews" had tortured Philoumenos and "cut off the fingers of his hand" before killing him had appeared in Greek newspapers. Maariv also quoted an official in the Greek Orthodox Patriarchate in Jerusalem asserting that "the murder was carried out by radical religious Jews" claiming that "the Well does not belong to Christians but to Jews". In a 2017 article in the journal Israel Studies, researchers David Gurevich and Yisca Harani found that false accounts blaming the slaying on "settlers" and "Zionist extremists" persisted even after the arrest of the assailant and his confinement in a mental institution, and that there were "patterns of ritual murder accusation in the popular narrative." The same theme was echoed in parts of the Eastern Orthodox community and by some secular sources, including Blackwell\'s Dictionary of Eastern Christianity, the Encyclopedia of the Israeli-Palestinian Conflict, The Spectator and Times Literary Supplement, as well as Wikipedia. Gurevich and Harani contended that a 1989 account of the murder, published in Orthodox America, a publication of the Russian Orthodox Church Outside Russia, became the basis of an anti-Semitic ritual murder narrative, according to which a group of anti-Christianity Jews first harassed Philoumenos and destroyed Christian holy objects at the monastery, then murdered him. Veneration\nIn 2009 the Greek Orthodox Patriarchate of Jerusalem recognised him as a holy martyr of the Eastern Orthodox Church, thirty years after his "martyrdom". The "careful" wording of the pronouncement of the Jerusalem Patriarchate that canonized Philoumenos makes no mention of murderer\'s faith or ethnicity; he is described as a "vile man" a "heterodox fanatic visitor" and, inaccurately, as an individual who "with an axe, opened a deep cut across his forehead, cut off the fingers of his right hand, and upon escaping threw a grenade which ended the Father\'s life."',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 1024]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```
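Note that this checkpoint's `config.json` routes `AutoModel` through custom remote code (`jinaai/xlm-roberta-flash-implementation`, see below), so loading may additionally require opting in to remote code. A hedged one-liner, assuming the standard Hugging Face flag applies here:

```python
model = SentenceTransformer("Jrinky/final_stage1", trust_remote_code=True)
```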

<!--
### Direct Usage (Transformers)

<details><summary>Click to see the direct usage in Transformers</summary>

</details>
-->

<!--
### Downstream Usage (Sentence Transformers)

You can finetune this model on your own dataset.

<details><summary>Click to expand</summary>

</details>
-->

<!--
### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Dataset

#### Unnamed Dataset

* Size: 199,321 training samples
* Columns: <code>anchor</code> and <code>positive</code>
* Approximate statistics based on the first 1000 samples:
  |         | anchor                                                                             | positive                                                                              |
  |:--------|:-----------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------|
  | type    | string                                                                             | string                                                                                |
  | details | <ul><li>min: 7 tokens</li><li>mean: 17.09 tokens</li><li>max: 46 tokens</li></ul> | <ul><li>min: 8 tokens</li><li>mean: 109.45 tokens</li><li>max: 1835 tokens</li></ul> |
* Samples:
  | anchor | positive |
  |:-------|:---------|
  | <code>Where is Nagpada located</code> | <code>Nagpada is a neighbourhood in South Mumbai.</code> |
  | <code>What types of players are associated with Folkestone F.C., Midland Football League, and English Football League</code> | <code>players<br>Folkestone F.C. players<br>Midland Football League players<br>English Football League players</code> |
  | <code>What is Anthony Elujoba known for in the field of Pharmacognosy</code> | <code>Anthony Elujoba (born 1948) is a Nigerian professor of Pharmacognosy, fondly referred to as the "village chemist" because of his involvement in research into medicinal plants. He was acting vice chancellor of Obafemi Awolowo University, Nigeria.</code> |
* Loss: <code>cachedselfloss.CachedInfonce</code> with these parameters:
  ```json
  {
      "scale": 20.0,
      "similarity_fct": "cos_sim"
  }
  ```
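`CachedInfonce` appears to be a cached (GradCache-style, per the citation at the bottom of this card) variant of the InfoNCE loss with in-batch negatives. A minimal sketch of the uncached objective with these parameters — scale 20 and cosine similarity — assuming the usual anchor/positive layout; the cached variant computes the same loss in mini-batch chunks so the 2000-example batches fit in memory:

```python
import torch
import torch.nn.functional as F

def infonce(anchor_emb: torch.Tensor, positive_emb: torch.Tensor, scale: float = 20.0) -> torch.Tensor:
    # Scaled cosine similarity between every anchor and every positive: (batch, batch).
    logits = F.normalize(anchor_emb, dim=1) @ F.normalize(positive_emb, dim=1).T * scale
    # Each anchor's true positive sits on the diagonal; all other columns act as in-batch negatives.
    labels = torch.arange(logits.size(0), device=logits.device)
    return F.cross_entropy(logits, labels)
```

This is also why the `no_duplicates` batch sampler below matters: duplicate positives inside a batch would turn valid pairs into false negatives.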

### Evaluation Dataset

#### Unnamed Dataset

* Size: 4,068 evaluation samples
* Columns: <code>anchor</code> and <code>positive</code>
* Approximate statistics based on the first 1000 samples:
  |         | anchor                                                                             | positive                                                                              |
  |:--------|:-----------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------|
  | type    | string                                                                             | string                                                                                |
  | details | <ul><li>min: 6 tokens</li><li>mean: 17.26 tokens</li><li>max: 40 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 108.28 tokens</li><li>max: 2233 tokens</li></ul> |
* Samples:
  | anchor | positive |
  |:-------|:---------|
  | <code>What metaphor is being used to describe collaboration in the text segment</code> | <code>Like two oxen in a field, tied shoulder to shoulder. With Jesus doing all of the heavy lifting.</code> |
  | <code>What titles did McGurk win while playing as a schoolboy and a student</code> | <code>He won consecutive MacRory Cup titles lining out as a schoolboy with St Patrick's College, Maghera before winning a Sigerson Cup title as a student at Queen's University Belfast. McGurk progressed onto the Lavey senior teams in both codes and was corner-forward on the team that won the All-Ireland SCFC title in 1991.</code> |
  | <code>What are the borders of the Trinity-Bellwoods neighborhood in Toronto</code> | <code>Trinity-Bellwoods is an inner city neighbourhood in Toronto, Ontario, Canada. It is bounded on the east by Bathurst Street, on the north by College Street, on the south by Queen Street West, and by Dovercourt Road on the west.</code> |
* Loss: <code>cachedselfloss.CachedInfonce</code> with these parameters:
  ```json
  {
      "scale": 20.0,
      "similarity_fct": "cos_sim"
  }
  ```

### Training Hyperparameters
#### Non-Default Hyperparameters

- `eval_strategy`: steps
- `per_device_train_batch_size`: 2000
- `per_device_eval_batch_size`: 2000
- `learning_rate`: 2e-05
- `num_train_epochs`: 1
- `warmup_ratio`: 0.1
- `bf16`: True
- `batch_sampler`: no_duplicates

#### All Hyperparameters
<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: steps
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 2000
- `per_device_eval_batch_size`: 2000
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 1
- `eval_accumulation_steps`: None
- `torch_empty_cache_steps`: None
- `learning_rate`: 2e-05
- `weight_decay`: 0.0
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1.0
- `num_train_epochs`: 1
- `max_steps`: -1
- `lr_scheduler_type`: linear
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.1
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: True
- `fp16`: False
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: False
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: None
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `include_for_metrics`: []
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`:
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `dispatch_batches`: None
- `split_batches`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `eval_on_start`: False
- `use_liger_kernel`: False
- `eval_use_gather_object`: False
- `average_tokens_across_devices`: False
- `prompts`: None
- `batch_sampler`: no_duplicates
- `multi_dataset_batch_sampler`: proportional

</details>

### Training Logs
| Epoch | Step | Training Loss | Validation Loss |
|:-----:|:----:|:-------------:|:---------------:|
| 0.1   | 10   | 0.3939        | 0.4079          |
| 0.2   | 20   | 0.4225        | 0.3920          |
| 0.3   | 30   | 0.4067        | 0.3819          |
| 0.4   | 40   | 0.3918        | 0.3760          |
| 0.5   | 50   | 0.4631        | 0.3719          |
| 0.6   | 60   | 0.3806        | 0.3686          |
| 0.7   | 70   | 0.3971        | 0.3663          |
| 0.8   | 80   | 0.3788        | 0.3655          |
| 0.9   | 90   | 0.3852        | 0.3649          |
| 1.0   | 100  | 0.3881        | 0.3648          |


### Framework Versions
- Python: 3.11.8
- Sentence Transformers: 3.4.1
- Transformers: 4.49.0
- PyTorch: 2.4.0+cu121
- Accelerate: 1.4.0
- Datasets: 3.3.2
- Tokenizers: 0.21.0

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

#### CachedInfonce
```bibtex
@misc{gao2021scaling,
    title={Scaling Deep Contrastive Learning Batch Size under Memory Limited Setup},
    author={Luyu Gao and Yunyi Zhang and Jiawei Han and Jamie Callan},
    year={2021},
    eprint={2101.06983},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
config.json ADDED
@@ -0,0 +1,67 @@
{
  "_name_or_path": "C:\\Users\\zhuyu\\Desktop\\fsdownload\\final4",
  "architectures": [
    "XLMRobertaLoRA"
  ],
  "attention_probs_dropout_prob": 0.1,
  "auto_map": {
    "AutoConfig": "configuration_xlm_roberta.XLMRobertaFlashConfig",
    "AutoModel": "jinaai/xlm-roberta-flash-implementation--modeling_lora.XLMRobertaLoRA",
    "AutoModelForMaskedLM": "jinaai/xlm-roberta-flash-implementation--modeling_xlm_roberta.XLMRobertaForMaskedLM",
    "AutoModelForPreTraining": "jinaai/xlm-roberta-flash-implementation--modeling_xlm_roberta.XLMRobertaForPreTraining"
  },
  "bos_token_id": 0,
  "classifier_dropout": null,
  "emb_pooler": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-05,
  "load_trained_adapters": true,
  "lora_adaptations": [
    "retrieval.query",
    "retrieval.passage",
    "separation",
    "classification",
    "text-matching"
  ],
  "lora_alpha": 1,
  "lora_dropout_p": 0.0,
  "lora_main_params_trainable": false,
  "lora_rank": 4,
  "matryoshka_dimensions": [
    32,
    64,
    128,
    256,
    512,
    768,
    1024
  ],
  "max_position_embeddings": 8194,
  "model_type": "xlm-roberta",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "output_past": true,
  "pad_token_id": 1,
  "position_embedding_type": "rotary",
  "rotary_emb_base": 20000.0,
  "task_instructions": {
    "classification": "",
    "retrieval.passage": "Represent the document for retrieval: ",
    "retrieval.query": "Represent the query for retrieving evidence documents: ",
    "separation": "",
    "text-matching": ""
  },
  "torch_dtype": "bfloat16",
  "transformers_version": "4.49.0",
  "truncate_dim": null,
  "type_vocab_size": 1,
  "use_cache": true,
  "use_flash_attn": true,
  "use_reentrant": false,
  "vocab_size": 250002
}
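The `matryoshka_dimensions` entry advertises that embeddings can be truncated to shorter prefixes. A hedged sketch of the usual Matryoshka recipe — slice the leading dimensions, then re-normalize (the `truncate_dim` field, and the corresponding `SentenceTransformer(..., truncate_dim=...)` argument, automate this):

```python
import torch.nn.functional as F

emb = model.encode(["some text"], convert_to_tensor=True)   # (1, 1024)
emb_256 = F.normalize(emb[:, :256], p=2, dim=1)             # 256-dim Matryoshka slice
```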
config_sentence_transformers.json ADDED
@@ -0,0 +1,16 @@
{
  "__version__": {
    "sentence_transformers": "3.4.1",
    "transformers": "4.49.0",
    "pytorch": "2.6.0+cu126"
  },
  "prompts": {
    "retrieval.query": "Represent the query for retrieving evidence documents: ",
    "retrieval.passage": "Represent the document for retrieval: ",
    "separation": "",
    "classification": "",
    "text-matching": ""
  },
  "default_prompt_name": null,
  "similarity_fn_name": "cosine"
}
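These named prompts mirror the LoRA tasks in `config.json`. Assuming the standard Sentence Transformers `prompt_name` mechanism, asymmetric retrieval would prepend the matching instruction on each side:

```python
query_emb = model.encode(
    ["Where is Nagpada located"],
    prompt_name="retrieval.query",    # prepends "Represent the query for retrieving evidence documents: "
)
passage_emb = model.encode(
    ["Nagpada is a neighbourhood in South Mumbai."],
    prompt_name="retrieval.passage",  # prepends "Represent the document for retrieval: "
)
print(model.similarity(query_emb, passage_emb))
```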
configuration_xlm_roberta.py ADDED
@@ -0,0 +1,130 @@
from typing import Any, Dict, List, Optional, Union

import torch
from transformers import PretrainedConfig


class XLMRobertaFlashConfig(PretrainedConfig):

    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 1024,
        num_hidden_layers: int = 24,
        num_attention_heads: int = 16,
        intermediate_size: int = 4096,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 8194,
        type_vocab_size: int = 1,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-05,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        position_embedding_type: str = "rotary",
        rotary_emb_base: float = 10000.0,
        use_cache: bool = True,
        use_reentrant: bool = False,
        classifier_dropout: Optional[float] = None,
        lora_adaptations: Optional[List[str]] = None,
        task_instructions: Optional[Dict[str, str]] = None,
        lora_rank: int = 4,
        lora_dropout_p: float = 0.0,
        lora_alpha: int = 1,
        lora_main_params_trainable: bool = False,
        load_trained_adapters: bool = False,
        use_flash_attn: bool = True,
        torch_dtype: Optional[Union[str, torch.dtype]] = None,
        emb_pooler: Optional[str] = None,
        matryoshka_dimensions: Optional[List[int]] = None,
        truncate_dim: Optional[int] = None,
        **kwargs: Dict[str, Any],
    ):
        """
        Initialize the XLMRobertaFlashConfig configuration.

        Args:
            vocab_size (int): Size of the vocabulary.
            hidden_size (int): Dimensionality of the encoder layers and the pooler layer.
            num_hidden_layers (int): Number of hidden layers in the Transformer encoder.
            num_attention_heads (int): Number of attention heads for each attention layer in the Transformer encoder.
            intermediate_size (int): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer.
            hidden_act (str): The activation function to use.
            hidden_dropout_prob (float): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob (float): The dropout ratio for the attention probabilities.
            max_position_embeddings (int): The maximum length of the position embeddings.
            type_vocab_size (int): The vocabulary size of the token type ids.
            initializer_range (float): The standard deviation for initializing all weight matrices.
            layer_norm_eps (float): The epsilon used by the layer normalization layers.
            pad_token_id (int): The ID of the padding token.
            bos_token_id (int): The ID of the beginning-of-sequence token.
            eos_token_id (int): The ID of the end-of-sequence token.
            position_embedding_type (str): Type of position embeddings. Options are 'absolute', 'alibi', or 'rotary'.
            rotary_emb_base (float): Base for rotary embeddings.
            use_cache (bool): Whether or not the model should return the last key/values attentions (not used by all models).
            use_reentrant (bool): Whether or not the model should enable the 'use_reentrant' flag in gradient checkpointing.
            classifier_dropout (Optional[float]): The dropout ratio for the classification head.
            lora_adaptations (Optional[List[str]]): LoRA adaptations configuration.
            task_instructions (Optional[Dict[str, str]]): Instruction prompts keyed by task name.
            lora_rank (int): Rank for LoRA adaptations.
            lora_dropout_p (float): Dropout probability for LoRA adaptations.
            lora_alpha (int): Alpha parameter for LoRA.
            lora_main_params_trainable (bool): Whether to make the main model parameters trainable when using LoRA.
            load_trained_adapters (bool): Whether to load trained adapters.
            use_flash_attn (bool): Whether to use FlashAttention.
            torch_dtype (Optional[Union[str, torch.dtype]]): Data type for the tensors.
            emb_pooler (Optional[str]): Pooling layer configuration.
            matryoshka_dimensions (Optional[List[int]]): Configuration for matryoshka dimension reduction.
            truncate_dim (Optional[int]): Dimension to truncate embeddings to, if any.
            **kwargs (Dict[str, Any]): Additional keyword arguments passed to the configuration.
        """

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.rotary_emb_base = rotary_emb_base
        self.use_cache = use_cache
        self.use_reentrant = use_reentrant
        self.classifier_dropout = classifier_dropout
        self.load_trained_adapters = load_trained_adapters
        self.lora_adaptations = lora_adaptations
        self.task_instructions = task_instructions
        self.lora_rank = lora_rank
        self.lora_dropout_p = lora_dropout_p
        self.lora_alpha = lora_alpha
        self.lora_main_params_trainable = lora_main_params_trainable
        self.use_flash_attn = use_flash_attn
        self.emb_pooler = emb_pooler
        self.matryoshka_dimensions = matryoshka_dimensions
        self.truncate_dim = truncate_dim
        # Resolve a string dtype name (e.g. "bfloat16") to the torch.dtype; leave an
        # actual torch.dtype instance (or None) untouched. The isinstance guard avoids
        # hasattr() raising TypeError when a torch.dtype is passed directly.
        if (
            isinstance(torch_dtype, str)
            and hasattr(torch, torch_dtype)
            and type(getattr(torch, torch_dtype)) is torch.dtype
        ):
            self.torch_dtype = getattr(torch, torch_dtype)
        else:
            self.torch_dtype = torch_dtype
        # FlashAttention requires CUDA; fall back to float32 on CPU or when disabled.
        if not self.use_flash_attn or not torch.cuda.is_available():
            self.torch_dtype = torch.float32
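For reference, a hedged sketch of constructing this config in code with the non-default values from `config.json` above (purely illustrative; the checkpoint loads these automatically via `AutoConfig`):

```python
cfg = XLMRobertaFlashConfig(
    lora_adaptations=["retrieval.query", "retrieval.passage", "separation",
                      "classification", "text-matching"],
    task_instructions={
        "retrieval.query": "Represent the query for retrieving evidence documents: ",
        "retrieval.passage": "Represent the document for retrieval: ",
        "separation": "", "classification": "", "text-matching": "",
    },
    load_trained_adapters=True,
    rotary_emb_base=20000.0,  # config.json overrides the 10000.0 default
    matryoshka_dimensions=[32, 64, 128, 256, 512, 768, 1024],
    torch_dtype="bfloat16",
)
```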
custom_st.py
ADDED
|
@@ -0,0 +1,229 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
from io import BytesIO
|
| 5 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from torch import nn
|
| 9 |
+
from transformers import AutoConfig, AutoModel, AutoTokenizer
|
| 10 |
+
|
| 11 |
+
logger = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class Transformer(nn.Module):
|
| 15 |
+
"""Huggingface AutoModel to generate token embeddings.
|
| 16 |
+
Loads the correct class, e.g. BERT / RoBERTa etc.
|
| 17 |
+
|
| 18 |
+
Args:
|
| 19 |
+
model_name_or_path: Huggingface models name
|
| 20 |
+
(https://huggingface.co/models)
|
| 21 |
+
max_seq_length: Truncate any inputs longer than max_seq_length
|
| 22 |
+
model_args: Keyword arguments passed to the Huggingface
|
| 23 |
+
Transformers model
|
| 24 |
+
tokenizer_args: Keyword arguments passed to the Huggingface
|
| 25 |
+
Transformers tokenizer
|
| 26 |
+
config_args: Keyword arguments passed to the Huggingface
|
| 27 |
+
Transformers config
|
| 28 |
+
cache_dir: Cache dir for Huggingface Transformers to store/load
|
| 29 |
+
models
|
| 30 |
+
do_lower_case: If true, lowercases the input (independent if the
|
| 31 |
+
model is cased or not)
|
| 32 |
+
tokenizer_name_or_path: Name or path of the tokenizer. When
|
| 33 |
+
None, then model_name_or_path is used
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
save_in_root: bool = True
|
| 37 |
+
|
| 38 |
+
def __init__(
|
| 39 |
+
self,
|
| 40 |
+
model_name_or_path: str,
|
| 41 |
+
max_seq_length: int = None,
|
| 42 |
+
model_args: Dict[str, Any] = None,
|
| 43 |
+
tokenizer_args: Dict[str, Any] = None,
|
| 44 |
+
config_args: Dict[str, Any] = None,
|
| 45 |
+
cache_dir: str = None,
|
| 46 |
+
do_lower_case: bool = False,
|
| 47 |
+
tokenizer_name_or_path: str = None,
|
| 48 |
+
**kwargs,
|
| 49 |
+
) -> None:
|
| 50 |
+
super().__init__()
|
| 51 |
+
self.config_keys = ["max_seq_length", "do_lower_case"]
|
| 52 |
+
self.do_lower_case = do_lower_case
|
| 53 |
+
if model_args is None:
|
| 54 |
+
model_args = {}
|
| 55 |
+
if tokenizer_args is None:
|
| 56 |
+
tokenizer_args = {}
|
| 57 |
+
if config_args is None:
|
| 58 |
+
config_args = {}
|
| 59 |
+
|
| 60 |
+
if kwargs.get("backend", "torch") != "torch":
|
| 61 |
+
logger.warning(
|
| 62 |
+
f'"jinaai/jina-embeddings-v3" is currently not compatible with the {kwargs["backend"]} backend. '
|
| 63 |
+
'Continuing with the "torch" backend.'
|
| 64 |
+
)
|
| 65 |
+
|
| 66 |
+
self.config = AutoConfig.from_pretrained(model_name_or_path, **config_args, cache_dir=cache_dir)
|
| 67 |
+
|
| 68 |
+
self._lora_adaptations = self.config.lora_adaptations
|
| 69 |
+
if (
|
| 70 |
+
not isinstance(self._lora_adaptations, list)
|
| 71 |
+
or len(self._lora_adaptations) < 1
|
| 72 |
+
):
|
| 73 |
+
raise ValueError(
|
| 74 |
+
f"`lora_adaptations` must be a list and contain at least one element"
|
| 75 |
+
)
|
| 76 |
+
self._adaptation_map = {
|
| 77 |
+
name: idx for idx, name in enumerate(self._lora_adaptations)
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
self.default_task = model_args.pop('default_task', None)
|
| 81 |
+
|
| 82 |
+
self.auto_model = AutoModel.from_pretrained(model_name_or_path, config=self.config, cache_dir=cache_dir, **model_args)
|
| 83 |
+
|
| 84 |
+
if max_seq_length is not None and "model_max_length" not in tokenizer_args:
|
| 85 |
+
tokenizer_args["model_max_length"] = max_seq_length
|
| 86 |
+
self.tokenizer = AutoTokenizer.from_pretrained(
|
| 87 |
+
tokenizer_name_or_path if tokenizer_name_or_path is not None else model_name_or_path,
|
| 88 |
+
cache_dir=cache_dir,
|
| 89 |
+
**tokenizer_args,
|
| 90 |
+
)
|
| 91 |
+
|
| 92 |
+
# No max_seq_length set. Try to infer from model
|
| 93 |
+
if max_seq_length is None:
|
| 94 |
+
if (
|
| 95 |
+
hasattr(self.auto_model, "config")
|
| 96 |
+
and hasattr(self.auto_model.config, "max_position_embeddings")
|
| 97 |
+
and hasattr(self.tokenizer, "model_max_length")
|
| 98 |
+
):
|
| 99 |
+
max_seq_length = min(self.auto_model.config.max_position_embeddings, self.tokenizer.model_max_length)
|
| 100 |
+
|
| 101 |
+
self.max_seq_length = max_seq_length
|
| 102 |
+
|
| 103 |
+
if tokenizer_name_or_path is not None:
|
| 104 |
+
self.auto_model.config.tokenizer_class = self.tokenizer.__class__.__name__
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
@property
|
| 108 |
+
def default_task(self):
|
| 109 |
+
return self._default_task
|
| 110 |
+
|
| 111 |
+
@default_task.setter
|
| 112 |
+
def default_task(self, task: Union[None, str]):
|
| 113 |
+
self._validate_task(task)
|
| 114 |
+
self._default_task = task
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def _validate_task(self, task: str):
|
| 118 |
+
if task and task not in self._lora_adaptations:
|
| 119 |
+
raise ValueError(
|
| 120 |
+
f"Unsupported task '{task}'. "
|
| 121 |
+
f"Supported tasks are: {', '.join(self.config.lora_adaptations)}. "
|
| 122 |
+
f"Alternatively, don't pass the `task` argument to disable LoRA."
|
| 123 |
+
)
|
| 124 |
+
|
| 125 |
+
    def forward(
        self, features: Dict[str, torch.Tensor], task: Optional[str] = None
    ) -> Dict[str, torch.Tensor]:
        """Returns token_embeddings, cls_token"""
        self._validate_task(task)
        task = task or self.default_task
        adapter_mask = None
        if task:
            task_id = self._adaptation_map[task]
            num_examples = features['input_ids'].size(0)
            adapter_mask = torch.full(
                (num_examples,), task_id, dtype=torch.int32, device=features['input_ids'].device
            )

        lora_arguments = (
            {"adapter_mask": adapter_mask} if adapter_mask is not None else {}
        )
        features.pop('prompt_length', None)
        output_states = self.auto_model.forward(**features, **lora_arguments, return_dict=False)
        output_tokens = output_states[0]
        features.update({"token_embeddings": output_tokens, "attention_mask": features["attention_mask"]})
        return features

    def get_word_embedding_dimension(self) -> int:
        return self.auto_model.config.hidden_size
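
    # tokenize() accepts three input shapes: a flat list of strings, a list of
    # single-key dicts ({key: text}, with the keys echoed back under
    # "text_keys"), or a list of (text_a, text_b) pairs tokenized as two
    # parallel columns.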
    def tokenize(
        self,
        texts: Union[List[str], List[dict], List[Tuple[str, str]]],
        padding: Union[str, bool] = True
    ) -> Dict[str, torch.Tensor]:
        """Tokenizes a text and maps tokens to token-ids"""
        output = {}
        if isinstance(texts[0], str):
            to_tokenize = [texts]
        elif isinstance(texts[0], dict):
            to_tokenize = []
            output["text_keys"] = []
            for lookup in texts:
                text_key, text = next(iter(lookup.items()))
                to_tokenize.append(text)
                output["text_keys"].append(text_key)
            to_tokenize = [to_tokenize]
        else:
            batch1, batch2 = [], []
            for text_tuple in texts:
                batch1.append(text_tuple[0])
                batch2.append(text_tuple[1])
            to_tokenize = [batch1, batch2]

        # Strip leading/trailing whitespace
        to_tokenize = [[str(s).strip() for s in col] for col in to_tokenize]

        # Lowercase
        if self.do_lower_case:
            to_tokenize = [[s.lower() for s in col] for col in to_tokenize]

        output.update(
            self.tokenizer(
                *to_tokenize,
                padding=padding,
                truncation="longest_first",
                return_tensors="pt",
                max_length=self.max_seq_length,
            )
        )
        return output
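
    # Persistence: save() writes the backbone, tokenizer, and the two values in
    # config_keys to sentence_bert_config.json; load() finds that file (falling
    # back to legacy names), strips any trust_remote_code entries, and
    # re-invokes the constructor with its contents.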
    def get_config_dict(self) -> Dict[str, Any]:
        return {key: self.__dict__[key] for key in self.config_keys}

    def save(self, output_path: str, safe_serialization: bool = True) -> None:
        self.auto_model.save_pretrained(output_path, safe_serialization=safe_serialization)
        self.tokenizer.save_pretrained(output_path)

        with open(os.path.join(output_path, "sentence_bert_config.json"), "w") as fOut:
            json.dump(self.get_config_dict(), fOut, indent=2)

    @classmethod
    def load(cls, input_path: str) -> "Transformer":
        # Old classes used other config names than 'sentence_bert_config.json'
        for config_name in [
            "sentence_bert_config.json",
            "sentence_roberta_config.json",
            "sentence_distilbert_config.json",
            "sentence_camembert_config.json",
            "sentence_albert_config.json",
            "sentence_xlm-roberta_config.json",
            "sentence_xlnet_config.json",
        ]:
            sbert_config_path = os.path.join(input_path, config_name)
            if os.path.exists(sbert_config_path):
                break

        with open(sbert_config_path) as fIn:
            config = json.load(fIn)
        # Don't allow configs to set trust_remote_code
        if "model_args" in config and "trust_remote_code" in config["model_args"]:
            config["model_args"].pop("trust_remote_code")
        if "tokenizer_args" in config and "trust_remote_code" in config["tokenizer_args"]:
            config["tokenizer_args"].pop("trust_remote_code")
        if "config_args" in config and "trust_remote_code" in config["config_args"]:
            config["config_args"].pop("trust_remote_code")
        return cls(model_name_or_path=input_path, **config)
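
A minimal usage sketch for the module above (not part of this commit; the task
name is illustrative and must match an entry in the config's lora_adaptations
list):

    from sentence_transformers import SentenceTransformer

    # trust_remote_code is required so that custom_st.Transformer is loaded.
    model = SentenceTransformer("jinaai/jina-embeddings-v3", trust_remote_code=True)

    # `task` is forwarded to custom_st.Transformer.forward (see modules.json
    # below), selecting the matching LoRA adapter; omit it to run without LoRA.
    embeddings = model.encode(["What is LoRA?"], task="retrieval.query")
    print(embeddings.shape)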
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:689e21707a14e065ab0824f33c8935d88143cee0625546bd872d7216cb1fde5f
size 1144685320
modules.json
ADDED
@@ -0,0 +1,23 @@
[
  {
    "idx": 0,
    "name": "transformer",
    "path": "",
    "type": "custom_st.Transformer",
    "kwargs": [
      "task"
    ]
  },
  {
    "idx": 1,
    "name": "pooler",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  },
  {
    "idx": 2,
    "name": "normalizer",
    "path": "2_Normalize",
    "type": "sentence_transformers.models.Normalize"
  }
]
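
modules.json wires the inference pipeline: the custom transformer (which, via
"kwargs": ["task"], receives the encode-time `task` argument), the pooling
module configured under 1_Pooling, and L2 normalization under 2_Normalize. A
sketch of the equivalent manual composition, assuming the standard
sentence-transformers module API (and CPU execution; otherwise move the
tokenized tensors to model.device):

    from sentence_transformers import SentenceTransformer

    model = SentenceTransformer("jinaai/jina-embeddings-v3", trust_remote_code=True)
    transformer, pooler, normalizer = model[0], model[1], model[2]

    features = transformer.tokenize(["An example sentence"])
    features = transformer(features)   # adds "token_embeddings"
    features = pooler(features)        # pools tokens into "sentence_embedding"
    features = normalizer(features)    # L2-normalizes "sentence_embedding"
    print(features["sentence_embedding"].shape)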
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 8192,
  "do_lower_case": false
}
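
These two keys are exactly what custom_st.Transformer.load() reads back, so
loading this repo is equivalent to calling (sketch, with input_path standing
for the repo directory):

    module = custom_st.Transformer(model_name_or_path=input_path, max_seq_length=8192, do_lower_case=False)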
special_tokens_map.json
ADDED
@@ -0,0 +1,51 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
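
These are the standard XLM-RoBERTa special tokens: <s> doubles as BOS/CLS and
</s> as EOS/SEP. A quick check of how they frame inputs (sketch; the decoded
layout assumes the usual XLM-R post-processor):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("jinaai/jina-embeddings-v3")
    ids = tok("query text", "passage text")["input_ids"]
    print(tok.decode(ids))
    # roughly: '<s> query text</s></s> passage text</s>'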
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:aa7a6ad87a7ce8fe196787355f6af7d03aee94d19c54a5eb1392ed18c8ef451a
size 17082988
tokenizer_config.json
ADDED
@@ -0,0 +1,62 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "250001": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "extra_special_tokens": {},
  "mask_token": "<mask>",
  "max_length": 8192,
  "model_max_length": 8192,
  "pad_to_multiple_of": null,
  "pad_token": "<pad>",
  "pad_token_type_id": 0,
  "padding_side": "right",
  "sep_token": "</s>",
  "stride": 0,
  "tokenizer_class": "XLMRobertaTokenizerFast",
  "truncation_side": "right",
  "truncation_strategy": "longest_first",
  "unk_token": "<unk>"
}
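
The tokenizer is configured for the same 8192-token window as
sentence_bert_config.json, truncating on the right with the longest_first
strategy. A small sanity check (sketch):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("jinaai/jina-embeddings-v3")
    assert tok.model_max_length == 8192
    ids = tok("word " * 10000, truncation=True)["input_ids"]
    assert len(ids) <= 8192  # clipped at the configured maximum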