Update skill assignment embedding model
Browse files
- 1_Pooling/config.json +10 -0
- README.md +638 -0
- config.json +43 -0
- config_sentence_transformers.json +10 -0
- configuration.py +145 -0
- model.safetensors +3 -0
- modeling.py +1418 -0
- modules.json +14 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +37 -0
- tokenizer.json +0 -0
- tokenizer_config.json +63 -0
- vocab.txt +0 -0
1_Pooling/config.json
ADDED
@@ -0,0 +1,10 @@
+{
+    "word_embedding_dimension": 1024,
+    "pooling_mode_cls_token": true,
+    "pooling_mode_mean_tokens": false,
+    "pooling_mode_max_tokens": false,
+    "pooling_mode_mean_sqrt_len_tokens": false,
+    "pooling_mode_weightedmean_tokens": false,
+    "pooling_mode_lasttoken": false,
+    "include_prompt": true
+}
README.md
ADDED
@@ -0,0 +1,638 @@
+---
+tags:
+- sentence-transformers
+- sentence-similarity
+- feature-extraction
+- generated_from_trainer
+- dataset_size:11779
+- loss:TripletLoss
+base_model: Alibaba-NLP/gte-large-en-v1.5
+widget:
+- source_sentence: "One day there was a boy called teddy (Ted Andrew)and his favorite\
+    \ game \nwas soccer. One day on his walk home from school he saw a glowing \n\
+    soccer ball on the side of the road! It was just a normal soccer ball but \nglowing\
+    \ so he decided to touch it he walked,he bent,he touched and \nsuddenly he was\
+    \ been transported to the soccer finals.\nHe was in a real life soccer game, it\
+    \ had not started yet but somehow he \nwas running out on the pitch with them.What\
+    \ is happening?he thought,is \nthis a dream he was asking himself this but knew\
+    \ it was real.He checked all\nthe players and realized van dijk was missing then\
+    \ he looked down and \nrealized that they were his then he realized he was playing\
+    \ as him!\nSoon later the Match started and he was in it then konate passed the\
+    \ ball to\nhim so he passed it to Mac Alister, who lobbed it down the wing to\
+    \ salah, \nwho counted it to jota who headed it in! he couldn’t believe it he\
+    \ was in an \nactual soccer Match. It was 1-0 the other teams coach had called\
+    \ time out \nso he had a bit to think about this odd situation,then he just decided\
+    \ to have\nfun and play.\nSzoboszlai took the starting kick and passed it back\
+    \ to him so van dijk (Ted \nAndrew)raced down the pitch then he realized he had\
+    \ the skill of van dijk he \nkicked and won a corner,so Trent took and it was\
+    \ perfect teddy timed it with\nvan dijk’s skill and scored! now it was half time.\n\
+    Half time stared of pretty normally the other side passed it back and tried to\n\
+    keep possession but gravenberch the play maker stole it and passed to \nMac Alister\
+    \ who passed it back to Roberson who made some forward \nthrust and and passed\
+    \ it to Diaz. Now it was really close to another goal he \ncould feel it and sure\
+    \ enough Diaz scored,it was back to kick of the other \nteam was pushing up and\
+    \ took a good shot but Alisson did a great dive and\nsaved it now it was 3-0 and\
+    \ the Match was almost over, 3 minutes left they \nkept possession and won after\
+    \ they all celebrated he touched the mach ball\nand he got transported back and\
+    \ every day after that he would go play with \nthe team,The end.\n"
+  sentences:
+  - 'Narrative Storytelling Skills: Narrative Writing'
+  - 'Essay Organization Skills: I can write three or more sentences with my ideas
+    in order'
+  - 'Source Analysis Skills: I can compose source analysis using appropriate sentence
+    structure and paragraphs'
+- source_sentence: "Chatter and laughter danced through the bright woods on this Monday\
+    \ morning. The sun \nshone on glistening leaves, dark trunks and all kinds of\
+    \ wildlife skittering through the dense \nundergrowth. Sensing the humans, the\
+    \ mice turned and ran as Eliabeth raced past, Rose, \nAlex and Eugene not far\
+    \ behind. \"We have to make it back or breakfast will be cold!\" \nElizabeth panted,\
+    \ slowing down. \"But I'm so tired!\" Alex, her friend complained. \
+    \ \n\"Oh fine. Let's rest.\" The group sat on a log,\
+    \ slightly damp with morning dew, and admired \ntheir surrounds. Small mammals\
+    \ peeked out of holes and birds twittered, gliding on \ncarefree wings. The earthy\
+    \ scent of the bush mingled with that of fragrant petals, taken \nfrom rambling\
+    \ bushes. Eugene stood, and proclaimed that it was time to go. He looked \naround\
+    \ for the path, but it wasn't there anymore. \nAfter 10 minutes of panicked searching,\
+    \ Alex realised that they were getting nowhere. \"We \nknow the path was there,\
+    \ but not anymore. Let's just keep going the way we were. Then we \ncan make it\
+    \ back it back to camp and figure out what happened.\" The others all agreed,\
+    \ so \nthe left the log and strode onwards. After walking for nearly 350 metres,\
+    \ they all realised \nthat not one of them recognised their surroundings. Gone\
+    \ were the bright evergreen trees, \nand the cheeky mice. All that was there,\
+    \ ahead and behind them was barren, desolate \ntrees, all leaves long reduced\
+    \ to soil, and all animals under it. Ruby, who was afraid, \nwhimpered shakily\
+    \ \"Guys? What are we going to do now?\" No one knew. Everyone wanted \nto. \n\
+    Stuck in the other forest for hours, they started to hallucinate. At least, they\
+    \ hoped they \nwere hallucinating. They saw flames appear in the distance, only\
+    \ to vanish in seconds, long \nfingered creatures in rags, hungry eyes black and\
+    \ empty dash around trees, and most \nscarily, someone else, following them on\
+    \ the horizon. Slowly creeping up on them. \nStealthily. As if they thought they\
+    \ hadn't been seen. \nDesperate to escape, the children reasone that they might\
+    \ get help from the mystery \nperson, and could probably knock them out if they\
+    \ had to. So, with this in mind, they made \ntheir first good decision. Upon meeting\
+    \ them, they learned the man was called David, and \nhe had been stuck here for\
+    \ several years. He took them to his shelter, and that was where \nhe revealed\
+    \ who he really was. \n\"I am not David, nor am I your ally. I am certainly not\
+    \ your friend, but I will give you a \nchance to escape my creation. Yes, this\
+    \ enchanted forest is mine, and yes, I could kill you \nin less than a second.\
+    \ But kindly ignore that, as if you become scared I will not give you \nyour chance\
+    \ to escape.\" \nHe disappeared on the spot, leaving only an egg, a massive,\
+    \ speckled red egg. It was as \ntall as Eugene, who was 14. Suddenly heat swarmed\
+    \ out of the egg and through the cave \nthey were in. It rocked and cracked, and\
+    \ a baby dragon tumbled out. The kids cowered \nagainst the wall, terrified of\
+    \ the baby behemoth in front of them, capable of reducing them \nto cinders. However,\
+    \ the 'baby behemoth' thought they were it's siblings, so did not eat \nthem on\
+    \ the spot. They realised that this dragon must be their chance, so accepted it\
+    \ and \ncalled it Fin. \n After leaving they cave, Fin in tow, the kids realised\
+    \ that even with a dragon, they still didn't \nknow how to escape, as Fin wasn't\
+    \ strong enough to carry them in the air. They did know \nthey were thirsty though.\
+    \ They found a spring, and upon drinking it gained strength several \ntimes their\
+    \ usual levels. While uprooting a tree, in her anger at being here alone and without\
+    \ \nhelp or escape, Ruby had a stroke of inspiration. She ran to her friends,\
+    \ the first smile since \nthey got here on her face. \"If we give Fin (Here Fin\
+    \ stood up with a curious snarf sound) \nsome strength water, maybe he could fly\
+    \ us out of here!\" Excited, they gave Fin strength \nwater and piled on his back.\
+    \ Then they took off. \nWind whistled past their ears, cold air biting through\
+    \ their summer clothes. The groud \nspread out like a map under them, allowing\
+    \ them to see every minute detail, including the \npath. The path! Their exit!\
+    \ The told Fin to go down and- \nThe were stopped. By another dragon. This one\
+    \ at least five times Fin's size and angry. \nVery angry. Fin ducked, weaved.\
+    \ Breathed flame. The kids hang on for dear life, fire and \nwind playing havoc\
+    \ with their senses. Flame blinded them, burnt them. Wind threatened to \ntoss\
+    \ them off Finn, down on to the harsh ground, where they would never get up. They\
+    \ \nknew there was one way out. \nFin dove down past the talons of the other dragon,\
+    \ the sharp limestone green scales, nearly \nat the path. Closer, closer, closer.\
+    \ At the last second he leveled with the ground, barrelling \nthrough the portal\
+    \ from this world, back the children's. They would have a lot of explaining \n\
+    to do back at camp. \nThey flew into camp on Fin's back, the strength water just\
+    \ wearing off. All the adults said \nthey would need some coffee for this explanation,\
+    \ so once they had their coffee, the kids \ntold their story, the story you just\
+    \ read. Fin went on to become massive, and live on an \nisland in the bermuda\
+    \ triangle, though he occasionally went back for visits to his siblings. \nThey\
+    \ all lived, maybe not happily, but with an excellent story and even more excellent\
+    \ \nfriends. \n"
+  sentences:
+  - "Figurative Language Skills: Through\r\nuse of descriptive writing, word choice\
+    \ and attempts at figurative language\r\n(similes, personification, onomatopoeia,\
+    \ etc), is able to show the reader the\r\nstory, not tell it."
+  - 'Describing Characteristics: Uses simple adjectives to add meaning by describing common
+    qualities or features e.g. small, long, red'
+  - 'Em Dash Skills: I can use an em dash for parenthetical information'
+- source_sentence: "Know Your Value and Embrace Your Power\nJournalist Sarah Stephan\
+    \ proves how the film Legally \nBlonde is still relevant after 2 decades of it\
+    \ being released. \nFrom the early 2000s to today, Legally Blonde remains to attract\
+    \ audiences with its message of empowerment and resilience. Now, get ready to\
+    \ rediscover the continuing wisdom and inspiration it offers for a new generation.\
+    \ Girls unite through their feminine competence, forming a bond that keeps them\
+    \ together. In today’s world, Legally Blonde is an important lesson for young\
+    \ girls that we can be much more than the blonde stereotype.\nIn the movie Legally\
+    \ Blonde there are many inspirational messages throughout. One main one is Femininity\
+    \ and Competence. Elle Woods embodies feminine traits while also proving she can\
+    \ be competent. In the graduation scene Elle Woods declared, “first impressions\
+    \ are not always correct, you must always have faith in people, and most importantly,\
+    \ you must always have faith in yourself. The technique used in this scene is\
+    \ close up shots. This technique creates an intimate connection between Elle and\
+    \ the viewers, making her journey of hard work more impactful. This scene in legally\
+    \ blonde deliver timeless messages about preservice, self-belief, breaking stereotypes\
+    \ and empowerment which all continue to strongly resonate in todays world. Another\
+    \ scene in legally blonde that represent Femininity and Competence is the graduation\
+    \ scene. In this scene she is seen wearing a hot pink dress and bag, standing\
+    \ out. This fashion choice symbolizes some key things. Authenticity (staying true\
+    \ to herself by wearing pink), breaking stereotypes (challenging stereotypes of\
+    \ femininity), and empowerment which plays a key role in young peoples life today.\
+    \ These messages are the reason Legally Blonde is still relevant to young people.\n\
+    In the world of Legally Blonde, the strength of sisterhood takes centre stage\
+    \ as Elle Woods navigates the unexpected challenges of Harvard Law School. Elle\
+    \ Woods portrays the concepts of girls sticking together by strengthening her\
+    \ and her sorority sisters through the hard times. When Elle is preparing for\
+    \ her first big court \n\ncase, she rallies the support of her sorority sisters\
+    \ to help prepare for a pivotal courtroom moment. The technique used in this scene\
+    \ is the music. The music is an uplifting and motivating song that is used to\
+    \ underline the positive energy and determination of Elle and her friends. This\
+    \ scene is essential in the film for its emotional depth, character development,\
+    \ and thematic richness. Throughout Elle’s time at Law School, she is underestimated\
+    \ by classmates and professors, until she met Paulette who encourages her. You\
+    \ can see this in the film when Elle goes to the nail salon and breaks down to\
+    \ Paulette and Vivian gives her advice to steal back Warner and prove to him that\
+    \ he’s missing out. This scene is truly empowering that is shows girls should\
+    \ stick together and help each other out. This scene should prove to young girls\
+    \ watching the film that they should always stick up for one another."
+  sentences:
+  - 'Identifying Figurative Language: Figurative Language'
+  - 'Adjective Detail Grouping Skills: I can use adjectival groups or phrases to give
+    important detail about a noun'
+  - 'Theme Development Skills: I can develop the theme throughout the poem.'
+- source_sentence: "The water of life\nBeep Beep Beep! The obnoxious noise of John's\
+    \ alarm clock woke him up, this morning\nwas particularly quiet, then John realised\
+    \ that it was the water of life worship day. The water\nof life was a liquid with\
+    \ the properties of water but gives anyone who srinks it immortality, it\nis in\
+    \ the middle of the town and every year the citizens go there and have ten minutes\
+    \ of\nsilence it is because hundreds of years ago the town was targeted by people\
+    \ as the majority\nof the citizens came there to escape from violence and such\
+    \ and one man drank the water\nand with immortality he survived the swarms of\
+    \ attackers and scared them off. If someone\nwas to miss the water of life worship\
+    \ day they would be disrespected by most of the towns\npeople.\nJohn rushed to\
+    \ the place when they when about to beginthe ceremony and got there just on\n\
+    time. He tripped over someones bag and fell into the well with the water of life.\
+    \ As the town\nwas in the desert John never got swimming lessons and was not a\
+    \ good swimmer,\nstruggling in the water, breathing frantically and trying with\
+    \ all his force to get out he\naccidently swallowed a mouthful of water when he\
+    \ began to slow down running out of\nstrength he felt like he was being pulled\
+    \ up and he was. By what? how? John was now\nstanding on the ground fully unharmed.\
+    \ Everyone was staring at him, soe random bloke\nfrom the crowd charged at his\
+    \ back with a pocket knife, John felt an impact on his back but\nit didn't hurt\
+    \ at all. John had the sudden realisation, everyone is now after him.\nhe ran\
+    \ as fast as he could but people were using cars to catch up to him. He saw something\n\
+    in the corner of his eye and it was a cave. Jhon had no Idea how to get there\
+    \ without being\nseen. He tried his luck by just stoppingand not getting seen.\
+    \ Jhon whas ran over by the car\nas if no one saw him, he felt weightless like\
+    \ he was a floating mind and he overheard a bloke\nsay \"Where the hell did he\
+    \ go?\"\n\"I have no idea,\" another voice said \"he is better off alone in the\
+    \ desert than in the town\nanyway\"\n\"I guess so\" said the first voice but with\
+    \ dissapointment\n\"How about we go back now?\"\n\"Sure\"\nJhon was finally able\
+    \ to breathe normally. He started a fire in the cave to get comfortable.\nJohn\
+    \ was trying to find food, in the distance the engines roared and John started\
+    \ to breathe\nquickly again he started breathing quick again and the engine noise\
+    \ was getting closer and\ncloser and he hear another voice yelling over the engines\
+    \ \"I see him!\"\n\"Get him. What are you doing? Waiting for the sun to explode?\"\
+    \ And soon enough John was\nrunning but so tired and lethargic he couldn't run\
+    \ far so he was shoved into the back of the\ncar.\n\"What happened?\" Said John\
+    \ sleepily\n\"Nothing, now answer us or you will be locked up.\"\n\"What?\" Replied\
+    \ John\n \"I said, answer us or you will be locked up!\" Yelled the man. Jhon\
+    \ was confused and\nmanaged to run away with ease but everyone was after him.\n\
+    He had escaped although he didn't feel free, he met some people who had also escaped\n\
+    form the town and they had made a camp that they planned to one day be a town.\
+    \ Suddenly\nJhon's phone rung and it was his friend \"where are you mate? We've\
+    \ been looking for you.\"\nSaid his friend\n\"I've\nescaped you will hold me hostage!\"\
+    \ Yelled John and hung up, he ran far away from the\ncamp to smash his phone so\
+    \ no one can track him. Jhon arrived back to his camp and got\nsome rest. Although\
+    \ he is not very happy there he is living a decent life. He is living a\nfreedome\
+    \ once again.\n"
+  sentences:
+  - 'Spelling Skills: Spelling meaning - 6'
+  - 'Thesis Establishment Skills: Did the student clearly establish their thesis? '
+  - 'Identifying Figurative Language: Figurative Language'
+- source_sentence: What is the artefact? This is catter this affect is called a
+    cold Almond What are the features of the artefact? The features of this artefact
+    are it looks like a gold snake with inscribed writing on the inside Question
+    2 What aspect of Ancient Roman society does this artefact represent? This aspect
+    represents the 1st century AD What does the artefact tell us about Ancient Roman
+    Society? This artefact tells us about the 1st century AD The plantations keep
+    the stones gave them girls How does this artefact give us an understanding about
+    Ancient Roman society? it gives us an understanding about Ancient Roman society,
+    because the plantations keep slaves and gift the stones. and forced them to wear
+    it
+  sentences:
+  - 'Writing Convention Skills: Conventions of Writing'
+  - 'Sentence Construction Skills: I can construct basic sentences'
+  - 'Essay Organization Skills: anything'
+pipeline_tag: sentence-similarity
+library_name: sentence-transformers
+---
+
+# SentenceTransformer based on Alibaba-NLP/gte-large-en-v1.5
+
+This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [Alibaba-NLP/gte-large-en-v1.5](https://huggingface.co/Alibaba-NLP/gte-large-en-v1.5). It maps sentences & paragraphs to a 1024-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.
+
+## Model Details
+
+### Model Description
+- **Model Type:** Sentence Transformer
+- **Base model:** [Alibaba-NLP/gte-large-en-v1.5](https://huggingface.co/Alibaba-NLP/gte-large-en-v1.5) <!-- at revision 104333d6af6f97649377c2afbde10a7704870c7b -->
+- **Maximum Sequence Length:** 8192 tokens
+- **Output Dimensionality:** 1024 dimensions
+- **Similarity Function:** Cosine Similarity
+<!-- - **Training Dataset:** Unknown -->
+<!-- - **Language:** Unknown -->
+<!-- - **License:** Unknown -->
+
+### Model Sources
+
+- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
+- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
+- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
+
+### Full Model Architecture
+
+```
+SentenceTransformer(
+  (0): Transformer({'max_seq_length': 8192, 'do_lower_case': False}) with Transformer model: NewModel
+  (1): Pooling({'word_embedding_dimension': 1024, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
+)
+```
+
+## Usage
+
+### Direct Usage (Sentence Transformers)
+
+First install the Sentence Transformers library:
+
+```bash
+pip install -U sentence-transformers
+```
+
+Then you can load this model and run inference.
+```python
+from sentence_transformers import SentenceTransformer
+
+# Download from the 🤗 Hub
+model = SentenceTransformer("dpanea/skill-assignment-transformer")
+# Run inference
+sentences = [
+    'What is the artefact? This is catter this affect is called a cold Almond What are the features of the artefact? The features of this artefact are it looks like a gold snake with inscribed writing on the inside Question 2 What aspect of Ancient Roman society does this artefact represent? This aspect represents the 1st century AD What does the artefact tell us about Ancient Roman Society? This artefact tells us about the 1st century AD The plantations keep the stones gave them girls How does this artefact give us an understanding about Ancient Roman society? it gives us an understanding about Ancient Roman society, because the plantations keep slaves and gift the stones. and forced them to wear it',
+    'Writing Convention Skills: Conventions of Writing',
+    'Sentence Construction Skills: I can construct basic sentences',
+]
+embeddings = model.encode(sentences)
+print(embeddings.shape)
+# [3, 1024]
+
+# Get the similarity scores for the embeddings
+similarities = model.similarity(embeddings, embeddings)
+print(similarities.shape)
+# [3, 3]
+```
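For the skill-assignment task this model was tuned for, a natural pattern is ranking candidate skill descriptions against a piece of student writing. A minimal sketch; the skill strings and writing sample are illustrative, and `trust_remote_code=True` is passed because the GTE base architecture ships custom modeling code:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("dpanea/skill-assignment-transformer", trust_remote_code=True)

# Hypothetical candidate skills, in the "<skill area>: <descriptor>" style of the training data.
skills = [
    "Narrative Storytelling Skills: Narrative Writing",
    "Essay Organization Skills: Essay Writing",
    "Identifying Figurative Language: Figurative Language",
]
writing_sample = "One day there was a boy called teddy and his favorite game was soccer..."

# Embed both sides, then rank the skills by cosine similarity to the sample.
sample_embedding = model.encode([writing_sample])
skill_embeddings = model.encode(skills)
scores = model.similarity(sample_embedding, skill_embeddings)[0]
best = int(scores.argmax())
print(f"Best-matching skill: {skills[best]} ({scores[best].item():.3f})")
```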
+
+<!--
+### Direct Usage (Transformers)
+
+<details><summary>Click to see the direct usage in Transformers</summary>
+
+</details>
+-->
+
+<!--
+### Downstream Usage (Sentence Transformers)
+
+You can finetune this model on your own dataset.
+
+<details><summary>Click to expand</summary>
+
+</details>
+-->
+
+<!--
+### Out-of-Scope Use
+
+*List how the model may foreseeably be misused and address what users ought not to do with the model.*
+-->
+
+<!--
+## Bias, Risks and Limitations
+
+*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
+-->
+
+<!--
+### Recommendations
+
+*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
+-->
+
+## Training Details
+
+### Training Dataset
+
+#### Unnamed Dataset
+
+* Size: 11,779 training samples
+* Columns: <code>sentence_0</code>, <code>sentence_1</code>, and <code>sentence_2</code>
+* Approximate statistics based on the first 1000 samples:
+  |         | sentence_0                                                                              | sentence_1                                                                         | sentence_2                                                                         |
+  |:--------|:----------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|
+  | type    | string                                                                                  | string                                                                             | string                                                                             |
+  | details | <ul><li>min: 124 tokens</li><li>mean: 615.96 tokens</li><li>max: 1566 tokens</li></ul> | <ul><li>min: 7 tokens</li><li>mean: 19.72 tokens</li><li>max: 69 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 19.55 tokens</li><li>max: 53 tokens</li></ul> |
+* Samples:
+  | sentence_0 | sentence_1 | sentence_2 |
+  |:-----------|:-----------|:-----------|
+  | <code>2024 POETRY FEATURE ARTICLE – SCAFFOLD - blank<br>Name: <br>Song Chosen: SET IT ALL FREE<br>Poem Chosen: STILL, I RISE<br>Common theme: These form together to give the message of overcoming challenges and rising above difficulties with confidence and strength.<br>]<br>THIS Scaffold could be submitted as your draft. <br>HEADLINE: It needs to be strong, catchy and stimulate the reader. Try for ‘ear appeal’ or ‘brain appeal’ if you can. Possibly use alliteration or a pun. Just use the title of your poem until you can think of a title for the article. FOCUS BLUB: A brief, gripping sentence or two that lets readers know more specifically what the article is about. It gives a sense of the style of your piece. / Voiceworks - Whispers Of Wisdom Discover the themes of resilience and empowerment in Scarlett Johanssons “set it all Free” and mya Angelou’s “still I rise” I will explore how these works help us to overcome adversity and embrace our true strength...</code> | <code>Emotionally Engaging Language: I can evoke an emotional response through emotive language.</code> | <code>Reference Formatting Skills: Formats the reference list/bibliography correctly.</code> |
+  | <code>Why is there no fuel for the next 500 kilometers? We need fuel and there is no way to turn back.This is such a bad time.We need fuel and i am gonna rage quit and drive us off the bridge if we can't get fuel any time soon pull over it's my turn, to drive you have been driving for the last hour and i want t go speeding, down this hill and get to the fuel station quicker, you drive way to slow and it is annoying me.Ok fine i'm pulling over.Finally ok i see that red car coming ,he wants to race and im racing him.ya i beat him but now we only have enough fuel for the next 200 km and the next fuel station is 250 km away i will drive until we run out of fuel then we will have to push and i'm paying for the fuel don't even think about paying for the fuel little brother.Ok time to push.No i am not pushing the car and you can not make me just because u are 1 year older than me does no mean can boss me around.Fine i will push lazy boy.What Why is the gas station shut down and the next one is 300k...</code> | <code>Essay Organization Skills: Essay Writing</code> | <code>Case Evaluation Skills: Does the student include discerning evaluation of ideas to support their case for positive change? </code> |
+  | <code>What is the artefact? the artefact is a gold armband. What are the features of the artefacts? the features on the arte fact it's a gold amband it looks like it beendigging to look like a snake rap around ur arm. you can see the snake scale's and and snake head on the amberd. Question 2 What aspect of Ancient Roman society does this artefact represent? the artefacts represent partion partion partian partian head tate were the richest people in human society it tells us that partions were the richest people in Aome Home society. patients were on of social What does the artefact tell us about Ancient Roman society? pyramid. they had all theexpertsn suf and they had Slaves How does this artefact give us an understanding about Ancient Roman society? the artefact gives us a understanding their were rich people and Cparthers) they had a late more money then all the others people in home society. 7</code> | <code>Spelling Visuals: Spelling visual - 4</code> | <code>Event Setting Visualization Skills: I can use technical vocabulary, contemporary language and images to create a sense of the event and the setting</code> |
+* Loss: [<code>TripletLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#tripletloss) with these parameters:
+  ```json
+  {
+      "distance_metric": "TripletDistanceMetric.EUCLIDEAN",
+      "triplet_margin": 5
+  }
+  ```
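For reference, a fine-tuning setup consistent with these loss parameters and the hyperparameters below would look roughly like this; the triplet rows are placeholders, not the actual 11,779-sample dataset:

```python
from datasets import Dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.losses import TripletLoss, TripletDistanceMetric

model = SentenceTransformer("Alibaba-NLP/gte-large-en-v1.5", trust_remote_code=True)

# Hypothetical (anchor, positive, negative) triplets matching the card's three columns.
train_dataset = Dataset.from_dict({
    "sentence_0": ["One day there was a boy called teddy and his favorite game was soccer..."],
    "sentence_1": ["Narrative Storytelling Skills: Narrative Writing"],
    "sentence_2": ["Reference Formatting Skills: Formats the reference list/bibliography correctly."],
})

# Euclidean triplet loss with margin 5, as reported above.
loss = TripletLoss(model, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin=5)

args = SentenceTransformerTrainingArguments(
    output_dir="skill-assignment-transformer",
    num_train_epochs=3,
    per_device_train_batch_size=4,
)
trainer = SentenceTransformerTrainer(model=model, args=args, train_dataset=train_dataset, loss=loss)
trainer.train()
```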
+
+### Training Hyperparameters
+#### Non-Default Hyperparameters
+
+- `eval_strategy`: steps
+- `per_device_train_batch_size`: 4
+- `per_device_eval_batch_size`: 4
+- `multi_dataset_batch_sampler`: round_robin
+
+#### All Hyperparameters
+<details><summary>Click to expand</summary>
+
+- `overwrite_output_dir`: False
+- `do_predict`: False
+- `eval_strategy`: steps
+- `prediction_loss_only`: True
+- `per_device_train_batch_size`: 4
+- `per_device_eval_batch_size`: 4
+- `per_gpu_train_batch_size`: None
+- `per_gpu_eval_batch_size`: None
+- `gradient_accumulation_steps`: 1
+- `eval_accumulation_steps`: None
+- `torch_empty_cache_steps`: None
+- `learning_rate`: 5e-05
+- `weight_decay`: 0.0
+- `adam_beta1`: 0.9
+- `adam_beta2`: 0.999
+- `adam_epsilon`: 1e-08
+- `max_grad_norm`: 1
+- `num_train_epochs`: 3
+- `max_steps`: -1
+- `lr_scheduler_type`: linear
+- `lr_scheduler_kwargs`: {}
+- `warmup_ratio`: 0.0
+- `warmup_steps`: 0
+- `log_level`: passive
+- `log_level_replica`: warning
+- `log_on_each_node`: True
+- `logging_nan_inf_filter`: True
+- `save_safetensors`: True
+- `save_on_each_node`: False
+- `save_only_model`: False
+- `restore_callback_states_from_checkpoint`: False
+- `no_cuda`: False
+- `use_cpu`: False
+- `use_mps_device`: False
+- `seed`: 42
+- `data_seed`: None
+- `jit_mode_eval`: False
+- `use_ipex`: False
+- `bf16`: False
+- `fp16`: False
+- `fp16_opt_level`: O1
+- `half_precision_backend`: auto
+- `bf16_full_eval`: False
+- `fp16_full_eval`: False
+- `tf32`: None
+- `local_rank`: 0
+- `ddp_backend`: None
+- `tpu_num_cores`: None
+- `tpu_metrics_debug`: False
+- `debug`: []
+- `dataloader_drop_last`: False
+- `dataloader_num_workers`: 0
+- `dataloader_prefetch_factor`: None
+- `past_index`: -1
+- `disable_tqdm`: False
+- `remove_unused_columns`: True
+- `label_names`: None
+- `load_best_model_at_end`: False
+- `ignore_data_skip`: False
+- `fsdp`: []
+- `fsdp_min_num_params`: 0
+- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
+- `fsdp_transformer_layer_cls_to_wrap`: None
+- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
+- `deepspeed`: None
+- `label_smoothing_factor`: 0.0
+- `optim`: adamw_torch
+- `optim_args`: None
+- `adafactor`: False
+- `group_by_length`: False
+- `length_column_name`: length
+- `ddp_find_unused_parameters`: None
+- `ddp_bucket_cap_mb`: None
+- `ddp_broadcast_buffers`: False
+- `dataloader_pin_memory`: True
+- `dataloader_persistent_workers`: False
+- `skip_memory_metrics`: True
+- `use_legacy_prediction_loop`: False
+- `push_to_hub`: False
+- `resume_from_checkpoint`: None
+- `hub_model_id`: None
+- `hub_strategy`: every_save
+- `hub_private_repo`: None
+- `hub_always_push`: False
+- `hub_revision`: None
+- `gradient_checkpointing`: False
+- `gradient_checkpointing_kwargs`: None
+- `include_inputs_for_metrics`: False
+- `include_for_metrics`: []
+- `eval_do_concat_batches`: True
+- `fp16_backend`: auto
+- `push_to_hub_model_id`: None
+- `push_to_hub_organization`: None
+- `mp_parameters`: 
+- `auto_find_batch_size`: False
+- `full_determinism`: False
+- `torchdynamo`: None
+- `ray_scope`: last
+- `ddp_timeout`: 1800
+- `torch_compile`: False
+- `torch_compile_backend`: None
+- `torch_compile_mode`: None
+- `include_tokens_per_second`: False
+- `include_num_input_tokens_seen`: False
+- `neftune_noise_alpha`: None
+- `optim_target_modules`: None
+- `batch_eval_metrics`: False
+- `eval_on_start`: False
+- `use_liger_kernel`: False
+- `liger_kernel_config`: None
+- `eval_use_gather_object`: False
+- `average_tokens_across_devices`: False
+- `prompts`: None
+- `batch_sampler`: batch_sampler
+- `multi_dataset_batch_sampler`: round_robin
+
+</details>
+
+### Training Logs
+| Epoch  | Step | Training Loss |
+|:------:|:----:|:-------------:|
+| 0.0340 | 100  | -             |
+| 0.0679 | 200  | -             |
+| 0.1019 | 300  | -             |
+| 0.1358 | 400  | -             |
+| 0.1698 | 500  | 1.7346        |
+| 0.2037 | 600  | -             |
+| 0.2377 | 700  | -             |
+| 0.2716 | 800  | -             |
+| 0.3056 | 900  | -             |
+| 0.3396 | 1000 | 0.8428        |
+| 0.3735 | 1100 | -             |
+| 0.4075 | 1200 | -             |
+| 0.4414 | 1300 | -             |
+| 0.4754 | 1400 | -             |
+| 0.5093 | 1500 | 0.4421        |
+| 0.5433 | 1600 | -             |
+| 0.5772 | 1700 | -             |
+| 0.6112 | 1800 | -             |
+| 0.6452 | 1900 | -             |
+| 0.6791 | 2000 | 0.3366        |
+| 0.7131 | 2100 | -             |
+| 0.7470 | 2200 | -             |
+| 0.7810 | 2300 | -             |
+| 0.8149 | 2400 | -             |
+| 0.8489 | 2500 | 0.2568        |
+| 0.8829 | 2600 | -             |
+| 0.9168 | 2700 | -             |
+| 0.9508 | 2800 | -             |
+| 0.9847 | 2900 | -             |
+| 1.0    | 2945 | -             |
+| 1.0187 | 3000 | 0.1666        |
+| 1.0526 | 3100 | -             |
+| 1.0866 | 3200 | -             |
+| 1.1205 | 3300 | -             |
+| 1.1545 | 3400 | -             |
+| 1.1885 | 3500 | 0.1027        |
+| 1.2224 | 3600 | -             |
+| 1.2564 | 3700 | -             |
+| 1.2903 | 3800 | -             |
+| 1.3243 | 3900 | -             |
+| 1.3582 | 4000 | 0.0657        |
+| 1.3922 | 4100 | -             |
+| 1.4261 | 4200 | -             |
+| 1.4601 | 4300 | -             |
+| 1.4941 | 4400 | -             |
+| 1.5280 | 4500 | 0.0788        |
+| 1.5620 | 4600 | -             |
+| 1.5959 | 4700 | -             |
+| 1.6299 | 4800 | -             |
+| 1.6638 | 4900 | -             |
+| 1.6978 | 5000 | 0.0648        |
+| 1.7317 | 5100 | -             |
+| 1.7657 | 5200 | -             |
+| 1.7997 | 5300 | -             |
+| 1.8336 | 5400 | -             |
+| 1.8676 | 5500 | 0.0413        |
+| 1.9015 | 5600 | -             |
+| 1.9355 | 5700 | -             |
+| 1.9694 | 5800 | -             |
+| 2.0    | 5890 | -             |
+| 2.0034 | 5900 | -             |
+| 2.0374 | 6000 | 0.0293        |
+| 2.0713 | 6100 | -             |
+| 2.1053 | 6200 | -             |
+| 2.1392 | 6300 | -             |
+| 2.1732 | 6400 | -             |
+| 2.2071 | 6500 | 0.0158        |
+| 2.2411 | 6600 | -             |
+| 2.2750 | 6700 | -             |
+| 2.3090 | 6800 | -             |
+| 2.3430 | 6900 | -             |
+| 2.3769 | 7000 | 0.0183        |
+| 2.4109 | 7100 | -             |
+| 2.4448 | 7200 | -             |
+| 2.4788 | 7300 | -             |
+| 2.5127 | 7400 | -             |
+| 2.5467 | 7500 | 0.0079        |
+| 2.5806 | 7600 | -             |
+| 2.6146 | 7700 | -             |
+| 2.6486 | 7800 | -             |
+| 2.6825 | 7900 | -             |
+| 2.7165 | 8000 | 0.007         |
+| 2.7504 | 8100 | -             |
+| 2.7844 | 8200 | -             |
+| 2.8183 | 8300 | -             |
+| 2.8523 | 8400 | -             |
+| 2.8862 | 8500 | 0.0057        |
+| 2.9202 | 8600 | -             |
+| 2.9542 | 8700 | -             |
+| 2.9881 | 8800 | -             |
+| 3.0    | 8835 | -             |
+
+
+### Framework Versions
+- Python: 3.10.12
+- Sentence Transformers: 4.1.0
+- Transformers: 4.53.0
+- PyTorch: 2.1.0+cu118
+- Accelerate: 1.8.1
+- Datasets: 3.6.0
+- Tokenizers: 0.21.2
+
+## Citation
+
+### BibTeX
+
+#### Sentence Transformers
+```bibtex
+@inproceedings{reimers-2019-sentence-bert,
+    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
+    author = "Reimers, Nils and Gurevych, Iryna",
+    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
+    month = "11",
+    year = "2019",
+    publisher = "Association for Computational Linguistics",
+    url = "https://arxiv.org/abs/1908.10084",
+}
+```
+
+#### TripletLoss
+```bibtex
+@misc{hermans2017defense,
+    title={In Defense of the Triplet Loss for Person Re-Identification},
+    author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
+    year={2017},
+    eprint={1703.07737},
+    archivePrefix={arXiv},
+    primaryClass={cs.CV}
+}
+```
+
+<!--
+## Glossary
+
+*Clearly define terms in order to be accessible across audiences.*
+-->
+
+<!--
+## Model Card Authors
+
+*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
+-->
+
+<!--
+## Model Card Contact
+
+*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
+-->
config.json
ADDED
@@ -0,0 +1,43 @@
+{
+  "architectures": [
+    "NewModel"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "auto_map": {
+    "AutoConfig": "configuration.NewConfig",
+    "AutoModel": "modeling.NewModel",
+    "AutoModelForMaskedLM": "Alibaba-NLP/new-impl--modeling.NewForMaskedLM",
+    "AutoModelForMultipleChoice": "Alibaba-NLP/new-impl--modeling.NewForMultipleChoice",
+    "AutoModelForQuestionAnswering": "Alibaba-NLP/new-impl--modeling.NewForQuestionAnswering",
+    "AutoModelForSequenceClassification": "Alibaba-NLP/new-impl--modeling.NewForSequenceClassification",
+    "AutoModelForTokenClassification": "Alibaba-NLP/new-impl--modeling.NewForTokenClassification"
+  },
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-12,
+  "layer_norm_type": "layer_norm",
+  "logn_attention_clip1": false,
+  "logn_attention_scale": false,
+  "max_position_embeddings": 8192,
+  "model_type": "new",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "pack_qkv": true,
+  "pad_token_id": 0,
+  "position_embedding_type": "rope",
+  "rope_scaling": {
+    "factor": 2.0,
+    "type": "ntk"
+  },
+  "rope_theta": 160000,
+  "torch_dtype": "float32",
+  "transformers_version": "4.53.0",
+  "type_vocab_size": 2,
+  "unpad_inputs": false,
+  "use_memory_efficient_attention": false,
+  "vocab_size": 30528
+}
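Note the `auto_map` entries point at the custom `configuration.py`/`modeling.py` shipped in this repo (and in Alibaba's `new-impl` repo), so loading the raw transformer outside sentence-transformers requires opting in to remote code. A minimal sketch:

```python
from transformers import AutoModel, AutoTokenizer

# The custom "new" architecture lives in this repo's configuration.py / modeling.py,
# so Transformers must be allowed to execute that code.
tokenizer = AutoTokenizer.from_pretrained("dpanea/skill-assignment-transformer")
model = AutoModel.from_pretrained("dpanea/skill-assignment-transformer", trust_remote_code=True)

inputs = tokenizer(["I can construct basic sentences"], return_tensors="pt")
outputs = model(**inputs)
cls_embedding = outputs.last_hidden_state[:, 0]  # matches the CLS pooling configured above
print(cls_embedding.shape)  # torch.Size([1, 1024])
```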
config_sentence_transformers.json
ADDED
@@ -0,0 +1,10 @@
+{
+  "__version__": {
+    "sentence_transformers": "4.1.0",
+    "transformers": "4.53.0",
+    "pytorch": "2.1.0+cu118"
+  },
+  "prompts": {},
+  "default_prompt_name": null,
+  "similarity_fn_name": "cosine"
+}
configuration.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2024 The GTE Team Authors and Alibaba Group.
|
| 3 |
+
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
""" NEW model configuration"""
|
| 17 |
+
from transformers.configuration_utils import PretrainedConfig
|
| 18 |
+
from transformers.utils import logging
|
| 19 |
+
|
| 20 |
+
logger = logging.get_logger(__name__)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class NewConfig(PretrainedConfig):
|
| 24 |
+
r"""
|
| 25 |
+
This is the configuration class to store the configuration of a [`NewModel`] or a [`TFNewModel`]. It is used to
|
| 26 |
+
instantiate a NEW model according to the specified arguments, defining the model architecture. Instantiating a
|
| 27 |
+
configuration with the defaults will yield a similar configuration to that of the NEW
|
| 28 |
+
[izhx/new-base-en](https://huggingface.co/izhx/new-base-en) architecture.
|
| 29 |
+
|
| 30 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
| 31 |
+
documentation from [`PretrainedConfig`] for more information.
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
Args:
|
| 35 |
+
vocab_size (`int`, *optional*, defaults to 30522):
|
| 36 |
+
Vocabulary size of the NEW model. Defines the number of different tokens that can be represented by the
|
| 37 |
+
`inputs_ids` passed when calling [`NewModel`] or [`TFNewModel`].
|
| 38 |
+
hidden_size (`int`, *optional*, defaults to 768):
|
| 39 |
+
Dimensionality of the encoder layers and the pooler layer.
|
| 40 |
+
num_hidden_layers (`int`, *optional*, defaults to 12):
|
| 41 |
+
Number of hidden layers in the Transformer encoder.
|
| 42 |
+
num_attention_heads (`int`, *optional*, defaults to 12):
|
| 43 |
+
Number of attention heads for each attention layer in the Transformer encoder.
|
| 44 |
+
intermediate_size (`int`, *optional*, defaults to 3072):
|
| 45 |
+
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
|
| 46 |
+
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
|
| 47 |
+
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
|
| 48 |
+
`"relu"`, `"silu"` and `"gelu_new"` are supported.
|
| 49 |
+
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
|
| 50 |
+
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
|
| 51 |
+
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
|
| 52 |
+
The dropout ratio for the attention probabilities.
|
| 53 |
+
max_position_embeddings (`int`, *optional*, defaults to 512):
|
| 54 |
+
The maximum sequence length that this model might ever be used with. Typically set this to something large
|
| 55 |
+
just in case (e.g., 512 or 1024 or 2048).
|
| 56 |
+
type_vocab_size (`int`, *optional*, defaults to 2):
|
| 57 |
+
The vocabulary size of the `token_type_ids` passed when calling [`NewModel`] or [`TFNewModel`].
|
| 58 |
+
initializer_range (`float`, *optional*, defaults to 0.02):
|
| 59 |
+
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
| 60 |
+
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
|
| 61 |
+
The epsilon used by the layer normalization layers.
|
| 62 |
+
position_embedding_type (`str`, *optional*, defaults to `"rope"`):
|
| 63 |
+
Type of position embedding. Choose one of `"absolute"`, `"rope"`.
|
| 64 |
+
rope_theta (`float`, *optional*, defaults to 10000.0):
|
| 65 |
+
The base period of the RoPE embeddings.
|
| 66 |
+
rope_scaling (`Dict`, *optional*):
|
| 67 |
+
Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
|
| 68 |
+
strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
|
| 69 |
+
`{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
|
| 70 |
+
`max_position_embeddings` to the expected new maximum. See the following thread for more information on how
|
| 71 |
+
these scaling strategies behave:
|
| 72 |
+
https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
|
| 73 |
+
experimental feature, subject to breaking API changes in future versions.
|
| 74 |
+
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.

    Examples:

    ```python
    >>> from transformers import NewConfig, NewModel

    >>> # Initializing a NEW izhx/new-base-en style configuration
    >>> configuration = NewConfig()

    >>> # Initializing a model (with random weights) from the izhx/new-base-en style configuration
    >>> model = NewModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "new"

    def __init__(
        self,
        vocab_size=30528,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=2048,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_type='layer_norm',
        layer_norm_eps=1e-12,
        # pad_token_id=0,
        position_embedding_type="rope",
        rope_theta=10000.0,
        rope_scaling=None,
        classifier_dropout=None,
        pack_qkv=True,
        unpad_inputs=False,
        use_memory_efficient_attention=False,
        logn_attention_scale=False,
        logn_attention_clip1=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_type = layer_norm_type
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.classifier_dropout = classifier_dropout

        self.pack_qkv = pack_qkv
        self.unpad_inputs = unpad_inputs
        self.use_memory_efficient_attention = use_memory_efficient_attention
        self.logn_attention_scale = logn_attention_scale
        self.logn_attention_clip1 = logn_attention_clip1
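
# --- Illustrative sketch (not part of the original file): how the RoPE-scaling
# --- options above compose. The factor value is an arbitrary example; note that
# --- `_init_rope` in modeling.py only understands the 'ntk' strategy.
def _demo_ntk_config():
    # Extend the usable context 4x while keeping the trained maximum in
    # `max_position_embeddings`, as the rope_scaling docstring instructs.
    config = NewConfig(
        max_position_embeddings=2048,
        rope_theta=10000.0,
        rope_scaling={"type": "ntk", "factor": 4.0},
    )
    return config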
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:56ac5b513db1d64706848d5ab52dcd29f5fece6645bcec0fdb51207a53684f25
size 1736585680
modeling.py
ADDED
@@ -0,0 +1,1418 @@
# coding=utf-8
# Copyright 2024 The GTE Team Authors and Alibaba Group.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch NEW model."""

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPooling,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    ModelOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

try:
    import xformers.ops as xops
except ImportError:
    xops = None

from .configuration import NewConfig


logger = logging.get_logger(__name__)


# Adapted from https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/bert_padding.py
# Which was adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py
class IndexFirstAxis(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input, indices):
        ctx.save_for_backward(indices)
        assert input.ndim >= 2
        ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
        second_dim = other_shape.numel()
        # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
        # return input[indices]
        # return torch.gather(
        #     rearrange(input, "b ... -> b (...)"), 0, repeat(indices, "z -> z d", d=second_dim)
        # ).reshape(-1, *other_shape)
        return torch.gather(
            input.view(ctx.first_axis_dim, second_dim),
            0,
            indices.unsqueeze(-1).expand(indices.size(0), second_dim)
        ).reshape(-1, *other_shape)

    @staticmethod
    def backward(ctx, grad_output):
        (indices,) = ctx.saved_tensors
        assert grad_output.ndim >= 2
        other_shape = grad_output.shape[1:]
        # grad_output = rearrange(grad_output, "b ... -> b (...)")
        grad_output = grad_output.view(grad_output.size(0), other_shape.numel())
        grad_input = torch.zeros(
            [ctx.first_axis_dim, grad_output.shape[1]],
            device=grad_output.device,
            dtype=grad_output.dtype,
        )
        # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
        # grad_input[indices] = grad_output
        # grad_input.scatter_(0, repeat(indices, "z -> z d", d=grad_output.shape[1]), grad_output)
        grad_input.scatter_(
            0, indices.unsqueeze(-1).expand(indices.size(0), grad_output.size(1)), grad_output
        )
        return grad_input.reshape(ctx.first_axis_dim, *other_shape), None


index_first_axis = IndexFirstAxis.apply


def unpad_input(hidden_states, attention_mask=None, indices=None):
    """
    Arguments:
        hidden_states: (batch, seqlen, ...)
        attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
        indices: (total_nnz), the indices of non-masked tokens from the flattened input sequence.
    Return:
        hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected by attention_mask.
    """
    if indices is None:
        assert attention_mask is not None
        indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()

    # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
    # bool mask, then call nonzero to get the indices, then index with those. The indices are @dim
    # times larger than they need to be, wasting memory. It's faster and more memory-efficient to
    # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
    # so we write custom forward and backward to make it a bit faster.
    hidden_states = hidden_states.view(-1, *hidden_states.shape[2:])
    return index_first_axis(hidden_states, indices)


class IndexPutFirstAxis(torch.autograd.Function):
    @staticmethod
    def forward(
        ctx,
        values: torch.Tensor,
        indices: torch.Tensor,
        first_axis_dim
    ) -> torch.Tensor:
        ctx.save_for_backward(indices)
        assert indices.ndim == 1
        assert values.ndim >= 2
        output = torch.zeros(
            first_axis_dim, *values.shape[1:], device=values.device, dtype=values.dtype
        )
        output[indices] = values
        return output

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor) -> Tuple[torch.Tensor, None, None]:
        indices, = ctx.saved_tensors
        grad_values = grad_output[indices]
        return grad_values, None, None


index_put_first_axis = IndexPutFirstAxis.apply


def pad_input(inputs: torch.Tensor, indices: torch.Tensor, batch: int, seqlen: int) -> torch.Tensor:
    """Add padding to sequences.

    Arguments:
        inputs: (total_nnz, ...), where total_nnz = number of tokens selected by attention_mask.
        indices: (total_nnz), `indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()`
        batch: int batch_size
        seqlen: int max sequence length

    Returns:
        inputs: (batch, seqlen, ...)
    """
    output = index_put_first_axis(inputs, indices, batch * seqlen)
    return output.view(batch, seqlen, *inputs.shape[1:])
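
# --- Illustrative sketch (not part of the original file): round-trip the two
# --- padding helpers above to see the shape contract. Shapes and mask values
# --- are arbitrary examples.
def _demo_unpad_pad_roundtrip():
    batch, seqlen, dim = 2, 4, 8
    hidden = torch.randn(batch, seqlen, dim)
    mask = torch.tensor([[1, 1, 1, 0],
                         [1, 1, 0, 0]])
    indices = torch.nonzero(mask.flatten(), as_tuple=False).flatten()

    unpadded = unpad_input(hidden, attention_mask=mask)     # (5, dim): 3 + 2 valid tokens
    repadded = pad_input(unpadded, indices, batch, seqlen)  # (batch, seqlen, dim)

    # Valid positions survive exactly; padded positions come back as zeros.
    assert torch.equal(repadded[mask.bool()], hidden[mask.bool()])
    assert repadded[~mask.bool()].abs().sum() == 0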


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
    Returns:
        `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos, sin = cos.to(q.dtype), sin.to(q.dtype)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class RotaryEmbedding(torch.nn.Module):
    def __init__(self, dim, max_position_embeddings=512, base=10000.0, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.float32)

        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)

        return (
            self.cos_cached[:seq_len, ...].to(dtype=x.dtype),
            self.sin_cached[:seq_len, ...].to(dtype=x.dtype),
        )
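
# --- Illustrative sketch (not part of the original file): RoPE is a per-position
# --- block-diagonal 2D rotation, so it preserves query/key norms exactly.
def _demo_rope_norm_preservation():
    head_dim, seq_len = 64, 16
    rope = RotaryEmbedding(dim=head_dim, max_position_embeddings=512)

    q = torch.randn(1, seq_len, 4, head_dim)  # [bs, seq_len, n_heads, head_dim]
    k = torch.randn(1, seq_len, 4, head_dim)

    cos, sin = rope(q, seq_len=seq_len)  # each: [seq_len, head_dim]
    cos = cos[None, :, None, :]          # broadcast over batch and heads
    sin = sin[None, :, None, :]

    q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
    assert torch.allclose(q_rot.norm(dim=-1), q.norm(dim=-1), atol=1e-5)
    assert torch.allclose(k_rot.norm(dim=-1), k.norm(dim=-1), atol=1e-5)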


class NTKScalingRotaryEmbedding(RotaryEmbedding):
    """RotaryEmbedding extended with fixed and mixed NTK scaling. https://kexue.fm/archives/9706"""

    def __init__(self, dim, max_position_embeddings=512, base=10000, device=None, scaling_factor=1.0, mixed_b=None):
        self.scaling_factor = scaling_factor
        self.mixed_b = mixed_b
        super().__init__(dim, max_position_embeddings, base, device)
        max_position_embeddings = max_position_embeddings * self.scaling_factor
        self._set_cos_sin_cache(max_position_embeddings, self.inv_freq.device, torch.get_default_dtype())

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len

        if seq_len > self.max_position_embeddings:
            base = self.base * (self.scaling_factor if self.mixed_b is None else 1)
            inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))

            if self.mixed_b is None:
                inv_freq = inv_freq / self.scaling_factor ** (2 / self.dim)  # Eq. (6) in the reference above
            else:
                a = torch.tensor(self.scaling_factor).log() / (self.dim / 2) ** self.mixed_b  # Eq. (13)
                lambda_1_m = (a * torch.arange(1, self.dim // 2 + 1).float().to(device) ** self.mixed_b).exp()  # Eq. (12)
                inv_freq = inv_freq / lambda_1_m  # Eq. (10)

            self.register_buffer("inv_freq", inv_freq, persistent=False)

        t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.float32)

        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)


class RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)


LAYER_NORM = {
    'layer_norm': nn.LayerNorm,
    'rms_norm': RMSNorm
}
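
# --- Illustrative sketch (not part of the original file): RMSNorm rescales by the
# --- root-mean-square of the features, y = w * x / sqrt(mean(x^2) + eps); unlike
# --- nn.LayerNorm it neither subtracts the mean nor adds a bias.
def _demo_rms_norm():
    norm = RMSNorm(hidden_size=8, eps=1e-6)
    x = torch.randn(2, 8)
    expected = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
    assert torch.allclose(norm(x), expected, atol=1e-6)  # weight starts at ones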


class NewEmbeddings(nn.Module):
    """
    Embedding and Unpadding.
    """

    def __init__(self, config: NewConfig):
        super().__init__()
        self.padding_idx = config.pad_token_id
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size, padding_idx=self.padding_idx
        )

        self.position_embedding_type = config.position_embedding_type
        if self.position_embedding_type == 'absolute':
            self.position_embeddings = nn.Embedding(
                config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
            )
        elif self.position_embedding_type == 'rope':
            self._init_rope(config)
        else:
            raise ValueError(f"Unknown position embedding type {self.position_embedding_type!r}")

        self.type_vocab_size = config.type_vocab_size
        if self.type_vocab_size > 0:
            self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids is contiguous in memory and excluded when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings), persistent=False
        )

    def _init_rope(self, config):
        kwargs = dict(
            dim=int(config.hidden_size / config.num_attention_heads),
            max_position_embeddings=config.max_position_embeddings,
            base=config.rope_theta
        )
        if config.rope_scaling is None:
            self.rotary_emb = RotaryEmbedding(**kwargs)
        else:
            kwargs.update(scaling_factor=config.rope_scaling["factor"])
            scaling_type = config.rope_scaling["type"]
            if scaling_type == 'ntk':
                kwargs.update(mixed_b=config.rope_scaling.get('mixed_b', None))
                self.rotary_emb = NTKScalingRotaryEmbedding(**kwargs)
            # elif scaling_type == "linear":
            #     self.rotary_emb = LinearScalingRotaryEmbedding(**kwargs)
            # elif scaling_type == "dynamic":
            #     self.rotary_emb = DynamicNTKScalingRotaryEmbedding(**kwargs)
            else:
                raise ValueError(f"Unknown RoPE scaling type {scaling_type}")

    def forward(
        self,
        unpad_inputs: bool,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        length: Optional[List[int]] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[Tuple], Optional[List[int]]]:
        if inputs_embeds is None:
            device, input_shape = input_ids.device, input_ids.shape
        else:
            device, input_shape = inputs_embeds.device, inputs_embeds.shape[:2]
        batch_size, seq_length = input_shape

        # Set attention_mask if it's None
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
            if length is not None:
                for i, l in enumerate(length):
                    attention_mask[i, l:] = 0

        # Set attention_mask_bool for unpadding
        if unpad_inputs:
            attention_mask_bool = attention_mask.bool()
            if length is None:
                length = attention_mask.sum(-1).tolist()

        # Get word embeddings
        if inputs_embeds is None:
            if unpad_inputs:
                input_ids = input_ids[attention_mask_bool].unsqueeze(0)
            inputs_embeds = self.word_embeddings(input_ids)
        else:
            if unpad_inputs:
                inputs_embeds = inputs_embeds[attention_mask_bool].unsqueeze(0)
        embeddings = inputs_embeds

        # Set and unpad position_ids
        if position_ids is None:
            if seq_length > self.position_ids.size(0):
                self.register_buffer(
                    "position_ids", torch.arange(seq_length, device=embeddings.device), persistent=False
                )
            if unpad_inputs:
                # [1, cumsum_seq_len]
                position_ids = torch.cat([self.position_ids[:l] for l in length]).unsqueeze(0)
            else:
                # [bs, seq_len]
                position_ids = self.position_ids[:seq_length].expand(batch_size, -1)
        elif unpad_inputs:
            position_ids = position_ids[attention_mask_bool].unsqueeze(0)  # [1, cumsum_seq_len]

        # Compute rotary embedding
        if self.position_embedding_type == 'rope':
            rope_cos, rope_sin = self.rotary_emb(inputs_embeds, seq_len=seq_length)
            rope_cos = rope_cos[position_ids].unsqueeze(2)  # [bs, seq_len, 1, dim]
            rope_sin = rope_sin[position_ids].unsqueeze(2)  # [bs, seq_len, 1, dim]
            rope_embeds = rope_cos, rope_sin
        else:
            rope_embeds = None

        if self.type_vocab_size > 0:
            if token_type_ids is None:
                token_type_ids = position_ids.mul(0)
            else:
                if self.type_vocab_size < 2:
                    token_type_ids.mul_(0)
                if unpad_inputs:
                    token_type_ids = token_type_ids[attention_mask_bool].unsqueeze(0)

            token_type_embeddings = self.token_type_embeddings(token_type_ids)
            embeddings = embeddings + token_type_embeddings

        # BERT position
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings = embeddings + position_embeddings

        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)

        return embeddings, attention_mask, rope_embeds, length


class NewAttention(nn.Module):
    def __init__(self, config: NewConfig, pack_qkv=None, use_memory_efficient_attention=None):
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.hidden_size = config.hidden_size
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        if pack_qkv is None:
            pack_qkv = config.pack_qkv
        self.pack_qkv = pack_qkv

        if self.pack_qkv:
            self.qkv_proj = nn.Linear(config.hidden_size, self.all_head_size * 3, bias=True)
        else:
            self.q_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
            self.k_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
            self.v_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.o_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=True)

        if use_memory_efficient_attention is None:
            use_memory_efficient_attention = self.config.use_memory_efficient_attention
        self.use_memory_efficient_attention = use_memory_efficient_attention
        self.memory_efficient_attention = None if xops is None else xops.memory_efficient_attention
        if self.use_memory_efficient_attention:
            assert self.memory_efficient_attention is not None, 'please install xformers'

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_bias: torch.FloatTensor,
        rope_embeds: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None,
        padding_inputs: Optional[Tuple] = None,  # indices, batch, seqlen
        attention_scale: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        qkv_inputs: Optional[Tuple] = None,  # For RetroMAE
    ) -> Tuple[torch.Tensor, ...]:
        shape_hd = (self.num_attention_heads, self.attention_head_size)
        # qkv
        if self.pack_qkv and qkv_inputs is None:
            qkv_pack = self.qkv_proj(hidden_states).split(self.all_head_size, dim=-1)
        else:
            if qkv_inputs is None:
                qkv_inputs = (hidden_states, hidden_states, hidden_states)
            qkv_pack = [
                getattr(self, n + '_proj')(s) for s, n in zip(qkv_inputs, 'qkv')
            ]
        query_states, key_states, value_states = [t.view(t.shape[:-1] + shape_hd) for t in qkv_pack]

        if self.config.position_embedding_type == 'rope':
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, *rope_embeds)

        dtype = query_states.dtype

        if self.config.logn_attention_scale and attention_scale is not None:
            # https://kexue.fm/archives/8823
            query_states = query_states * attention_scale.to(dtype)

        if padding_inputs is not None:
            query_states = pad_input(query_states.squeeze(), *padding_inputs)
            key_states = pad_input(key_states.squeeze(), *padding_inputs)
            value_states = pad_input(value_states.squeeze(), *padding_inputs)

        if self.use_memory_efficient_attention:
            assert self.memory_efficient_attention is not None, "xformers is not loaded"
            assert output_attentions is False, "memory_efficient_attention does not output attentions"
            assert head_mask is None, "head_mask is not supported yet"
            attention_probs = None
            if torch.is_tensor(attention_bias):
                attention_bias = attention_bias.to(dtype)
            context_layer = self.memory_efficient_attention(
                query_states,
                key_states,
                value_states,
                attn_bias=attention_bias,
                p=self.dropout.p
            )
        else:
            if output_attentions and isinstance(self, NewSdpaAttention):
                raise RuntimeError("SDPA does not output attentions")
            context_layer, attention_probs = self._attention(
                query_states, key_states, value_states, attention_bias, head_mask
            )

        if padding_inputs is not None:
            context_layer = unpad_input(context_layer, indices=padding_inputs[0])

        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        # output proj
        attn_output = self.o_proj(context_layer)

        # add attentions if we output them
        outputs = (attn_output, attention_probs) if output_attentions else (attn_output,)
        return outputs

    def _attention(self, query_states, key_states, value_states, attention_bias, head_mask):
        """
        Args:
            q/k/v: (B, L, n_head, head_dim)
        Returns:
            attn_output: (B, L, n_head, head_dim)
        """
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_bias is not None:
            # Apply the attention mask (precomputed for all layers in the model forward() function)
            attention_scores = attention_scores + attention_bias

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        if self.dropout.p > 0:
            attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_states)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        return context_layer, attention_probs


class NewSdpaAttention(NewAttention):
    """
    New attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `NewAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
    the SDPA API.
    """
    def __init__(self, config: NewConfig, **kwargs):
        super().__init__(config, **kwargs)
        # torch.backends.cuda.enable_mem_efficient_sdp(False)
        # logger.warning(
        #     "Disable memory efficient attention kernel for `NewSdpaAttention`, you can set "
        #     "`use_memory_efficient_attention=True` if it is expected to be used."
        # )

    def _attention(self, query_states, key_states, value_states, attention_bias, head_mask):
        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states.transpose(1, 2),
            key_states.transpose(1, 2),
            value_states.transpose(1, 2),
            attn_mask=attention_bias,
            dropout_p=self.dropout.p if self.training else 0.0,
        )
        attn_output = attn_output.permute(0, 2, 1, 3).contiguous()
        return attn_output, None


NEW_ATTENTION_CLASSES = {
    "eager": NewAttention,
    # "flash_attention_2": ,  # TODO
    "sdpa": NewSdpaAttention,
}
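
# --- Illustrative sketch (not part of the original file): the eager and SDPA
# --- implementations in the table above are weight-compatible, so with shared
# --- weights and dropout inactive (eval mode) their outputs should agree up to
# --- float tolerance. `NewLayer` below performs this dispatch via
# --- `config._attn_implementation`.
def _demo_attention_parity():
    cfg = NewConfig(
        position_embedding_type="absolute",  # avoids building rope tables here
        attention_probs_dropout_prob=0.0,
    )
    eager = NEW_ATTENTION_CLASSES["eager"](cfg).eval()
    sdpa = NEW_ATTENTION_CLASSES["sdpa"](cfg).eval()
    sdpa.load_state_dict(eager.state_dict())  # same weights for a fair comparison

    x = torch.randn(2, 10, cfg.hidden_size)
    out_eager = eager(x, attention_bias=None)[0]
    out_sdpa = sdpa(x, attention_bias=None)[0]
    assert torch.allclose(out_eager, out_sdpa, atol=1e-5)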


class NewGatedMLP(nn.Module):
    """
    GLU Variants Improve Transformer.
    """

    def __init__(self, config: NewConfig):
        super().__init__()
        self.intermediate_size = config.intermediate_size
        self.up_gate_proj = nn.Linear(config.hidden_size, self.intermediate_size * 2, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, config.hidden_size, bias=True)
        self.act_fn = ACT2FN[config.hidden_act]
        if config.hidden_dropout_prob > 0:
            self.hidden_dropout = nn.Dropout(config.hidden_dropout_prob)
        else:
            self.hidden_dropout = None

    def forward(self, hidden_states):
        up_gate = self.up_gate_proj(hidden_states)
        up_states, gate = torch.split(up_gate, self.intermediate_size, dim=-1)
        gate = self.act_fn(gate)
        gated_states = gate * up_states
        if self.hidden_dropout is not None:
            gated_states = self.hidden_dropout(gated_states)
        down_states = self.down_proj(gated_states)
        return down_states
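
# --- Illustrative sketch (not part of the original file): the block computes
# --- down_proj(act(gate) * up), where up and gate are the two halves of one
# --- fused projection of width 2 * intermediate_size.
def _demo_gated_mlp():
    cfg = NewConfig(hidden_size=768, intermediate_size=3072, hidden_act="gelu")
    mlp = NewGatedMLP(cfg).eval()
    x = torch.randn(2, 5, cfg.hidden_size)
    assert mlp(x).shape == x.shape  # the GLU block preserves the hidden size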


class NewLayer(nn.Module):
    def __init__(
        self,
        config: NewConfig,
        pack_qkv=None,
        use_memory_efficient_attention=None,
        attn_implementation=None
    ):
        super().__init__()
        if attn_implementation is None:
            attn_implementation = config._attn_implementation
        if use_memory_efficient_attention is None:
            use_memory_efficient_attention = config.use_memory_efficient_attention
        if use_memory_efficient_attention:
            if attn_implementation != 'eager':
                logger.warning_once(f"Override {attn_implementation=} to 'eager' as {use_memory_efficient_attention=}")
                attn_implementation = 'eager'  # Since it will be SDPA by default for torch>=2.1.1
        self.attention = NEW_ATTENTION_CLASSES[attn_implementation](
            config, pack_qkv=pack_qkv, use_memory_efficient_attention=use_memory_efficient_attention
        )
        self.mlp = NewGatedMLP(config)

        ln_class = LAYER_NORM[config.layer_norm_type]
        self.attn_ln = ln_class(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp_ln = ln_class(config.hidden_size, eps=config.layer_norm_eps)

        if config.hidden_dropout_prob > 0:
            self.hidden_dropout = nn.Dropout(config.hidden_dropout_prob)
        else:
            self.hidden_dropout = None

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_bias: torch.FloatTensor,
        rope_embeds: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None,
        padding_inputs: Optional[Tuple] = None,  # indices, batch, seqlen
        attention_scale: Optional[torch.FloatTensor] = None,
        subset_indices: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        qkv_inputs: Optional[Tuple] = None,  # For RetroMAE
    ) -> Tuple[torch.Tensor, ...]:
        # Multi head self attention
        residual = hidden_states if qkv_inputs is None else qkv_inputs[0]
        attention_outputs = self.attention(
            hidden_states,
            attention_bias,
            rope_embeds,
            padding_inputs,
            attention_scale,
            head_mask,
            output_attentions=output_attentions,
            qkv_inputs=qkv_inputs,
        )
        hidden_states = attention_outputs[0]
        if self.hidden_dropout is not None:
            hidden_states = self.hidden_dropout(hidden_states)
        hidden_states = residual + hidden_states

        # In pretraining, after the attention of the last layer, we only need the masked tokens.
        if subset_indices is not None:
            hidden_states = hidden_states[subset_indices]

        hidden_states = self.attn_ln(hidden_states)

        # Fully Connected
        residual = hidden_states
        hidden_states = self.mlp(hidden_states)
        if self.hidden_dropout is not None:
            hidden_states = self.hidden_dropout(hidden_states)
        hidden_states = residual + hidden_states
        hidden_states = self.mlp_ln(hidden_states)

        # add self attentions if we output attention weights
        outputs = (hidden_states,) + attention_outputs[1:]
        return outputs


class NewEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([NewLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_bias: Optional[torch.FloatTensor] = None,
        rope_embeds: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None,
        padding_inputs: Optional[Tuple] = None,  # indices, batch, seqlen
        attention_scale: Optional[torch.FloatTensor] = None,
        subset_indices: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if i >= len(self.layer) - 1:
                layer_subset_indices = subset_indices
            else:
                layer_subset_indices = None

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_bias,
                    rope_embeds,
                    padding_inputs,
                    attention_scale,
                    layer_subset_indices,
                    layer_head_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_bias,
                    rope_embeds,
                    padding_inputs,
                    attention_scale,
                    layer_subset_indices,
                    layer_head_mask,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    all_hidden_states,
                    all_self_attentions,
                ]
                if v is not None
            )
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->New
class NewPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class NewPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = NewConfig
    base_model_prefix = "new"
    supports_gradient_checkpointing = True
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


class NewModel(NewPreTrainedModel):
    """
    The bare New Model transformer outputting raw hidden-states without any specific head on top.
    """

    def __init__(self, config: NewConfig, add_pooling_layer=False):
        super().__init__(config)
        self.config = config

        self.embeddings = NewEmbeddings(config)
        self.encoder = NewEncoder(config)

        self.pooler = NewPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        length: Optional[List[int]] = None,
        subset_indices: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        unpad_inputs: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]:
        r"""
        length (`list` of length `batch_size`, *optional*):
            Valid (unpadded) length of each sequence. If `None`, return padded `last_hidden_state`.
        subset_indices (`torch.LongTensor`, *optional*):
            Indices of the token positions to keep after the last layer (used for masked-token-only
            pretraining losses).
        unpad_inputs (`bool`, *optional*):
            Whether to strip padding tokens before the encoder; defaults to `config.unpad_inputs`.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        unpad_inputs = unpad_inputs if unpad_inputs is not None else self.config.unpad_inputs
        output_padded = length is None

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        # TODO: not used
        # # Prepare head mask if needed
        # # 1.0 in head_mask indicate we keep the head
        # # attention_probs has shape bsz x n_heads x N x N
        # # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        # head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        # Get embeddings, may unpad them
        (embedding_output, attention_mask, rope_embeds, length) = self.embeddings(
            unpad_inputs,
            input_ids=input_ids,
            attention_mask=attention_mask,
            length=length,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds
        )

        batch_size, seq_length = input_shape
        if unpad_inputs and self.config.use_memory_efficient_attention:
            attention_bias = xops.fmha.attn_bias.BlockDiagonalMask.from_seqlens(length)
        else:
            # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
            # ourselves in which case we just need to make it broadcastable to all heads.
            attention_bias = self.get_extended_attention_mask(attention_mask, input_shape)
            if self.config.use_memory_efficient_attention:
                # Invalid shape for attention bias: torch.Size([48, 1, 1, 512]) (expected (48, 12, 512, 512))
                attention_bias = attention_bias.expand(-1, self.config.num_attention_heads, seq_length, -1)

        padding_inputs = None
        if unpad_inputs and (output_padded or not self.config.use_memory_efficient_attention):
            indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
            if not self.config.use_memory_efficient_attention:
                padding_inputs = (indices, *input_shape)

        attention_scale = None
        if self.config.logn_attention_scale:
            logger.warning_once("TODO: logn_attention_scale")
            # # attention scale log_512(input_len)
            # attention_scale = attention_mask.sum(1).log() / torch.tensor(self.config.max_position_embeddings).log()
            # # inference-time logn scale need clip 1
            # if self.config.logn_attention_clip1:
            #     attention_scale.clip_(1)
            # attention_scale = attention_scale[:, None, None, None]
        # else:
        #     attention_scale = None

        encoder_outputs = self.encoder(
            embedding_output,
            attention_bias=attention_bias,
            rope_embeds=rope_embeds,
            padding_inputs=padding_inputs,
            attention_scale=attention_scale,
            subset_indices=subset_indices,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        if unpad_inputs and output_padded:
            sequence_output = pad_input(
                sequence_output.squeeze(), indices, batch_size, seq_length
            )

        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
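
# --- Illustrative sketch (not part of the original file): encode sentences with
# --- first-token (CLS) pooling, the same convention NewPooler uses. The model id
# --- is the placeholder from the config docstring, not a guaranteed checkpoint;
# --- trust_remote_code=True is needed because this architecture lives in the
# --- repository rather than in the transformers package.
def _demo_sentence_embeddings():
    from transformers import AutoModel, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("izhx/new-base-en")
    model = AutoModel.from_pretrained("izhx/new-base-en", trust_remote_code=True).eval()

    batch = tokenizer(["first sentence", "second sentence"],
                      padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():
        out = model(**batch)

    embeddings = out.last_hidden_state[:, 0]                        # CLS pooling
    embeddings = torch.nn.functional.normalize(embeddings, dim=-1)  # unit length
    return embeddings @ embeddings.T                                # cosine similarities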


class NewLMPredictionHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.transform_act_fn = ACT2FN[config.hidden_act]
        self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.norm(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states


class NewForMaskedLM(NewPreTrainedModel):
    _tied_weights_keys = ["lm_head.decoder.bias", "lm_head.decoder.weight"]

    def __init__(self, config: NewConfig):
        super().__init__(config)
        self.new = NewModel(config, add_pooling_layer=False)
        self.lm_head = NewLMPredictionHead(config)
        self.loss_fct = nn.CrossEntropyLoss()

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.decoder = new_embeddings

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        unpad_inputs: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is None or not self.new.config.unpad_inputs:
            length = None
            subset_indices = None
        else:
            length = attention_mask.sum(-1).tolist()
            labels = labels[attention_mask.bool()].unsqueeze(0)
            subset_indices = labels > -100

        outputs = self.new(
            input_ids,
            attention_mask=attention_mask,
            length=length,
            subset_indices=subset_indices,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            unpad_inputs=unpad_inputs,
        )

        sequence_output = outputs[0]
        prediction_scores = self.lm_head(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            if subset_indices is None:
                mask = attention_mask.bool()
                prediction_scores = prediction_scores[mask]
                labels = labels[mask]
            else:
                labels = labels[subset_indices]
            masked_lm_loss = self.loss_fct(prediction_scores, labels)

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
|
| 1077 |
+
|
| 1078 |
+
|
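# NewForSequenceClassification below infers config.problem_type from the first
# labelled batch when it is unset: num_labels == 1 -> "regression" (MSELoss);
# num_labels > 1 with integer labels -> "single_label_classification"
# (CrossEntropyLoss); otherwise "multi_label_classification" (BCEWithLogitsLoss).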
class NewForSequenceClassification(NewPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.new = NewModel(config, add_pooling_layer=True)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        unpad_inputs: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.new(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            unpad_inputs=unpad_inputs,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = nn.MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = nn.CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = nn.BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

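# For multiple choice, inputs arrive as (batch, num_choices, seq_len). The model
# below flattens them to (batch * num_choices, seq_len), scores each choice with
# a single-logit head on the pooled output, then reshapes the logits back to
# (batch, num_choices) for a cross-entropy over the choices.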
class NewForMultipleChoice(NewPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.new = NewModel(config, add_pooling_layer=True)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        unpad_inputs: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.new(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            unpad_inputs=unpad_inputs,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

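# NewTokenClassifierOutput extends the usual token-classification output with a
# last_hidden_state field, so callers can reuse the (dropout-applied) encoder
# states without requesting every layer via output_hidden_states=True.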
@dataclass
class NewTokenClassifierOutput(ModelOutput):
    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


class NewForTokenClassification(NewPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.new = NewModel(config, add_pooling_layer=False)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        unpad_inputs: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], NewTokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.new(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            unpad_inputs=unpad_inputs,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return NewTokenClassifierOutput(
            loss=loss,
            logits=logits,
            last_hidden_state=sequence_output,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

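# The QA head below projects every token to num_labels logits (expected to be 2)
# and splits them into start/end scores; gold positions outside the sequence are
# clamped to ignored_index and excluded from the loss via
# CrossEntropyLoss(ignore_index=ignored_index).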
class NewForQuestionAnswering(NewPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.new = NewModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        unpad_inputs: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for the position (index) of the start of the labelled span, used for computing the token
            classification loss. Positions are clamped to the length of the sequence (`sequence_length`); positions
            outside of the sequence are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for the position (index) of the end of the labelled span, used for computing the token
            classification loss. Positions are clamped to the length of the sequence (`sequence_length`); positions
            outside of the sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.new(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            unpad_inputs=unpad_inputs,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds an extra dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # Sometimes the start/end positions are outside our model inputs; we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
modules.json
ADDED
@@ -0,0 +1,14 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  }
]
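Taken together, modules.json and 1_Pooling/config.json describe the Sentence-Transformers stack for this checkpoint: a Transformer module wrapping the custom NewModel, followed by a Pooling module configured for CLS-token pooling over 1024-dimensional states. A minimal loading sketch; the repo id `org/skill-embedding` is a placeholder, and `trust_remote_code=True` is assumed to be required for the custom modeling/configuration code:

    from sentence_transformers import SentenceTransformer

    # Placeholder repo id; substitute the actual repository this commit belongs to.
    model = SentenceTransformer("org/skill-embedding", trust_remote_code=True)
    embeddings = model.encode(["designs REST APIs", "maintains CI/CD pipelines"])
    print(embeddings.shape)  # (2, 1024): CLS-token embeddings per 1_Pooling/config.json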
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 8192,
  "do_lower_case": false
}
special_tokens_map.json
ADDED
@@ -0,0 +1,37 @@
{
  "cls_token": {
    "content": "[CLS]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "[MASK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "[PAD]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "[SEP]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "[UNK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,63 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "101": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "102": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "103": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_lower_case": true,
  "extra_special_tokens": {},
  "mask_token": "[MASK]",
  "max_length": 8000,
  "model_max_length": 8192,
  "pad_to_multiple_of": null,
  "pad_token": "[PAD]",
  "pad_token_type_id": 0,
  "padding_side": "right",
  "sep_token": "[SEP]",
  "stride": 0,
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "truncation_side": "right",
  "truncation_strategy": "longest_first",
  "unk_token": "[UNK]"
}
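Two details in tokenizer_config.json are easy to miss: the stored `max_length` (8000) is tighter than `model_max_length` (8192), and the tokenizer itself lowercases (`do_lower_case: true`) while sentence_bert_config.json sets its own `do_lower_case` to false, leaving lowercasing to the tokenizer rather than applying it twice. A quick check, with `org/skill-embedding` again standing in for the real repo id:

    from transformers import AutoTokenizer

    # Placeholder repo id, as above.
    tok = AutoTokenizer.from_pretrained("org/skill-embedding")
    print(tok.model_max_length)  # 8192, from tokenizer_config.json
    print(tok.do_lower_case)     # True; Sentence-Transformers adds no extra lowercasing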
vocab.txt
ADDED
The diff for this file is too large to render.