Upload 31 files
Browse files- .gitattributes +5 -0
- Deception/.DS_Store +0 -0
- Deception/Code/.DS_Store +0 -0
- Deception/Code/Cohen_Kappa_Scores Code.ipynb +0 -0
- Deception/Code/Multitask_Learning/.DS_Store +0 -0
- Deception/Code/Multitask_Learning/Multilabel_task_head.py +61 -0
- Deception/Code/Multitask_Learning/base_network.py +24 -0
- Deception/Code/Multitask_Learning/data.csv +0 -0
- Deception/Code/Multitask_Learning/data/data_Xtrain.json +0 -0
- Deception/Code/Multitask_Learning/data/data_Xval.json +1 -0
- Deception/Code/Multitask_Learning/data/data_ytrain.csv +0 -0
- Deception/Code/Multitask_Learning/data/data_yval.csv +251 -0
- Deception/Code/Multitask_Learning/dataloader.py +70 -0
- Deception/Code/Multitask_Learning/main.py +263 -0
- Deception/Code/Multitask_Learning/multi_task.py +40 -0
- Deception/Code/Multitask_Learning/new_data.csv +0 -0
- Deception/Code/Multitask_Learning/presentation main.py +114 -0
- Deception/Code/Multitask_Learning/singlelabel_task_head.py +41 -0
- Deception/Code/Multitask_Learning/utils.py +51 -0
- Deception/Code/Multitask_Learning/watchdata.ipynb +267 -0
- Deception/Code/Sample_data for Cohen Kappa Score.csv +0 -0
- Deception/Data/.DS_Store +0 -0
- Deception/Data/Mask Infilling/albert_total_final.csv +3 -0
- Deception/Data/Mask Infilling/bert_total_final.csv +3 -0
- Deception/Data/Mask Infilling/electra_total_final.csv +3 -0
- Deception/Data/Mask Infilling/mpnet_total_final.csv +3 -0
- Deception/Data/Mask Infilling/roberta_total_final.csv +3 -0
- Deception/Data/Paraphrasing/paraphrase.csv +0 -0
- Deception/Data/sentences/.DS_Store +0 -0
- Deception/Data/sentences/Fake News.csv +0 -0
- Deception/Data/sentences/Tweet.csv +0 -0
- Deception/README.md +2 -0
.gitattributes
CHANGED
|
@@ -57,3 +57,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 57 |
# Video files - compressed
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 57 |
# Video files - compressed
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 60 |
+
Deception/Data/Mask[[:space:]]Infilling/albert_total_final.csv filter=lfs diff=lfs merge=lfs -text
|
| 61 |
+
Deception/Data/Mask[[:space:]]Infilling/bert_total_final.csv filter=lfs diff=lfs merge=lfs -text
|
| 62 |
+
Deception/Data/Mask[[:space:]]Infilling/electra_total_final.csv filter=lfs diff=lfs merge=lfs -text
|
| 63 |
+
Deception/Data/Mask[[:space:]]Infilling/mpnet_total_final.csv filter=lfs diff=lfs merge=lfs -text
|
| 64 |
+
Deception/Data/Mask[[:space:]]Infilling/roberta_total_final.csv filter=lfs diff=lfs merge=lfs -text
|
Deception/.DS_Store
ADDED
|
Binary file (6.15 kB). View file
|
|
|
Deception/Code/.DS_Store
ADDED
|
Binary file (6.15 kB). View file
|
|
|
Deception/Code/Cohen_Kappa_Scores Code.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Deception/Code/Multitask_Learning/.DS_Store
ADDED
|
Binary file (6.15 kB). View file
|
|
|
Deception/Code/Multitask_Learning/Multilabel_task_head.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Code for making a Two hidden layer multi-label classification model
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
import torch.optim as optim
|
| 8 |
+
|
| 9 |
+
from sklearn.metrics import recall_score, precision_score, accuracy_score
|
| 10 |
+
|
| 11 |
+
class MultiLabelTaskHead(nn.Module):
    """Two-hidden-layer feed-forward head for multi-label classification.

    Maps a feature vector of size ``input_size`` to ``output_size`` independent
    label probabilities via two 50-unit ReLU layers and a sigmoid output.
    Example: for a 5-W analysis, output_size = 5.
    """

    def __init__(self, input_size, output_size, device):
        super(MultiLabelTaskHead, self).__init__()
        self.fc1 = nn.Linear(input_size, 50)
        self.fc2 = nn.Linear(50, 50)
        self.fc3 = nn.Linear(50, output_size)
        self.sigmoid = nn.Sigmoid()
        # Stored for parity with the other task heads; not used in this class.
        self.device = device

    def forward(self, x):
        """Return per-label probabilities in [0, 1], shape (batch, output_size)."""
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        x = self.sigmoid(x)
        return x

    def predict(self, x):
        """Return hard 0/1 label predictions (probabilities thresholded at 0.5)."""
        x = self.forward(x)
        x = torch.round(x)
        return x

    def accuracy(self, prediction, target):
        """Element-wise accuracy over all labels (prediction thresholded at 0.5)."""
        prediction = torch.round(prediction)

        return torch.mean((prediction == target).float())

    def recall(self, prediction, target):
        """Macro-averaged recall over labels.

        Fix: labels with no positive targets (tp + fn == 0) now contribute 0
        instead of NaN — previously the 0/0 division propagated NaN through
        ``torch.mean`` and poisoned the whole metric. This matches sklearn's
        ``zero_division=0`` convention.
        """
        prediction = torch.round(prediction)

        tp = torch.sum(torch.logical_and(prediction == 1, target == 1), axis=0)
        fn = torch.sum(torch.logical_and(prediction == 0, target == 1), axis=0)

        # When tp + fn == 0, tp is also 0, so 0 / max(denom, 1) == 0 (no NaN).
        recall = tp / torch.clamp(tp + fn, min=1)
        overall_recall = torch.mean(recall)

        return overall_recall

    def precision(self, prediction, target):
        """Macro-averaged precision over labels.

        Same zero-division guard as :meth:`recall`: labels with no positive
        predictions (tp + fp == 0) contribute 0 instead of NaN.
        """
        prediction = torch.round(prediction)

        tp = torch.sum(torch.logical_and(prediction == 1, target == 1), axis=0)
        fp = torch.sum(torch.logical_and(prediction == 1, target == 0), axis=0)

        precision = tp / torch.clamp(tp + fp, min=1)
        overall_precision = torch.mean(precision)

        return overall_precision
Deception/Code/Multitask_Learning/base_network.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Code for making a base nework that would take tokenized input and pass it through an embedding layer and then through a LSTM layer to get the output
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
import torch.nn.functional as F
|
| 6 |
+
import torch.optim as optim
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class base_network(nn.Module):
    """Shared trunk: token ids -> embedding -> LSTM -> pooled final hidden state.

    The output concatenates the last two slices of the LSTM's final hidden
    state per batch element. For a bidirectional LSTM those are the two
    directions of the top layer; for a unidirectional one they would be the
    top two layers. NOTE(review): this assumes bidirectional=True — confirm
    against the callers.
    """

    def __init__(self, input_size, embedding_size, hidden_size, num_layers, dropout, bidirectional, device):
        super().__init__()
        self.embedding = nn.Embedding(input_size, embedding_size)

        self.lstm = nn.LSTM(
            embedding_size,
            hidden_size,
            num_layers,
            batch_first=True,
            dropout=dropout,
            bidirectional=bidirectional,
        )
        self.device = device

    def forward(self, x):
        """Return a (batch, 2 * hidden_size) representation of token-id batch *x*."""
        token_ids = x.to(self.device)
        embedded = self.embedding(token_ids)
        seq_out, (final_hidden, _final_cell) = self.lstm(embedded)
        # Lay the last two hidden-state slices out batch-first, then flatten.
        # transpose(0, 1) performs the same reordering as permute(1, 0, 2) on
        # a 3-D tensor, and reshape handles the resulting non-contiguous view.
        pooled = final_hidden[-2:].transpose(0, 1).reshape(seq_out.size(0), -1)
        return pooled
Deception/Code/Multitask_Learning/data.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Deception/Code/Multitask_Learning/data/data_Xtrain.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Deception/Code/Multitask_Learning/data/data_Xval.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
["Restrain from eating meat for 1-2 days: Gujarat HC", "Sena must support Murmu/party MP writes to Uddhav", "Eye on caste & regional balance as RJD may get up to 18 berths/JD(U) 13-14", "Using bulldozers is UP culture/spare madrassas: MP Ajmal to Himanta", "NDRF carries out pan-India secur audit of ropeways", "India reports 7/219 new Covid cases and 25 deaths in last 24 hours", "School jobs scam: Mamata Banerjee expels Partha Chatterjee from Bengal cabinet", "Delhi high court imposes Rs 10 lakh costs on NGO for using PIL to blackmail citizens", "Fresh attempts aimed at finishing off Shiv Sena: Uddhav", "Tejashwi: RJD-JD(U) alliance a prelude to 2024 opposition un", "Eye hospital asked to pay Rs 1 cr for child's death", "Will prefer to jump into a well rather than joining Congress: Nitin Gadkari", "Amit Shah's 1st Bihar visit since break with Nitish", "Why EVMs are not used for election of President and Vice President", "Two terrorists shot dead in Srinagar encounter", "Claim on Irani's Goa bar bogus/delete posts/HC tells Cong netas", "Over half of UAPA-accused in 18-30 age group: MHA data", "\"Coercive or Unilateral\" action to change status quo will undermine common secur: India at UNSC amid Taiwan tensions", "Sixth suspect held in Udaipur beheading case", "Justice H P Sandesh puts transfer threat on record", "31 of 88 deaths in custody in 2021 were suicides/says NCRB data", "4 kids killed in MP as school van smashes into truck", "India set to cross 200 crore mark in Covid vaccine doses", "Central Vista: DEC Infrastructure emerges as lowest bidder for construction of Executive Enclave", "Route to Bordeaux via \u2018beans thoran\u2019 and \u2018butter chicken'", "Come September/transGenders to get Ayushman Bhrat TG card to access 'composite Educational package'", "Ready for encounter/will be five steps ahead of UP': Karnataka minister on BJP youth leader's murder", "After 5 years of work/govt retracts data protection bill", "75 science hubs for SCs/STs to be set up 
across India", "British Educational system meant to create servant class: PM Modi", "Legislators of Uddhav Thackeray -led Sena meet Maharashtra governor/raise law and order/farmer issues", "Process is the punishment in our criminal justice system: CJI", "TRS has conspiRed to trigger communal clashes to divert attention from allegations against KCR's family/claims Telangana BJP chief", "KCR in Bihar to woo Nitish Kumar but yet to endorse him as PM face", "Recent years have been great for sports: PM Narendra Modi on Sports Day", "J&K govt sacks 4 employees including son of Hizbul chief Salahuddin", "Educationalcare & spiritual are closely linked in India: PM Modi", "Karnataka hijab ban: Supreme Court reserves verdict on pleas challenging High Court judgement", "PIL against freebies: Need to strike balance between 'economy losing money' and welfare measures/says Supreme Court", "Bulldozers raze UP madrassa after plaints", "Northeast Diary: How to end interstate border rows", "5 reasons Why Ashok Gehlot as Congress president may help party", "Teesta Setalvad files bail plea/court seeks Gujarat govt's reply", "Telangana CM KCR meets Nitish Kumar & Tejashwi Yadav in Patna", "NDA govt should come forward for debate on GST hike/price rise in Parliament: KT Rama Rao", "Pakistan breaches truce along border/fires at BSF patrol party", "Don't agree with governor's remarks on Mumbai as Marathi people contributed to its growth: Maharashtra CM", "NTCA revokes order for Cheetah Task force; asks to monitor hunting skills and adaptation", "UPI transactions hit all-time high of Rs 10.7 lakh crore in August", "India slams China for shielding Pakistani terrorists", "Amulya Handhique: Stern satyagrahi who delivered secret messages of Non-Violence Movement", "CWC to meet on August 28 amid buzz of Ashok Gehlot getting top job", "Jharkhand crisis: Hemant Soren sends 33 MLAs to Raipur to keep BJP at bay", "Sharad Pawar renews pitch for opposition un for 2024 Lok Sabha polls", "LOC must end 
on arrest of accused/HC tells agencies", "Jawan injured in accidental blast in Poonch dies", "BJP 'desperately attacking' judiciary/alleges Congress leader Abhishek Singhvi", "Savarkar on Rahul's yatra banner/Congress draws taunts", "Kolkata professor asked to quit/pay \u20b999cr for swimsuit post", "US has very close Educational relationship with India: Pentagon", "Can NSA be invoked against hoarders? SC seeks government view", "Girl held under UAPA writes exam from jail", "The Architect of the New BJP': New book focuses on PM Modi's organisational skills", "UP govt to conduct survey of unrecognized madrassas across state; Owaisi slams decision", "1993 Mumbai blasts: SC says Centre bound to release Abu Salem on completion of sentence", "Sonia Gandhi to travel abroad for medical check-ups; Rahul/Priyanka to accompany her", "Influencers will soon need to post disclaimers", "BJP-RSS have deep links with PFI: Congress", "Bihar Assembly building centenary pillar built at cost of about Rs 3 crore: Official", "Recent years have been great for sports: PM Narendra Modi on Sports Day", "Try creating \u2018Akhand Bharat\u2019/Himanta tells Rahul Gandhi; Baghel takes RSS dig at him", "Telangana chief minister KCR: Foreign hand behind cloudburst", "Nitish Kumar to visit Delhi from September 5/likely to meet opposition leaders", "Corbevax gets nod as booster after Covishield/Covaxin shots", "Accreditation a must for higher Educational institutes to get govt aid now", "Maharashtra growth engine of country: CM Shinde tells Amit Shah", "It is in mutual interest of India and China to find a way to accommodate each other: Jaishankar", "Nationwide NIA-ED crackdown on PFI: Searches conducted in 5 UP districts/1 arrested", "Madhya Pradesh man gives supari to bahu to kill his wife", "Two terrorists killed during infiltration bid along LoC", "Chandigarh Univers 'leaked video' row: All you need to know", "Congress president\u2019s election mired in controversy as leaders question 
voters\u2019 list", "Arunachal beef signboard order kept in abeyance", "Article 370 did not obstruct Jammu & Kashmir's development: Ghulam Nabi Azad", "BSF fires at flying object along border in Jammu", "SC to hear six pleas challenging provisions of Places of Worship (Special Provisions) Act", "US 'Memorial Wall' to celebrate 90th anniversary of JRD Tata's historic flight", "Covid 19: India logs 18/930 new cases/35 deaths; active cases increase to 1/19/457", "SIT cites 90 witnesses to back charges against Teesta Setalvad/", "Don't rest till realise the dreams: Nitin Gadkari", "50 J&K Congress leaders quit party in support of Ghulam Nabi Azad", "Don't drag Sonia Gandhi in this': Adhir Chowdhury says will apologise to President 'Educationally' over 'Rashtrapatni' remark", "Teacher pays it forward/donates land for school", "Centre releases over Rs 324 crore as advance share for Assam floods", "Amid tension in Bengaluru/SC says no Ganesh puja at idgah", "PM Modi will be in Japan on September 27 for Shinzo Abe funeral/meet Fumio Kishida", "2 recite Chalisa at UP mall/held", "Army fires at Pakistan drone/forces it to retreat", "Heavy rains in Kerala: 18 deaths till date/thousands displaced/many properties damaged", "Masked vandals damage Punjab church; ISI role suspected", "India refutes speculative media reports on sending troops to Sri Lanka: Indian mission", "Assam cops in gunfight with Myanmar-bound Ulfa (I) rebels", "PM Modi thanks Gujarat for 'affection'/highlights bits from tour", "In paradox/skipping Covid test may help get jabs early", "India reports 15/815 new Covid cases and 68 deaths in last 24 hours", "Sharad Pawar renews pitch for opposition un for 2024 Lok Sabha polls", "Northeast Diary: Myanmar airstrikes push more refugees to India", "Rahul Gandhi resumes Bharat Jodo Yatra from Kerala's Madavana/pays tributes to Sree Narayana Guru", "Have kin of 168 missing in 1992-93 Mumbai riots been given payout: SC", "Opposition pits former minister & governor Alva 
against NDA nominee Dhankhar", "SC issues notice to Gujarat govt on Teesta bail plea", "Karnataka hijab ban case: Supreme Court issues notice to Karnataka govt; next hearing on September 5", "Murmu vs. Sinha: India set to elect its 15th President on Monday", " Conference to contest all 90 assembly seats alone in J&K", "BJP has shown its true colours: Shiv Sena on Sushil Kumar Modi's 'break' remarks", "Doctor stitches back woman after C-sec as fetus found premature", "Elgar Parishad case: Accused activist claims NIA intercepted emails without proper authorisation", "If person from Hindi-speaking area wants to be president/he must contest poll: Shashi Tharoor", "Mistakes can be rectified'/says Mamata as Mahua Moitra faces flak over 'Kali' remark", "PM Modi to visit Kerala and Karnataka on September 1-2", "India/France agree to expand cooperation to deal with challenges in Indo-Pacific", "In Telangana/its Amit Shah's Liberation Day vs. KCR's Un Day", "Punjab: AAP MP Raghav Chadha appointed chairman of advisory panel", "RSS chief Mohan Bhagwat visits mosque in Delhi/meets Muslim intellectuals: 5 points", "Ashok Gehlot meets Sonia after hinting he could enter Congress presidential poll fray", "Mining case: BJP leader seeks Jharkhand CM's removal after ED points to Hemant Soren aide's 'clout'", "Manipur has become 'JD(U)-free'; BJP will 'very soon' break JD(U)-RJD alliance in Bihar: Sushil Modi", "When people are moved from here to there': Nitish Kumar responds to PM Modi's charge of polarisation to save the corrupt", "Reply in 10 days on ED chief tenure extension: SC to govt", "Row over Adhir\u2019s President remark leads to Sonia-Irani face-off", "Need to balance welfare measures & economy\u2019s Educational: Supreme Court on freebies", "Netas biggest risk to cheetahs/stop them/Modi tells volunteers", "CUET Day 1: NTA cancels exam at 11 centres as they fail \u2018mock test\u2019", "Mission 2024: With 3-tier UP plan/BJP eyes big gains in Lok Sabha polls", "PM greets 
teachers/pays homage to ex-President Radhakrishnan on Teachers' Day", "In a first/PMLA court discharges two following Supreme Court\u2019s pRedicate offence order", "AIBA urges CJI to reject pleas seeking expunging of SC judges' remarks against Nupur Sharma", "PM should address nation to spell out reasons for him not taking on China on border row: Rahul", "Bhupinder Hooda/Anand Sharma & Prithviraj Chavan meet Ghulam Nabi Azad", "Now people jabbed abroad can take remaining Covid vaccine doses in India: Government", "Sworn in as CM for eighth time/Nitish Kumar tells BJP to 'worry' about 2024 Lok Sabha polls", "India succesSounds Factualully test-fires VL-SRSAM", "No alliance with Samajwadi Party ever in future: Shivpal Yadav", "Central Secretariat Service officers start 'Jansewa Abhiyan' to focus on Educational grievances/pendency", "Eknath Shinde government re-approves renaming of Aurangabad and Osmanabad", "Amit Shah marks Hyderabad Liberation Day/lashes out at vote-bank politics", "Modi inaugurates/lays foundation stones for projects worth over Rs 1/700 crore in Varanasi", "Trade with India up despite curbs: Russia", "Educationalcare/Educational never classified as freebies: Finance minister Nirmala Sitharaman", "Indian Army chief reaches Nepal amid controversy over Agnipath", "Farooq Abdullah to call all-party meet in September on J&K voting rights to \u00e2\u20ac\u02dcnon-natives'", "I-Day vigil: Inter-state secur review by J&K/Punjab/HP officials", "Monkeypox symptoms now different from those of earlier outbreaks: Study", "India without clinical trial registry since July 1", "Army plans to do away with Raj-era customs/honours", "Desi qHPV vaccine against cervical cancer to be launched today", "2 LeT terrorists neutralised in encounter in J-K's Shopian", "Snooping/threatening/stealing foundations of 'Amritkaal' promised by PM Modi: Rahul Gandhi", "How the jackfruit got its flour power #TheTimesofaBetterIndia", "After 6 months/c Covid cases cross 2k mark; 5 die 
in a day", "JP Nadda lauds PM Modi's leadership for administering 200 crore Covid vaccines", "In rare move/SC bench order criticises CJI\u2019s listing system", "Meet the YouCuber who gives Rubik\u2019s his own twisty spin", "By 2030/all app-based cabs in Delhi may be EVs", "NHAI mulling cut in upfront payment to highway builders by half during construction to roll out more projects", "\u2018No lions/tigers/giraffes; cheetahs to rule Kuno\u2019", "Road to India becoming \u2018Vishwaguru' goes through J&K/says PDP chief Mehbooba", "60 of 67 Shiv Sena ex-corporators from Thane side with Shinde", "Record rains turn bengaluru into lake", "Now Uddhav Thackeray-led 14 Sena MLAs challenge disqualification notice in SC", "Special prosecutor in 2021 cruise drugs bust case resigns", "Freebies/welfare schemes different: Supreme Court", "Remote e-classes now from Jamtara/Mewat king cons", "Congress netas are behaving like Gandhis\u2019 slaves: Anurag Thakur", "A look at mass MP suspensions over the years", "Pak \u2018Colonel\u2019 gave Rs 30k for attack/says terrorist captuRed along LoC", "Man dies before Supreme Court could decide on minor son\u2019s wish to donate liver", "Earthquake-hit Gujarat faced plots to defame it/stop investment/says PM Modi", "Mamata home intruder has Bangladesh links", "SC seeks UP govt response on Kappan bail plea/journo says has no PFI link", "Opposition parties to field joint candidate for Vice President\u2019s post", "Shinde's MLAs can avoid disqualification only by merging with another party: Thackeray faction to Supreme Court", "Cyrus Mistry made significant contribution to India's growth story: Rahul Gandhi", "Rajasthan Congress passes resolution seeking Rahul Gandhi at party helm", "\u2018Stunning Educational rebuke\u2019: US media hails PM remarks to Putin on Ukraine", "Junk Bihar deputy CM\u2019s bail in IRCTC case: CBI to court", "Two terrorists killed in Anantnag encounter", "EWS gives quota to bourgeois/against statute: Petitioners", 
"Will run for post of Congress chief; no one from Gandhi family to contest polls/says Ashok Gehlot", "Gone in 12 seconds: Noida twin towers rest in pieces", "Family objects to defecation in open/two killed", "Nadda appeals to opposition parties to support 'kisan putra' Dhankhar", "Tejashwi Yadav had threatened CBI/Nanand Rai: Bihar BJP chief", "Twitter non-compliant by habit: Centre to Karnataka HC", "PM Modi meets Vice President Jagdeep Dhankhar", "Scientific storage must to reduce grain loss", "Good rainfall in northwest India reduces paddy acreage deficit", "Tainted' Bihar minister resigns hours after CM Nitish Kumar changes his portfolio", "Governor revokes assent for Punjab trust vote session", "Maharashtra cabinet expansion likely after Prez polls/hints Shiv Sena's rebel faction", "Expose BJP attempts to topple Kejriwal government: NCP", "Avoid sale of seat belt alarm stopper: CCPA to e-tailers", "Antibodies that can lead to vaccination for all Covid strains found", "Cop shot dead by \u2018liquor mafia\u2019 in Bihar\u2019s Siwan", "Government agrees to hold price rise debate in Parliament next week/opposition sees victory of un", "Incidents happen/where do they not occur? 
says Jharkhand CM on death of minor girl in Dumka", "BJP\u2019s Khushbu seeks justice for Bilkis Bano", "Govt focus on Vishwakarma Puja also a salute to workers", "Two terrorists killed in Pulwama encounter", "Biden to host Pacific Island leaders as China courts region", "Bigger platform of secular forces needed to counter challenges posed by BJP: Yechury", "Warrant against Mehbooba sister in 1989 kidnap case", "Give guidelines for freebies till law made/Centre tells Supreme Court", "Gorakhpur infamous for mosquitoes and mafia now known for development/says CM Adanath", "Cong walks out of Rajya Sabha alleging barricading of residences of Sonia/Rahul Gandhi", "Namsai Declaration signed between Assam and Arunachal; effort on to resolve border dispute by 2022", "India proving economy/ecology not conflicting fields: PM Modi", "SC seeks response of Centre/CVC on pleas challenging extension of tenure of ED chief", "Teesta bypassed HC/mustn't get special treatment: Gujarat to SC", "Bihar CM Nitish Kumar to seek trust vote in assembly on August 24", "\u2018Government wants Big Tech to pay news outlets for content\u2019", "Supreme Court to hear plea seeking nod to worship \u2018Shivling' in Gyanvapi premises", "Fighting malnutrition with millet: Chandigarh & Odisha show the way", "Amarnath yatra resumes/35 missing pilgrims from Andhra found safe", "Rahul Gandhi talks opposition un as he flags off Bharat yatra", "Congress Lok Sabha MPs to hold meeting in Parliament", "IMD warns of heavy rains in several states/9 killed in Maharashtra", "Flag Code tweak to cut Tricolour price/help govt's 'Har Ghar Tiranga' campaign", "You are creating history': PM Modi to 'Shramjeevis' building new Parliament", "Bleak winter looms as farmers say no to bio-decomposer use", "Delhi's sole abattoir gets nod to open after over a month", "FIR registered against Medha Patkar/11 for misusing funds", "China criticises PM Modi/Blinken for greeting Dalai Lama on his 87th birthday", "Tejashwi Yadav 
criticizes BJP for speaking 'jumla jhoot' to Educational", "India says it will protect its interests as Chinese boat heads to Sri Lanka", "Ansari asserts he never knew or invited Pak journalist Nusrat Mirza to any conference", "Supreme Court rejects PIL seeking fresh probe into Rafale deal", "Centre flayed for publishing draft policy on disabilities only in Hindi and English", "President Murmu reaches London to attend funeral of Queen Elizabeth II", "At CWC meet/Anand Sharma flags lack of polls at party\u2019s lower rungs", "At 1st convocation of ITI/PM announces 5/000 new \u2018skill hubs\u2019", "At 1.5L cases/16.2% rise in Educationals against kids: Report", "Billionaires in boats as flooded houses in elite Bengaluru local lose light & water", "11% jump in no of trucks opting for permit over 2019", "Bengal govt has crossed all levels of brutal: BJP", "Former NITI Aayog CEO Amitabh Kant to become India's G20 Sherpa: Sources", "Keep signboards beef-free: Eateries in Arunachal told", "Rebel Sena MLAs left for Surat without luggage/got new clothes in hotel'", "In 10 days/2.5 crore voters gave Aadhaar information voluntarily to EC", "BJP leader found hanging from tree in Jammu and Kashmir's Kathua"]
|
Deception/Code/Multitask_Learning/data/data_ytrain.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Deception/Code/Multitask_Learning/data/data_yval.csv
ADDED
|
@@ -0,0 +1,251 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
ordered_list_1,ordered_list_3,ordered_list_4,Intent Of Lie (Gaining Advantage/Gaining Esteem/Avoiding Punishment/Avoiding Embarrassment/Protecting Themselves),ordered_list_7
|
| 2 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[0, 0, 1, 0, 0, 0, 0]"
|
| 3 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 4 |
+
"[0, 0, 0, 0, 1]","[1, 0, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 5 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 1, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 6 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 1]","[0, 0, 0, 1, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 7 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 8 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 0, 1]",Nan4,"[1, 0, 0, 0, 0, 0, 0]"
|
| 9 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 10 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 1, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 11 |
+
"[0, 1, 0, 0, 0]","[0, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 12 |
+
"[1, 0, 0, 1, 0]","[1, 0, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Nan4,"[0, 0, 0, 0, 0, 0, 1]"
|
| 13 |
+
"[0, 1, 1, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 14 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 15 |
+
"[0, 1, 0, 0, 0]","[0, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 16 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 17 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 18 |
+
"[0, 0, 1, 0, 0]","[0, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Protecting Themselves,"[0, 0, 0, 0, 0, 0, 1]"
|
| 19 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Nan4,"[1, 0, 0, 0, 0, 0, 0]"
|
| 20 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 1]","[0, 0, 0, 1, 0]",Gaining Advantage,"[0, 1, 0, 0, 0, 0, 0]"
|
| 21 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 22 |
+
"[1, 0, 0, 0, 0]","[0, 1, 0, 0, 0, 1, 1]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 0, 0, 0, 0, 1]"
|
| 23 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 24 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 25 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 26 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[0, 0, 0, 0, 0, 0, 1]"
|
| 27 |
+
"[1, 0, 0, 0, 0]","[1, 0, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Protecting Themselves,"[0, 0, 0, 0, 0, 0, 1]"
|
| 28 |
+
"[0, 0, 0, 1, 0]","[1, 1, 1, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 29 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 30 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Esteem,"[0, 0, 0, 0, 0, 0, 1]"
|
| 31 |
+
"[0, 1, 0, 0, 0]","[0, 1, 1, 0, 0, 0, 0]","[0, 1, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 32 |
+
"[0, 0, 0, 0, 1]","[1, 0, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 33 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 34 |
+
"[1, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 35 |
+
"[0, 0, 0, 1, 0]","[1, 0, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 36 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 37 |
+
"[0, 0, 0, 0, 1]","[0, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[0, 1, 0, 0, 0, 0, 0]"
|
| 38 |
+
"[1, 0, 0, 0, 0]","[0, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 39 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 1, 0, 0]",Protecting Themselves,"[0, 1, 0, 0, 0, 0, 0]"
|
| 40 |
+
"[0, 1, 0, 0, 0]","[0, 1, 0, 0, 0, 0, 0]","[0, 0, 1, 0, 0]",Protecting Others,"[0, 0, 0, 0, 0, 0, 1]"
|
| 41 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 1, 0, 0]",Gaining Advantage,"[0, 1, 0, 0, 0, 0, 0]"
|
| 42 |
+
"[0, 1, 0, 0, 0]","[0, 1, 1, 0, 0, 0, 0]","[0, 0, 1, 0, 0]",Protecting Others,"[0, 0, 0, 0, 0, 0, 1]"
|
| 43 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 44 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 45 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 1, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 46 |
+
"[0, 1, 0, 0, 0]","[1, 0, 0, 0, 0, 1, 0]","[0, 1, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 47 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Avoiding Punishment,"[0, 0, 0, 0, 0, 0, 1]"
|
| 48 |
+
"[0, 0, 1, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 49 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Nan4,"[1, 0, 0, 0, 0, 0, 0]"
|
| 50 |
+
"[1, 0, 0, 0, 0]","[0, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[0, 0, 0, 0, 0, 0, 1]"
|
| 51 |
+
"[0, 0, 1, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 1, 0, 0, 0]",Avoiding Embarrassment,"[1, 0, 0, 0, 0, 0, 0]"
|
| 52 |
+
"[0, 0, 0, 1, 0]","[0, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Esteem,"[0, 0, 0, 0, 0, 0, 1]"
|
| 53 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 54 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 1, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 55 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 1, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 56 |
+
"[0, 1, 0, 0, 0]","[1, 1, 1, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Avoiding Embarrassment,"[1, 0, 0, 0, 0, 0, 0]"
|
| 57 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 58 |
+
"[0, 1, 0, 0, 0]","[0, 1, 1, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 59 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 60 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 0, 0, 0, 0, 1]"
|
| 61 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 1, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 62 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 63 |
+
"[1, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 64 |
+
"[1, 0, 0, 0, 0]","[0, 1, 0, 0, 0, 0, 1]","[0, 0, 0, 1, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 65 |
+
"[0, 0, 1, 1, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Gaining Esteem,"[0, 1, 0, 0, 0, 0, 0]"
|
| 66 |
+
"[1, 0, 0, 0, 0]","[0, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 67 |
+
"[0, 0, 0, 0, 1]","[1, 0, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 68 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 69 |
+
"[0, 0, 1, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 70 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 71 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 72 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 1, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 73 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 74 |
+
"[0, 0, 0, 0, 1]","[0, 1, 0, 0, 0, 1, 1]","[1, 0, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 75 |
+
"[0, 0, 0, 1, 0]","[1, 0, 0, 0, 0, 1, 0]","[0, 0, 1, 0, 0]",Gaining Esteem,"[0, 0, 1, 0, 0, 0, 0]"
|
| 76 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 1, 0, 0]",Protecting Themselves,"[0, 0, 0, 0, 0, 0, 1]"
|
| 77 |
+
"[0, 0, 0, 0, 1]","[0, 1, 1, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 78 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Protecting Oneself,"[1, 0, 0, 0, 0, 0, 0]"
|
| 79 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 80 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 81 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 82 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 1]","[1, 0, 0, 0, 0]",Protecting Themselves,"[0, 0, 0, 0, 0, 0, 1]"
|
| 83 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 1, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 84 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 1, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 85 |
+
"[1, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 86 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 87 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 0, 1]",Nan4,"[1, 0, 0, 0, 0, 0, 0]"
|
| 88 |
+
"[0, 0, 0, 0, 1]","[1, 1, 1, 0, 0, 1, 0]","[0, 0, 1, 0, 0]",Gaining Esteem,"[0, 0, 0, 0, 0, 0, 1]"
|
| 89 |
+
"[1, 0, 0, 0, 0]","[1, 0, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 90 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 91 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 1, 0, 0]",Protecting Themselves,"[0, 0, 0, 0, 0, 0, 1]"
|
| 92 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 0, 1]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 93 |
+
"[0, 0, 0, 1, 0]","[1, 0, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 94 |
+
"[0, 0, 1, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[0, 0, 0, 0, 0, 0, 1]"
|
| 95 |
+
"[0, 0, 0, 1, 0]","[1, 0, 0, 0, 0, 0, 1]","[0, 0, 0, 1, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 96 |
+
"[0, 0, 0, 0, 1]","[1, 0, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Protecting Oneself,"[1, 0, 0, 0, 0, 0, 0]"
|
| 97 |
+
"[0, 0, 0, 1, 0]","[0, 0, 0, 0, 0, 0, 1]","[0, 0, 0, 1, 0]",Protecting Oneself,"[1, 0, 0, 0, 0, 0, 0]"
|
| 98 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 99 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 100 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 101 |
+
"[0, 0, 1, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[0, 1, 0, 0, 0, 0, 0]"
|
| 102 |
+
"[0, 0, 0, 1, 0]","[1, 1, 1, 0, 0, 0, 1]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 103 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 1]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 104 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 105 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[0, 0, 0, 0, 0, 0, 1]"
|
| 106 |
+
"[0, 0, 0, 1, 0]","[1, 0, 0, 0, 0, 0, 1]","[0, 0, 0, 1, 0]",Gaining Esteem,"[0, 0, 0, 0, 0, 0, 1]"
|
| 107 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Nan4,"[1, 0, 0, 0, 0, 0, 0]"
|
| 108 |
+
"[0, 0, 0, 0, 1]","[1, 1, 1, 0, 0, 0, 0]","[0, 1, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 109 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 110 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 0, 1]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 111 |
+
"[0, 0, 0, 1, 0]","[1, 0, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Nan4,"[1, 0, 0, 0, 0, 0, 0]"
|
| 112 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 113 |
+
"[0, 0, 0, 0, 1]","[1, 0, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[0, 1, 0, 0, 0, 0, 0]"
|
| 114 |
+
"[1, 0, 0, 0, 0]","[1, 0, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 115 |
+
"[0, 0, 0, 0, 1]","[1, 0, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 116 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 1, 0, 0, 0]",Defaming Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 117 |
+
"[1, 0, 0, 0, 0]","[1, 0, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 0, 0, 0, 0, 1]"
|
| 118 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 1, 0, 0, 0, 0, 0]"
|
| 119 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 1, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 120 |
+
"[0, 0, 0, 0, 1]","[1, 1, 1, 0, 0, 0, 0]","[0, 0, 1, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 121 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 0, 1]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 122 |
+
"[0, 0, 0, 1, 0]","[1, 1, 1, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 123 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 1]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 124 |
+
"[1, 0, 0, 0, 0]","[1, 0, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 125 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Protecting Oneself,"[0, 1, 0, 0, 0, 0, 0]"
|
| 126 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 127 |
+
"[0, 0, 0, 0, 1]","[1, 0, 0, 0, 0, 1, 1]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 128 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 129 |
+
"[0, 0, 0, 1, 0]","[1, 0, 1, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Avoiding Embarrassment,"[1, 0, 0, 0, 0, 0, 0]"
|
| 130 |
+
"[1, 0, 0, 0, 0]","[0, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 131 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 1]","[0, 0, 0, 0, 1]",Nan4,"[1, 0, 0, 0, 0, 0, 0]"
|
| 132 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 1, 0, 0]",Protecting Others,"[0, 0, 0, 0, 0, 0, 1]"
|
| 133 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 134 |
+
"[1, 0, 0, 0, 0]","[1, 0, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[0, 0, 0, 0, 0, 0, 1]"
|
| 135 |
+
"[0, 0, 0, 0, 1]","[0, 1, 0, 0, 0, 0, 0]","[0, 0, 1, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 136 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 137 |
+
"[1, 0, 0, 0, 0]","[0, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 138 |
+
"[1, 0, 0, 0, 0]","[0, 1, 0, 0, 0, 0, 1]","[0, 0, 0, 0, 1]",Nan4,"[1, 0, 0, 0, 0, 0, 0]"
|
| 139 |
+
"[0, 0, 1, 0, 0]","[0, 1, 0, 0, 0, 0, 0]","[0, 1, 0, 0, 0]",Avoiding Embarrassment,"[1, 0, 0, 0, 0, 0, 0]"
|
| 140 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 141 |
+
"[1, 0, 0, 0, 0]","[1, 0, 0, 0, 0, 1, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 142 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 143 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 144 |
+
"[0, 0, 1, 0, 0]","[0, 1, 0, 0, 0, 0, 0]","[0, 1, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 145 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 146 |
+
"[0, 0, 1, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 147 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 1, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 148 |
+
"[0, 0, 0, 1, 0]","[1, 1, 1, 0, 0, 0, 0]","[0, 0, 1, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 149 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 150 |
+
"[0, 0, 0, 0, 1]","[1, 0, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 151 |
+
"[0, 0, 1, 0, 0]","[1, 0, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[0, 0, 0, 0, 0, 0, 1]"
|
| 152 |
+
"[1, 0, 0, 0, 0]","[0, 0, 0, 0, 0, 1, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 153 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 0, 0, 0, 0, 1]"
|
| 154 |
+
"[0, 1, 0, 0, 0]","[0, 1, 0, 0, 0, 1, 1]","[0, 0, 0, 1, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 155 |
+
"[1, 0, 0, 0, 0]","[0, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 156 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 157 |
+
"[0, 0, 0, 1, 0]","[0, 1, 0, 0, 0, 0, 0]","[0, 1, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 158 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 159 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 160 |
+
"[0, 1, 0, 0, 0]","[1, 1, 1, 0, 0, 1, 1]","[0, 0, 0, 1, 0]",Gaining Esteem,"[0, 0, 0, 0, 0, 0, 1]"
|
| 161 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 162 |
+
"[0, 0, 1, 0, 0]","[1, 0, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 163 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 164 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 165 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 166 |
+
"[0, 0, 0, 0, 1]","[1, 0, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Protect Themselves,"[0, 0, 0, 0, 0, 0, 1]"
|
| 167 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 1, 0, 0]",Protecting Themselves,"[0, 0, 0, 0, 0, 0, 1]"
|
| 168 |
+
"[0, 0, 1, 0, 0]","[0, 1, 0, 0, 0, 0, 0]","[0, 0, 1, 0, 0]",Gaining Advantage,"[0, 0, 0, 0, 0, 1, 0]"
|
| 169 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 170 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 1, 0, 0, 0]",Protecting Themselves,"[0, 0, 0, 0, 0, 0, 1]"
|
| 171 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[1, 0, 0, 0, 0, 0, 0]"
|
| 172 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 173 |
+
"[1, 0, 0, 0, 0]","[1, 0, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 174 |
+
"[0, 0, 0, 1, 0]","[0, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[0, 0, 0, 0, 0, 0, 1]"
|
| 175 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 176 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 177 |
+
"[1, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[0, 0, 0, 0, 0, 0, 1]"
|
| 178 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 179 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 180 |
+
"[0, 0, 0, 0, 1]","[0, 1, 0, 0, 0, 0, 1]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 181 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 182 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 183 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 1, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 184 |
+
"[0, 1, 0, 0, 0]","[1, 0, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 185 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 1, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 186 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 1, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 187 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[0, 0, 0, 0, 0, 0, 1]"
|
| 188 |
+
"[0, 0, 0, 0, 1]","[1, 1, 1, 0, 0, 0, 0]","[0, 1, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 189 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 1, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 190 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 191 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Nan4,"[1, 0, 0, 0, 0, 0, 0]"
|
| 192 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 1, 0, 0]",Protecting Themselves,"[0, 0, 0, 0, 0, 0, 1]"
|
| 193 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 194 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[0, 0, 0, 0, 0, 0, 1]"
|
| 195 |
+
"[0, 0, 1, 0, 0]","[0, 1, 0, 0, 0, 1, 0]","[0, 1, 0, 0, 0]",Avoiding Embarrassment,"[1, 0, 0, 0, 0, 0, 0]"
|
| 196 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 197 |
+
"[0, 0, 1, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[0, 0, 0, 0, 0, 0, 1]"
|
| 198 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 199 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 1, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 200 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 1]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 201 |
+
"[0, 0, 0, 0, 1]","[0, 1, 0, 0, 0, 0, 1]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 202 |
+
"[0, 1, 0, 0, 0]","[1, 1, 1, 0, 0, 1, 0]","[0, 0, 0, 0, 1]",Nan4,"[1, 0, 0, 0, 0, 0, 0]"
|
| 203 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 204 |
+
"[0, 0, 0, 0, 1]","[0, 0, 0, 0, 0, 1, 0]","[0, 0, 1, 0, 0]",Protecting Others,"[0, 0, 0, 0, 0, 0, 1]"
|
| 205 |
+
"[0, 0, 0, 0, 1]","[1, 1, 1, 0, 0, 0, 0]","[0, 1, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 206 |
+
"[1, 0, 0, 0, 0]","[0, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[1, 0, 0, 0, 0, 0, 0]"
|
| 207 |
+
"[1, 0, 0, 0, 0]","[1, 0, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[1, 0, 0, 0, 0, 0, 1]"
|
| 208 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 209 |
+
"[0, 1, 0, 0, 0]","[1, 0, 0, 0, 0, 1, 1]","[0, 0, 1, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 210 |
+
"[1, 0, 0, 0, 0]","[1, 0, 0, 0, 0, 0, 1]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 211 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 1, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 212 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 213 |
+
"[1, 0, 0, 0, 0]","[1, 0, 0, 0, 0, 0, 1]","[1, 0, 0, 0, 0]",Protecting Others,"[1, 0, 0, 0, 0, 0, 0]"
|
| 214 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[1, 0, 0, 0, 0, 0, 0]"
|
| 215 |
+
"[0, 1, 0, 0, 0]","[1, 1, 1, 0, 0, 0, 0]","[0, 0, 1, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 216 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 1, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 217 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 218 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 1, 0, 0]",Protecting Oneself,"[1, 0, 0, 0, 0, 0, 0]"
|
| 219 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 220 |
+
"[0, 0, 0, 1, 0]","[1, 0, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 221 |
+
"[0, 0, 0, 0, 1]","[0, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Nan4,"[1, 0, 0, 0, 0, 0, 0]"
|
| 222 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 1, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 223 |
+
"[0, 0, 0, 0, 1]","[1, 0, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Nan4,"[0, 1, 0, 0, 0, 0, 0]"
|
| 224 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 225 |
+
"[1, 0, 0, 0, 0]","[1, 0, 0, 0, 0, 0, 1]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 226 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 227 |
+
"[1, 0, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[1, 0, 0, 0, 0, 0, 0]"
|
| 228 |
+
"[1, 0, 0, 0, 0]","[1, 0, 0, 0, 0, 1, 1]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 229 |
+
"[0, 0, 0, 0, 1]","[0, 1, 1, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 230 |
+
"[0, 1, 0, 0, 0]","[1, 0, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 231 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[0, 0, 0, 0, 0, 0, 1]"
|
| 232 |
+
"[0, 0, 0, 0, 1]","[1, 0, 0, 0, 0, 0, 1]","[0, 0, 1, 0, 0]",Gaining Advantage,"[0, 0, 0, 0, 0, 0, 1]"
|
| 233 |
+
"[1, 0, 0, 0, 0]","[1, 0, 0, 0, 0, 1, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 234 |
+
"[0, 0, 0, 1, 0]","[1, 0, 1, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Protecting Oneself,"[1, 0, 0, 0, 0, 0, 0]"
|
| 235 |
+
"[0, 1, 0, 0, 0]","[0, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 236 |
+
"[0, 0, 1, 0, 0]","[1, 0, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 237 |
+
"[0, 0, 1, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 238 |
+
"[1, 0, 0, 0, 0]","[0, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
| 239 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 240 |
+
"[0, 0, 0, 0, 1]","[1, 0, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 241 |
+
"[0, 1, 0, 0, 0]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 242 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 1, 0, 0]",Protecting Others,"[1, 0, 0, 0, 0, 0, 0]"
|
| 243 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Protecting Oneself,"[1, 0, 0, 0, 0, 0, 0]"
|
| 244 |
+
"[0, 0, 0, 0, 1]","[1, 0, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 245 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 0, 1, 0]",Gaining Esteem,"[0, 0, 0, 0, 0, 0, 1]"
|
| 246 |
+
"[0, 0, 0, 1, 0]","[1, 1, 0, 0, 0, 0, 0]","[1, 0, 0, 0, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 247 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[0, 0, 1, 0, 0]",Gaining Esteem,"[1, 0, 0, 0, 0, 0, 0]"
|
| 248 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 1, 0]","[1, 0, 0, 0, 0]",Protecting Themselves,"[1, 0, 0, 0, 0, 0, 0]"
|
| 249 |
+
"[0, 0, 0, 0, 1]","[1, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 1, 0]",Gaining Advantage,"[1, 0, 0, 0, 0, 0, 0]"
|
| 250 |
+
"[1, 0, 0, 0, 0]","[0, 1, 0, 0, 0, 0, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 0, 0, 0, 0, 1]"
|
| 251 |
+
"[0, 0, 0, 1, 0]","[1, 0, 0, 0, 0, 1, 0]","[0, 0, 0, 0, 1]",Nan4,"[0, 0, 1, 0, 0, 0, 0]"
|
Deception/Code/Multitask_Learning/dataloader.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# import all standard libraries
|
| 2 |
+
import os
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pandas as pd
|
| 5 |
+
import matplotlib.pyplot as plt
|
| 6 |
+
import pdb
|
| 7 |
+
from sklearn.model_selection import train_test_split
|
| 8 |
+
import json
|
| 9 |
+
|
| 10 |
+
# Load the raw annotation CSV. All column access below is positional via `l`,
# so the file's column order matters.
df = pd.read_csv('data.csv')
l = df.columns

# NOTE(review): the original line `df[l[1]] = df[l[1]].dropna()` was a no-op:
# assigning the shrunken Series back re-aligns on the DataFrame index, so the
# NaN rows reappear as NaN. Removed here; the fillna calls below are what
# actually handle the missing values.

# Replace missing values with a distinct placeholder string per column so the
# string-splitting below never sees NaN.
df[l[1]] = df[l[1]].fillna('Nan')
df[l[3]] = df[l[3]].fillna('Nan2')
df[l[4]] = df[l[4]].fillna('Nan3')
df[l[6]] = df[l[6]].fillna('Nan4')
df[l[7]] = df[l[7]].fillna('Nan5')

# Multi-label annotation columns: split each cell into its list of labels.
# Column l[3] is comma-separated; the others use '/'.
df[l[1]] = df[l[1]].str.split('/')
df[l[3]] = df[l[3]].str.split(',')
df[l[4]] = df[l[4]].str.split('/')
df[l[7]] = df[l[7]].str.split('/')

# Vocabulary (unique label list) per multi-label column. Sorted so the slot
# assigned to each label in the multi-hot vectors is deterministic across
# runs — a bare list(set(...)) ordering varies with Python's string-hash
# seed, which would make the saved encodings irreproducible.
unique_words_1 = sorted(set(word for row in df[l[1]] for word in row))
unique_words_3 = sorted(set(word for row in df[l[3]] for word in row))
unique_words_4 = sorted(set(word for row in df[l[4]] for word in row))
unique_words_7 = sorted(set(word for row in df[l[7]] for word in row))
|
| 33 |
+
|
| 34 |
+
def create_ordered_list(words, unique_words):
    """Return a multi-hot encoding of `words` over the vocabulary `unique_words`.

    Parameters
    ----------
    words : iterable of str
        Labels present for a single row.
    unique_words : sequence of str
        Full vocabulary; the output vector follows its order.

    Returns
    -------
    list[int]
        1 at position i if unique_words[i] occurs in `words`, else 0.
    """
    # Build the membership set once: O(1) lookups instead of an O(len(words))
    # list scan for every vocabulary word.
    present = set(words)
    return [1 if word in present else 0 for word in unique_words]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
# Multi-hot encode every multi-label column against its vocabulary.
# (Insertion order fixes the new columns at positions 8..11, which the
# positional selection below relies on.)
encoding_specs = (
    ('ordered_list_1', l[1], unique_words_1),
    ('ordered_list_3', l[3], unique_words_3),
    ('ordered_list_4', l[4], unique_words_4),
    ('ordered_list_7', l[7], unique_words_7),
)
for target_col, source_col, vocab in encoding_specs:
    # Bind `vocab` as a default argument so each lambda keeps its own vocabulary.
    df[target_col] = df[source_col].apply(
        lambda labels, vocab=vocab: create_ordered_list(labels, vocab))

# Persist the augmented frame for later inspection.
df.to_csv('new_data.csv', index=False)

# Refresh the column index — it now includes the encoded columns.
l = df.columns

# Keep only: text column, the four encoded label columns, and the raw intent column.
df = df[[l[0], l[8], l[9], l[10], l[6], l[11]]]

# Hold out 10% of the rows for validation (fixed seed for reproducibility).
X_train, X_val, y_train, y_val = train_test_split(
    df[l[0]], df.loc[:, df.columns != l[0]], test_size=0.1, random_state=42)

print(X_train.shape, y_train.shape, X_val.shape, y_val.shape)

# Write the split to disk: label frames as CSV, raw texts as JSON lists.
os.makedirs('data', exist_ok=True)
y_train.to_csv('data/data_ytrain.csv', index=False)
y_val.to_csv('data/data_yval.csv', index=False)

with open('data/data_Xtrain.json', 'w') as file:
    print(len(X_train.tolist()))
    json.dump(X_train.tolist(), file)

with open('data/data_Xval.json', 'w') as file:
    json.dump(X_val.tolist(), file)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
|
Deception/Code/Multitask_Learning/main.py
ADDED
|
@@ -0,0 +1,263 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import glob
|
| 3 |
+
import ast
|
| 4 |
+
import numpy as np
|
| 5 |
+
import json
|
| 6 |
+
import pandas as pd
|
| 7 |
+
import matplotlib.pyplot as plt
|
| 8 |
+
import pdb
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
from nltk.tokenize import WhitespaceTokenizer
|
| 11 |
+
from sklearn.preprocessing import LabelEncoder
|
| 12 |
+
from sklearn.metrics import recall_score, precision_score, accuracy_score
|
| 13 |
+
from natsort import natsorted
|
| 14 |
+
import warnings
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
import torch.nn as nn
|
| 18 |
+
import torch.nn.functional as F
|
| 19 |
+
import torch.optim as optim
|
| 20 |
+
from torch.utils.data import Dataset, DataLoader
|
| 21 |
+
from torch.utils.data import DataLoader, TensorDataset
|
| 22 |
+
from torch.nn.utils.rnn import pad_sequence
|
| 23 |
+
|
| 24 |
+
from Multilabel_task_head import MultiLabelTaskHead
|
| 25 |
+
from singlelabel_task_head import SingleLabelTaskHead
|
| 26 |
+
from base_network import base_network
|
| 27 |
+
from multi_task import MultiTaskModel
|
| 28 |
+
|
| 29 |
+
np.random.seed(45)
|
| 30 |
+
torch.manual_seed(45)
|
| 31 |
+
|
| 32 |
+
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
|
| 33 |
+
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
|
| 34 |
+
|
| 35 |
+
# Check if CUDA is available
|
| 36 |
+
if torch.cuda.is_available():
|
| 37 |
+
device = torch.device("cuda")
|
| 38 |
+
print("Using CUDA on", torch.cuda.get_device_name(device))
|
| 39 |
+
|
| 40 |
+
warnings.filterwarnings('ignore')
|
| 41 |
+
|
| 42 |
+
batch_size = 32
|
| 43 |
+
epoch = 1000
|
| 44 |
+
max_seq_length = 128
|
| 45 |
+
input_size = 128
|
| 46 |
+
# device = 'cpu'
|
| 47 |
+
|
| 48 |
+
# Load the data
|
| 49 |
+
with open('data/data_Xtrain.json', 'r') as file:
|
| 50 |
+
X_train = np.array(json.load(file))
|
| 51 |
+
with open('data/data_Xval.json', 'r') as file:
|
| 52 |
+
Xval = np.array(json.load(file))
|
| 53 |
+
|
| 54 |
+
y_train = pd.read_csv('data/data_ytrain.csv')
|
| 55 |
+
y_train_s = y_train['Intent Of Lie (Gaining Advantage/Gaining Esteem/Avoiding Punishment/Avoiding Embarrassment/Protecting Themselves)']
|
| 56 |
+
yval = pd.read_csv('data/data_yval.csv')
|
| 57 |
+
yval_s = yval['Intent Of Lie (Gaining Advantage/Gaining Esteem/Avoiding Punishment/Avoiding Embarrassment/Protecting Themselves)']
|
| 58 |
+
|
| 59 |
+
y_train_m = y_train[['ordered_list_1', 'ordered_list_3', 'ordered_list_4', 'ordered_list_7']].applymap(
|
| 60 |
+
lambda x: ast.literal_eval(x) if isinstance(x, str) else x)
|
| 61 |
+
y_val_m = yval[['ordered_list_1', 'ordered_list_3', 'ordered_list_4', 'ordered_list_7']].applymap(
|
| 62 |
+
lambda x: ast.literal_eval(x) if isinstance(x, str) else x)
|
| 63 |
+
|
| 64 |
+
print(type(y_train_m))
|
| 65 |
+
|
| 66 |
+
y_train_m1 = y_train_m['ordered_list_1'].apply(np.array).to_numpy()
|
| 67 |
+
y_val_m1 = y_val_m['ordered_list_1'].apply(np.array).to_numpy()
|
| 68 |
+
y_train_m3 = y_train_m['ordered_list_3'].apply(np.array).to_numpy()
|
| 69 |
+
y_val_m3 = y_val_m['ordered_list_3'].apply(np.array).to_numpy()
|
| 70 |
+
y_train_m4 = y_train_m['ordered_list_4'].apply(np.array).to_numpy()
|
| 71 |
+
y_val_m4 = y_val_m['ordered_list_4'].apply(np.array).to_numpy()
|
| 72 |
+
y_train_m7 = y_train_m['ordered_list_7'].apply(np.array).to_numpy()
|
| 73 |
+
y_val_m7 = y_val_m['ordered_list_7'].apply(np.array).to_numpy()
|
| 74 |
+
|
| 75 |
+
# Label Encoding of single label dataset.
|
| 76 |
+
le = LabelEncoder()
|
| 77 |
+
y_train_s = le.fit_transform(y_train_s)
|
| 78 |
+
yval_s = le.transform(yval_s)
|
| 79 |
+
y_train_s = np.array(y_train_s)
|
| 80 |
+
yval_s = np.array(yval_s)
|
| 81 |
+
|
| 82 |
+
# Tokenize and pad the data
|
| 83 |
+
tokenizer = WhitespaceTokenizer()
|
| 84 |
+
tokenized_sentences = [tokenizer.tokenize(
|
| 85 |
+
sentence)[:max_seq_length] for sentence in X_train]
|
| 86 |
+
tokenized_sentences_val = [tokenizer.tokenize(
|
| 87 |
+
sentence)[:max_seq_length] for sentence in Xval]
|
| 88 |
+
vocab = {token: i+1 for i,
|
| 89 |
+
token in enumerate(set(token for sent in tokenized_sentences for token in sent))}
|
| 90 |
+
indexed_sequences = [torch.tensor([vocab.get(token, 0) for token in sent] + [
|
| 91 |
+
0] * (max_seq_length - len(sent))) for sent in tokenized_sentences]
|
| 92 |
+
indexed_sequences_val = [torch.tensor([vocab.get(token, 0) for token in sent] + [
|
| 93 |
+
0] * (max_seq_length - len(sent))) for sent in tokenized_sentences_val]
|
| 94 |
+
padded_sequences = pad_sequence(
|
| 95 |
+
indexed_sequences, batch_first=True, padding_value=0)
|
| 96 |
+
pad_sequences_val = pad_sequence(
|
| 97 |
+
indexed_sequences_val, batch_first=True, padding_value=0)
|
| 98 |
+
|
| 99 |
+
# attention_mask = torch.where(padded_sequences != 0, torch.tensor(1), torch.tensor(0))
|
| 100 |
+
|
| 101 |
+
X_train = padded_sequences
|
| 102 |
+
Xval = pad_sequences_val
|
| 103 |
+
y_train_m1 = np.vstack(y_train_m1)
|
| 104 |
+
y_train_m3 = np.vstack(y_train_m3)
|
| 105 |
+
y_train_m4 = np.vstack(y_train_m4)
|
| 106 |
+
y_train_m7 = np.vstack(y_train_m7)
|
| 107 |
+
|
| 108 |
+
y_val_m1 = np.vstack(y_val_m1)
|
| 109 |
+
y_val_m3 = np.vstack(y_val_m3)
|
| 110 |
+
y_val_m4 = np.vstack(y_val_m4)
|
| 111 |
+
y_val_m7 = np.vstack(y_val_m7)
|
| 112 |
+
|
| 113 |
+
X_train, y_train_s, y_train_m1, y_train_m3, y_train_m4, y_train_m7 = torch.tensor(X_train).long().to(device), torch.tensor(y_train_s).long().to(device), torch.tensor(
|
| 114 |
+
y_train_m1).long().to(device), torch.tensor(y_train_m3).long().to(device), torch.tensor(y_train_m4).long().to(device), torch.tensor(y_train_m7).long().to(device)
|
| 115 |
+
|
| 116 |
+
Xval, yval_s, y_val_m1, y_val_m3, y_val_m4, y_val_m7 = torch.tensor(Xval).long().to(device), torch.tensor(yval_s).long().to(device), torch.tensor(
|
| 117 |
+
y_val_m1).long().to(device), torch.tensor(y_val_m3).long().to(device), torch.tensor(y_val_m4).long().to(device), torch.tensor(y_val_m7).long().to(device)
|
| 118 |
+
|
| 119 |
+
dataset_train=TensorDataset(
|
| 120 |
+
X_train, y_train_s, y_train_m1, y_train_m3, y_train_m4, y_train_m7)
|
| 121 |
+
dataloader_train=DataLoader(
|
| 122 |
+
dataset_train, batch_size=batch_size, shuffle=True)
|
| 123 |
+
|
| 124 |
+
dataset_val=TensorDataset(
|
| 125 |
+
Xval, yval_s, y_val_m1, y_val_m3, y_val_m4, y_val_m7)
|
| 126 |
+
dataloader_val=DataLoader(
|
| 127 |
+
dataset_val, batch_size=batch_size, shuffle=True)
|
| 128 |
+
|
| 129 |
+
task_heads=[SingleLabelTaskHead(input_size=128, output_size=10, device=device).to(device), MultiLabelTaskHead(input_size=128, output_size=5, device=device).to(device), MultiLabelTaskHead(
|
| 130 |
+
input_size=128, output_size=7, device=device).to(device), MultiLabelTaskHead(input_size=128, output_size=5, device=device).to(device), MultiLabelTaskHead(input_size=128, output_size=7, device=device).to(device)]
|
| 131 |
+
|
| 132 |
+
model=MultiTaskModel(base_network(input_size=7700+1, embedding_size=128,
|
| 133 |
+
hidden_size=64, num_layers=2, dropout=0.5, bidirectional=True, device=device), task_heads, device=device).to(device)
|
| 134 |
+
|
| 135 |
+
optimizer=optim.Adam(model.parameters(), lr=0.001)
|
| 136 |
+
loss_fn=nn.CrossEntropyLoss()
|
| 137 |
+
criterion_m=nn.BCEWithLogitsLoss()
|
| 138 |
+
|
| 139 |
+
def accuracy_multi(prediction, target):
    """Per-label accuracy for a batch of multi-label predictions.

    Each predicted probability is rounded to a hard 0/1 decision and
    compared element-wise against the binary target matrix; the match
    rate is averaged over the batch dimension.

    Args:
        prediction: float tensor, shape (batch, num_labels), values in [0, 1].
        target: tensor of the same shape holding 0/1 ground-truth labels.

    Returns:
        1-D float tensor of length num_labels with one accuracy per label.
    """
    hard_preds = torch.round(prediction)
    matches = (hard_preds == target).float()
    return matches.mean(dim=0)
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def recall_multi(prediction, target):
    """Per-label recall for a batch of multi-label predictions.

    Predictions are rounded to hard 0/1 decisions, then recall is
    computed independently per label column: tp / (tp + fn).

    Fix: the original divided by (tp + fn) directly, producing NaN for
    any label with no positive examples in the batch. The denominator
    is now clamped to 1; since tp is 0 whenever tp + fn is 0, such
    labels report a recall of 0 instead of NaN.

    Args:
        prediction: float tensor, shape (batch, num_labels), values in [0, 1].
        target: tensor of the same shape holding 0/1 ground-truth labels.

    Returns:
        1-D float tensor of length num_labels with one recall per label.
    """
    prediction = torch.round(prediction)

    tp = torch.sum(torch.logical_and(prediction == 1, target == 1), axis=0)
    fn = torch.sum(torch.logical_and(prediction == 0, target == 1), axis=0)

    # clamp(min=1) guards the 0/0 case; tp == 0 there, so recall is 0.
    recall = tp / (tp + fn).clamp(min=1)

    return recall
|
| 156 |
+
|
| 157 |
+
def precision_multi(prediction, target):
    """Per-label precision for a batch of multi-label predictions.

    Predictions are rounded to hard 0/1 decisions, then precision is
    computed independently per label column: tp / (tp + fp).

    Fix: the original divided by (tp + fp) directly, producing NaN for
    any label the model never predicted positive in the batch. The
    denominator is now clamped to 1; since tp is 0 whenever tp + fp is
    0, such labels report a precision of 0 instead of NaN.

    Args:
        prediction: float tensor, shape (batch, num_labels), values in [0, 1].
        target: tensor of the same shape holding 0/1 ground-truth labels.

    Returns:
        1-D float tensor of length num_labels with one precision per label.
    """
    prediction = torch.round(prediction)

    tp = torch.sum(torch.logical_and(prediction == 1, target == 1), axis=0)
    fp = torch.sum(torch.logical_and(prediction == 1, target == 0), axis=0)

    # clamp(min=1) guards the 0/0 case; tp == 0 there, so precision is 0.
    precision = tp / (tp + fp).clamp(min=1)

    return precision
|
| 169 |
+
|
| 170 |
+
def train(model, dataloader_train, optimizer, criterion, epoch):
    """Run one training epoch across all five task heads.

    Each batch carries one single-label target (task 0) and four
    multi-label targets (tasks 1-4). Task 0 is scored with the
    module-level ``loss_fn`` (cross-entropy); tasks 1-4 with the
    module-level ``criterion_m`` (BCE-with-logits). The per-task losses
    are summed into a single backward pass.

    Args:
        model: MultiTaskModel producing one output tensor per task.
        dataloader_train: yields (inputs, single-label target, then four
            multi-label target matrices).
        optimizer: optimizer stepping the whole model.
        criterion: unused — module-level loss functions are used instead.
        epoch: current epoch number, used for logging and checkpointing.

    NOTE(review): relies on module-level globals ``loss_fn``,
    ``criterion_m``, ``experiment_num`` and the ``*_multi`` helpers.
    """
    model.train()
    multi_accuracy=0
    for batch_idx, (data, target_s, target_m1, target_m3, target_m4, target_m7) in enumerate(dataloader_train):
        # Bundle targets in the same order as the model's task heads.
        target=[target_s, target_m1, target_m3, target_m4, target_m7]
        optimizer.zero_grad()
        task_outputs=model(data)
        # Task 0: cross-entropy on class indices; tasks 1-4: BCE-with-logits
        # on float multi-hot targets.
        losses=[loss_fn(output, label)
                for output, label in zip([task_outputs[0]], [target[0]])] + [criterion_m(output, label.float())
                for output, label in zip(task_outputs[1:], target[1:])]
        # Joint objective: unweighted sum of the per-task losses.
        loss=sum(losses)
        loss.backward()
        optimizer.step()

        # Per-task metrics, delegated to each head's own implementation.
        multi_accuracy=model.accuracy(task_outputs, target)
        multi_recall=model.recall(task_outputs, target)
        multi_precision=model.precision(task_outputs, target)

        # Per-label breakdowns exist only for the multi-label tasks (1-4).
        multi_accuracy_label=[accuracy_multi(
            x, y) for x, y in zip(task_outputs[1:], target[1:])]

        multi_recall_label=[recall_multi(x, y) for x, y in zip(task_outputs[1:], target[1:])]
        multi_precision_label=[precision_multi(x, y) for x, y in zip(task_outputs[1:], target[1:])]
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(dataloader_train.dataset),
                100. * batch_idx / len(dataloader_train), loss.item()))
            for i in range(len(task_outputs)):
                print(f"Task {i+1} Accuracy: {multi_accuracy[i]}", end="\t")
                print(f"Task {i+1} Recall: {multi_recall[i]}", end="\t")
                print(f"Task {i+1} Precision: {multi_precision[i]}")
                # Only the multi-label tasks have a label-wise breakdown.
                if i > 0:
                    print(
                        f"Task {i+1} Accuracy Label: {multi_accuracy_label[i-1]}")
                    print(
                        f"Task {i+1} Recall Label: {multi_recall_label[i-1]}")
                    print(
                        f"Task {i+1} Precision Label: {multi_precision_label[i-1]}")
                    print('----------------------------------------------------------------------')

                else:
                    print('----------------------------------------------------------------------')

            print('*'*120)

    # pdb.set_trace()


    # Periodic checkpointing; `experiment_num` is a module-level global set
    # at startup. NOTE(review): the filename interpolates the loss *tensor*
    # repr — confirm whether `loss.item()` was intended.
    if epoch % 10 == 0:
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            # 'loss': loss_fn,
        }, f"saved_model/EXPERIMENT_{experiment_num}/checkpoints/checkpoint_{epoch}_{loss}.pt")
|
| 226 |
+
|
| 227 |
+
# def validate(model, dataloader_val, criterion, epoch):
|
| 228 |
+
# with torch.no_grad():
|
| 229 |
+
# for batch_idx, (data, target_s, target_m1, target_m3, target_m4, target_m7) in enumerate(dataloader_val):
|
| 230 |
+
# target=[target_s, target_m1, target_m3, target_m4, target_m7]
|
| 231 |
+
# task_outputs=model(data)
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
dir_info=natsorted(glob.glob('saved_model/EXPERIMENT_*'))
|
| 235 |
+
|
| 236 |
+
if len(dir_info) == 0:
|
| 237 |
+
experiment_num=1
|
| 238 |
+
else:
|
| 239 |
+
experiment_num=int(dir_info[-1].split('_')[-1]) + 1
|
| 240 |
+
|
| 241 |
+
if not os.path.isdir('saved_model/EXPERIMENT_{}'.format(experiment_num)):
|
| 242 |
+
os.makedirs('saved_model/EXPERIMENT_{}'.format(experiment_num))
|
| 243 |
+
os.system('cp *.py saved_model/EXPERIMENT_{}'.format(experiment_num))
|
| 244 |
+
|
| 245 |
+
ckpt_lst=natsorted(
|
| 246 |
+
glob.glob('saved_model/EXPERIMENT_{}/checkpoints/*'.format(experiment_num)))
|
| 247 |
+
START_EPOCH=0
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
if len(ckpt_lst) >= 1:
|
| 251 |
+
ckpt_path=ckpt_lst[-1]
|
| 252 |
+
checkpoint=torch.load(ckpt_path, map_location=device)
|
| 253 |
+
model.load_state_dict(checkpoint['model_state_dict'])
|
| 254 |
+
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
|
| 255 |
+
# scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
|
| 256 |
+
START_EPOCH=checkpoint['epoch']
|
| 257 |
+
print('Loading checkpoint from previous epoch: {}'.format(START_EPOCH))
|
| 258 |
+
START_EPOCH += 1
|
| 259 |
+
else:
|
| 260 |
+
os.makedirs('saved_model/EXPERIMENT_{}/checkpoints/'.format(experiment_num))
|
| 261 |
+
|
| 262 |
+
for epoch in range(START_EPOCH, epoch + 1):
|
| 263 |
+
train(model, dataloader_train, optimizer, loss_fn, epoch)
|
Deception/Code/Multitask_Learning/multi_task.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch.nn as nn
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class MultiTaskModel(nn.Module):
    """Shared-trunk multi-task model.

    Runs the input through a single base network and fans the shared
    representation out to one head per task. Metric calls (accuracy,
    recall, precision) are delegated to the corresponding head so each
    task scores itself with its own definition.
    """

    def __init__(self, base_net, task_heads, device):
        super().__init__()
        self.base_net = base_net
        # ModuleList registers every head's parameters with the model.
        self.task_heads = nn.ModuleList(task_heads)
        self.device = device

    def forward(self, x):
        """Return a list with one output tensor per task head."""
        shared = self.base_net(x)
        outputs = []
        for head in self.task_heads:
            outputs.append(head(shared))
        return outputs

    def predict(self, x):
        """Return a list with one hard prediction per task head."""
        shared = self.base_net(x)
        return [head.predict(shared) for head in self.task_heads]

    def accuracy(self, predictions, targets):
        """Per-task accuracies, delegated to each head."""
        return [head.accuracy(pred, tgt)
                for head, pred, tgt in zip(self.task_heads, predictions, targets)]

    def recall(self, predictions, targets):
        """Per-task recalls, delegated to each head."""
        return [head.recall(pred, tgt)
                for head, pred, tgt in zip(self.task_heads, predictions, targets)]

    def precision(self, predictions, targets):
        """Per-task precisions, delegated to each head."""
        return [head.precision(pred, tgt)
                for head, pred, tgt in zip(self.task_heads, predictions, targets)]
|
Deception/Code/Multitask_Learning/new_data.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Deception/Code/Multitask_Learning/presentation main.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# this is the main file, where we shall integrate all the other files and run the code
|
| 2 |
+
import os
|
| 3 |
+
import ast
|
| 4 |
+
import numpy as np
|
| 5 |
+
import json
|
| 6 |
+
import pandas as pd
|
| 7 |
+
import matplotlib.pyplot as plt
|
| 8 |
+
import pdb
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
from nltk.tokenize import WhitespaceTokenizer
|
| 11 |
+
from sklearn.preprocessing import LabelEncoder
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
import torch.nn as nn
|
| 16 |
+
import torch.nn.functional as F
|
| 17 |
+
import torch.optim as optim
|
| 18 |
+
from torch.utils.data import Dataset, DataLoader
|
| 19 |
+
from torch.utils.data import DataLoader, TensorDataset
|
| 20 |
+
from torch.nn.utils.rnn import pad_sequence
|
| 21 |
+
|
| 22 |
+
from Multilabel_task_head import MultiLabelTaskHead
|
| 23 |
+
from singlelabel_task_head import SingleLabelTaskHead
|
| 24 |
+
from base_network import base_network
|
| 25 |
+
from multi_task import MultiTaskModel
|
| 26 |
+
|
| 27 |
+
batch_size = 32
|
| 28 |
+
epoch = 1000
|
| 29 |
+
max_seq_length = 128
|
| 30 |
+
input_size = 128
|
| 31 |
+
device = 'cpu'
|
| 32 |
+
|
| 33 |
+
# Load the data
|
| 34 |
+
with open('data/data_Xtrain.json', 'r') as file:
|
| 35 |
+
X_train = json.load(file)
|
| 36 |
+
|
| 37 |
+
with open('data/data_Xval.json', 'r') as file:
|
| 38 |
+
Xval = json.load(file)
|
| 39 |
+
|
| 40 |
+
y_train = pd.read_csv('data/data_ytrain.csv')
|
| 41 |
+
y_train_s = y_train['Intent Of Lie (Gaining Advantage/Gaining Esteem/Avoiding Punishment/Avoiding Embarrassment/Protecting Themselves)']
|
| 42 |
+
# yval = pd.read_csv('data/data_yval.csv')
|
| 43 |
+
y_train_m1 = y_train['ordered_list_1']
|
| 44 |
+
y_train_m1 = y_train_m1.apply(ast.literal_eval)
|
| 45 |
+
|
| 46 |
+
le = LabelEncoder()
|
| 47 |
+
y_train_s = le.fit_transform(y_train_s)
|
| 48 |
+
|
| 49 |
+
# Convert the data into numpy arrays
|
| 50 |
+
X_train = np.array(X_train)
|
| 51 |
+
# Xval = np.array(Xval)
|
| 52 |
+
y_train_s = np.array(y_train_s)
|
| 53 |
+
# yval = np.array(yval)
|
| 54 |
+
|
| 55 |
+
print(X_train.shape)
|
| 56 |
+
|
| 57 |
+
# Tokenize and pad the data
|
| 58 |
+
tokenizer = WhitespaceTokenizer()
|
| 59 |
+
tokenized_sentences = [tokenizer.tokenize(
|
| 60 |
+
sentence)[:max_seq_length] for sentence in X_train]
|
| 61 |
+
vocab = {token: i+1 for i,
|
| 62 |
+
token in enumerate(set(token for sent in tokenized_sentences for token in sent))}
|
| 63 |
+
indexed_sequences = [torch.tensor([vocab.get(token, 0) for token in sent] + [
|
| 64 |
+
0] * (max_seq_length - len(sent))) for sent in tokenized_sentences]
|
| 65 |
+
padded_sequences = pad_sequence(
|
| 66 |
+
indexed_sequences, batch_first=True, padding_value=0)
|
| 67 |
+
|
| 68 |
+
# attention_mask = torch.where(padded_sequences != 0, torch.tensor(1), torch.tensor(0))
|
| 69 |
+
|
| 70 |
+
X_train = padded_sequences
|
| 71 |
+
|
| 72 |
+
X_train, y_train_s = torch.tensor(X_train), torch.tensor(y_train_s)
|
| 73 |
+
X_train = X_train.to(device)
|
| 74 |
+
y_train_s = y_train_s.to(device)
|
| 75 |
+
dataset_train = TensorDataset(X_train, y_train_s)
|
| 76 |
+
dataloader_train = DataLoader(
|
| 77 |
+
dataset_train, batch_size=batch_size, shuffle=True)
|
| 78 |
+
|
| 79 |
+
# ### define for validation
|
| 80 |
+
# dataset_val = TensorDataset(Xval, yval)
|
| 81 |
+
# dataloader_val = DataLoader(dataset_val, batch_size=batch_size, shuffle=True)
|
| 82 |
+
|
| 83 |
+
# model = base_network(input_size, embedding_size, output_size,
|
| 84 |
+
# hidden_size, num_layers, dropout, bidirectional, device)
|
| 85 |
+
|
| 86 |
+
# , TaskHead2(), ..., TaskHeadN()]
|
| 87 |
+
task_heads = [SingleLabelTaskHead(
|
| 88 |
+
input_size=128, output_size=10, device=device), MultiLabelTaskHead(input_size=128, output_size=10, device=device)]
|
| 89 |
+
model = MultiTaskModel(base_network(input_size=7700+1, embedding_size=128, output_size=128,
|
| 90 |
+
hidden_size=128, num_layers=2, dropout=0.5, bidirectional=False, device=device), task_heads, device=device)
|
| 91 |
+
|
| 92 |
+
optimizer = optim.Adam(model.parameters(), lr=0.001)
|
| 93 |
+
loss_fn = nn.CrossEntropyLoss()
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def train(model, dataloader_train, optimizer, criterion, epoch):
    """Single-epoch training loop for the presentation/demo script.

    NOTE(review): `zip(task_outputs, [target])` pairs exactly one
    (output, target) tuple, so only the first task head receives a loss
    signal — confirm this single-task behaviour is intended for the demo.
    The `criterion` parameter is unused; the module-level `loss_fn`
    is called instead.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(dataloader_train):
        optimizer.zero_grad()
        task_outputs = model(data)
        # Debug print of the raw head outputs.
        print(task_outputs)
        # zip stops at the shorter sequence, so this scores task_outputs[0] only.
        losses = [loss_fn(output, label)
                  for output, label in zip(task_outputs, [target])]
        loss = sum(losses)
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(dataloader_train.dataset),
                100. * batch_idx / len(dataloader_train), loss.item()))
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
for epoch in range(1, epoch + 1):
|
| 114 |
+
train(model, dataloader_train, optimizer, loss_fn, epoch)
|
Deception/Code/Multitask_Learning/singlelabel_task_head.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# In this case we shall use the same model as the one used in the previous task
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
import torch.optim as optim
|
| 6 |
+
|
| 7 |
+
from sklearn.metrics import recall_score, precision_score, accuracy_score
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class SingleLabelTaskHead(nn.Module):
    """Classification head for the single-label (intent-of-lie) task.

    A three-layer MLP that maps the shared representation to a
    probability distribution over ``output_size`` classes.

    NOTE(review): forward() already applies Softmax, yet main.py feeds
    this head's output to nn.CrossEntropyLoss, which expects raw
    logits — confirm whether the double softmax is intentional.
    """

    def __init__(self, input_size, output_size, device):
        super(SingleLabelTaskHead, self).__init__()
        self.fc1 = nn.Linear(input_size, 50)
        self.fc2 = nn.Linear(50, 50)
        self.fc3 = nn.Linear(50, output_size)
        self.softmax = nn.Softmax(dim=1)
        self.device = device

    def forward(self, x):
        """Return class probabilities, shape (batch, output_size)."""
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return self.softmax(self.fc3(hidden))

    def predict(self, x):
        """Return the most probable class index per sample."""
        return torch.argmax(self.forward(x), dim=1)

    def accuracy(self, prediction, target):
        """Fraction of samples whose argmax matches the target index."""
        predicted_classes = torch.argmax(prediction, dim=1)
        return (predicted_classes == target).float().mean()

    def recall(self, prediction, target):
        """Micro-averaged recall via sklearn (computed on CPU numpy arrays)."""
        predicted_classes = torch.argmax(prediction, dim=1)
        return recall_score(target.cpu().detach().numpy(),
                            predicted_classes.cpu().detach().numpy(),
                            average='micro')

    def precision(self, prediction, target):
        """Micro-averaged precision via sklearn (computed on CPU numpy arrays)."""
        predicted_classes = torch.argmax(prediction, dim=1)
        return precision_score(target.cpu().detach().numpy(),
                               predicted_classes.cpu().detach().numpy(),
                               average='micro')
|
| 41 |
+
|
Deception/Code/Multitask_Learning/utils.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
def save_checkpoint(save_path, model, optimizer, valid_loss):
    """Serialize model/optimizer state plus the validation loss to disk.

    Fix: None is now tested with ``is None`` (identity) rather than the
    non-idiomatic ``== None`` equality check.

    Args:
        save_path: destination file path; the call is a no-op when None.
        model: module whose ``state_dict`` is saved.
        optimizer: optimizer whose ``state_dict`` is saved.
        valid_loss: validation loss recorded alongside the weights.
    """
    if save_path is None:
        return

    state_dict = {'model_state_dict': model.state_dict(),
                  'optimizer_state_dict': optimizer.state_dict(),
                  'valid_loss': valid_loss}

    torch.save(state_dict, save_path)
    print(f'Model saved to ==> {save_path}')
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def load_checkpoint(load_path, model, optimizer, device):
    """Restore model/optimizer state saved by ``save_checkpoint``.

    Fix: None is now tested with ``is None`` (identity) rather than the
    non-idiomatic ``== None`` equality check.

    Args:
        load_path: checkpoint file path; the call is a no-op when None.
        model: module restored in place from 'model_state_dict'.
        optimizer: optimizer restored in place from 'optimizer_state_dict'.
        device: map_location passed to ``torch.load``.

    Returns:
        The stored validation loss, or None when load_path is None.
    """
    if load_path is None:
        return

    state_dict = torch.load(load_path, map_location=device)
    print(f'Model loaded from <== {load_path}')

    model.load_state_dict(state_dict['model_state_dict'])
    optimizer.load_state_dict(state_dict['optimizer_state_dict'])

    return state_dict['valid_loss']
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def save_metrics(save_path, train_loss_list, valid_loss_list, global_steps_list):
    """Serialize training-curve metrics to disk.

    Fix: None is now tested with ``is None`` (identity) rather than the
    non-idiomatic ``== None`` equality check.

    Args:
        save_path: destination file path; the call is a no-op when None.
        train_loss_list: per-step training losses.
        valid_loss_list: per-step validation losses.
        global_steps_list: global step numbers aligned with the loss lists.
    """
    if save_path is None:
        return

    state_dict = {'train_loss_list': train_loss_list,
                  'valid_loss_list': valid_loss_list,
                  'global_steps_list': global_steps_list}

    torch.save(state_dict, save_path)
    print(f'Model saved to ==> {save_path}')
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def load_metrics(load_path, device):
    """Load training-curve metrics saved by ``save_metrics``.

    Fix: None is now tested with ``is None`` (identity) rather than the
    non-idiomatic ``== None`` equality check.

    Args:
        load_path: metrics file path; the call is a no-op when None.
        device: map_location passed to ``torch.load``.

    Returns:
        (train_loss_list, valid_loss_list, global_steps_list), or None
        when load_path is None.
    """
    if load_path is None:
        return

    state_dict = torch.load(load_path, map_location=device)
    print(f'Model loaded from <== {load_path}')

    return state_dict['train_loss_list'], state_dict['valid_loss_list'], state_dict['global_steps_list']
|
Deception/Code/Multitask_Learning/watchdata.ipynb
ADDED
|
@@ -0,0 +1,267 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "code",
|
| 5 |
+
"execution_count": 2,
|
| 6 |
+
"metadata": {},
|
| 7 |
+
"outputs": [],
|
| 8 |
+
"source": [
|
| 9 |
+
"import pandas as pd"
|
| 10 |
+
]
|
| 11 |
+
},
|
| 12 |
+
{
|
| 13 |
+
"cell_type": "code",
|
| 14 |
+
"execution_count": 3,
|
| 15 |
+
"metadata": {},
|
| 16 |
+
"outputs": [],
|
| 17 |
+
"source": [
|
| 18 |
+
"df = pd.read_csv('/home/dwip.dalal/AIISC/ScratchIMP/data/data_ytrain.csv')"
|
| 19 |
+
]
|
| 20 |
+
},
|
| 21 |
+
{
|
| 22 |
+
"cell_type": "code",
|
| 23 |
+
"execution_count": 4,
|
| 24 |
+
"metadata": {},
|
| 25 |
+
"outputs": [
|
| 26 |
+
{
|
| 27 |
+
"data": {
|
| 28 |
+
"text/html": [
|
| 29 |
+
"<div>\n",
|
| 30 |
+
"<style scoped>\n",
|
| 31 |
+
" .dataframe tbody tr th:only-of-type {\n",
|
| 32 |
+
" vertical-align: middle;\n",
|
| 33 |
+
" }\n",
|
| 34 |
+
"\n",
|
| 35 |
+
" .dataframe tbody tr th {\n",
|
| 36 |
+
" vertical-align: top;\n",
|
| 37 |
+
" }\n",
|
| 38 |
+
"\n",
|
| 39 |
+
" .dataframe thead th {\n",
|
| 40 |
+
" text-align: right;\n",
|
| 41 |
+
" }\n",
|
| 42 |
+
"</style>\n",
|
| 43 |
+
"<table border=\"1\" class=\"dataframe\">\n",
|
| 44 |
+
" <thead>\n",
|
| 45 |
+
" <tr style=\"text-align: right;\">\n",
|
| 46 |
+
" <th></th>\n",
|
| 47 |
+
" <th>ordered_list_1</th>\n",
|
| 48 |
+
" <th>ordered_list_3</th>\n",
|
| 49 |
+
" <th>ordered_list_4</th>\n",
|
| 50 |
+
" <th>Intent Of Lie (Gaining Advantage/Gaining Esteem/Avoiding Punishment/Avoiding Embarrassment/Protecting Themselves)</th>\n",
|
| 51 |
+
" <th>ordered_list_7</th>\n",
|
| 52 |
+
" </tr>\n",
|
| 53 |
+
" </thead>\n",
|
| 54 |
+
" <tbody>\n",
|
| 55 |
+
" <tr>\n",
|
| 56 |
+
" <th>0</th>\n",
|
| 57 |
+
" <td>[1, 0, 0, 0, 0]</td>\n",
|
| 58 |
+
" <td>[1, 1, 0, 0, 0, 0, 0]</td>\n",
|
| 59 |
+
" <td>[0, 0, 0, 0, 1]</td>\n",
|
| 60 |
+
" <td>Nan4</td>\n",
|
| 61 |
+
" <td>[0, 0, 1, 0, 0, 0, 0]</td>\n",
|
| 62 |
+
" </tr>\n",
|
| 63 |
+
" <tr>\n",
|
| 64 |
+
" <th>1</th>\n",
|
| 65 |
+
" <td>[0, 0, 1, 0, 0]</td>\n",
|
| 66 |
+
" <td>[0, 1, 0, 0, 0, 1, 0]</td>\n",
|
| 67 |
+
" <td>[1, 0, 0, 0, 0]</td>\n",
|
| 68 |
+
" <td>Gaining Advantage</td>\n",
|
| 69 |
+
" <td>[1, 0, 0, 0, 0, 0, 0]</td>\n",
|
| 70 |
+
" </tr>\n",
|
| 71 |
+
" <tr>\n",
|
| 72 |
+
" <th>2</th>\n",
|
| 73 |
+
" <td>[0, 0, 0, 0, 1]</td>\n",
|
| 74 |
+
" <td>[1, 1, 0, 0, 0, 0, 0]</td>\n",
|
| 75 |
+
" <td>[0, 1, 0, 0, 0]</td>\n",
|
| 76 |
+
" <td>Defaming Esteem</td>\n",
|
| 77 |
+
" <td>[1, 0, 0, 0, 0, 0, 0]</td>\n",
|
| 78 |
+
" </tr>\n",
|
| 79 |
+
" <tr>\n",
|
| 80 |
+
" <th>3</th>\n",
|
| 81 |
+
" <td>[0, 0, 0, 0, 1]</td>\n",
|
| 82 |
+
" <td>[1, 1, 0, 0, 0, 1, 0]</td>\n",
|
| 83 |
+
" <td>[0, 0, 0, 1, 0]</td>\n",
|
| 84 |
+
" <td>Gaining Advantage</td>\n",
|
| 85 |
+
" <td>[0, 0, 0, 0, 0, 0, 1]</td>\n",
|
| 86 |
+
" </tr>\n",
|
| 87 |
+
" <tr>\n",
|
| 88 |
+
" <th>4</th>\n",
|
| 89 |
+
" <td>[1, 0, 0, 0, 0]</td>\n",
|
| 90 |
+
" <td>[1, 1, 0, 0, 0, 0, 0]</td>\n",
|
| 91 |
+
" <td>[0, 0, 0, 1, 0]</td>\n",
|
| 92 |
+
" <td>Gaining Esteem</td>\n",
|
| 93 |
+
" <td>[0, 0, 0, 0, 0, 0, 1]</td>\n",
|
| 94 |
+
" </tr>\n",
|
| 95 |
+
" </tbody>\n",
|
| 96 |
+
"</table>\n",
|
| 97 |
+
"</div>"
|
| 98 |
+
],
|
| 99 |
+
"text/plain": [
|
| 100 |
+
" ordered_list_1 ordered_list_3 ordered_list_4 \\\n",
|
| 101 |
+
"0 [1, 0, 0, 0, 0] [1, 1, 0, 0, 0, 0, 0] [0, 0, 0, 0, 1] \n",
|
| 102 |
+
"1 [0, 0, 1, 0, 0] [0, 1, 0, 0, 0, 1, 0] [1, 0, 0, 0, 0] \n",
|
| 103 |
+
"2 [0, 0, 0, 0, 1] [1, 1, 0, 0, 0, 0, 0] [0, 1, 0, 0, 0] \n",
|
| 104 |
+
"3 [0, 0, 0, 0, 1] [1, 1, 0, 0, 0, 1, 0] [0, 0, 0, 1, 0] \n",
|
| 105 |
+
"4 [1, 0, 0, 0, 0] [1, 1, 0, 0, 0, 0, 0] [0, 0, 0, 1, 0] \n",
|
| 106 |
+
"\n",
|
| 107 |
+
" Intent Of Lie (Gaining Advantage/Gaining Esteem/Avoiding Punishment/Avoiding Embarrassment/Protecting Themselves) \\\n",
|
| 108 |
+
"0 Nan4 \n",
|
| 109 |
+
"1 Gaining Advantage \n",
|
| 110 |
+
"2 Defaming Esteem \n",
|
| 111 |
+
"3 Gaining Advantage \n",
|
| 112 |
+
"4 Gaining Esteem \n",
|
| 113 |
+
"\n",
|
| 114 |
+
" ordered_list_7 \n",
|
| 115 |
+
"0 [0, 0, 1, 0, 0, 0, 0] \n",
|
| 116 |
+
"1 [1, 0, 0, 0, 0, 0, 0] \n",
|
| 117 |
+
"2 [1, 0, 0, 0, 0, 0, 0] \n",
|
| 118 |
+
"3 [0, 0, 0, 0, 0, 0, 1] \n",
|
| 119 |
+
"4 [0, 0, 0, 0, 0, 0, 1] "
|
| 120 |
+
]
|
| 121 |
+
},
|
| 122 |
+
"execution_count": 4,
|
| 123 |
+
"metadata": {},
|
| 124 |
+
"output_type": "execute_result"
|
| 125 |
+
}
|
| 126 |
+
],
|
| 127 |
+
"source": [
|
| 128 |
+
"df.head()"
|
| 129 |
+
]
|
| 130 |
+
},
|
| 131 |
+
{
|
| 132 |
+
"cell_type": "code",
|
| 133 |
+
"execution_count": 5,
|
| 134 |
+
"metadata": {},
|
| 135 |
+
"outputs": [],
|
| 136 |
+
"source": [
|
| 137 |
+
"df = df[['ordered_list_1', 'ordered_list_3', 'ordered_list_4', 'ordered_list_7']]"
|
| 138 |
+
]
|
| 139 |
+
},
|
| 140 |
+
{
|
| 141 |
+
"cell_type": "code",
|
| 142 |
+
"execution_count": 6,
|
| 143 |
+
"metadata": {},
|
| 144 |
+
"outputs": [
|
| 145 |
+
{
|
| 146 |
+
"data": {
|
| 147 |
+
"text/html": [
|
| 148 |
+
"<div>\n",
|
| 149 |
+
"<style scoped>\n",
|
| 150 |
+
" .dataframe tbody tr th:only-of-type {\n",
|
| 151 |
+
" vertical-align: middle;\n",
|
| 152 |
+
" }\n",
|
| 153 |
+
"\n",
|
| 154 |
+
" .dataframe tbody tr th {\n",
|
| 155 |
+
" vertical-align: top;\n",
|
| 156 |
+
" }\n",
|
| 157 |
+
"\n",
|
| 158 |
+
" .dataframe thead th {\n",
|
| 159 |
+
" text-align: right;\n",
|
| 160 |
+
" }\n",
|
| 161 |
+
"</style>\n",
|
| 162 |
+
"<table border=\"1\" class=\"dataframe\">\n",
|
| 163 |
+
" <thead>\n",
|
| 164 |
+
" <tr style=\"text-align: right;\">\n",
|
| 165 |
+
" <th></th>\n",
|
| 166 |
+
" <th>ordered_list_1</th>\n",
|
| 167 |
+
" <th>ordered_list_3</th>\n",
|
| 168 |
+
" <th>ordered_list_4</th>\n",
|
| 169 |
+
" <th>ordered_list_7</th>\n",
|
| 170 |
+
" </tr>\n",
|
| 171 |
+
" </thead>\n",
|
| 172 |
+
" <tbody>\n",
|
| 173 |
+
" <tr>\n",
|
| 174 |
+
" <th>0</th>\n",
|
| 175 |
+
" <td>[1, 0, 0, 0, 0]</td>\n",
|
| 176 |
+
" <td>[1, 1, 0, 0, 0, 0, 0]</td>\n",
|
| 177 |
+
" <td>[0, 0, 0, 0, 1]</td>\n",
|
| 178 |
+
" <td>[0, 0, 1, 0, 0, 0, 0]</td>\n",
|
| 179 |
+
" </tr>\n",
|
| 180 |
+
" <tr>\n",
|
| 181 |
+
" <th>1</th>\n",
|
| 182 |
+
" <td>[0, 0, 1, 0, 0]</td>\n",
|
| 183 |
+
" <td>[0, 1, 0, 0, 0, 1, 0]</td>\n",
|
| 184 |
+
" <td>[1, 0, 0, 0, 0]</td>\n",
|
| 185 |
+
" <td>[1, 0, 0, 0, 0, 0, 0]</td>\n",
|
| 186 |
+
" </tr>\n",
|
| 187 |
+
" <tr>\n",
|
| 188 |
+
" <th>2</th>\n",
|
| 189 |
+
" <td>[0, 0, 0, 0, 1]</td>\n",
|
| 190 |
+
" <td>[1, 1, 0, 0, 0, 0, 0]</td>\n",
|
| 191 |
+
" <td>[0, 1, 0, 0, 0]</td>\n",
|
| 192 |
+
" <td>[1, 0, 0, 0, 0, 0, 0]</td>\n",
|
| 193 |
+
" </tr>\n",
|
| 194 |
+
" <tr>\n",
|
| 195 |
+
" <th>3</th>\n",
|
| 196 |
+
" <td>[0, 0, 0, 0, 1]</td>\n",
|
| 197 |
+
" <td>[1, 1, 0, 0, 0, 1, 0]</td>\n",
|
| 198 |
+
" <td>[0, 0, 0, 1, 0]</td>\n",
|
| 199 |
+
" <td>[0, 0, 0, 0, 0, 0, 1]</td>\n",
|
| 200 |
+
" </tr>\n",
|
| 201 |
+
" <tr>\n",
|
| 202 |
+
" <th>4</th>\n",
|
| 203 |
+
" <td>[1, 0, 0, 0, 0]</td>\n",
|
| 204 |
+
" <td>[1, 1, 0, 0, 0, 0, 0]</td>\n",
|
| 205 |
+
" <td>[0, 0, 0, 1, 0]</td>\n",
|
| 206 |
+
" <td>[0, 0, 0, 0, 0, 0, 1]</td>\n",
|
| 207 |
+
" </tr>\n",
|
| 208 |
+
" </tbody>\n",
|
| 209 |
+
"</table>\n",
|
| 210 |
+
"</div>"
|
| 211 |
+
],
|
| 212 |
+
"text/plain": [
|
| 213 |
+
" ordered_list_1 ordered_list_3 ordered_list_4 \\\n",
|
| 214 |
+
"0 [1, 0, 0, 0, 0] [1, 1, 0, 0, 0, 0, 0] [0, 0, 0, 0, 1] \n",
|
| 215 |
+
"1 [0, 0, 1, 0, 0] [0, 1, 0, 0, 0, 1, 0] [1, 0, 0, 0, 0] \n",
|
| 216 |
+
"2 [0, 0, 0, 0, 1] [1, 1, 0, 0, 0, 0, 0] [0, 1, 0, 0, 0] \n",
|
| 217 |
+
"3 [0, 0, 0, 0, 1] [1, 1, 0, 0, 0, 1, 0] [0, 0, 0, 1, 0] \n",
|
| 218 |
+
"4 [1, 0, 0, 0, 0] [1, 1, 0, 0, 0, 0, 0] [0, 0, 0, 1, 0] \n",
|
| 219 |
+
"\n",
|
| 220 |
+
" ordered_list_7 \n",
|
| 221 |
+
"0 [0, 0, 1, 0, 0, 0, 0] \n",
|
| 222 |
+
"1 [1, 0, 0, 0, 0, 0, 0] \n",
|
| 223 |
+
"2 [1, 0, 0, 0, 0, 0, 0] \n",
|
| 224 |
+
"3 [0, 0, 0, 0, 0, 0, 1] \n",
|
| 225 |
+
"4 [0, 0, 0, 0, 0, 0, 1] "
|
| 226 |
+
]
|
| 227 |
+
},
|
| 228 |
+
"execution_count": 6,
|
| 229 |
+
"metadata": {},
|
| 230 |
+
"output_type": "execute_result"
|
| 231 |
+
}
|
| 232 |
+
],
|
| 233 |
+
"source": [
|
| 234 |
+
"df['ordered_list_1'] = df['ordered_list_1'].apply(ast.literal_eval).apply(np.array)"
|
| 235 |
+
]
|
| 236 |
+
},
|
| 237 |
+
{
|
| 238 |
+
"cell_type": "code",
|
| 239 |
+
"execution_count": null,
|
| 240 |
+
"metadata": {},
|
| 241 |
+
"outputs": [],
|
| 242 |
+
"source": []
|
| 243 |
+
}
|
| 244 |
+
],
|
| 245 |
+
"metadata": {
|
| 246 |
+
"kernelspec": {
|
| 247 |
+
"display_name": "pal",
|
| 248 |
+
"language": "python",
|
| 249 |
+
"name": "pal"
|
| 250 |
+
},
|
| 251 |
+
"language_info": {
|
| 252 |
+
"codemirror_mode": {
|
| 253 |
+
"name": "ipython",
|
| 254 |
+
"version": 3
|
| 255 |
+
},
|
| 256 |
+
"file_extension": ".py",
|
| 257 |
+
"mimetype": "text/x-python",
|
| 258 |
+
"name": "python",
|
| 259 |
+
"nbconvert_exporter": "python",
|
| 260 |
+
"pygments_lexer": "ipython3",
|
| 261 |
+
"version": "3.8.13"
|
| 262 |
+
},
|
| 263 |
+
"orig_nbformat": 4
|
| 264 |
+
},
|
| 265 |
+
"nbformat": 4,
|
| 266 |
+
"nbformat_minor": 2
|
| 267 |
+
}
|
Deception/Code/Sample_data for Cohen Kappa Score.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Deception/Data/.DS_Store
ADDED
|
Binary file (6.15 kB). View file
|
|
|
Deception/Data/Mask Infilling/albert_total_final.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1335dac5a3c13d890b574807b9e4fcdc5082619184bc13e67d6e2dfc7dfea604
|
| 3 |
+
size 62836465
|
Deception/Data/Mask Infilling/bert_total_final.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:889cfe0560dd218e39d5ed346cebd48e52df2638b0987751fa2d3504ce770164
|
| 3 |
+
size 62454476
|
Deception/Data/Mask Infilling/electra_total_final.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5bac1f89509aa37c3eec815f06ca67f61073026e745f90da05ebfb68faf6a0e3
|
| 3 |
+
size 62443540
|
Deception/Data/Mask Infilling/mpnet_total_final.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7f5ce996669e15686ac1cf007e576346ec2aac7283da5201a2abe8afe51137f2
|
| 3 |
+
size 62602102
|
Deception/Data/Mask Infilling/roberta_total_final.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:edc7cfa75c94b40aa23a6ca17685a35df290aefe34bce8e19ddab5461c9599cf
|
| 3 |
+
size 62815956
|
Deception/Data/Paraphrasing/paraphrase.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Deception/Data/sentences/.DS_Store
ADDED
|
Binary file (6.15 kB). View file
|
|
|
Deception/Data/sentences/Fake News.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Deception/Data/sentences/Tweet.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Deception/README.md
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# deception-detection
|
| 2 |
+
Codes for deception paper
|