code stringlengths 17 6.64M |
|---|
def Bullvalene():
    """Gather statistics about the metadynamics exploration process,
    varying bump depth and width.

    NOTE(review): exit(0) below terminates the process right after
    LocalReactions; every statement after it is dead code kept for
    reference (a sweep over MetaMD bump heights/widths).
    """
    # 20-atom bullvalene geometry (XYZ format: count, blank comment line,
    # then element + coordinates per line) despite the name "sugarXYZ".
    sugarXYZ = '20\n\nC 0.98112 -0.46991 0.42376\nC 1.06359 -0.55769 -0.77982\nC 0.18965 0.16579 -1.73869\nC -1.20916 -0.20825 -1.40751\nC -1.72118 -0.05451 -0.32246\nC -1.28117 0.48316 0.96604\nC -0.33043 1.60964 0.94931\nC 0.10659 2.11114 -0.35456\nC 0.32812 1.61317 -1.43439\nC 0.12451 0.26717 1.35413\nH 1.68037 -1.05279 0.90989\nH 1.79150 -1.17816 -1.16736\nH 0.43620 -0.05734 -2.75571\nH -1.78386 -0.62829 -2.15471\nH -2.70043 -0.37943 -0.29970\nH -2.23007 0.30013 1.42539\nH -0.30281 2.58361 1.39148\nH 0.26267 3.13127 -0.35191\nH 0.63457 2.23708 -2.19717\nH 0.61944 -0.13762 2.21209\n\t'
    m = Mol()
    m.FromXYZString(sugarXYZ)
    def GetEnergyForceForMol(m):
        # Build a one-molecule set, load the pre-trained ChemSpider12
        # manager on it, and close over the manager in an energy/force
        # callback suitable for the propagators below.
        s = MSet()
        s.mols.append(m)
        manager = GetChemSpider12(s)
        def EnAndForce(x_, DoForce=True):
            # Evaluate the network on a copy of m at coordinates x_.
            tmpm = Mol(m.atoms, x_)
            (Etotal, Ebp, Ebp_atom, Ecc, Evdw, mol_dipole, atom_charge, gradient) = manager.EvalBPDirectEEUpdateSingle(tmpm, PARAMS['AN1_r_Rc'], PARAMS['AN1_a_Rc'], PARAMS['EECutoffOff'], True)
            energy = Etotal[0]
            force = gradient[0]
            if DoForce:
                return (energy, force)
            else:
                return energy
        return EnAndForce
    F = GetEnergyForceForMol(m)
    w = LocalReactions(F, m, 10)
    exit(0)
    # ---- Dead code below: unreachable because of exit(0) above. ----
    PARAMS['MDdt'] = 0.5
    PARAMS['MDMaxStep'] = 8000
    PARAMS['MetaBumpTime'] = 10.0
    PARAMS['MetaMaxBumps'] = 500
    PARAMS['MetaBowlK'] = 0.0
    PARAMS['MDThermostat'] = 'Andersen'
    PARAMS['MDTemp'] = 2500.0
    PARAMS['MDV0'] = None
    if 0:
        # Disabled control run: zero bump height.
        PARAMS['MetaMDBumpHeight'] = 0.0
        PARAMS['MetaMDBumpWidth'] = 0.5
        traj = MetaDynamics(None, m, 'MetaMD_000_05', F)
        traj.Prop()
        PARAMS['MetaMDBumpHeight'] = 0.5
        PARAMS['MetaMDBumpWidth'] = 0.5
        traj = MetaDynamics(None, m, 'MetaMD_050_05', F)
        traj.Prop()
    # Sweep over (height, width) pairs; trajectory names encode them.
    PARAMS['MetaMDBumpHeight'] = 0.5
    PARAMS['MetaMDBumpWidth'] = 1.0
    traj = MetaDynamics(None, m, 'MetaMD_050_10', F)
    traj.Prop()
    PARAMS['MetaMDBumpHeight'] = 0.5
    PARAMS['MetaMDBumpWidth'] = 2.0
    traj = MetaDynamics(None, m, 'MetaMD_050_20', F)
    traj.Prop()
    PARAMS['MetaMDBumpHeight'] = 1.0
    PARAMS['MetaMDBumpWidth'] = 1.0
    traj = MetaDynamics(None, m, 'MetaMD_100_10', F)
    traj.Prop()
    PARAMS['MetaMDBumpHeight'] = 1.0
    PARAMS['MetaMDBumpWidth'] = 2.0
    traj = MetaDynamics(None, m, 'MetaMD_100_20', F)
    traj.Prop()
    PARAMS['MetaBumpTime'] = 10.0
    PARAMS['MetaMDBumpHeight'] = 0.7
    PARAMS['MetaMDBumpWidth'] = 1.0
    PARAMS['MetaBowlK'] = 0.0
    traj = VelocityVerlet(None, m, 'Bullvalene', F)
    traj.Prop()
|
def TrainPrepare():
    """Build the nicotine AIMD training set.

    Loads the raw set, converts forces to gradients, computes atomization
    energies referenced to wB97X-D single-atom energies, and saves the
    result as 'nicotine_aimd_rand_train'.
    """
    # wB97X-D single-atom reference energies keyed by atomic number.
    atom_reference = {
        1: -0.5026682866,
        6: -37.8387398698,
        7: -54.5806161811,
        8: -75.0586028656,
    }
    source = MSet('nicotine_aimd_rand')
    source.Load()
    training = MSet('nicotine_aimd_rand_train')
    for mol_index, mol in enumerate(source.mols):
        print('mol_index:', mol_index)
        mol.properties['gradients'] = -mol.properties['forces']
        # Atomization = total energy minus the sum of atomic references.
        atomization = mol.properties['energy']
        for i in range(mol.NAtoms()):
            atomization -= atom_reference[mol.atoms[i]]
        mol.properties['atomization'] = atomization
        training.mols.append(mol)
    training.Save()
|
def Train():
    """Train a Behler-Parrinello direct energy/gradient network on the
    nicotine AIMD training set."""
    dataset = MSet('nicotine_aimd_rand_train')
    dataset.Load()
    print(len(dataset.mols))
    treated_atoms = dataset.AtomTypes()
    # Network hyperparameters (applied to the global PARAMS store).
    for key, value in (
        ('HiddenLayers', [200, 200, 200]),
        ('learning_rate', 1e-05),
        ('momentum', 0.95),
        ('max_steps', 5001),
        ('batch_size', 200),
        ('test_freq', 10),
        ('tf_prec', 'tf.float64'),
    ):
        PARAMS[key] = value
    digester = MolDigester(treated_atoms, name_='ANI1_Sym_Direct', OType_='AtomizationEnergy')
    tset = TensorMolData_BP_Direct_Linear(dataset, digester, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
    manager = TFMolManage('', tset, False, 'fc_sqdiff_BP_Direct_Grad_Linear')
    manager.Train(maxstep=1001)
|
def test():
    """Rename legacy property keys (quads/dipoles/forces) to the unified
    names and recompute atomization energies, then save the set."""
    dataset = MSet('water_aug_cc_pvdz')
    dataset.Load()
    # (legacy key, unified key): pop() renames and deletes in one step.
    renames = (('quads', 'quadrupole'), ('dipoles', 'dipole'), ('forces', 'gradients'))
    for mol in dataset.mols:
        for old_key, new_key in renames:
            mol.properties[new_key] = mol.properties.pop(old_key)
        mol.CalculateAtomization()
    dataset.Save()
|
def make_mini_set(filename, count=1100, out_name='water_aug_cc_pvdz_mini'):
    """Copy the first molecules of a saved MSet into a new, smaller set.

    Generalized from the original hard-coded behavior (first 1100 molecules
    into 'water_aug_cc_pvdz_mini'); the defaults preserve it exactly.

    Args:
        filename: name of the source MSet to load.
        count: number of molecules to copy (default 1100).
        out_name: name under which the mini set is saved.
    """
    source = MSet(filename)
    source.Load()
    mini = MSet(out_name)
    # Slicing (unlike the original index loop) also tolerates sets with
    # fewer than `count` molecules instead of raising IndexError.
    mini.mols.extend(source.mols[:count])
    mini.Save()
|
def train_energy_symm_func(mset):
    """Configure PARAMS for a BP symmetry-function network and build its
    manager for the given molecule set.

    Fix: the TFMolManageDirect instance was previously built and discarded;
    it is now returned, consistent with GetWaterNetwork /
    GetChemSpiderNetwork elsewhere in this file (backward compatible:
    callers ignoring the return value are unaffected).
    """
    PARAMS['train_energy_gradients'] = False
    PARAMS['weight_decay'] = None
    PARAMS['HiddenLayers'] = [512, 512, 512]
    PARAMS['learning_rate'] = 0.0001
    PARAMS['max_steps'] = 500
    PARAMS['test_freq'] = 1
    PARAMS['batch_size'] = 100
    PARAMS['NeuronType'] = 'shifted_softplus'
    PARAMS['tf_prec'] = 'tf.float32'
    PARAMS['train_dipole'] = True
    PARAMS['train_quadrupole'] = True
    manager = TFMolManageDirect(mset, network_type='BPSymFunc')
    return manager
|
def get_losses(filename):
    """Parse a TensorMol training log and extract per-step losses.

    Lines containing 'TensorMol - INFO - step:' are classified by the
    character at column 79: ' ' marks a training line, 't' (start of the
    word "test") marks a test line. Tokens 13/15/17 of each matching line
    are the total, energy, and gradient losses.

    Fix: the original indexed line[79] unconditionally and raised
    IndexError on any matching line shorter than 80 characters; such lines
    are now skipped.

    Returns:
        (train_loss, energy_loss, grad_loss,
         test_train_loss, test_energy_loss, test_grad_loss) as lists of floats.
    """
    keep_phrase = 'TensorMol - INFO - step:'
    train_loss, energy_loss, grad_loss = [], [], []
    test_train_loss, test_energy_loss, test_grad_loss = [], [], []
    with open(filename, 'r') as log_file:
        for line in log_file:
            # Guard the column-79 probe; short lines cannot be classified.
            if keep_phrase not in line or len(line) <= 79:
                continue
            tokens = line.split()
            if line[79] == ' ':
                train_loss.append(float(tokens[13]))
                energy_loss.append(float(tokens[15]))
                grad_loss.append(float(tokens[17]))
            elif line[79] == 't':
                test_train_loss.append(float(tokens[13]))
                test_energy_loss.append(float(tokens[15]))
                test_grad_loss.append(float(tokens[17]))
    print(f'{train_loss}\n\n{energy_loss}\n\n{grad_loss}\n')
    print(f'{test_train_loss}\n\n{test_energy_loss}\n\n{test_grad_loss}\n')
    return (train_loss, energy_loss, grad_loss, test_train_loss, test_energy_loss, test_grad_loss)
|
def optimize_taxol():
    """Geometry-optimize the Taxol structure read from its XYZ file,
    logging to 'OptLog'."""
    taxol_set = MSet('Taxol')
    taxol_set.ReadXYZ()
    # NOTE(review): a string is passed where other call sites hand
    # GeomOptimizer a callable force field - confirm it accepts a name.
    optimizer = GeomOptimizer('EnergyForceField')
    optimizer.Opt(taxol_set, filename='OptLog', Debug=False)
|
def TrainPrepare():
    """Prepare training sets (chemspider9 forces and GoldStd).

    NOTE(review): this redefines TrainPrepare and shadows the earlier
    definition of the same name in this file; only this one is reachable
    at import time. Only the final `if 1:` branch runs; the `if 0:`
    branches are disabled one-off data-preparation passes kept for
    reference.
    """
    if 0:
        # Disabled: build the chemspider9_force MSet from a pickled list of dicts.
        a = MSet('chemspider9_force')
        dic_list = pickle.load(open('./datasets/chemspider9_force.dat', 'rb'))
        for dic in dic_list:
            atoms = []
            for atom in dic['atoms']:
                atoms.append(AtomicNumber(atom))
            atoms = np.asarray(atoms, dtype=np.uint8)
            mol = Mol(atoms, dic['xyz'])
            mol.properties['charges'] = dic['charges']
            mol.properties['dipole'] = dic['dipole']
            mol.properties['quadropole'] = dic['quad']
            mol.properties['energy'] = dic['scf_energy']
            mol.properties['gradients'] = dic['gradients']
            mol.CalculateAtomization()
            a.mols.append(mol)
        a.Save()
    if 0:
        # Disabled: inspect the RMS-gradient distribution and dump outliers
        # (1.5 < rms < 2) to an XYZ file for inspection.
        a = MSet('chemspider9_force')
        a.Load()
        rmsgrad = np.zeros(len(a.mols))
        for (i, mol) in enumerate(a.mols):
            rmsgrad[i] = (np.sum(np.square(mol.properties['gradients'])) ** 0.5)
        meangrad = np.mean(rmsgrad)
        print('mean:', meangrad, 'std:', np.std(rmsgrad))
        np.savetxt('chemspider9_force_dist.dat', rmsgrad)
        for (i, mol) in enumerate(a.mols):
            rmsgrad = (np.sum(np.square(mol.properties['gradients'])) ** 0.5)
            if (2 > rmsgrad > 1.5):
                mol.WriteXYZfile(fname='large_force')
                print(rmsgrad)
    if 0:
        # Disabled: keep only molecules with RMS gradient <= 1.5 and save a
        # cleaned set plus a 1000-molecule debug subset.
        a = MSet('chemspider9_force')
        a.Load()
        b = MSet('chemspider9_force_cleaned')
        for (i, mol) in enumerate(a.mols):
            rmsgrad = (np.sum(np.square(mol.properties['gradients'])) ** 0.5)
            if (rmsgrad <= 1.5):
                b.mols.append(mol)
        b.Save()
        c = MSet('chemspider9_force_cleaned_debug')
        c.mols = b.mols[:1000]
        c.Save()
    if 1:
        # Active: annotate GoldStd with gradients and atomization energies
        # referenced against the per-element energies in ele_E_david.
        a = MSet('GoldStd')
        a.Load()
        for mol in a.mols:
            mol.properties['gradients'] = mol.properties['forces']
            mol.properties['atomization'] = mol.properties['energy']
            for i in range(0, mol.NAtoms()):
                mol.properties['atomization'] -= ele_E_david[mol.atoms[i]]
            print(mol.properties['atomization'], mol.properties['energy'])
        a.Save()
        print(a.mols[0].properties)
        a.mols[0].WriteXYZfile(fname='test')
|
def TrainForceField(SetName_='GoldStd'):
    """Train a Behler-Parrinello direct energy/gradient force field on the
    named molecule set (default 'GoldStd')."""
    dataset = MSet(SetName_)
    dataset.Load()
    treated_atoms = dataset.AtomTypes()
    # Training hyperparameters (applied to the global PARAMS store).
    for key, value in (
        ('learning_rate', 1e-05),
        ('momentum', 0.95),
        ('max_steps', 201),
        ('batch_size', 100),
        ('test_freq', 5),
        ('tf_prec', 'tf.float64'),
        ('GradScalar', 1),
        ('NeuronType', 'relu'),
        ('HiddenLayers', [200, 200, 200]),
    ):
        PARAMS[key] = value
    digester = MolDigester(treated_atoms, name_='ANI1_Sym_Direct', OType_='AtomizationEnergy')
    tset = TensorMolData_BP_Direct_Linear(dataset, digester, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
    manager = TFMolManage('', tset, False, 'fc_sqdiff_BP_Direct_Grad_Linear')
    manager.Train(maxstep=101)
|
def TestIRLinearDirect():
    """Test the IR spectrum produced by a network created and trained with
    TrainForceField(). Intended to be used with MolInstance_DirectBP_EE
    soon...
    """
    a = MSet('sampling_mols')
    a.ReadXYZ()
    m = a.mols[0]
    TreatedAtoms = a.AtomTypes()
    # NOTE(review): hidden1/2/3 are set alongside HiddenLayers below;
    # presumably only one convention is read by the network - confirm.
    PARAMS['hidden1'] = 512
    PARAMS['hidden2'] = 512
    PARAMS['hidden3'] = 512
    PARAMS['learning_rate'] = 1e-05
    PARAMS['momentum'] = 0.95
    PARAMS['max_steps'] = 101
    PARAMS['batch_size'] = 100
    PARAMS['test_freq'] = 2
    PARAMS['tf_prec'] = 'tf.float64'
    PARAMS['GradScalar'] = 1
    PARAMS['NeuronType'] = 'relu'
    PARAMS['HiddenLayers'] = [512, 512, 512]
    d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='AtomizationEnergy')
    tset = TensorMolData_BP_Direct_Linear(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
    # Load the pre-trained manager for evaluation only (Trainable_=False).
    manager = TFMolManage('Mol_DavidMetaMD_ANI1_Sym_Direct_fc_sqdiff_BP_Direct_Grad_Linear_1', tset, False, RandomTData_=False, Trainable_=False)
    # Callbacks over coordinates x. ForceField is assigned but unused below;
    # ChargeField is a random placeholder standing in for real charges.
    ForceField = (lambda x: manager.Eval_BPEnergy_Direct_Grad_Linear(Mol(m.atoms, x), True, False))
    EnergyForceField = (lambda x: manager.Eval_BPEnergy_Direct_Grad_Linear(Mol(m.atoms, x)))
    ChargeField = (lambda x: np.random.random(m.NAtoms()))
    PARAMS['OptMomentum'] = 0.0
    PARAMS['OptMomentumDecay'] = 0.9
    PARAMS['OptStepSize'] = 0.02
    PARAMS['OptMaxCycles'] = 200
    PARAMS['MDdt'] = 0.2
    PARAMS['RemoveInvariant'] = True
    PARAMS['MDMaxStep'] = 100
    PARAMS['MDThermostat'] = 'Nose'
    PARAMS['MDV0'] = None
    PARAMS['MDTemp'] = 300.0
    # Optimize, anneal to the lowest-energy geometry found, then run a long
    # unthermostatted trajectory and write the dipole-derivative
    # autocorrelation from which the IR spectrum is obtained.
    m = GeomOptimizer(EnergyForceField).Opt(m)
    annealx_ = Annealer(EnergyForceField, None, m, 'Anneal')
    annealx_.Prop()
    m.coords = annealx_.Minx.copy()
    PARAMS['MDMaxStep'] = 40000
    PARAMS['MDThermostat'] = None
    md = IRTrajectory(EnergyForceField, ChargeField, m, 'THP_udp_grad_IR', annealx_.v.copy())
    md.Prop()
    WriteDerDipoleCorrelationFunction(md.mu_his, 'THP_udp_grad_IR.txt')
|
def GetWaterNetwork(a):
    """Load the pre-trained 'water_network' manager for the given MSet."""
    treated_atoms = a.AtomTypes()
    # Evaluation-time network settings (applied to the global PARAMS store).
    for key, value in (
        ('tf_prec', 'tf.float64'),
        ('NeuronType', 'sigmoid_with_param'),
        ('sigmoid_alpha', 100.0),
        ('HiddenLayers', [500, 500, 500]),
        ('EECutoff', 15.0),
        ('EECutoffOn', 0),
        ('Elu_Width', 4.6),
        ('EECutoffOff', 15.0),
        ('DSFAlpha', 0.18),
        ('AddEcc', True),
        ('KeepProb', [1.0, 1.0, 1.0, 1.0]),
    ):
        PARAMS[key] = value
    digester = MolDigester(treated_atoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
    tset = TensorMolData_BP_Direct_EE_WithEle(a, digester, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
    return TFMolManage('water_network', tset, False, 'fc_sqdiff_BP_Direct_EE_ChargeEncode_Update_vdw_DSF_elu_Normalize_Dropout', False, False)
|
def GetChemSpiderNetwork(a, Solvation_=False):
    """Load a pre-trained ChemSpider12 manager for H/C/N/O systems.

    Solvation_ selects between the 'chemspider12_solvation' and
    'chemspider12_nosolvation' checkpoints, which differ in how DSFAlpha
    is scaled.
    """
    treated_atoms = np.array([1, 6, 7, 8], dtype=np.uint8)
    for key, value in (
        ('tf_prec', 'tf.float64'),
        ('NeuronType', 'sigmoid_with_param'),
        ('sigmoid_alpha', 100.0),
        ('HiddenLayers', [2000, 2000, 2000]),
        ('EECutoff', 15.0),
        ('EECutoffOn', 0),
        ('Elu_Width', 4.6),
        ('EECutoffOff', 15.0),
        ('AddEcc', True),
        ('KeepProb', [1.0, 1.0, 1.0, 0.7]),
    ):
        PARAMS[key] = value
    digester = MolDigester(treated_atoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
    tset = TensorMolData_BP_Direct_EE_WithEle(a, digester, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
    if Solvation_:
        PARAMS['DSFAlpha'] = 0.18
        return TFMolManage('chemspider12_solvation', tset, False, 'fc_sqdiff_BP_Direct_EE_ChargeEncode_Update_vdw_DSF_elu_Normalize_Dropout', False, False)
    # NOTE(review): the gas-phase checkpoint scales DSFAlpha by BOHRPERA,
    # presumably a unit conversion baked in at training time - confirm.
    PARAMS['DSFAlpha'] = 0.18 * BOHRPERA
    return TFMolManage('chemspider12_nosolvation', tset, False, 'fc_sqdiff_BP_Direct_EE_ChargeEncode_Update_vdw_DSF_elu_Normalize_Dropout', False, False)
|
def EnAndForce(x_, DoForce=True):
    """Evaluate the network energy (and optionally the force) at the
    coordinates x_, using the module-level `m` and `manager`."""
    trial = Mol(m.atoms, x_)
    (Etotal, Ebp, Ebp_atom, Ecc, Evdw, mol_dipole, atom_charge, gradient) = manager.EvalBPDirectEEUpdateSingle(trial, PARAMS['AN1_r_Rc'], PARAMS['AN1_a_Rc'], PARAMS['EECutoffOff'], True)
    energy = Etotal[0]
    return (energy, gradient[0]) if DoForce else energy
|
def ChargeField(x_):
    """Return the per-atom charges predicted by the network at the
    coordinates x_, using the module-level `m` and `manager`.

    Cleanup: the original assigned `energy` and `force` locals that were
    never used; they are dropped (the network evaluation itself is kept).
    """
    trial = Mol(m.atoms, x_)
    (Etotal, Ebp, Ebp_atom, Ecc, Evdw, mol_dipole, atom_charge, gradient) = manager.EvalBPDirectEEUpdateSingle(trial, PARAMS['AN1_r_Rc'], PARAMS['AN1_a_Rc'], PARAMS['EECutoffOff'], True)
    return atom_charge[0]
|
def EnergyField(x_):
    """Energy-only wrapper around EnAndForce (the force is discarded)."""
    energy, _force = EnAndForce(x_, True)
    return energy
|
def DipoleField(x_):
    """Dipole vector from network point charges at coordinates x_.

    Equivalent to summing q[i] * x_[i] * BOHRPERA over atoms; the explicit
    Python loop was replaced by a single matrix-vector product.

    Args:
        x_: (NAtoms, 3) coordinate array.
    Returns:
        length-3 numpy array.
    """
    q = np.asarray(ChargeField(x_))
    # q @ x_ contracts over atoms, yielding the 3-vector sum of q[i]*x_[i].
    return np.dot(q, np.asarray(x_)) * BOHRPERA
|
def MakeWork(log2_size=22):
    """Burn CPU with dense linear algebra and return the eigenvalues.

    Generalized from the original hard-coded 2**22-element workload: the
    vector length is 2**log2_size, reshaped into a square matrix, squared
    by matrix product, and diagonalized. The default reproduces the
    original behavior; the eigenvalues (previously discarded) are returned
    so callers can inspect the result.

    Args:
        log2_size: base-2 log of the vector length; must be even so the
            vector reshapes into a square matrix.
    Returns:
        1-D array of eigenvalues of M @ M.
    Raises:
        ValueError: if log2_size is odd.
    """
    if log2_size % 2:
        raise ValueError('log2_size must be even to form a square matrix')
    n = np.power(2, log2_size)
    # sqrt then square: numerically ~identity, but forces real FP work.
    ramp = np.sqrt(np.arange(n, dtype=np.float64))
    ramp = ramp * ramp
    side = np.power(2, log2_size // 2)
    matrix = ramp.reshape((side, side))
    print(matrix.shape)
    work = np.dot(matrix, matrix)
    eigenvalues, _eigenvectors = np.linalg.eig(work)
    print('Work Complete')
    return eigenvalues
|
def GetChemSpider12(a):
    """Load the pre-trained ChemSpider12 energy/dipole network manager for
    H/C/N/O systems, seeded with the given MSet."""
    treated_atoms = np.array([1, 6, 7, 8], dtype=np.uint8)
    # Evaluation-time settings (applied to the global PARAMS store).
    for key, value in (
        ('NetNameSuffix', 'act_sigmoid100'),
        ('learning_rate', 1e-05),
        ('momentum', 0.95),
        ('max_steps', 21),
        ('batch_size', 50),
        ('test_freq', 1),
        ('tf_prec', 'tf.float64'),
        ('EnergyScalar', 1.0),
        ('GradScalar', 1.0 / 20.0),
        ('DipoleScaler', 1.0),
        ('NeuronType', 'sigmoid_with_param'),
        ('sigmoid_alpha', 100.0),
        ('HiddenLayers', [2000, 2000, 2000]),
        ('EECutoff', 15.0),
        ('EECutoffOn', 0),
        ('Elu_Width', 4.6),
        ('EECutoffOff', 15.0),
        ('DSFAlpha', 0.18),
        ('AddEcc', True),
        ('KeepProb', [1.0, 1.0, 1.0, 0.7]),
        ('learning_rate_dipole', 0.0001),
        ('learning_rate_energy', 1e-05),
        ('SwitchEpoch', 2),
    ):
        PARAMS[key] = value
    digester = MolDigester(treated_atoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
    tset = TensorMolData_BP_Direct_EE_WithEle(a, digester, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
    return TFMolManage('Mol_chemspider12_maxatom35_H2O_with_CH4_ANI1_Sym_Direct_fc_sqdiff_BP_Direct_EE_ChargeEncode_Update_vdw_DSF_elu_Normalize_Dropout_act_sigmoid100_rightalpha', tset, False, 'fc_sqdiff_BP_Direct_EE_ChargeEncode_Update_vdw_DSF_elu_Normalize_Dropout', False, False)
|
def Xmas():
    """Shoot a tree with a ball.

    Builds a combined tree+ball molecule (geometries come from the
    module-level treeXYZ/ballXYZ strings), gives the ball atoms an initial
    velocity toward the tree, and propagates with Velocity Verlet.
    """
    tree = Mol()
    tree.FromXYZString(treeXYZ)
    tree.coords -= tree.Center()
    ball = Mol()
    ball.FromXYZString(ballXYZ)
    ball.coords -= ball.Center()
    # Place the (centered) ball 15 units along -x from the centered tree.
    ball.coords -= np.array([15.0, 0.0, 0.0])
    ntree = tree.NAtoms()
    toshoot = Mol(np.concatenate([tree.atoms, ball.atoms], axis=0), np.concatenate([tree.coords, ball.coords], axis=0))
    v0 = np.zeros(toshoot.coords.shape)
    # Ball atoms only (indices >= ntree): subtracting -0.1581 yields a +x
    # velocity, i.e. toward the tree.
    v0[ntree:] -= np.array([(- 0.1581), 0.0, 0.0])
    def GetEnergyForceForMol(m):
        # One-molecule set seeds the pre-trained ChemSpider12 manager; the
        # returned closure evaluates energy (and optionally force) at x_.
        s = MSet()
        s.mols.append(m)
        manager = GetChemSpider12(s)
        def EnAndForce(x_, DoForce=True):
            tmpm = Mol(m.atoms, x_)
            (Etotal, Ebp, Ebp_atom, Ecc, Evdw, mol_dipole, atom_charge, gradient) = manager.EvalBPDirectEEUpdateSingle(tmpm, PARAMS['AN1_r_Rc'], PARAMS['AN1_a_Rc'], PARAMS['EECutoffOff'], True)
            energy = Etotal[0]
            force = gradient[0]
            if DoForce:
                return (energy, force)
            else:
                return energy
        return EnAndForce
    F = GetEnergyForceForMol(toshoot)
    # No thermostat and no thermal velocity initialization: pure ballistic
    # collision driven by v0.
    PARAMS['MDThermostat'] = None
    PARAMS['MDV0'] = None
    traj = VelocityVerlet(None, toshoot, 'MerryXmas', F)
    traj.v = v0.copy()
    traj.Prop()
|
def Train():
    """Train sample water networks.

    NOTE(review): this redefines Train and shadows the earlier definition
    of the same name in this file. The first branch (energy+gradient
    network) is active; the second branch (energy+dipole electrostatics
    network) is disabled via `if 0:` and kept for reference.
    """
    if 1:
        # Active: energy-and-gradient Behler-Parrinello network on water_mini.
        a = MSet('water_mini')
        a.Load()
        random.shuffle(a.mols)
        TreatedAtoms = a.AtomTypes()
        PARAMS['NetNameSuffix'] = 'training_sample'
        PARAMS['learning_rate'] = 1e-05
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 15
        PARAMS['batch_size'] = 100
        PARAMS['test_freq'] = 1
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['EnergyScalar'] = 1.0
        PARAMS['GradScalar'] = (1.0 / 20.0)
        PARAMS['NeuronType'] = 'sigmoid_with_param'
        PARAMS['sigmoid_alpha'] = 100.0
        PARAMS['KeepProb'] = [1.0, 1.0, 1.0, 1.0]
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='AtomizationEnergy')
        tset = TensorMolData_BP_Direct_EandG_Release(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        manager = TFMolManage('', tset, False, 'fc_sqdiff_BP_Direct_EandG_SymFunction')
        PARAMS['Profiling'] = 0
        manager.Train(1)
    if 0:
        # Disabled: energy/dipole electrostatics-embedded (EE) network.
        a = MSet('water_mini')
        a.Load()
        random.shuffle(a.mols)
        TreatedAtoms = a.AtomTypes()
        PARAMS['NetNameSuffix'] = 'training_sample'
        PARAMS['learning_rate'] = 1e-05
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 15
        PARAMS['batch_size'] = 100
        PARAMS['test_freq'] = 1
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['EnergyScalar'] = 1.0
        PARAMS['GradScalar'] = (1.0 / 20.0)
        PARAMS['DipoleScaler'] = 1.0
        PARAMS['NeuronType'] = 'sigmoid_with_param'
        PARAMS['sigmoid_alpha'] = 100.0
        PARAMS['HiddenLayers'] = [100, 100, 100]
        PARAMS['EECutoff'] = 15.0
        PARAMS['EECutoffOn'] = 0
        PARAMS['Elu_Width'] = 4.6
        PARAMS['EECutoffOff'] = 15.0
        PARAMS['DSFAlpha'] = 0.18
        PARAMS['AddEcc'] = True
        PARAMS['KeepProb'] = [1.0, 1.0, 1.0, 1.0]
        PARAMS['learning_rate_dipole'] = 0.0001
        PARAMS['learning_rate_energy'] = 1e-05
        PARAMS['SwitchEpoch'] = 5
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
        tset = TensorMolData_BP_Direct_EE_WithEle_Release(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        manager = TFMolManage('', tset, False, 'fc_sqdiff_BP_Direct_EE_SymFunction')
        PARAMS['Profiling'] = 0
        manager.Train(1)
|
def Eval():
    """Evaluate trained sample water networks on the H2O trimer scan.

    Both `if 1:` branches run: first the energy-and-gradient (EandG)
    network, then the energy/dipole electrostatics-embedded (EE) network.
    Each writes per-frame total energies to a .dat file.
    """
    if 1:
        # EandG network evaluation.
        a = MSet('H2O_trimer_move', center_=False)
        a.ReadXYZ('H2O_trimer_move')
        TreatedAtoms = a.AtomTypes()
        PARAMS['NetNameSuffix'] = 'training_sample'
        PARAMS['learning_rate'] = 1e-05
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 15
        PARAMS['batch_size'] = 100
        PARAMS['test_freq'] = 1
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['EnergyScalar'] = 1.0
        PARAMS['GradScalar'] = (1.0 / 20.0)
        PARAMS['NeuronType'] = 'sigmoid_with_param'
        PARAMS['sigmoid_alpha'] = 100.0
        PARAMS['KeepProb'] = [1.0, 1.0, 1.0, 1.0]
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='AtomizationEnergy')
        tset = TensorMolData_BP_Direct_EandG_Release(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        # NOTE(review): the first manager is immediately overwritten by the
        # pre-trained one on the next line; the first construction appears
        # redundant - confirm it has no needed side effects.
        manager = TFMolManage('', tset, False, 'fc_sqdiff_BP_Direct_EandG_SymFunction')
        manager = TFMolManage('Mol_water_mini_ANI1_Sym_Direct_fc_sqdiff_BP_Direct_EandG_SymFunction_training_sample', tset, False, 'fc_sqdiff_BP_Direct_EandG_SymFunction', False, False)
        total_e = []
        for m in a.mols:
            (Etotal, Ebp, Ebp_atom, force) = manager.EvalBPDirectEandGLinearSingle(m, PARAMS['AN1_r_Rc'], PARAMS['AN1_a_Rc'])
            print('Unit of energy: a.u')
            print(('Etotal: %8.6f' % Etotal))
            print('Unit of diple: Joules/Angstrom')
            print('force:', force)
            total_e.append(Etotal)
        np.savetxt('EanGlearning.dat', np.asarray(total_e))
    if 1:
        # EE network evaluation (energies, dipoles, charges, vdW terms).
        a = MSet('H2O_trimer_move', center_=False)
        a.ReadXYZ('H2O_trimer_move')
        TreatedAtoms = a.AtomTypes()
        PARAMS['NetNameSuffix'] = 'training_sample'
        PARAMS['learning_rate'] = 1e-05
        PARAMS['momentum'] = 0.95
        PARAMS['max_steps'] = 5
        PARAMS['batch_size'] = 100
        PARAMS['test_freq'] = 1
        PARAMS['tf_prec'] = 'tf.float64'
        PARAMS['EnergyScalar'] = 1.0
        PARAMS['GradScalar'] = (1.0 / 20.0)
        PARAMS['DipoleScaler'] = 1.0
        PARAMS['NeuronType'] = 'sigmoid_with_param'
        PARAMS['sigmoid_alpha'] = 100.0
        PARAMS['HiddenLayers'] = [100, 100, 100]
        PARAMS['EECutoff'] = 15.0
        PARAMS['EECutoffOn'] = 0
        PARAMS['Elu_Width'] = 4.6
        PARAMS['EECutoffOff'] = 15.0
        PARAMS['DSFAlpha'] = 0.18
        PARAMS['AddEcc'] = True
        PARAMS['KeepProb'] = [1.0, 1.0, 1.0, 1.0]
        PARAMS['learning_rate_dipole'] = 0.0001
        PARAMS['learning_rate_energy'] = 1e-05
        PARAMS['SwitchEpoch'] = 2
        d = MolDigester(TreatedAtoms, name_='ANI1_Sym_Direct', OType_='EnergyAndDipole')
        tset = TensorMolData_BP_Direct_EE_WithEle_Release(a, d, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
        manager = TFMolManage('Mol_water_mini_ANI1_Sym_Direct_fc_sqdiff_BP_Direct_EE_SymFunction_training_sample', tset, False, 'fc_sqdiff_BP_Direct_EE_SymFunction', False, False)
        total_e = []
        for m in a.mols:
            (Etotal, Ebp, Ebp_atom, Ecc, Evdw, mol_dipole, atom_charge, force) = manager.EvalBPDirectEELinearSingle(m, PARAMS['AN1_r_Rc'], PARAMS['AN1_a_Rc'], PARAMS['EECutoffOff'], True)
            print('Unit of energy: a.u')
            print(('Etotal: %8.6f Ebp: %8.6f Ecc: %8.6f Evdw: %8.6f' % (Etotal, Ebp, Ecc, Evdw)))
            print('Unit of diple: a.u')
            print('Dipole: ', mol_dipole)
            print('Unit of diple: Joules/Angstrom')
            print('force:', force)
            total_e.append(Etotal)
        np.savetxt('EElearning.dat', np.asarray(total_e))
|
def get_datasets():
    """Instantiate every supported dataset, preserving the original order
    (callers such as get_dataset_by_name rely on .name lookups)."""
    dataset_classes = (
        ousidhoum2019.Ousidhoum2019,
        mulki2019.Mulki2019,
        mubarak2017twitter.Mubarak2017twitter,
        mubarak2017aljazeera.Mubarak2017aljazeera,
        davidson2017.Davidson2017,
        gibert2018.Gibert2018,
        gao2018.Gao2018,
        chung2019.Chung2019,
        qian2019.Qian2019,
        waseem2016.Waseem2016,
        jha2017.Jha2017,
        elsherief2018.Elsherief2018,
        mandl2019en.Mandl2019en,
        mandl2019ger.Mandl2019ger,
        mandl2019hind.Mandl2019hind,
        bretschneider2017.Bretschneider2017,
        ross2017.Ross2017,
        wiegand2018.Wiegand2018,
        pitenis2020.Pitenis2020,
        mathur2018.Mathur2018,
        alfina2017.Alfina2017,
        ibrohim2019.Ibrohim2019,
        ibrohim2018.Ibrohim2018,
        sanguinetti2018.Sanguinetti2018,
        fortuna2019.Fortuna2019,
        coltekin2019.Coltekin2019,
        albadi2018.Albadi2018,
        basile2019.Basile2019,
        founta2018.Founta2018,
        wulczyn2017toxic.Wulczyn2017toxic,
        wulczyn2017aggressive.Wulczyn2017aggressive,
        wulczyn2017attack.Wulczyn2017attack,
        sigurbergsson2019.Sigurbergsson2019,
        kulkarni2021.Kulkarni2021,
        novak2021.Novak2021,
        kumar2018.Kumar2018,
        zampieri2019.Zampieri2019,
        bretschneider2016wow.Bretschneider2016wow,
        bretschneider2016lol.Bretschneider2016lol,
    )
    return [dataset_class() for dataset_class in dataset_classes]
|
def get_dataset_by_name(name):
    """Return the first dataset instance whose .name equals `name`,
    or None when no dataset matches."""
    for candidate in get_datasets():
        if candidate.name == name:
            return candidate
    return None
|
class Albadi2018(dataset.Dataset):
    """Arabic religious hate-speech Twitter corpus (Albadi et al. 2018)."""
    name = 'albadi2018'
    url = 'https://github.com/nuhaalbadi/Arabic_hatespeech/archive/refs/heads/master.zip'
    hash = '7f7d87384b4b715655ec0e2d329bc234bbc965ad116290f2e2d0b11e26e272b3'
    files = [{'name': 'albadi2018ar_train.csv', 'language': 'ar', 'type': 'training', 'platform': 'twitter'}, {'name': 'albadi2018ar_test.csv', 'language': 'ar', 'type': 'test', 'platform': 'twitter'}]
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Unzip the archive, hydrate the tweet ids via the API, and copy
        the train/test CSVs into the dataset folder."""
        extracted_dir = helpers.unzip_file(tmp_file_path)
        hydrated_train = helpers.download_tweets_for_csv(os.path.join(extracted_dir, 'Arabic_hatespeech-master/train.csv'), 'id', api_config)
        hydrated_test = helpers.download_tweets_for_csv(os.path.join(extracted_dir, 'Arabic_hatespeech-master/test.csv'), 'id', api_config)
        helpers.copy_file(hydrated_train, os.path.join(dataset_folder, 'albadi2018ar_train.csv'))
        helpers.copy_file(hydrated_test, os.path.join(dataset_folder, 'albadi2018ar_test.csv'))

    @classmethod
    def unify_row(cls, row):
        """Map the binary 'hate' column onto a single-element labels list."""
        row['labels'] = ['hate' if row['hate'] == 1 else 'noHate']
        return row.drop(['hate'])
|
class Alfina2017(dataset.Dataset):
    """Indonesian hate-speech Twitter corpus (Alfina et al. 2017)."""
    name = 'alfina2017'
    url = 'https://github.com/ialfina/id-hatespeech-detection/raw/master/IDHSD_RIO_unbalanced_713_2017.txt'
    hash = '4ee1d9cc1f1fdd27fb4298207fabb717f4e09281bd68fa5dcbcf720d75f1d4ed'
    files = [{'name': 'alfina2017id.csv', 'language': 'id', 'type': 'training', 'platform': 'twitter'}]
    comment = ' '
    license = 'The dataset may be used freely, but if you want to publish paper/publication using the dataset, please cite this publication:\nIka Alfina, Rio Mulia, Mohamad Ivan Fanany, and Yudo Ekanata, "Hate Speech Detection in Indonesian Language: A Dataset and Preliminary Study ", in Proceeding of 9th International Conference on Advanced Computer Science and Information Systems 2017(ICACSIS 2017). '

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Normalize the tab-separated source file and copy it into place."""
        cleaned_path = helpers.clean_csv(tmp_file_path, sep='\t')
        helpers.copy_file(cleaned_path, os.path.join(dataset_folder, 'alfina2017id.csv'))

    @classmethod
    def unify_row(cls, row):
        """Rename Tweet -> text, wrap Label in a list, drop the raw columns."""
        row['text'] = row['Tweet']
        row['labels'] = [row['Label']]
        return row.drop(['Label', 'Tweet'])
|
class Basile2019(dataset.Dataset):
    """HatEval English training/development data (Basile et al. 2019)."""
    name = 'basile2019'
    url = 'https://github.com/cicl2018/HateEvalTeam/raw/master/Data%20Files/Data%20Files/%232%20Development-English-A/train_dev_en_merged.tsv'
    hash = 'fdd34bf56f0afa744ee7484774d259d83a756033cd8049ded81bd55d2fcb1272'
    files = [{'name': 'basile2019en.csv', 'language': 'en', 'type': 'training', 'platform': 'twitter'}]
    comment = 'HS - a binary value indicating if HS is occurring against one of the given targets (women or immigrants): 1 if occurs, 0 if not.\nTarget Range - if HS occurs (i.e. the value for the feature HS is 1), a binary value indicating if the target is a generic group of people (0) or a specific individual (1).\nAggressiveness- if HS occurs (i.e. the value for the feature HS is 1), a binary value indicating if the tweeter is aggressive (1) or not (0).'
    license = ''

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Normalize the tab-separated source file and copy it into place."""
        cleaned_path = helpers.clean_csv(tmp_file_path, sep='\t')
        helpers.copy_file(cleaned_path, os.path.join(dataset_folder, 'basile2019en.csv'))

    @classmethod
    def unify_row(cls, row):
        """Collect the binary HS/TR/AG flags (in that order) as labels."""
        row['labels'] = [flag for flag in ('HS', 'TR', 'AG') if row[flag] == 1]
        return row.drop(['HS', 'TR', 'AG', 'id'])
|
class Bretschneider2016lol(dataset.Dataset):
    """League of Legends forum harassment corpus (Bretschneider 2016)."""
    name = 'bretschneider2016lol'
    url = 'http://ub-web.de/research/resources/lol_anonymized.zip'
    hash = '901e0d51428f34b94bf6b3f59b0e9cf71dabe94fc74fd81fd1e9be199d2902bc'
    files = [{'name': 'bretschneider2016en_lol.csv', 'language': 'en', 'type': 'training', 'platform': 'League of Legends'}]
    comment = ' '
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Extract posts/annotations tables from the SQL dump and left-join
        them on (topic_id, post_number)."""
        extracted = helpers.unzip_file(tmp_file_path)
        tables_dir = helpers.extract_sql_tables(os.path.join(extracted, 'lol_anonymized.sql'))
        joined = helpers.join_csvs(os.path.join(tables_dir, 'posts.csv'), ['topic_id', 'post_number'], os.path.join(tables_dir, 'annotations.csv'), ['topic_id', 'post_number'], how='left')
        helpers.copy_file(joined, os.path.join(dataset_folder, 'bretschneider2016en_lol.csv'))

    @classmethod
    def unify_row(cls, row):
        """Label a post offensive when it carries an annotated offender."""
        row['text'] = row['html_message']
        # After the left join, unannotated posts carry float NaN in
        # 'offender'; any non-float value therefore marks an annotation.
        row['labels'] = ['offensive'] if (type(row['offender']) != float) else []
        return row.drop(['topic_id', 'post_number', 'annotator', 'offender', 'victim', 'author', 'html_message', 'timestamp'])
|
class Bretschneider2016wow(dataset.Dataset):
    """World of Warcraft forum harassment corpus (Bretschneider 2016)."""
    name = 'bretschneider2016wow'
    url = 'http://www.ub-web.de/research/resources/wow_anonymized.zip'
    hash = '0f5d67879306cd67154c31583b6e8750b9290f54c0065cc8cdf11ab6a8d1a26d'
    files = [{'name': 'bretschneider2016en_wow.csv', 'language': 'en', 'type': 'training', 'platform': 'World of Warcraft'}]
    comment = ' '
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Extract posts/annotations tables from the SQL dump and left-join
        them on (topic_id, post_number)."""
        extracted = helpers.unzip_file(tmp_file_path)
        tables_dir = helpers.extract_sql_tables(os.path.join(extracted, 'wow_anonymized.sql'))
        joined = helpers.join_csvs(os.path.join(tables_dir, 'posts.csv'), ['topic_id', 'post_number'], os.path.join(tables_dir, 'annotations.csv'), ['topic_id', 'post_number'], how='left')
        helpers.copy_file(joined, os.path.join(dataset_folder, 'bretschneider2016en_wow.csv'))

    @classmethod
    def unify_row(cls, row):
        """Label a post offensive when it carries an annotated offender."""
        row['text'] = row['html_message']
        # After the left join, unannotated posts carry float NaN in
        # 'offender'; any non-float value therefore marks an annotation.
        row['labels'] = ['offensive'] if (type(row['offender']) != float) else []
        return row.drop(['topic_id', 'post_number', 'annotator', 'offender', 'victim', 'author', 'html_message', 'timestamp'])
|
class Bretschneider2017(dataset.Dataset):
    """Facebook hate-speech corpus (Bretschneider 2017)."""
    name = 'bretschneider2017'
    url = 'http://ub-web.de/research/resources/fb_hate_speech_csv.zip'
    hash = '5d31274178d342fb6516ee1015a6ec3c8fa7076e2d6313efb43040a6f8ba26af'
    files = [{'name': 'bretschneider2017en.csv', 'language': 'en', 'type': 'training', 'platform': 'facebook'}]
    comment = ' '
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Join raw comments with their annotations and drop duplicate ids."""
        extracted = helpers.unzip_file(tmp_file_path)
        comments_csv = os.path.join(extracted, 'fb_hate_speech_csv/comments.csv')
        annotations_csv = os.path.join(extracted, 'fb_hate_speech_csv/annotated_comments.csv')
        joined = helpers.join_csvs(comments_csv, 'comment_id', annotations_csv, 'comment_id')
        deduped = helpers.drop_duplicates(joined, ['comment_id'])
        helpers.copy_file(deduped, os.path.join(dataset_folder, 'bretschneider2017en.csv'))

    @classmethod
    def unify_row(cls, row):
        """Map numeric valence/target_type codes to label strings."""
        valence_labels = {1: 'moderate', 2: 'substantially_offending'}
        # target_type code 4 had no branch in the original mapping and is
        # deliberately absent here as well.
        target_labels = {
            1: 'no_target',
            2: 'targets_foreigner_refugee',
            3: 'targets_politicians_government',
            5: 'targets_other',
            6: 'targets_unknown',
            7: 'targets_page_community',
            8: 'targets_press',
        }
        row['text'] = row['message']
        labels = []
        if row['valence'] in valence_labels:
            labels.append(valence_labels[row['valence']])
        if row['target_type'] in target_labels:
            labels.append(target_labels[row['target_type']])
        row['labels'] = labels
        return row.drop(['comment_id', 'post_id', 'anonymized_user', 'message', 'created_at', 'annotator_id', 'entry_number', 'valence', 'target_type'])
|
class Chung2019(dataset.Dataset):
    """CONAN counter-narrative corpus (Chung et al. 2019)."""
    name = 'chung2019'
    url = 'https://raw.githubusercontent.com/marcoguerini/CONAN/master/CONAN/CONAN.json'
    hash = '511c062b5563affbc78bb2c9d9edafd88fe6419add73b5190865bb42863eacc4'
    files = [{'name': 'chung2019.csv', 'language': 'en/fr/it', 'type': 'training', 'platform': 'artifical'}]
    license = 'This resource can be used for research purposes. Please cite the publication above if you use it.'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Convert the CONAN JSON payload into CSV and copy it into place."""
        with open(tmp_file_path, 'r') as source:
            payload = json.load(source)
        frame = pd.DataFrame(payload['conan'])
        csv_path = (tmp_file_path + '.csv')
        frame.to_csv(csv_path, index=False)
        helpers.copy_file(csv_path, os.path.join(dataset_folder, 'chung2019.csv'))

    @classmethod
    def unify_row(cls, row):
        """Use the hate-speech side of each pair; labels are 'hate' plus the
        row's hsType value."""
        row['text'] = row['hateSpeech']
        row['labels'] = ['hate', row['hsType']]
        return row.drop(['cn_id', 'age', 'gender', 'educationLevel', 'cnType', 'hsType', 'hsSubType', 'hateSpeech', 'counterSpeech'])

    @classmethod
    def unify_format(cls, df):
        """Unify rows, then drop duplicate texts (each hate item appears once
        per counter-narrative in the source)."""
        unified = df.apply(cls.unify_row, axis=1)
        return unified.drop_duplicates(subset=['text'])
|
class Coltekin2019(dataset.Dataset):
    """Turkish offensive-language corpus (OffensEval 2020 Turkish test set)."""
    name = 'coltekin2019'
    url = 'https://coltekin.github.io/offensive-turkish/offenseval2020-turkish.zip'
    hash = '7977e96255dbc9b8d14893f1b14cbe3dec53c70358503c062c5a59720ec9c2f2'
    files = [{'name': 'coltekin2019tr.csv', 'language': 'tr', 'type': 'training', 'platform': 'twitter'}]
    comment = ' '
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Join the label and text files from the zip into a single CSV."""
        zip_file_path = helpers.unzip_file(tmp_file_path)
        labels_file = os.path.join(zip_file_path, 'offenseval2020-turkish/offenseval-tr-testset-v1/offenseval-tr-labela-v1.tsv')
        # The label file is comma-separated despite the .tsv extension.
        labels_file = helpers.clean_csv(labels_file, sep=',', names=['lid', 'class'])
        texts_file = os.path.join(zip_file_path, 'offenseval2020-turkish/offenseval-tr-testset-v1/offenseval-tr-testset-v1.tsv')
        texts_file = helpers.clean_csv(texts_file, sep='\t', names=['rid', 'text'], header=0)
        joined = helpers.join_csvs(labels_file, 'lid', texts_file, 'rid')
        helpers.copy_file(joined, os.path.join(dataset_folder, 'coltekin2019tr.csv'))

    @classmethod
    def unify_row(cls, row):
        """Wrap the class column in a labels list and drop the join keys."""
        row['labels'] = [row['class']]
        return row.drop(['lid', 'rid', 'class'])
|
class Dataset(ABC):
    # NOTE(review): stacking @staticmethod on top of @property produces a
    # staticmethod wrapping a property object rather than a working class
    # attribute descriptor; subclasses in this file override these names
    # with plain class attributes (e.g. name = 'albadi2018'), which is why
    # lookups work in practice. Confirm before relying on the descriptors
    # themselves.
    @staticmethod
    @property
    @abstractmethod
    def name():
        ' Name of the dataset '
        pass
    @staticmethod
    @property
    @abstractmethod
    def url():
        ' URL of the downloadable file '
        pass
    @staticmethod
    @property
    def license():
        ' License information of the dataset '
        # Default: no license information recorded.
        return []
    @staticmethod
    @property
    def hash():
        ' SHA256 hash of the downloaded file '
        # Default: empty hash; valid_hash() will then warn for every file.
        return ''
    @staticmethod
    @property
    def files():
        ' List of dicts for each file that will be created during processing.\n \n Each dict should contain the following information:\n name -- the file name\n language -- ISO 639-1 code of the language\n type -- training or test\n platform -- platfrom of the generated data (e.g. twitter, facebook,...)'
        return []
    @classmethod
    def download(cls, file_name: str):
        """Download the dataset archive from cls.url.

        Keyword arguments:
        file_name -- path where the downloaded file will be stored
            (including the file name)
        """
        return helpers.download_from(cls.url, file_name)
@classmethod
@abstractmethod
def process(cls, tmp_file_path: str, dataset_folder: str, api_config: Optional[Dict]=None):
' Process the downloaded file. The processed file should be copied to the corresponding dataset_folder in this method.\n \n Keyword arguments:\n tmp_file_path -- path of the file to process\n dataset_folder -- path where the resulting file should be stored\n '
pass
@classmethod
def valid_hash(cls, file: str):
' Calculate the SHA256 hash of the given file and print a warning if the hash differs.\n \n Keyword arguments:\n file -- path of the file to hash\n '
hash = sha256()
with open(file, 'rb') as file:
while True:
chunk = file.read(hash.block_size)
if (not chunk):
break
hash.update(chunk)
hash_value = hash.hexdigest()
if (cls.hash == hash_value):
return True
else:
print(((((('WARNING: ' + cls.name) + ': Expected Dataset hash to be ') + cls.hash) + ' but was ') + hash_value))
return False
@classmethod
def unify_row(cls, row: pd.Series):
' This method is called for each row in the dataset. Use this method to filter attributes and rename columns.\n \n Keyword arguments:\n row -- pandas.Series that contains the row\n '
return row
@classmethod
def translate_row(cls, row: pd.Series, translation: dict):
' This method is called for each row in the dataset. Translate the labels according to config.json.\n \n Keyword arguments:\n row -- pandas.Series that contains the row\n translation -- dict that contains the translations\n '
translated_labels = []
if (type(row['labels']) == str):
row['labels'] = ast.literal_eval(row['labels'])
for i in row['labels']:
translated_labels.extend(translation.get(i, [i]))
row['labels'] = list(set(translated_labels))
return row
@classmethod
def unify(cls, config: Dict, dataset_name: str):
' Perform unification of the dataset files\n \n Keyword arguments:\n config -- supplied config\n dataset_name -- name of the dataset to be unified\n '
dataset_folder = os.path.join(config['file_directory'], dataset_name)
for file in cls.files:
df = pd.read_csv(os.path.join(dataset_folder, file['name']))
df = cls.unify_format(df)
if (config and (file['name'] in config['datasets'])):
df = cls.translate_labels(df, config['datasets'][file['name']]['translation'])
df.to_csv(os.path.join(dataset_folder, file['name']), index_label='id', quoting=csv.QUOTE_NONNUMERIC, sep='\t')
@classmethod
def translate_labels(cls, df: pd.DataFrame, translation: dict):
' Perform label translation of the dataset file\n \n Keyword arguments:\n df -- pandas.DataFrame that contains the data\n translation -- dict that contains the translations\n '
return df.apply(cls.translate_row, axis=1, args=(translation,))
@classmethod
def unify_format(cls, df: pd.DataFrame):
' Calls the unfiy method for each entry of the dataset\n \n Keyword arguments:\n df -- pandas.DataFrame that contains the dataset data\n '
return df.apply(cls.unify_row, axis=1)
|
class Davidson2017(dataset.Dataset):
    """Davidson et al. 2017 hate/offensive tweet corpus (English, Twitter)."""
    name = 'davidson2017'
    url = 'https://github.com/t-davidson/hate-speech-and-offensive-language/raw/master/data/labeled_data.csv'
    hash = 'fcb8bc7c68120ae4af04a5b9acd58585513ede11e1548ebf36a5c2040b6f6281'
    files = [{'name': 'davidson2017en.csv', 'language': 'en', 'type': 'training', 'platform': 'twitter'}]
    license = 'MIT License\n\nCopyright (c) 2017 Tom Davidson\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """The download is already a ready-to-use CSV; just store it."""
        helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, 'davidson2017en.csv'))

    @classmethod
    def unify_row(cls, row):
        """Rename tweet -> text and translate the numeric class into a label list."""
        row['text'] = row['tweet']
        class_names = {0: 'hate', 1: 'offensive', 2: 'normal'}
        # Unknown class values yield an empty label list, like the original chain.
        row['labels'] = [class_names[row['class']]] if row['class'] in class_names else []
        return row.drop(['Unnamed: 0', 'count', 'hate_speech', 'offensive_language', 'neither', 'class', 'tweet'])
|
class Elsherief2018(dataset.Dataset):
    """ElSherief et al. 2018 ICWSM hate speech corpus (English, Twitter)."""
    name = 'elSherief2018'
    url = 'https://github.com/mayelsherif/hate_speech_icwsm18/archive/master.zip'
    hash = '34365d3d398b0a345a4278df30d851761cb6dc34c7a38f4bdfb77f20fae164c2'
    files = [{'name': 'elSherief2018en.csv', 'language': 'en', 'type': 'training', 'platform': 'twitter'}]
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Merge the per-hashtag / per-key-phrase CSVs, hydrate the tweets and store.

        The hashtag-based files carry hand-picked label sets; every
        key-phrase-based file is labeled with its own file stem, so the
        mapping is generated instead of spelled out entry by entry.
        """
        base_path = os.path.join(helpers.unzip_file(tmp_file_path), 'hate_speech_icwsm18-master')
        hashtag_labels = {
            'ethn_blackpeoplesuck.csv': ['racism', 'ethnicity', 'black'],
            'ethn_whitepower.csv': ['racism', 'ethnicity', 'white'],
            'istandwithhatespeech.csv': ['prohatespeech'],
            'rel_nomuslimrefugees.csv': ['racism', 'religious', 'islamophobia'],
        }
        key_phrase_stems = [
            'archaic_boojie', 'archaic_chinaman', 'archaic_hillbilly', 'archaic_surrendermonkey',
            'archaic_whigger', 'archaic_whitenigger', 'archaic_wigerette', 'archaic_wigger',
            'class_bitterclinger', 'class_conspiracytheorist', 'class_redneck', 'class_rube',
            'class_trailerparktrash', 'class_whitetrash', 'disability_retard', 'disability_retarded',
            'ethn_camelfucker', 'ethn_coonass', 'ethn_housenigger', 'ethn_mooncricket',
            'ethn_nigger', 'ethn_raghead', 'ethn_spic', 'ethn_trailerparktrash',
            'ethn_trailertrash', 'ethn_wetback', 'ethn_whitenigger', 'ethn_whitetrash',
            'gender_bint', 'gender_cunt', 'gender_dyke', 'gender_twat',
            'nation_bamboocoon', 'nation_camelfucker', 'nation_chinaman', 'nation_limey',
            'nation_plasticpaddy', 'nation_sidewayspussy', 'nation_surrendermonkey', 'nation_whigger',
            'nation_whitenigger', 'nation_wigger', 'nation_zionazi', 'rel_camelfucker',
            'rel_muzzie', 'rel_souptaker', 'rel_zionazi', 'sexorient_dyke', 'sexorient_faggot',
        ]
        # Insertion order matters: merge_csvs names its output after the first key.
        files = {}
        for fname, labels in hashtag_labels.items():
            files[os.path.join(base_path, 'twitter_hashtag_based_datasets/' + fname)] = labels
        for stem in key_phrase_stems:
            files[os.path.join(base_path, 'twitter_key_phrase_based_datasets/' + stem + '.csv')] = [stem]
        merged = helpers.merge_csvs(files)
        hydrated = helpers.download_tweets_for_csv(merged, 'tweet_id', api_config)
        helpers.copy_file(hydrated, os.path.join(dataset_folder, 'elSherief2018en.csv'))

    @classmethod
    def unify_row(cls, row):
        """Rows already match the unified format after processing."""
        return row
|
class Fortuna2019(dataset.Dataset):
    """Fortuna et al. 2019 Portuguese hate speech corpus (Twitter)."""
    name = 'fortuna2019'
    url = 'https://b2share.eudat.eu/api/files/792b86e1-e676-4a0d-971f-b41a1ffb9b18/annotator_classes.csv'
    hash = 'f759888e9489a030187bbf6fbe005a7c5a6c0c3468882430924d9aaebd84759d'
    files = [{'name': 'fortuna2019pt.csv', 'language': 'pt', 'type': 'training', 'platform': 'twitter'}]
    comment = ' '
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Hydrate the tweet IDs into full texts and store the dataset file."""
        hydrated = helpers.download_tweets_for_csv(tmp_file_path, 'tweet_id', api_config)
        helpers.copy_file(hydrated, os.path.join(dataset_folder, 'fortuna2019pt.csv'))

    @classmethod
    def unify_row(cls, row):
        """Split the semicolon-separated class column into a label list."""
        row['labels'] = row['class'].split('; ')
        return row.drop(['class'])
|
class Founta2018(dataset.Dataset):
    """Founta et al. 2018 abusive behavior corpus (English, Twitter)."""
    name = 'founta2018'
    url = 'https://zenodo.org/record/2657374/files/hatespeech_id_label.csv'
    hash = '35f19a5746eac9be27cd635a09b9ced11569080df10d84fb140ca76164836cef'
    files = [{'name': 'founta2018en.csv', 'language': 'en', 'type': 'training', 'platform': 'twitter'}]
    comment = ' '
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Name the columns, hydrate the tweet IDs and store the dataset file."""
        named = helpers.clean_csv(tmp_file_path, names=['tweet', 'class'])
        hydrated = helpers.download_tweets_for_csv(named, 'tweet', api_config)
        helpers.copy_file(hydrated, os.path.join(dataset_folder, 'founta2018en.csv'))

    @classmethod
    def unify_row(cls, row):
        """Wrap the single class value in a label list and drop the raw column."""
        row['labels'] = [row['class']]
        return row.drop(['class'])
|
class Gao2018(dataset.Dataset):
    """Gao & Huang 2018 Fox News comment corpus (English)."""
    name = 'gao2018'
    url = 'https://github.com/sjtuprog/fox-news-comments/raw/master/full-comments-u.json'
    hash = '059152e61f632f1e6671a68214d5618a21e6cf78f2512773e0421b9568aab8cf'
    files = [{'name': 'gao2018en.csv', 'language': 'en', 'type': 'training', 'platform': 'fox news'}]
    comment = 'Inflammatory language explicitly or implicitly threatens or demeans a person or agroup based upon a facet of their identity such as gender, ethnicity, or sexualorientation.\n- Excludes insults towards other anonymous users\n- Includes insults of belief systems'
    license = 'The MIT License\n\nCopyright (c) 2010-2019 Google, Inc. http://angularjs.org\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Convert the JSON-Lines download to CSV and store it."""
        converted = helpers.convert_jsonl_to_csv(tmp_file_path)
        helpers.copy_file(converted, os.path.join(dataset_folder, 'gao2018en.csv'))

    @classmethod
    def unify_row(cls, row):
        """Translate the binary label into normal/hate and strip metadata columns."""
        label_names = {0: 'normal', 1: 'hate'}
        row['labels'] = [label_names[row['label']]] if row['label'] in label_names else []
        return row.drop(['title', 'succ', 'meta', 'user', 'mentions', 'prev', 'label'])
|
class Gibert2018(dataset.Dataset):
    """de Gibert et al. 2018 Stormfront hate speech corpus (English)."""
    name = 'gibert2018'
    url = 'https://github.com/Vicomtech/hate-speech-dataset/archive/master.zip'
    hash = 'acc0d7ce40e22cf019daa752a5136049a45462b9ba4eab8bf40ea82dcd867eba'
    files = [{'name': 'gibert2018en.csv', 'language': 'en', 'type': 'training', 'platform': 'stormfront'}]
    license = 'The resources in this repository are licensed under the Creative Commons Attribution-ShareAlike 3.0 Spain\nLicense. To view a copy of this license, visit http://creativecommons.org/licenses/by-sa/3.0/es/ or send\na letter to Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.'

    @classmethod
    def replace_csv_entry_with_filecontents(cls, row, directory):
        """Load the post text for this metadata row from its per-post .txt file."""
        txt_path = os.path.join(directory, 'all_files', (row['file_id'] + '.txt'))
        with open(txt_path, 'r', encoding='utf-8') as handle:
            # NOTE(review): joining readlines() with '\n' keeps each line's own
            # trailing newline too, so multi-line posts gain blank lines between
            # lines -- preserved as-is; confirm before changing to handle.read().
            row['text'] = '\n'.join(handle.readlines())
        return row

    @classmethod
    def merge_txt_to_csv(cls, directory):
        """Attach the text of every annotated post to the metadata CSV."""
        metadata = pd.read_csv(os.path.join(directory, 'annotations_metadata.csv'), encoding='utf-8')
        metadata = metadata.apply(cls.replace_csv_entry_with_filecontents, axis=1, args=(directory,))
        output_file = os.path.join(directory, (cls.name + '.csv'))
        metadata.to_csv(output_file)
        return output_file

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Unzip the repository, merge texts into the metadata CSV and store it."""
        extraction_dir = helpers.unzip_file(tmp_file_path)
        merged = cls.merge_txt_to_csv(os.path.join(extraction_dir, 'hate-speech-dataset-master'))
        helpers.copy_file(merged, os.path.join(dataset_folder, 'gibert2018en.csv'))

    @classmethod
    def unify_row(cls, row):
        """Wrap the label in a list and drop bookkeeping columns."""
        row['labels'] = [row['label']]
        return row.drop(['Unnamed: 0', 'subforum_id', 'file_id', 'user_id', 'num_contexts', 'label'])
|
def download_from(url: str, destination_file: str) -> str:
    """Download a file to the specified destination.

    Keyword arguments:
    url -- url of the file to download
    destination_file -- path to the file to download to

    Returns path to the downloaded file.
    """
    try:
        with urlopen(url) as response:
            os.makedirs(os.path.dirname(destination_file), exist_ok=True)
            with open(destination_file, 'wb') as out:
                shutil.copyfileobj(response, out)
        return destination_file
    except URLError as e:
        # A failing SSL handshake on MacOS usually means the certificate
        # bundle was never installed; point the user at the fix before
        # re-raising.
        print('Additional certificate installation is needed for MacOS. See https://stackoverflow.com/questions/50236117/scraping-ssl-certificate-verify-failed-error-for-http-en-wikipedia-org and https://stackoverflow.com/questions/44649449/brew-installation-of-python-3-6-1-ssl-certificate-verify-failed-certificate/44649450#44649450 for help')
        raise e
|
def convert_excel_to_csv(file_name: str) -> str:
    """Convert an Excel file to a CSV file.

    Keyword arguments:
    file_name -- path of the Excel file

    Returns path to the converted CSV file.
    """
    csv_path = (file_name + '.csv')
    pd.read_excel(file_name).to_csv(csv_path, index=False)
    return csv_path
|
def copy_file(source_file: str, destination_file: str) -> str:
    """Copy a file to a given path, creating missing parent directories.

    Keyword arguments:
    source_file -- path of the source file
    destination_file -- path of the destination file

    Returns path to the destination file.
    """
    target_dir = os.path.dirname(destination_file)
    os.makedirs(target_dir, exist_ok=True)
    shutil.copyfile(source_file, destination_file)
    return destination_file
|
def convert_json_to_csv(file_name: str) -> str:
    """Convert a JSON file to a CSV file.

    Keyword arguments:
    file_name -- path of the JSON file

    Returns path to the converted CSV file.
    """
    csv_path = (file_name + '.csv')
    pd.read_json(file_name).to_csv(csv_path, index=False)
    return csv_path
|
def convert_jsonl_to_csv(file_name: str) -> str:
    """Convert a JSON-Lines file to a CSV file.

    Keyword arguments:
    file_name -- path of the JSONL file

    Returns path to the converted CSV file.
    """
    # Bug fix: the output used to be named '<file>.json' even though the
    # written content is CSV; use a '.csv' suffix like the other converters.
    new_file = (file_name + '.csv')
    records = []
    with open(file_name, 'r') as jsonl_file:
        for line in jsonl_file:
            # Skip blank lines (e.g. a trailing newline) instead of crashing
            # json.loads on empty input.
            if line.strip():
                records.append(json.loads(line))
    pd.DataFrame(records).to_csv(new_file, index=False)
    return new_file
|
def unzip_file(file_name: str) -> str:
    """Unpack a ZIP file next to itself.

    Keyword arguments:
    file_name -- path of the ZIP file

    Returns path to the folder containing the unpacked files.
    """
    target = os.path.join(os.path.dirname(file_name), (os.path.basename(file_name) + '_extracted'))
    # exist_ok=False: refuse to extract into leftovers of a previous run.
    os.makedirs(target, exist_ok=False)
    with zipfile.ZipFile(file_name) as archive:
        archive.extractall(target)
    return target
|
def untarbz_file(file_name: str):
    """Unpack a .tar.bz2 file into its containing directory.

    Keyword arguments:
    file_name -- path of the .tar.bz2 file
    """
    # Use a context manager so the archive handle is closed even when
    # extraction raises (the previous code leaked it on error).
    # NOTE(review): for untrusted archives consider extractall(filter='data')
    # (PEP 706) once the minimum Python version allows it.
    with tarfile.open(file_name, 'r:bz2') as tar:
        tar.extractall(path=os.path.dirname(file_name))
|
def add_column(file_name: str, column_name: str, column_value) -> str:
    """Insert a new column into a CSV file.

    Keyword arguments:
    file_name -- path of the CSV file
    column_name -- name of the new column
    column_value -- default value that is added in each line

    Returns path to the resulting file.
    """
    out_path = (file_name + '_new_column')
    frame = pd.read_csv(file_name)
    row_total = frame.count().max()
    frame.insert(loc=0, column=column_name, value=([column_value] * row_total))
    frame.to_csv(out_path, index=False)
    return out_path
|
def clean_csv(file_name: str, names: [str]=None, header: int='infer', sep: str=',', dtype: dict=None) -> str:
    """Round-trip a CSV through pandas to normalize separators and quoting.

    Can also be used to (re)name the columns.

    Keyword arguments:
    file_name -- path to the file
    names -- list that contains the names for the columns
    header -- set to 0 if an existing header should be overwritten
    sep -- seperator of the CSV file
    dtype -- dict containing the data types of the columns

    Returns path to the resulting file.
    """
    cleaned_path = (file_name + '_clean')
    frame = pd.read_csv(file_name, names=names, sep=sep, header=header, dtype=dtype)
    frame.to_csv(cleaned_path, index=False, quoting=csv.QUOTE_NONNUMERIC)
    return cleaned_path
|
def join_csvs(file1: str, column1: str, file2: str, column2: str, how: str='inner') -> str:
    """Join two CSVs on a given column.

    Keyword arguments:
    file1 -- path of the first CSV
    column1 -- name of the column to join on in file1
    file2 -- path of the second CSV
    column2 -- name of the column to join on in file2
    how -- join type of the pandas DataFrame.merge function

    Returns path to the resulting file.
    """
    joined_path = (file1 + '_joined')
    left = pd.read_csv(file1)
    right = pd.read_csv(file2)
    left.merge(right, how=how, left_on=column1, right_on=column2).to_csv(joined_path, index=False)
    return joined_path
|
def drop_duplicates(file_name: str, columns: [str]) -> str:
    """Drop all duplicates in a CSV file.

    Keyword arguments:
    file_name -- path of the CSV file
    columns -- list of columns to perform duplicate checking on

    Returns path to the resulting file.
    """
    deduped_path = (file_name + '_dropped')
    deduped = pd.read_csv(file_name).drop_duplicates(columns)
    deduped.to_csv(deduped_path, index=False)
    return deduped_path
|
def merge_csvs(files: dict) -> str:
    """Merge multiple CSV files into one.

    Keyword arguments:
    files -- dictionary with the filename as a key and a list of attributes that will be added in a label column

    Returns path to the resulting file (named after the first key).
    """
    new_file = (list(files.keys())[0] + '_merged')
    frames = []
    for path, labels in files.items():
        df = pd.read_csv(path)
        if len(labels):
            # len(df) instead of df.count().max(): count() ignores NaN cells,
            # which made insert() fail on frames containing missing values.
            df.insert(loc=0, column='labels', value=([labels] * len(df)))
        frames.append(df)
    # DataFrame.append was removed in pandas 2.0; concatenate once at the end.
    pd.concat(frames).to_csv(new_file, index=False)
    return new_file
|
def download_tweets_for_csv(file_name: str, column: str, api_data: Dict) -> str:
    """Replace the Tweet IDs of a CSV file with the actual tweets.

    Keyword arguments:
    file_name -- path of the CSV file
    column -- name of the column containing the tweet IDs
    api_data -- credentials dict; expects api_data['twitter'][...] keys

    Returns path to the resulting file.
    """
    def hydrate(row, id_to_text, columns):
        # Rows whose tweet could be fetched get a 'text' column and lose the
        # ID column; rows that could not be hydrated (deleted/protected
        # tweets) become all-NaN so the dropna(how='all') below removes them.
        if str(row[column]) in id_to_text:
            row['text'] = id_to_text[row[column]]
            return row.drop(column)
        return pd.Series(index=columns).drop(column)

    new_file = (file_name + '_with_tweets')
    df = pd.read_csv(file_name, dtype={column: str})
    api = Twarc(api_data['twitter']['consumer_key'], api_data['twitter']['consumer_secret'], api_data['twitter']['access_token'], api_data['twitter']['access_token_secret'])
    id_to_text = {str(tweet['id']): tweet['full_text'] for tweet in api.hydrate(df[column])}
    df = df.apply(hydrate, axis=1, args=(id_to_text, df.columns)).dropna(how='all')
    df.to_csv(new_file, index=False, quoting=csv.QUOTE_NONNUMERIC)
    return new_file
|
def extract_sql_tables(file_name: str) -> str:
    """Extracts tables from SQL file and saves them as CSV.

    Keyword arguments:
    file_name -- path of the SQL file

    Returns path of the directory that contains the resulting files.
    """
    def find_tables(dump_filename):
        # Collect the names of all tables declared via CREATE TABLE `name`.
        table_list = []
        with open(dump_filename, 'r') as f:
            for line in f:
                line = line.strip()
                if line.lower().startswith('create table'):
                    table_name = re.findall('create table `([\\w_]+)`', line.lower())
                    table_list.extend(table_name)
        return table_list

    def read_dump(dump_filename: str, output_dir: str, target_table: str) -> None:
        # Single pass over the dump with a small state machine:
        #   read_mode 0 -- skipping lines unrelated to target_table
        #   read_mode 1 -- inside the CREATE TABLE block, collecting column names
        #   read_mode 2 -- inside the INSERT statement, collecting row tuples
        column_names = []
        rows = []
        read_mode = 0
        with open(dump_filename, 'r') as f:
            for line in f:
                line = line.strip()
                if (line.lower().startswith('insert') and (target_table in line)):
                    read_mode = 2
                if (line.lower().startswith('create table') and (target_table in line)):
                    read_mode = 1
                    continue
                if (read_mode == 0):
                    continue
                elif (read_mode == 1):
                    # The PRIMARY KEY line terminates the column definitions.
                    if line.lower().startswith('primary'):
                        read_mode = 0
                        continue
                    colheader = re.findall('`([\\w_]+)`', line)
                    for col in colheader:
                        column_names.append(col.strip())
                elif (read_mode == 2):
                    # Trim the trailing ';' of a terminated VALUES line.
                    # NOTE(review): end_index == 0 produces an EMPTY slice for
                    # non-terminated lines -- presumably the dumps processed
                    # here always close their INSERT on the same line; verify
                    # before feeding multi-line INSERT statements.
                    if line.endswith(';'):
                        end_index = (- 1)
                    else:
                        end_index = 0
                    # Parse the literal tuples after "VALUES " safely (no eval).
                    data = ast.literal_eval(line[(line.find('VALUES') + 7):end_index])
                    try:
                        for item in data:
                            row = {}
                            for (key, value) in zip(column_names, item):
                                row[key] = value
                            rows.append(row)
                    except IndexError:
                        pass
                    if line.endswith(';'):
                        # End of the INSERT block: materialize and stop scanning.
                        df = pd.DataFrame(rows, columns=column_names)
                        break
        # NOTE(review): `df` is only bound when a ';'-terminated INSERT for
        # target_table was found; otherwise this raises UnboundLocalError.
        df.to_csv(os.path.join(output_dir, (target_table + '.csv')), index=False)

    output_dir = os.path.join(os.path.dirname(file_name), 'extracted')
    # exist_ok=False: refuse to mix output with a previous extraction.
    os.makedirs(output_dir, exist_ok=False)
    table_list = find_tables(file_name)
    if (len(table_list) > 0):
        for table in table_list:
            read_dump(file_name, output_dir, table)
    return output_dir
|
class Ibrohim2018(dataset.Dataset):
    """Ibrohim & Budi 2018 Indonesian abusive language corpus (Twitter)."""
    name = 'ibrohim2018'
    url = 'https://github.com/okkyibrohim/id-abusive-language-detection/raw/master/re_dataset_three_labels.csv'
    hash = '8e88d5bf4d98f86d7c8fb9c010008246e206814e8dbe5695ec7de4a76812bc86'
    files = [{'name': 'ibrohim2018id.csv', 'language': 'id', 'type': 'training', 'platform': 'twitter'}]
    comment = '1 (not abusive language), 2 (abusive but not offensive), and 3 (offensive language)'
    license = 'This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """The download is already a ready-to-use CSV; just store it."""
        helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, 'ibrohim2018id.csv'))

    @classmethod
    def unify_row(cls, row):
        """Map the numeric label (1/2/3) onto none/abusive/offensive."""
        label_names = {1: 'none', 2: 'abusive', 3: 'offensive'}
        row['text'] = row['Tweet']
        row['labels'] = [label_names[row['Label']]] if row['Label'] in label_names else []
        return row.drop(['Tweet', 'Label'])
|
class Ibrohim2019(dataset.Dataset):
    """Ibrohim & Budi 2019 multi-label Indonesian hate speech corpus (Twitter)."""
    name = 'ibrohim2019'
    url = 'https://github.com/okkyibrohim/id-multi-label-hate-speech-and-abusive-language-detection/raw/master/re_dataset.csv'
    hash = '44c04e31ad4b7ee4a95f1884e7af4da2c44b69762143eb2de0ede7f90502735e'
    files = [{'name': 'ibrohim2019id.csv', 'language': 'id', 'type': 'training', 'platform': 'twitter'}]
    comment = 'HS : hate speech label;\nAbusive : abusive language label;\nHS_Individual : hate speech targeted to an individual;\nHS_Group : hate speech targeted to a group;\nHS_Religion : hate speech related to religion/creed;\nHS_Race : hate speech related to race/ethnicity;\nHS_Physical : hate speech related to physical/disability;\nHS_Gender : hate speech related to gender/sexual orientation;\nHS_Gender : hate related to other invective/slander;\nHS_Weak : weak hate speech;\nHS_Moderate : moderate hate speech;\nHS_Strong : strong hate speech.\n'
    license = ' '

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """The download is already a ready-to-use CSV; just store it."""
        helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, 'ibrohim2019id.csv'))

    @classmethod
    def unify_row(cls, row):
        """Collect every flag column whose value is 1 into the label list."""
        flag_columns = ['HS', 'Abusive', 'HS_Individual', 'HS_Group', 'HS_Religion', 'HS_Race', 'HS_Physical', 'HS_Gender', 'HS_Other', 'HS_Weak', 'HS_Moderate', 'HS_Strong']
        row['text'] = row['Tweet']
        row['labels'] = [flag for flag in flag_columns if (row[flag] == 1)]
        return row.drop((['Tweet'] + flag_columns))
|
class Jha2017(dataset.Dataset):
    """Jha & Mamidi 2017 benevolent/hostile sexism corpus (English, Twitter)."""
    name = 'jha2017'
    url = 'https://github.com/AkshitaJha/NLP_CSS_2017/archive/master.zip'
    hash = 'da7392bfa1b5c7d6aa8540b1943abd5bf941f1a8e8e12dfa37335164c9752edb'
    files = [{'name': 'jha2017en.csv', 'language': 'en', 'type': 'training', 'platform': 'twitter'}]
    comment = "The file benevolent_sexist.tsv contains Tweet ID's of tweets that exhibit benevolent sexism. The file hostile_sexist.tsv contains Tweet ID's of tweets that are hostile in nature. The hostile sexist tweets were part of the Hate Speech Dataset (Waseem and Hovy, 2016)."
    license = ' '

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Merge both sexism ID lists, hydrate the tweets and store the file."""
        extracted = helpers.unzip_file(tmp_file_path)
        benevolent = helpers.clean_csv(os.path.join(extracted, 'NLP_CSS_2017-master/benevolent_sexist.tsv'), ['tweet_id'])
        hostile = helpers.clean_csv(os.path.join(extracted, 'NLP_CSS_2017-master/hostile_sexist.tsv'), ['tweet_id'])
        # Hostile first: merge_csvs names its output after the first key.
        merged = helpers.merge_csvs({hostile: ['sexist', 'hostile'], benevolent: ['sexist', 'benevolent']})
        hydrated = helpers.download_tweets_for_csv(merged, 'tweet_id', api_config)
        helpers.copy_file(hydrated, os.path.join(dataset_folder, 'jha2017en.csv'))
|
class Kulkarni2021(dataset.Dataset):
    """L3Cube MahaSent Marathi sentiment corpus (Twitter)."""
    name = 'kulkarni2021'
    url = 'https://github.com/l3cube-pune/MarathiNLP/raw/main/L3CubeMahaSent%20Dataset/tweets-train.csv'
    hash = '1416e35f859f7473c536432954affe6460fad7a0a0d2c3889ce7a408347832d5'
    files = [{'name': 'kulkarni2021mr.csv', 'language': 'mr', 'type': 'training', 'platform': 'tweets'}]
    comment = 'Positive(1), Negative(-1) and Neutral(0)'
    license = ''

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """The download is already a ready-to-use CSV; just store it."""
        helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, 'kulkarni2021mr.csv'))

    @classmethod
    def unify_row(cls, row):
        """Rename tweet -> text and keep the sentiment value as a string label."""
        row['text'] = row['tweet']
        row['labels'] = [str(row['label'])]
        return row.drop(['tweet', 'label'])
|
class Kumar2018(dataset.Dataset):
    """Kumar et al. 2018 Hindi-English aggression corpus (Facebook)."""
    name = 'kumar2018'
    url = 'https://github.com/SilentFlame/AggressionDetection/raw/master/DataPre-Processing/processedDataWithoutID.txt'
    hash = '06154c3f8b85254af949e3e83aca32c1b4e25af322f18221a58d02453132dd48'
    files = [{'name': 'kumar2018hing.csv', 'language': 'hing', 'type': 'training', 'platform': 'facebook'}]
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Name the tab-separated columns and store the dataset file."""
        named = helpers.clean_csv(tmp_file_path, names=['text', 'class'], sep='\t')
        helpers.copy_file(named, os.path.join(dataset_folder, 'kumar2018hing.csv'))

    @classmethod
    def unify_row(cls, row):
        """Wrap the class value in a label list and drop the raw column."""
        row['labels'] = [row['class']]
        return row.drop(['class'])
|
class Mandl2019en(dataset.Dataset):
    """HASOC 2019 English subtask corpus (Twitter and Facebook)."""
    name = 'mandl2019en'
    url = 'https://hasocfire.github.io/hasoc/2019/files/english_dataset.zip'
    hash = '1b4bda7904193be59ed768675fc1d65f172f7bf92af3de6394e8deda8afb640e'
    files = [{'name': 'mandl2019en.csv', 'language': 'en', 'type': 'training', 'platform': 'twitter and facebook'}]
    license = ' '

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Merge the training and gold test TSVs and store the dataset file."""
        extracted = helpers.unzip_file(tmp_file_path)
        train = helpers.clean_csv(os.path.join(extracted, 'english_dataset/english_dataset.tsv'), sep='\t')
        test = helpers.clean_csv(os.path.join(extracted, 'english_dataset/hasoc2019_en_test-2919.tsv'), sep='\t')
        merged = helpers.merge_csvs({train: [], test: []})
        helpers.copy_file(merged, os.path.join(dataset_folder, 'mandl2019en.csv'))

    @classmethod
    def unify_row(cls, row):
        """Derive labels from the three HASOC subtasks."""
        labels = []
        task2_names = {'HATE': 'hate', 'OFFN': 'offensive', 'PRFN': 'profane'}
        # task_1 NOT wins over the finer-grained task_2 categories.
        if (row['task_1'] == 'NOT'):
            labels.append('normal')
        elif (row['task_2'] in task2_names):
            labels.append(task2_names[row['task_2']])
        if (row['task_3'] == 'TIN'):
            labels.append('targeted')
        elif (row['task_3'] == 'UNT'):
            labels.append('untargeted')
        row['labels'] = labels
        return row.drop(['text_id', 'task_1', 'task_2', 'task_3'])
|
class Mandl2019ger(dataset.Dataset):
    """HASOC 2019 German subtask corpus (Twitter and Facebook)."""
    name = 'mandl2019ger'
    url = 'https://hasocfire.github.io/hasoc/2019/files/german_dataset.zip'
    hash = 'cba78f437b9628c216a4ae0487fbb30e15d9c4b235aa55d9a0d4742fdc8d11c5'
    # Bug fix: this is the German HASOC corpus, but the language code was
    # copy-pasted as 'en'; use the correct ISO 639-1 code 'de'.
    files = [{'name': 'mandl2019ger.csv', 'language': 'de', 'type': 'training', 'platform': 'twitter and facebook'}]
    license = ' '

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Merge the training and gold test TSVs and store the dataset file."""
        extracted = helpers.unzip_file(tmp_file_path)
        train = helpers.clean_csv(os.path.join(extracted, 'german_dataset/german_dataset.tsv'), sep='\t')
        test = helpers.clean_csv(os.path.join(extracted, 'german_dataset/hasoc_de_test_gold.tsv'), sep='\t')
        merged = helpers.merge_csvs({train: [], test: []})
        helpers.copy_file(merged, os.path.join(dataset_folder, 'mandl2019ger.csv'))

    @classmethod
    def unify_row(cls, row):
        """Derive labels from task_1/task_2 (the German set has no task_3)."""
        labels = []
        if (row['task_1'] == 'NOT'):
            labels.append('normal')
        elif (row['task_2'] == 'HATE'):
            labels.append('hate')
        elif (row['task_2'] == 'OFFN'):
            labels.append('offensive')
        elif (row['task_2'] == 'PRFN'):
            labels.append('profane')
        row['labels'] = labels
        return row.drop(['text_id', 'task_1', 'task_2'])
|
class Mandl2019hind(dataset.Dataset):
    """HASOC 2019 Hindi hate/offensive-speech dataset (train + gold test)."""
    name = 'mandl2019hind'
    url = 'https://hasocfire.github.io/hasoc/2019/files/hindi_dataset.zip'
    hash = 'd419780fe825f9946e3a03da4cf3fdf41699b188932d3662ca304693829994ad'
    # Bug fix: language was wrongly declared as 'en' (copy-paste from the
    # English variant); this is the Hindi corpus.
    files = [{'name': 'mandl2019hind.csv', 'language': 'hi', 'type': 'training', 'platform': 'twitter and facebook'}]
    license = ' '

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Unzip the archive, clean train and gold-test TSVs, merge and store them."""
        tmp_file_path = helpers.unzip_file(tmp_file_path)
        file1 = helpers.clean_csv(os.path.join(tmp_file_path, 'hindi_dataset/hindi_dataset.tsv'), sep='\t')
        file2 = helpers.clean_csv(os.path.join(tmp_file_path, 'hindi_dataset/hasoc2019_hi_test_gold_2919.tsv'), sep='\t')
        tmp_file_path = helpers.merge_csvs({file1: [], file2: []})
        helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, 'mandl2019hind.csv'))

    @classmethod
    def unify_row(cls, row):
        """Map HASOC task_1/task_2/task_3 annotations onto unified labels."""
        labels = []
        if row['task_1'] == 'NOT':
            labels.append('normal')
        elif row['task_2'] == 'HATE':
            labels.append('hate')
        elif row['task_2'] == 'OFFN':
            labels.append('offensive')
        elif row['task_2'] == 'PRFN':
            labels.append('profane')
        if row['task_3'] == 'TIN':
            labels.append('targeted')
        elif row['task_3'] == 'UNT':
            labels.append('untargeted')
        row['labels'] = labels
        return row.drop(['text_id', 'task_1', 'task_2', 'task_3'])
|
class Mathur2018(dataset.Dataset):
    """Hinglish profanity list with per-word hate severity ratings."""
    name = 'mathur2018'
    url = 'https://github.com/pmathur5k10/Hinglish-Offensive-Text-Classification/raw/master/Hinglish_Profanity_List.csv'
    hash = 'e09ccb2c46616a59faa5d80d205a6e49b01b4781c1eb31587e1098a86c751260'
    files = [{'name': 'mathur2018hing.csv', 'language': 'hing', 'type': 'training', 'platform': 'twitter'}]
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Clean the raw csv (word, translation, severity) and store it."""
        cleaned = helpers.clean_csv(tmp_file_path, names=['text', 'translation', 'class'])
        helpers.copy_file(cleaned, os.path.join(dataset_folder, 'mathur2018hing.csv'))

    @classmethod
    def unify_row(cls, row):
        """Encode the numeric severity as a single 'hate:<n>' label."""
        row['labels'] = ['hate:{0}'.format(row['class'])]
        return row.drop(['translation', 'class'])
|
class Mubarak2017aljazeera(dataset.Dataset):
    """Arabic Aljazeera comment annotations (normal/offensive/obscene)."""
    name = 'mubarak2017aljazeera'
    url = 'http://alt.qcri.org/~hmubarak/offensive/AJCommentsClassification-CF.xlsx'
    hash = 'afa00e36ff5492c1bbdd42a0e4979886f40d00f1aa5517807a957e22fb517670'
    files = [{'name': 'mubarak2017ar_aljazeera.csv', 'language': 'ar', 'type': 'training', 'platform': 'twitter'}]
    comment = 'Annotation\tMeaning\n0\tNORMAL_LANGUAGE\n-1\tOFFENSIVE_LANGUAGE\n-2\tOBSCENE_LANGUAGE'
    license = ' '

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Convert the Excel source to csv and store it."""
        converted = helpers.convert_excel_to_csv(tmp_file_path)
        helpers.copy_file(converted, os.path.join(dataset_folder, 'mubarak2017ar_aljazeera.csv'))

    @classmethod
    def unify_row(cls, row):
        """Map the numeric annotation (see `comment`) onto a unified label."""
        row['text'] = row['body']
        code = row['languagecomment']
        labels = []
        if code == 0:
            labels.append('normal')
        elif code == -1:
            labels.append('offensive')
        elif code == -2:
            labels.append('obscene')
        row['labels'] = labels
        return row.drop(['_unit_id', '_golden', '_unit_state', '_trusted_judgments', '_last_judgment_at', 'languagecomment', 'languagecomment:confidence', 'articletitle', 'body', 'bodylen', 'insdt', 'languagecomment_gold', 'link', 'serial', 'words'])
|
class Mubarak2017twitter(dataset.Dataset):
    """Arabic Twitter annotations (normal/offensive/obscene)."""
    name = 'mubarak2017twitter'
    url = 'http://alt.qcri.org/~hmubarak/offensive/TweetClassification-Summary.xlsx'
    hash = '606f73388adae60af740779f9b501f30cf9adac82afe15a46fe07155db3823cf'
    files = [{'name': 'mubarak2017ar_twitter.csv', 'language': 'ar', 'type': 'training', 'platform': 'twitter'}]
    comment = 'Annotation\tMeaning\n0\tNORMAL_LANGUAGE\n-1\tOFFENSIVE_LANGUAGE\n-2\tOBSCENE_LANGUAGE'
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Convert the Excel source to csv and store it."""
        converted = helpers.convert_excel_to_csv(tmp_file_path)
        helpers.copy_file(converted, os.path.join(dataset_folder, 'mubarak2017ar_twitter.csv'))

    @classmethod
    def unify_row(cls, row):
        """Map the aggregated numeric annotation (see `comment`) onto a label."""
        annotation = row['aggregatedAnnotation']
        labels = []
        if annotation == 0:
            labels.append('normal')
        elif annotation == -1:
            labels.append('offensive')
        elif annotation == -2:
            labels.append('obscene')
        row['labels'] = labels
        return row.drop(['#', 'type', 'aggregatedAnnotation', 'aggregatedAnnotationConfidence', 'annotator1', 'annotator2', 'annotator3'])
|
class Mulki2019(dataset.Dataset):
    """L-HSAB: Arabic Levantine hate-speech tweet dataset."""
    name = 'mulki2019'
    url = 'https://github.com/Hala-Mulki/L-HSAB-First-Arabic-Levantine-HateSpeech-Dataset/raw/master/Dataset/L-HSAB'
    hash = '3fc5e06ab624b47e404a0530388631c4894c323ca038e726ce6dd3d0e6a371e3'
    files = [{'name': 'mulki2019ar.csv', 'language': 'ar', 'type': 'training', 'platform': 'twitter'}]
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Re-encode the tab-separated source as comma-separated and store it."""
        pd.read_csv(tmp_file_path, sep='\t').to_csv(tmp_file_path, index=False)
        helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, 'mulki2019ar.csv'))

    @classmethod
    def unify_row(cls, row):
        """Map Tweet/Class columns onto the unified text/labels columns."""
        row['text'] = row['Tweet']
        row['labels'] = [row['Class']]
        return row.drop(['Class', 'Tweet'])
|
class Novak2021(dataset.Dataset):
    """IMSyPP Slovenian hate-speech annotations (tweet IDs, hydrated via API)."""
    name = 'novak2021'
    url = 'https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1398/IMSyPP_SI_anotacije_training-clarin.csv?sequence=6&isAllowed=y'
    hash = 'fd6b85fa783afee7b6a61c99eb2eb16d59edda75af8b7df9a1f9ab4f2f59e458'
    # Bug fix: language was wrongly declared as 'gr'; this is the Slovenian
    # corpus (IMSyPP_SI source, file name novak2021sl.csv).
    files = [{'name': 'novak2021sl.csv', 'language': 'sl', 'type': 'training', 'platform': 'twitter'}]
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Hydrate tweet texts for the listed IDs and store the result."""
        tmp_file_path = helpers.download_tweets_for_csv(tmp_file_path, 'ID', api_config)
        helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, 'novak2021sl.csv'))

    @classmethod
    def unify_row(cls, row):
        """The 'vrsta' (type) annotation becomes the single unified label."""
        row['labels'] = [str(row['vrsta'])]
        row = row.drop(['vrsta', 'tarča', 'annotator'])
        return row
|
class Ousidhoum2019(dataset.Dataset):
    """MLMA multilingual (ar/en/fr) multi-aspect hate-speech dataset."""
    name = 'ousidhoum2019'
    url = 'https://github.com/HKUST-KnowComp/MLMA_hate_speech/raw/master/hate_speech_mlma.zip'
    hash = '56db7efb1b64a2570f63d0cdb48d119c5e32eccff13f3c22bf17a4331956dc43'
    files = [{'name': 'ousidhoum2019ar.csv', 'language': 'ar', 'type': 'training', 'platform': 'twitter'}, {'name': 'ousidhoum2019en_with_stopwords.csv', 'language': 'en', 'type': 'training', 'platform': 'twitter'}, {'name': 'ousidhoum2019en.csv', 'language': 'en', 'type': 'training', 'platform': 'twitter'}, {'name': 'ousidhoum2019fr.csv', 'language': 'fr', 'type': 'training', 'platform': 'twitter'}]
    license = 'MIT License\n\nCopyright (c) 2019 HKUST-KnowComp\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Unzip the archive and copy each per-language csv into place."""
        tmp_dir_path = helpers.unzip_file(tmp_file_path)
        for source_name, target_name in (
            ('hate_speech_mlma/ar_dataset.csv', 'ousidhoum2019ar.csv'),
            ('hate_speech_mlma/en_dataset.csv', 'ousidhoum2019en.csv'),
            ('hate_speech_mlma/fr_dataset.csv', 'ousidhoum2019fr.csv'),
            ('hate_speech_mlma/en_dataset_with_stop_words.csv', 'ousidhoum2019en_with_stopwords.csv'),
        ):
            helpers.copy_file(os.path.join(tmp_dir_path, source_name), os.path.join(dataset_folder, target_name))

    @classmethod
    def unify_row(cls, row):
        """Split the '_'-joined sentiment labels and append the directness flag."""
        row['text'] = row['tweet']
        row['labels'] = row['sentiment'].split('_') + [row['directness']]
        return row.drop(['annotator_sentiment', 'target', 'group', 'directness', 'HITId', 'sentiment', 'tweet'])
|
class Pitenis2020(dataset.Dataset):
    """OffensEval 2020 Greek test set (tweets joined with their gold labels)."""
    name = 'pitenis2020'
    url = 'https://zpitenis.com/downloads/offenseval2020-greek.zip'
    hash = '4b1cbbcf1795b078d57640144b6cd72686b6e326dcc65e801799680f3a47bbb1'
    files = [{'name': 'pitenis2020gr.csv', 'language': 'gr', 'type': 'training', 'platform': 'twitter'}]
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Unzip, clean the label and tweet files, join them on id, and store."""
        extracted_file_path = helpers.unzip_file(tmp_file_path)
        labels_csv = helpers.clean_csv(os.path.join(extracted_file_path, 'offenseval-gr-testsetv1/offenseval-gr-labela-v1.csv'), names=['lid', 'category'])
        tweets_csv = helpers.clean_csv(os.path.join(extracted_file_path, 'offenseval-gr-testsetv1/offenseval-gr-test-v1.tsv'), names=['rid', 'tweet'], sep='\t', header=0)
        joined = helpers.join_csvs(labels_csv, 'lid', tweets_csv, 'rid')
        helpers.copy_file(joined, os.path.join(dataset_folder, 'pitenis2020gr.csv'))

    @classmethod
    def unify_row(cls, row):
        """The gold category becomes the single unified label."""
        row['text'] = row['tweet']
        row['labels'] = [row['category']]
        return row.drop(['lid', 'rid', 'tweet', 'category'])
|
class Qian2019(dataset.Dataset):
    """Gab/Reddit intervention dataset; conversations with per-comment hate indices."""
    name = 'qian2019'
    url = 'https://github.com/jing-qian/A-Benchmark-Dataset-for-Learning-to-Intervene-in-Online-Hate-Speech/archive/master.zip'
    hash = 'e2774f61af64942373e76e3928269bf6b7d8b41d5f5dcbcac9e760d4e93ef6b4'
    files = [{'name': 'qian2019en_gab.csv', 'language': 'en', 'type': 'training', 'platform': 'gab'}, {'name': 'qian2019en_reddit.csv', 'language': 'en', 'type': 'training', 'platform': 'reddit'}]
    license = ' '

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Unzip the repo archive and copy the gab/reddit csvs into place."""
        extracted = helpers.unzip_file(tmp_file_path)
        helpers.copy_file(os.path.join(extracted, 'A-Benchmark-Dataset-for-Learning-to-Intervene-in-Online-Hate-Speech-master/data/gab.csv'), os.path.join(dataset_folder, 'qian2019en_gab.csv'))
        helpers.copy_file(os.path.join(extracted, 'A-Benchmark-Dataset-for-Learning-to-Intervene-in-Online-Hate-Speech-master/data/reddit.csv'), os.path.join(dataset_folder, 'qian2019en_reddit.csv'))

    @classmethod
    def unify_format(cls, df):
        """Explode each conversation into one row per non-empty comment.

        A comment gets the 'hate' label when its 1-based position appears in
        the row's JSON-encoded 'hate_speech_idx' list.
        """
        df = df.fillna({'hate_speech_idx': '[]'})
        records = []
        for _, row in df.iterrows():
            hate_positions = json.loads(row['hate_speech_idx'])
            for position, comment in enumerate(row['text'].split('\n'), start=1):
                if not comment:
                    continue
                records.append({'text': comment, 'labels': ['hate'] if position in hate_positions else []})
        return pd.DataFrame(records)
|
class Ross2017(dataset.Dataset):
    """German refugee-related hate-speech tweets with two expert annotations."""
    name = 'ross2017'
    url = 'https://github.com/UCSM-DUE/IWG_hatespeech_public/raw/master/german%20hatespeech%20refugees.csv'
    hash = 'b0784b8c00f02d16cee8b1227b8e8968760885d3d87b68762ba51b4c3156714f'
    files = [{'name': 'ross2018de.csv', 'language': 'de', 'type': 'training', 'platform': 'twitter'}]
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """The raw csv is already usable; just copy it into place."""
        helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, 'ross2018de.csv'))

    @classmethod
    def unify_row(cls, row):
        """Label 'hate' when either expert voted YES, otherwise 'nohate'."""
        row['text'] = row['Tweet']
        expert_votes = (row['HatespeechOrNot (Expert 1)'], row['HatespeechOrNot (Expert 2)'])
        row['labels'] = ['hate'] if 'YES' in expert_votes else ['nohate']
        return row.drop(['Tweet', 'HatespeechOrNot (Expert 1)', 'HatespeechOrNot (Expert 2)', 'Hatespeech Rating (Expert 2)'])
|
class Sanguinetti2018(dataset.Dataset):
    """IHSC: Italian Twitter corpus of hate speech against immigrants."""
    name = 'sanguinetti2018'
    url = 'https://github.com/msang/hate-speech-corpus/raw/master/IHSC_ids.tsv'
    hash = '9c8fd7224362e5fa488ba70dbc1ae55cfc0a452d303c1508e3607e2cc2e20fa1'
    files = [{'name': 'sanguinetti2018it.csv', 'language': 'it', 'type': 'training', 'platform': 'twitter'}]
    comment = ''
    license = 'If you use the resource, please cite:\n\n@InProceedings{SanguinettiEtAlLREC2018,\n author = {Manuela Sanguinetti and Fabio Poletto and Cristina Bosco and Viviana Patti and Marco Stranisci},\n title = {An Italian Twitter Corpus of Hate Speech against Immigrants},\n booktitle = {Proceedings of the 11th Conference on Language Resources and Evaluation (LREC2018), May 2018, Miyazaki, Japan},\n month = {},\n year = {2018},\n address = {},\n publisher = {},\n pages = {2798--2895},\n url = {}\n}\n'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Clean the id TSV, hydrate tweet texts via the API, and store the result."""
        tmp_file_path = helpers.clean_csv(tmp_file_path, sep='\t')
        tmp_file_path = helpers.download_tweets_for_csv(tmp_file_path, 'tweet_id', api_config)
        helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, 'sanguinetti2018it.csv'))

    @classmethod
    def unify_row(cls, row):
        """Collect hate/aggressiveness/offensiveness/irony/stereotype labels.

        Bug fix: the offensiveness label previously reused the value of the
        'aggressiveness' column (copy-paste error); it now correctly uses the
        'offensiveness' column.
        """
        labels = []
        if (row['hs'] == 'yes'):
            labels.append('hate')
        # 'no' means the aspect is absent; any other value is its intensity.
        if (row['aggressiveness'] != 'no'):
            labels.append((row['aggressiveness'] + '_aggressiveness'))
        if (row['offensiveness'] != 'no'):
            labels.append((row['offensiveness'] + '_offensiveness'))
        if (row['irony'] == 'yes'):
            labels.append('irony')
        if (row['stereotype'] == 'yes'):
            labels.append('stereotype')
        row['labels'] = labels
        row = row.drop(['aggressiveness', 'hs', 'irony', 'offensiveness', 'stereotype'])
        return row
|
class Sigurbergsson2019(dataset.Dataset):
    """DKhate: Danish offensive-language dataset (OffensEval format)."""
    name = 'sigurbergsson2019'
    url = 'https://ndownloader.figshare.com/files/22476731'
    hash = 'fb5c41c385062af222f68c8eb298912644b2f7a86d91769451a26c081f6822f0'
    files = [{'name': 'sigurbergsson2019da.csv', 'language': 'da', 'type': 'training', 'platform': 'unknown'}]
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Extract the tar.bz2 archive, clean the training TSV, and store it."""
        helpers.untarbz_file(tmp_file_path)
        extracted = os.path.join(os.path.dirname(tmp_file_path), 'dkhate/oe20da_data/offenseval-da-training-v1.tsv')
        cleaned = helpers.clean_csv(extracted, sep='\t')
        helpers.copy_file(cleaned, os.path.join(dataset_folder, 'sigurbergsson2019da.csv'))

    @classmethod
    def unify_row(cls, row):
        """Keep OFF/NOT annotations as the label; anything else becomes empty."""
        row['text'] = row['tweet']
        row['labels'] = [row['subtask_a']] if row['subtask_a'] in ('OFF', 'NOT') else []
        return row.drop(['id', 'tweet', 'subtask_a'])
|
class Waseem2016(dataset.Dataset):
    """Waseem & Hovy NAACL-SRW 2016 English hate-speech tweet IDs."""
    name = 'waseem2016'
    url = 'https://github.com/ZeerakW/hatespeech/raw/master/NAACL_SRW_2016.csv'
    hash = 'a23875e68792a9d66cafea3c1c42b0b563b35fbd6163a66c3c4451976ebcdcff'
    files = [{'name': 'waseem2016en.csv', 'language': 'en', 'type': 'training', 'platform': 'twitter'}]
    license = ' '

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Clean the id/tag csv, hydrate tweet texts via the API, and store it."""
        cleaned = helpers.clean_csv(tmp_file_path, ['twitter_ids', 'tag'])
        hydrated = helpers.download_tweets_for_csv(cleaned, 'twitter_ids', api_config)
        helpers.copy_file(hydrated, os.path.join(dataset_folder, 'waseem2016en.csv'))

    @classmethod
    def unify_row(cls, row):
        """The annotation tag becomes the single unified label."""
        row['labels'] = [row['tag']]
        return row.drop(['tag'])
|
class Wiegand2018(dataset.Dataset):
    """GermEval 2018 shared-task test data (German offensive language)."""
    name = 'wiegand2018'
    url = 'https://github.com/uds-lsv/GermEval-2018-Data/raw/master/germeval2018.test.txt'
    hash = '45f31510b305d080a933d4087b8d34f7a5e4087141718955b90357ca730074f2'
    files = [{'name': 'wiegand2018de.csv', 'language': 'de', 'type': 'training', 'platform': 'twitter'}]
    license = 'lf you publish any work using the GermEval-2018 data, please cite the following publication:\n\nMichael Wiegand, Melanie Siegel, and Josef Ruppenhofer: "Overview of the GermEval 2018 Shared Task on the Identification of Offensive Language", in Proceedings of the GermEval, 2018, Vienna, Austria.'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Clean the tab-separated text/tag1/tag2 file and store it."""
        cleaned = helpers.clean_csv(tmp_file_path, names=['text', 'tag1', 'tag2'], sep='\t')
        helpers.copy_file(cleaned, os.path.join(dataset_folder, 'wiegand2018de.csv'))

    @classmethod
    def unify_row(cls, row):
        """Both coarse and fine-grained tags become unified labels."""
        row['labels'] = [row['tag1'], row['tag2']]
        return row.drop(['tag1', 'tag2'])
|
class Wulczyn2017aggressive(dataset.Dataset):
    """Wikipedia talk-page comments with crowd aggression annotations."""
    name = 'wulczyn2017aggressive'
    url = 'https://ndownloader.figshare.com/articles/4267550/versions/5'
    hash = '9e48068af1fbbe893af4df1b629ceebf924dc723a290c7bc473d2a8a8aac3529'
    files = [{'name': 'wulczyn2017en_aggressive.csv', 'language': 'en', 'type': 'training', 'platform': 'wikipedia'}]
    license = ' '

    @classmethod
    def valid_hash(cls, file):
        'do not check hash since it differs for each download'
        return

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Join cleaned comments with majority-voted aggression annotations."""
        extracted = helpers.unzip_file(tmp_file_path)
        comments_csv = helpers.clean_csv(os.path.join(extracted, 'aggression_annotated_comments.tsv'), sep='\t')
        annotations_csv = helpers.clean_csv(os.path.join(extracted, 'aggression_annotations.tsv'), sep='\t')
        comments = pd.read_csv(comments_csv)
        # Restore the whitespace the corpus encodes as placeholder tokens.
        comments['comment'] = [c.replace('NEWLINE_TOKEN', ' ').replace('TAB_TOKEN', ' ') for c in comments['comment']]
        comments.to_csv(comments_csv + '_endings', index=False)
        annotations = pd.read_csv(annotations_csv)
        # Majority vote per revision: mean annotation above 0.5 counts as positive.
        majority = annotations.groupby(['rev_id']).mean() > 0.5
        majority.to_csv(annotations_csv + '_grouped')
        joined = helpers.join_csvs(comments_csv + '_endings', 'rev_id', annotations_csv + '_grouped', 'rev_id')
        helpers.copy_file(joined, os.path.join(dataset_folder, 'wulczyn2017en_aggressive.csv'))

    @classmethod
    def unify_row(cls, row):
        """Label 'aggressive' when the majority vote is positive, else 'none'."""
        row['text'] = row['comment']
        row['labels'] = ['aggressive'] if row['aggression'] else ['none']
        return row.drop(['rev_id', 'comment', 'year', 'logged_in', 'ns', 'sample', 'split', 'worker_id', 'aggression', 'aggression_score'])
|
class Wulczyn2017attack(dataset.Dataset):
    """Wikipedia talk-page comments with crowd personal-attack annotations."""
    name = 'wulczyn2017attack'
    url = 'https://ndownloader.figshare.com/articles/4054689/versions/6'
    hash = '9e48068af1fbbe893af4df1b629ceebf924dc723a290c7bc473d2a8a8aac3529'
    files = [{'name': 'wulczyn2017en_attack.csv', 'language': 'en', 'type': 'training', 'platform': 'wikipedia'}]
    license = ' '

    @classmethod
    def valid_hash(cls, file):
        'do not check hash since it differs for each download'
        return

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Join cleaned comments with majority-voted attack annotations."""
        extracted = helpers.unzip_file(tmp_file_path)
        comments_csv = helpers.clean_csv(os.path.join(extracted, 'attack_annotated_comments.tsv'), sep='\t')
        annotations_csv = helpers.clean_csv(os.path.join(extracted, 'attack_annotations.tsv'), sep='\t')
        comments = pd.read_csv(comments_csv)
        # Restore the whitespace the corpus encodes as placeholder tokens.
        comments['comment'] = [c.replace('NEWLINE_TOKEN', ' ').replace('TAB_TOKEN', ' ') for c in comments['comment']]
        comments.to_csv(comments_csv + '_endings', index=False)
        annotations = pd.read_csv(annotations_csv)
        # Majority vote per revision: mean annotation above 0.5 counts as positive.
        majority = annotations.groupby(['rev_id']).mean() > 0.5
        majority.to_csv(annotations_csv + '_grouped')
        joined = helpers.join_csvs(comments_csv + '_endings', 'rev_id', annotations_csv + '_grouped', 'rev_id')
        helpers.copy_file(joined, os.path.join(dataset_folder, 'wulczyn2017en_attack.csv'))

    @classmethod
    def unify_row(cls, row):
        """Label 'attack' when the majority vote is positive, else 'none'."""
        row['text'] = row['comment']
        row['labels'] = ['attack'] if row['attack'] else ['none']
        return row.drop(['rev_id', 'comment', 'year', 'logged_in', 'ns', 'sample', 'split', 'worker_id', 'quoting_attack', 'recipient_attack', 'third_party_attack', 'other_attack', 'attack'])
|
class Wulczyn2017toxic(dataset.Dataset):
    """Wikipedia talk-page comments with crowd toxicity annotations."""
    name = 'wulczyn2017toxic'
    url = 'https://ndownloader.figshare.com/articles/4563973/versions/2'
    hash = '9e48068af1fbbe893af4df1b629ceebf924dc723a290c7bc473d2a8a8aac3529'
    files = [{'name': 'wulczyn2017en_toxic.csv', 'language': 'en', 'type': 'training', 'platform': 'wikipedia'}]
    license = ' '

    @classmethod
    def valid_hash(cls, file):
        'do not check hash since it differs for each download'
        return

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Join cleaned comments with majority-voted toxicity annotations."""
        extracted = helpers.unzip_file(tmp_file_path)
        comments_csv = helpers.clean_csv(os.path.join(extracted, 'toxicity_annotated_comments.tsv'), sep='\t')
        annotations_csv = helpers.clean_csv(os.path.join(extracted, 'toxicity_annotations.tsv'), sep='\t')
        comments = pd.read_csv(comments_csv)
        # Restore the whitespace the corpus encodes as placeholder tokens.
        comments['comment'] = [c.replace('NEWLINE_TOKEN', ' ').replace('TAB_TOKEN', ' ') for c in comments['comment']]
        comments.to_csv(comments_csv + '_endings', index=False)
        annotations = pd.read_csv(annotations_csv)
        # Majority vote per revision: mean annotation above 0.5 counts as positive.
        majority = annotations.groupby(['rev_id']).mean() > 0.5
        majority.to_csv(annotations_csv + '_grouped')
        joined = helpers.join_csvs(comments_csv + '_endings', 'rev_id', annotations_csv + '_grouped', 'rev_id')
        helpers.copy_file(joined, os.path.join(dataset_folder, 'wulczyn2017en_toxic.csv'))

    @classmethod
    def unify_row(cls, row):
        """Label 'toxic' when the majority vote is positive, else 'none'."""
        row['text'] = row['comment']
        row['labels'] = ['toxic'] if row['toxicity'] else ['none']
        return row.drop(['rev_id', 'comment', 'year', 'logged_in', 'ns', 'sample', 'split', 'worker_id', 'toxicity', 'toxicity_score'])
|
class Zampieri2019(dataset.Dataset):
    """OLID: English offensive-language identification dataset (OffensEval)."""
    name = 'zampieri2019'
    url = 'https://github.com/idontflow/OLID/raw/master/olid-training-v1.0.tsv'
    hash = '907e186e75876f1a77aeff72c97c988bdcd533493926567f7206da6f82f45ae9'
    files = [{'name': 'zampieri2019en.csv', 'language': 'en', 'type': 'training', 'platform': 'twitter'}]
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Clean the tab-separated training file and store it."""
        cleaned = helpers.clean_csv(tmp_file_path, sep='\t')
        helpers.copy_file(cleaned, os.path.join(dataset_folder, 'zampieri2019en.csv'))

    @classmethod
    def unify_row(cls, row):
        """Subtask A (OFF/NOT) becomes the single unified label."""
        row['text'] = row['tweet']
        row['labels'] = [row['subtask_a']]
        return row.drop(['id', 'tweet', 'subtask_a', 'subtask_b', 'subtask_c'])
|
def generate_statistics(dataset_directory_path):
    """Public entry point: write a statistics.txt report for the unified datasets."""
    generate_statistics_file(dataset_directory_path)
|
def get_all_datasets(*, config_path=None, reset=False, skip_combine=False, skip_download=False, api_config_path=None):
    """Download, process, unify and optionally combine all configured datasets.

    With reset=True all data directories are wiped and nothing else happens.
    skip_download reuses already processed data; skip_combine leaves out the
    final combined.tsv step.
    """
    config = __get_config(config_path)
    api_config = __get_api_config(api_config_path)
    if reset:
        clear_all(config)
        return
    max_suffix_length = 0
    if not skip_download:
        max_suffix_length = download_datasets(config)
        max_suffix_length = process_datasets(config, api_config, max_suffix_length)
    max_suffix_length = unify_datasets(config, max_suffix_length)
    if not skip_combine:
        combine_datasets(config)
|
def get_dataset(name, *, config_path=None, skip_download=False, api_config_path=None):
    """Download (unless skipped), process and unify a single dataset by name.

    Raises:
        RuntimeError: if no registered dataset matches *name*.
    """
    config = __get_config(config_path)
    api_config = __get_api_config(api_config_path)
    dataset = get_dataset_by_name(name)
    if not dataset:
        raise RuntimeError(f'Dataset: {name} not found')
    if not skip_download:
        file = dataset.download(os.path.join(config['raw_directory'], dataset.name))
        dataset.valid_hash(file)
        __process_dataset(config, dataset, api_config)
    __unify_dataset(config, dataset)
|
def combine_datasets(config, output_file_name='combined.tsv'):
    """Merge every unified per-dataset csv into a single tab-separated file.

    Each row is tagged with its source file name, language and platform.
    Files that cannot be read or lack expected columns are skipped with a
    message.

    Fixes over the previous version:
    - `DataFrame.append` (removed in pandas 2.0, and quadratic when called in
      a loop) is replaced by collecting frames and a single `pd.concat`.
    - the bare `except:` (which also swallowed KeyboardInterrupt/SystemExit)
      is narrowed to `except Exception`.
    """
    filedir = config['file_directory']
    output_file = os.path.join(filedir, output_file_name)
    frames = []
    for dataset in __filter_datasets_from_config(config):
        for ds_file in dataset.files:
            try:
                df = pd.read_csv(os.path.join(filedir, dataset.name, ds_file['name']), sep='\t')
                row_count = df.count().max()
                df.insert(loc=0, column='file_name', value=([ds_file['name']] * row_count))
                df.insert(loc=0, column='file_language', value=([ds_file['language']] * row_count))
                df.insert(loc=0, column='file_platform', value=([ds_file['platform']] * row_count))
                df.drop(columns=['id'], inplace=True)
                frames.append(df)
            except Exception:
                print('Could not add {0} to the combined dataset. Continuing with next file.'.format(ds_file['name']))
    combined_df = pd.concat(frames) if frames else pd.DataFrame()
    combined_df.to_csv(output_file, index_label='id', quoting=csv.QUOTE_NONNUMERIC, sep='\t')
|
def unify_datasets(config, max_suffix_length=0):
    """Unify all configured datasets (phase 3 of 3); returns the pad length."""
    _clear_directory(config['file_directory'])
    possible_datasets = __filter_datasets_from_config(config)
    total = len(possible_datasets) * 3
    for idx, dataset in enumerate(possible_datasets):
        # Unify is the third phase, hence the two-thirds offset into the bar.
        max_suffix_length = _print_progress_bar(idx + len(possible_datasets) * 2, total, 'Unify ' + dataset.name, max_suffix_length)
        __unify_dataset(config, dataset)
    _print_progress_bar(total, total, 'Done', max_suffix_length)
    return max_suffix_length
|
def __unify_dataset(config, dataset):
    """Copy a dataset's processed files into the file directory and unify them.

    Failures are logged and swallowed so a batch run continues.
    """
    try:
        for ds_file in dataset.files:
            source = os.path.join(config['raw_directory'], dataset.name + '_dir', ds_file['name'])
            target = os.path.join(config['file_directory'], dataset.name, ds_file['name'])
            helpers.copy_file(source, target)
        dataset.unify(config, dataset.name)
    except Exception:
        print('\nCould not unify {0}. Continuing with next dataset.'.format(dataset.name))
|
def process_datasets(config, api_config, max_suffix_length=0):
    """Run each configured dataset's process() step (phase 2 of 3)."""
    possible_datasets = __filter_datasets_from_config(config)
    total = len(possible_datasets) * 3
    for idx, dataset in enumerate(possible_datasets):
        max_suffix_length = _print_progress_bar(idx + len(possible_datasets), total, 'Process ' + dataset.name, max_suffix_length)
        __process_dataset(config, dataset, api_config)
    return max_suffix_length
|
def __process_dataset(config, dataset, api_config):
    """Copy the raw download into the temp dir and run the dataset's process().

    The processed output is written to '<raw_directory>/<name>_dir'. Any
    processing error is logged and re-raised to the caller.
    """
    _clear_directory(config['temp_directory'])
    tmp_file_name = os.path.join(config['temp_directory'], dataset.name)
    helpers.copy_file(os.path.join(config['raw_directory'], dataset.name), tmp_file_name)
    try:
        dataset.process(tmp_file_name, os.path.join(config['raw_directory'], (dataset.name + '_dir')), api_config)
    except Exception:
        # Bug fix: the old message claimed "Continuing with next one" although
        # the exception is re-raised; the message now matches the behavior.
        # Bare `raise` preserves the original traceback.
        print('\nError while processing {0}.'.format(dataset.name))
        raise
|
def download_datasets(config, max_suffix_length=0) -> int:
    """Download every dataset enabled in config (phase 1 of 3); returns pad length."""
    _clear_directory(config['raw_directory'])
    possible_datasets = __filter_datasets_from_config(config)
    total = len(possible_datasets) * 3
    for idx, dataset in enumerate(possible_datasets):
        max_suffix_length = _print_progress_bar(idx, total, 'Download ' + dataset.name, max_suffix_length)
        # Only fetch sources explicitly enabled via their 'download' flag.
        if dataset.name in config['data_sources'] and config['data_sources'][dataset.name]['download']:
            file = dataset.download(os.path.join(config['raw_directory'], dataset.name))
            dataset.valid_hash(file)
    return max_suffix_length
|
def generate_statistics_file(filedir):
    """Write 'statistics.txt' (in the cwd) summarizing all unified dataset files."""
    sg = Statistics_generator(filedir)
    sg.generate('statistics.txt')
|
def clear_all(config):
    """Wipe and recreate the unified, temporary and raw data directories."""
    for directory_key in ('file_directory', 'temp_directory', 'raw_directory'):
        _clear_directory(config[directory_key])
|
def _clear_directory(directory):
    """Remove *directory* recursively (if it exists) and recreate it empty."""
    if os.path.exists(directory):
        shutil.rmtree(directory)
    os.mkdir(directory)
|
def _print_progress_bar(iteration, total, suffix='', max_suffix_length=0):
    """Render a 50-character console progress bar on one line.

    The suffix is padded to *max_suffix_length* so a shorter label fully
    overwrites a longer previous one. Returns the padded suffix length so the
    caller can pass it to the next invocation.
    """
    fraction = iteration / float(total)
    percent = '{0:.1f}'.format(100 * fraction)
    filled = (50 * iteration) // total
    bar = '█' * filled + '-' * (50 - filled)
    suffix_string = suffix.ljust(max_suffix_length)
    print(f'\nProgress: [{bar}] {percent}% {suffix_string}', end='\r')
    if iteration == total:
        # Finish the line once the bar is complete.
        print()
    return len(suffix_string)
|
def __get_api_config(config_path):
    """Load the API credential config when a path is given, otherwise None."""
    return __get_config_from_path(config_path) if config_path else None
|
def __get_config(config_path):
    """Load the config from *config_path*, or fall back to the packaged config.json."""
    if config_path:
        return __get_config_from_path(config_path)
    return json.loads(importlib.resources.read_text(__package__, 'config.json'))
|
def __get_config_from_path(config_path):
    """Parse a JSON config file; on any failure print a hint and return None."""
    try:
        with open(config_path, 'r') as config_file:
            return json.load(config_file)
    except Exception:
        print('Failure loading config.json. Run the following command to reset config.json:\n\n\tmain.py --genconfig')
|
def __filter_datasets_from_config(config):
    """Return the registered datasets that appear in config['data_sources']."""
    all_possible_datasets: List[dataset.Dataset] = datasets.get_datasets()
    enabled_names = config['data_sources']
    return [ds for ds in all_possible_datasets if ds.name in enabled_names]
|
def find_tables(dump_filename):
    """Return the table names declared via CREATE TABLE in a SQL dump.

    Names are matched on the lowercased line, so they come back lowercased.
    """
    table_list = []
    with open(dump_filename, 'r') as f:
        for raw_line in f:
            lowered = raw_line.strip().lower()
            if lowered.startswith('create table'):
                table_list.extend(re.findall('create table `([\\w_]+)`', lowered))
    return table_list
|
def read_dump(dump_filename, target_table):
    """Extract one table from a SQL dump into '<dump>_<table>_df.csv'.

    Scans the dump line by line with a small state machine:
      read_mode 0 = skipping, 1 = inside CREATE TABLE (collect column names),
      2 = inside INSERT statements (collect row tuples).

    NOTE(review): relies on `make_tuple` being defined elsewhere in this module
    (presumably `ast.literal_eval`) to parse the VALUES tuple list — confirm.
    NOTE(review): if no terminating ';' INSERT line is found for the target
    table, `df` is never assigned and the final to_csv raises NameError.
    """
    column_names = []
    rows = []
    read_mode = 0
    with open(dump_filename, 'r') as f:
        for line in f:
            line = line.strip()
            if (line.lower().startswith('insert') and (target_table in line)):
                read_mode = 2
            if (line.lower().startswith('create table') and (target_table in line)):
                read_mode = 1
                continue
            if (read_mode == 0):
                continue
            elif (read_mode == 1):
                # The column section ends at the PRIMARY KEY declaration.
                if line.lower().startswith('primary'):
                    read_mode = 0
                    continue
                colheader = re.findall('`([\\w_]+)`', line)
                for col in colheader:
                    column_names.append(col.strip())
            elif (read_mode == 2):
                # Trim the trailing ';' so the VALUES tuples parse cleanly.
                # NOTE(review): when the line does NOT end with ';', end_index=0
                # makes the slice empty — looks buggy; presumably dumps always
                # terminate INSERTs with ';'. Verify against real dump files.
                if line.endswith(';'):
                    end_index = (- 1)
                else:
                    end_index = 0
                data = make_tuple(line[(line.find('VALUES') + 7):end_index])
                try:
                    for item in data:
                        row = {}
                        for (key, value) in zip(column_names, item):
                            row[key] = value
                        rows.append(row)
                except IndexError:
                    pass
                # The ';' line closes the INSERT block: build the frame and stop.
                if line.endswith(';'):
                    df = pd.DataFrame(rows, columns=column_names)
                    break
    df.to_csv((((dump_filename[:(- 4)] + '_') + target_table) + '_df.csv'), index=False)
    return
|
class Statistics_generator():
    """Collects per-dataset-file statistics (row counts, file sizes, label
    frequencies) and renders them as a plain-text report.

    Expects tab-separated files with a ``labels`` column whose cells are
    Python-literal lists (e.g. ``"['a', 'b']"``).
    """

    def __init__(self, filedir):
        # Root directory containing one sub-directory per dataset.
        self.filedir = filedir

    def generate(self, output_file=None):
        """Compute per-file and aggregate statistics.

        Writes a formatted report to ``output_file`` when given, and always
        returns ``(per_file_stats, overall_stats)``.
        """
        ds_data = self._collect_data()
        overall_data = self._calculate_overall_data(ds_data)
        if output_file:
            self._generate_output(output_file, ds_data, overall_data)
        return (ds_data, overall_data)

    def _generate_output(self, output_file, dataset_data, overall_data):
        # Renamed local from `str` to `report`: the original shadowed the
        # builtin `str` type.
        report = self._generate_headline('Overall Statistics')
        report += self._generate_table(overall_data)
        report += self._generate_headline('Dataset Statistics')
        for name, stats in dataset_data.items():
            report += name + '\n'
            report += self._generate_table(stats, 4)
            report += '\n'
        with open(output_file, 'w') as f:
            f.write(report)

    def _generate_headline(self, text):
        """Return ``text`` boxed in a '#' banner, followed by a blank line."""
        border = '#' * (len(text) + 4)
        return border + '\n' + '# ' + text + ' #' + '\n' + border + '\n\n'

    def _generate_table(self, data, indentation=0):
        """Render ``data`` as aligned ``key:  value`` lines.

        Nested dicts are rendered recursively with 4 extra spaces of indent.
        """
        # Width of the longest key, so values line up. Use str(key) for both
        # the width and the padding (the original padded with len(key), which
        # breaks on non-string keys).
        left_width = max((len(str(key)) for key in data), default=0)
        string = ''
        for key, value in data.items():
            key_text = str(key)
            string += ' ' * indentation
            string += key_text + ':' + ' ' * (left_width - len(key_text) + 2)
            if isinstance(value, dict):
                string += '\n'
                string += self._generate_table(value, indentation + 4)
            else:
                string += str(value) + '\n'
        return string

    def _collect_data(self):
        """Scan every registered dataset file that exists on disk.

        Returns a dict mapping file name -> statistics dict.
        NOTE(review): files with identical names in different datasets
        overwrite each other's entry — presumably names are unique; verify.
        """
        files = {}
        # `datasets.get_datasets()` is a project-level registry — assumed to
        # yield objects with `.name` and `.files`; confirm against its module.
        for dataset in datasets.get_datasets():
            for file_entry in dataset.files:
                file_path = os.path.join(self.filedir, dataset.name, file_entry['name'])
                if not os.path.isfile(file_path):
                    # Missing files are silently skipped by design.
                    continue
                files[file_entry['name']] = self._generate_file_statistics(file_path)
        return files

    def _generate_file_statistics(self, file):
        """Return row count, size in bytes, and label counts for one TSV file."""
        df = pd.read_csv(file, sep='\t')
        statistics = {}
        statistics['rows'] = len(df)
        statistics['file size'] = os.path.getsize(file)
        statistics['labels'] = self._get_label_count(df, 'labels')
        return statistics

    def _get_label_count(self, df, column):
        """Count individual labels in a column of stringified label lists."""
        counts = {}
        # value_counts() deduplicates identical label-list strings, so each
        # literal_eval runs once per distinct cell value.
        for cell, cell_count in df[column].value_counts().items():
            for label in ast.literal_eval(cell):
                counts[label] = counts.get(label, 0) + int(cell_count)
        return counts

    def _calculate_overall_data(self, dataset_data):
        """Aggregate rows, file sizes and label counts across all files."""
        rows = 0
        file_size = 0
        labels = {}
        for stats in dataset_data.values():
            rows += stats['rows']
            file_size += stats['file size']
            for label, count in stats['labels'].items():
                labels[label] = labels.get(label, 0) + count
        return {'rows': rows, 'file size': file_size, 'labels': labels}
|
class Evaluator():
    ' Computes intersection and union between prediction and ground-truth '

    @classmethod
    def initialize(cls):
        # Label value marking pixels excluded from evaluation.
        cls.ignore_index = 255

    @classmethod
    def classify_prediction(cls, pred_mask, batch):
        """Return per-class (intersection, union) pixel histograms.

        Both returned tensors have shape (2, batch); row 0 is the
        background class, row 1 the foreground class. Masks are assumed
        binary (values in {0, 1}) — histc uses bins=2 over [0, 1].
        """
        gt_mask = batch.get('query_mask')
        ignore_region = batch.get('query_ignore_idx')
        if ignore_region is not None:
            # Ignore region must not overlap foreground ground truth.
            assert torch.logical_and(ignore_region, gt_mask).sum() == 0
            # Stamp ignored pixels with ignore_index in both masks so they
            # land outside the [0, 1] histogram range and are not counted.
            ignore_region *= cls.ignore_index
            gt_mask = gt_mask + ignore_region
            pred_mask[gt_mask == cls.ignore_index] = cls.ignore_index
        per_sample = []
        for sample_pred, sample_gt in zip(pred_mask, gt_mask):
            agreeing = sample_pred[sample_pred == sample_gt]
            if agreeing.size(0) == 0:
                # histc on an empty tensor is avoided; zero counts instead.
                inter_hist = torch.tensor([0, 0], device=sample_pred.device)
            else:
                inter_hist = torch.histc(agreeing, bins=2, min=0, max=1)
            per_sample.append((inter_hist,
                               torch.histc(sample_pred, bins=2, min=0, max=1),
                               torch.histc(sample_gt, bins=2, min=0, max=1)))
        # Transpose the list of per-sample triples into three (2, batch) tensors.
        area_inter, area_pred, area_gt = (torch.stack(cols).t() for cols in zip(*per_sample))
        area_union = (area_pred + area_gt) - area_inter
        return (area_inter, area_union)
|
class AverageMeter():
    ' Stores loss, evaluation results '

    def __init__(self, dataset):
        # Benchmark name decides the total number of classes tracked.
        self.benchmark = dataset.benchmark
        # Classes actually evaluated for this split, as a CUDA tensor.
        self.class_ids_interest = torch.tensor(dataset.class_ids).cuda()
        if self.benchmark == 'pascal':
            self.nclass = 20
        elif self.benchmark == 'coco':
            self.nclass = 80
        elif self.benchmark == 'fss':
            self.nclass = 1000
        # Running (2, nclass) pixel-count buffers: row 0 background, row 1 foreground.
        buf_shape = [2, self.nclass]
        self.intersection_buf = torch.zeros(buf_shape).float().cuda()
        self.union_buf = torch.zeros(buf_shape).float().cuda()
        self.ones = torch.ones_like(self.union_buf)
        self.loss_buf = []

    def update(self, inter_b, union_b, class_id, loss):
        """Accumulate one batch of intersection/union counts and its loss."""
        self.intersection_buf.index_add_(1, class_id, inter_b.float())
        self.union_buf.index_add_(1, class_id, union_b.float())
        # Record a zero when no loss is provided so the buffer stays stackable.
        self.loss_buf.append(torch.tensor(0.0) if loss is None else loss)

    def compute_iou(self):
        """Return (mIoU, FB-IoU), both in percent."""
        # Clamp union counts at 1 to avoid division by zero for unseen classes.
        safe_union = torch.max(torch.stack([self.union_buf, self.ones]), dim=0)[0]
        per_class = (self.intersection_buf.float() / safe_union).index_select(1, self.class_ids_interest)
        miou = per_class[1].mean() * 100
        inter_sum = self.intersection_buf.index_select(1, self.class_ids_interest).sum(dim=1)
        union_sum = self.union_buf.index_select(1, self.class_ids_interest).sum(dim=1)
        fb_iou = (inter_sum / union_sum).mean() * 100
        return (miou, fb_iou)

    def write_result(self, split, epoch):
        """Log the final summary line for a split/epoch."""
        (iou, fb_iou) = self.compute_iou()
        loss_buf = torch.stack(self.loss_buf)
        msg = ('\n*** %s ' % split)
        msg += ('[@Epoch %02d] ' % epoch)
        msg += ('Avg L: %6.5f ' % loss_buf.mean())
        msg += ('mIoU: %5.2f ' % iou)
        msg += ('FB-IoU: %5.2f ' % fb_iou)
        msg += '***\n'
        Logger.info(msg)

    def write_process(self, batch_idx, datalen, epoch, write_batch_idx=20):
        """Log progress every `write_batch_idx` batches."""
        if (batch_idx % write_batch_idx) != 0:
            return
        # epoch == -1 signals evaluation mode: no epoch tag, no loss columns.
        msg = ('[Epoch: %02d] ' % epoch) if (epoch != (- 1)) else ''
        msg += ('[Batch: %04d/%04d] ' % ((batch_idx + 1), datalen))
        (iou, fb_iou) = self.compute_iou()
        if (epoch != (- 1)):
            loss_buf = torch.stack(self.loss_buf)
            msg += ('L: %6.5f ' % loss_buf[(- 1)])
            msg += ('Avg L: %6.5f ' % loss_buf.mean())
        msg += ('mIoU: %5.2f | ' % iou)
        msg += ('FB-IoU: %5.2f' % fb_iou)
        Logger.info(msg)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.