---
# Training configuration for a referenceless (source + MT only, no reference)
# regression quality-estimation metric built on XLM-RoBERTa.
# NOTE(review): key names match Unbabel COMET's RegressionMetric hyperparameters —
# confirm against the consuming trainer's schema.

# Estimator head
activations: Tanh
final_activation: null          # raw (unbounded) regression output
hidden_sizes:
  - 2048
  - 1024
dropout: 0.1
layer_norm: false
loss: mse

# Encoder
encoder_model: XLM-RoBERTa
pretrained_model: xlm-roberta-large
load_pretrained_weights: true
local_files_only: false
pool: avg
layer: mix                      # mix all encoder layers rather than using one
layer_transformation: sparsemax
layerwise_decay: 0.95           # presumably per-layer LR decay factor — verify in trainer
keep_embeddings_frozen: true
nr_frozen_epochs: 0.3           # fraction of first epoch with the encoder frozen

# Optimization
class_identifier: referenceless_regression_metric
optimizer: AdamW
learning_rate: 1.5e-05          # estimator head LR
encoder_learning_rate: 1.0e-06  # separate, smaller LR for the encoder
batch_size: 16
warmup_steps: 0

# Data
train_data:
  - data/combined_dataset.csv
validation_data:
  - data/xcomet_noref_Chinese_test.csv
  - data/xcomet_noref_Czech_test.csv
  - data/xcomet_noref_Estonian_test.csv
  - data/xcomet_noref_Finnish_test.csv
  - data/xcomet_noref_German_test.csv
  - data/xcomet_noref_Hausa_test.csv
  - data/xcomet_noref_Japanese_test.csv
  - data/xcomet_noref_Lithuanian_test.csv
  - data/xcomet_noref_Russian_test.csv
  - data/xcomet_noref_Turkish_test.csv