2022
Huertas-Tato, Javier; Martín, Alejandro; Camacho, David
SILT: Efficient transformer training for inter-lingual inference Journal Article
In: Expert Systems with Applications, vol. 200, pp. 116923, 2022, ISSN: 0957-4174.
@article{huertas-tato_silt_2022,
title = {SILT: Efficient transformer training for inter-lingual inference},
author = {Javier Huertas-Tato and Alejandro Martín and David Camacho},
url = {https://www.sciencedirect.com/science/article/pii/S0957417422003578},
doi = {10.1016/j.eswa.2022.116923},
issn = {0957-4174},
year = {2022},
date = {2022-08-01},
urldate = {2022-08-01},
journal = {Expert Systems with Applications},
volume = {200},
pages = {116923},
abstract = {The ability of transformers to perform precision tasks such as question answering, Natural Language Inference (NLI) or summarization has established them as one of the best paradigms for addressing Natural Language Processing (NLP) tasks. NLI is one of the best scenarios for testing these architectures, due to the knowledge required to understand complex sentences and the relationships established between a hypothesis and a premise. Nevertheless, these models suffer from an inability to generalize to other domains and from difficulties in handling multilingual and inter-lingual scenarios. The leading pathway in the literature to address these issues involves designing and training extremely large architectures, but this causes unpredictable behavior and erects barriers that impede broad access and fine-tuning. In this paper, we propose a new architecture called Siamese Inter-Lingual Transformer (SILT). This architecture efficiently aligns multilingual embeddings for Natural Language Inference, allowing unmatched language pairs to be processed. SILT leverages siamese pre-trained multilingual transformers with frozen weights, in which the two input sentences attend to each other before being combined through a matrix alignment method. The experimental results show that SILT drastically reduces the number of trainable parameters while enabling inter-lingual NLI and achieving state-of-the-art performance on common benchmarks.},
keywords = {Deep Learning, Disinformation, Embeddings, Natural language inference, Natural Language Processing, Sentence alignment, Transformers},
pubstate = {published},
tppubtype = {article}
}
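
The abstract above describes the SILT idea only at a high level (siamese frozen multilingual encoders, cross-sentence attention, and a matrix alignment step). The following is a minimal sketch of that general setup, not the paper's exact architecture: the backbone name, the single cross-attention layer, the pooling, and the classifier head are all illustrative assumptions.

```python
# Hedged sketch: a siamese NLI head on top of a frozen multilingual transformer.
# The cross-attention and "matrix alignment" of the paper are only approximated;
# model name, head sizes and pooling are illustrative choices, not the authors' code.
import torch
import torch.nn as nn
from transformers import AutoModel, AutoTokenizer

class SiameseNLI(nn.Module):
    def __init__(self, backbone="bert-base-multilingual-cased", num_labels=3):
        super().__init__()
        self.encoder = AutoModel.from_pretrained(backbone)
        for p in self.encoder.parameters():      # freeze the pre-trained weights
            p.requires_grad = False
        hidden = self.encoder.config.hidden_size
        # One cross-attention layer standing in for the inter-sentence attention.
        self.cross_attn = nn.MultiheadAttention(hidden, num_heads=8, batch_first=True)
        self.classifier = nn.Sequential(
            nn.Linear(4 * hidden, hidden), nn.ReLU(), nn.Linear(hidden, num_labels)
        )

    def encode(self, batch):
        # Token embeddings plus a mean-pooled sentence vector (padding masked out).
        tokens = self.encoder(**batch).last_hidden_state
        mask = batch["attention_mask"].unsqueeze(-1).float()
        return tokens, (tokens * mask).sum(1) / mask.sum(1)

    def forward(self, premise, hypothesis):
        p_tok, p_vec = self.encode(premise)
        h_tok, h_vec = self.encode(hypothesis)
        # Premise tokens attend to hypothesis tokens, then pool the result.
        attended, _ = self.cross_attn(p_tok, h_tok, h_tok)
        a_vec = attended.mean(1)
        feats = torch.cat([p_vec, h_vec, torch.abs(p_vec - h_vec), a_vec], dim=-1)
        return self.classifier(feats)

tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
model = SiameseNLI()
premise = tokenizer(["A man is playing a guitar."], return_tensors="pt", padding=True)
hypothesis = tokenizer(["Un hombre toca un instrumento."], return_tensors="pt", padding=True)
logits = model(premise, hypothesis)  # shape (1, 3): entailment / neutral / contradiction
```

Because the encoder is frozen, only the cross-attention layer and the classifier head are trainable, which mirrors the parameter-efficiency argument made in the abstract.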
Huertas-García, Álvaro; Martín, Alejandro; Huertas-Tato, Javier; Camacho, David
Exploring Dimensionality Reduction Techniques in Multilingual Transformers Miscellaneous
CoRR, 2022.
@misc{huertas-garcia_exploring_2022,
title = {Exploring Dimensionality Reduction Techniques in Multilingual Transformers},
author = {Álvaro Huertas-García and Alejandro Martín and Javier Huertas-Tato and David Camacho},
url = {https://doi.org/10.48550/arxiv.2204.08415},
doi = {10.48550/ARXIV.2204.08415},
year = {2022},
date = {2022-04-18},
urldate = {2022-04-18},
abstract = {Both in the scientific literature and in industry, semantic and context-aware Natural Language Processing-based solutions have been gaining importance in recent years. The possibilities and performance shown by these models when dealing with complex Language Understanding tasks are unquestionable, from conversational agents to the fight against disinformation in social networks. In addition, considerable attention is also being paid to developing multilingual models to tackle the language bottleneck. The growing need to provide more complex models implementing all these features has been accompanied by an increase in their size, without being conservative in the number of dimensions required. This paper aims to give a comprehensive account of the impact of a wide variety of dimensionality reduction techniques on the performance of different state-of-the-art multilingual Siamese Transformers, including unsupervised techniques such as linear and nonlinear feature extraction, feature selection, and manifold methods. In order to evaluate the effects of these techniques, we considered the multilingual extended version of the Semantic Textual Similarity Benchmark (mSTSb) and two different baseline approaches, one using the pre-trained version of several models and another using their fine-tuned STS version. The results show that it is possible to achieve an average reduction in the number of dimensions of 91.58%±2.59% and 54.65%±32.20%, respectively. This work has also considered the consequences of dimensionality reduction for visualization purposes. The results of this study will significantly contribute to the understanding of how different tuning approaches affect performance on semantic-aware tasks, and of how dimensionality reduction techniques deal with the high-dimensional embeddings computed for the STS task and their potential for highly demanding NLP tasks.},
howpublished = {CoRR},
keywords = {Artificial Intelligence, Computational Intelligence, Deep Learning, Embeddings, Feature Selection, Machine Learning, Natural Language Processing, Sentence alignment, Text Analysis, Transformers},
pubstate = {published},
tppubtype = {misc}
}
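
The evaluation pipeline sketched in that abstract (encode sentence pairs with a multilingual Siamese Transformer, reduce the embedding dimensionality with an unsupervised technique, and measure how much STS performance survives) can be outlined as below. This is a toy illustration under assumptions: the model name, the three bilingual example pairs, their gold scores, and the PCA target dimension are all made up for demonstration and are not the paper's setup.

```python
# Hedged sketch: unsupervised PCA reduction of multilingual sentence embeddings,
# scored with the usual STS protocol (Spearman correlation of cosine similarities
# against gold similarity scores). Data and dimensions are illustrative only.
import numpy as np
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import cosine_similarity
from scipy.stats import spearmanr
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("paraphrase-multilingual-MiniLM-L12-v2")

# Toy bilingual STS-style pairs with hypothetical gold scores (0-5 scale).
pairs = [
    ("A dog runs in the park.", "Un perro corre en el parque.", 5.0),
    ("A dog runs in the park.", "La bolsa cerró al alza hoy.", 0.5),
    ("She is reading a book.", "Ella está leyendo una novela.", 4.0),
]
s1 = model.encode([p[0] for p in pairs])
s2 = model.encode([p[1] for p in pairs])
gold = np.array([p[2] for p in pairs])

def sts_score(a, b):
    # Spearman correlation between gold scores and pairwise cosine similarities.
    sims = np.diag(cosine_similarity(a, b))
    return spearmanr(gold, sims).correlation

full = sts_score(s1, s2)

# Fit PCA on the pooled embeddings and project both sides to fewer dimensions.
pca = PCA(n_components=2)  # tiny target because this toy set has only 6 vectors
pca.fit(np.vstack([s1, s2]))
reduced = sts_score(pca.transform(s1), pca.transform(s2))

print(f"Spearman with full {s1.shape[1]}-dim embeddings: {full:.3f}")
print(f"Spearman after PCA reduction: {reduced:.3f}")
```

On a real mSTSb-style benchmark the same loop would simply swap in the full pair list and a larger `n_components`, which is the kind of comparison the reported 91.58% and 54.65% dimension reductions refer to.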