diff --git a/README.md b/README.md
index e51b6f6..6e342f6 100644
--- a/README.md
+++ b/README.md
@@ -70,21 +70,30 @@ Coming soon!
 
 ## Citation
 Please cite the model with the following citation:
 ```bibtex
-@INPROCEEDINGS{shah-etal-2022-flang,
-    author = {Shah, Raj Sanjay and
-      Chawla, Kunal and
-      Eidnani, Dheeraj and
-      Shah, Agam and
-      Du, Wendi and
-      Chava, Sudheer and
-      Raman, Natraj and
-      Smiley, Charese and
-      Chen, Jiaao and
-      Yang, Diyi },
-    title = {When FLUE Meets FLANG: Benchmarks and Large Pretrained Language Model for Financial Domain},
-    booktitle = {Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
-    year = {2022},
-    publisher = {Association for Computational Linguistics}
+@inproceedings{shah-etal-2022-flue,
+    title = "When {FLUE} Meets {FLANG}: Benchmarks and Large Pretrained Language Model for Financial Domain",
+    author = "Shah, Raj and
+      Chawla, Kunal and
+      Eidnani, Dheeraj and
+      Shah, Agam and
+      Du, Wendi and
+      Chava, Sudheer and
+      Raman, Natraj and
+      Smiley, Charese and
+      Chen, Jiaao and
+      Yang, Diyi",
+    editor = "Goldberg, Yoav and
+      Kozareva, Zornitsa and
+      Zhang, Yue",
+    booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
+    month = dec,
+    year = "2022",
+    address = "Abu Dhabi, United Arab Emirates",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/2022.emnlp-main.148",
+    doi = "10.18653/v1/2022.emnlp-main.148",
+    pages = "2322--2335",
+    abstract = "Pre-trained language models have shown impressive performance on a variety of tasks and domains. Previous research on financial language models usually employs a generic training scheme to train standard model architectures, without completely leveraging the richness of the financial data. We propose a novel domain specific Financial LANGuage model (FLANG) which uses financial keywords and phrases for better masking, together with span boundary objective and in-filing objective. Additionally, the evaluation benchmarks in the field have been limited. To this end, we contribute the Financial Language Understanding Evaluation (FLUE), an open-source comprehensive suite of benchmarks for the financial domain. These include new benchmarks across 5 NLP tasks in financial domain as well as common benchmarks used in the previous research. Experiments on these benchmarks suggest that our model outperforms those in prior literature on a variety of NLP tasks. Our models, code and benchmark data will be made publicly available on Github and Huggingface.",
 }
 ```