@techreport{TR-IC-24-01,
   number = {IC-24-01},
   author = {Fagner Leal and André Santanchè and Claudia Bauzer Medeiros},
   title = {{A bibliographic survey of Neural Language Models with
                   applications in topic modeling and clinical studies}},
   month = {August},
   year = {2024},
   institution = {Institute of Computing, University of Campinas},
   note = {In English, 25 pages.
    \par\selectlanguage{english}\textbf{Abstract}
       This text presents a literature review of Neural Language Models,
       deep neural networks that encode a given language. The review
       covers two main topics: (i) Transformer-based neural networks,
       established as the state of the art for Natural Language
       Processing (NLP) problems and a suitable approach to training
       language models; and (ii) Neural Language Models that compress
       the statistical semantics of textual data into word vectors.
       These word vectors computationally represent the basic units of
       the language at hand. Obtaining a computational representation
       for textual constructs is, in fact, a long-standing problem that
       has challenged diverse NLP approaches. We analyze the use of
       language models for Topic Modeling and for Semantic Annotation
       of Virtual Patients. The establishment of transformer-based
       language models opens up vast possibilities and perspectives on
       interdisciplinary topics. The text concludes with a critical
       analysis of issues regarding applications based on language
       models.
  }
}