<?xml version="1.0" encoding="UTF-8" ?>
<oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
<dc:title>Asr based pronunciation evaluation with automatically generated competing vocabulary and classifier fusion</dc:title>
<dc:creator>Vivanco-Torres, Roberto</dc:creator>
<dc:creator>Becerra-Yoma, Nestor</dc:creator>
<dc:creator>Wuth-Sepúlveda, Jorge</dc:creator>
<dc:creator>Molina-Sánchez, Carlos</dc:creator>
<dc:description>In this paper, the application of automatic speech recognition (ASR) technology in computer aided pronunciation training (CAPT) is addressed. A method to automatically generate the competitive lexicon, required by an ASR engine to compare the pronunciation of a target word with its correct and wrong phonetic realizations, is proposed. In order to enable the efficient deployment of CAPT applications, the generation of this competitive lexicon does not require any human assistance or a priori information of mother language dependent error rules. Moreover, a Bayes based multi-classifier fusion approach to map ASR objective confidence scores to subjective evaluations in pronunciation assessment is presented. The method proposed here to generate a competitive lexicon given a target word leads to averaged subjective-objective score correlation equal to 0.67 and 0.82 with five and two levels of pronunciation quality, respectively. Finally, multi-classifier systems (MCS) provide a promising formal framework to combine poorly correlated scores in CAPT. When applied to ASR confidence metrics, MCS can lead to an increase of 2.4% and a reduction of 10.2% in subjective-objective score correlation and classification error, respectively, with two pronunciation quality levels. (c) 2009 Elsevier B.V. All rights reserved.</dc:description>
<dc:date>2009</dc:date>
<dc:type>info:eu-repo/semantics/article</dc:type>
<dc:type>info:eu-repo/semantics/publishedVersion</dc:type>
<dc:identifier>http://hdl.handle.net/10533/197747</dc:identifier>
<dc:identifier>doi: 10.1016/j.specom.2009.01.002</dc:identifier>
<dc:identifier>wos: WOS:000265988800001</dc:identifier>
<dc:identifier>issn: 0167-6393</dc:identifier>
<dc:language>eng</dc:language>
<dc:relation>instname: Conicyt</dc:relation>
<dc:relation>reponame: Repositorio Digital RI2.0</dc:relation>
<dc:relation>instname: Conicyt</dc:relation>
<dc:relation>reponame: Repositorio Digital RI2.0</dc:relation>
<dc:relation>info:eu-repo/grantAgreement/Fondef/D05I10243</dc:relation>
<dc:relation>info:eu-repo/semantics/dataset/hdl.handle.net/10533/93477</dc:relation>
<dc:rights>info:eu-repo/semantics/openAccess</dc:rights>
<dc:coverage>NLD</dc:coverage>
<dc:coverage>AMSTERDAM</dc:coverage>
<dc:publisher>ELSEVIER SCIENCE BV</dc:publisher>
<dc:source>SPEECH COMMUNICATION</dc:source>
<dc:source>reponame:Artículos CONICYT</dc:source>
<dc:source>instname:CONICYT Chile</dc:source>
<dc:source>instacron:CONICYT</dc:source>
<about>
<provenance>
<originDescription altered="" harvestDate="">
<datestamp>2020-01-27T14:04:19Z</datestamp>
<metadataNamespace>http://www.openarchives.org/OAI/2.0/oai_dc/</metadataNamespace>
<repositoryName>Artículos CONICYT - CONICYT Chile</repositoryName>
</originDescription>
</provenance>
</about>
</oai_dc:dc>
<?xml version="1.0" encoding="UTF-8" ?>
<metadata xmlns="http://www.lyncode.com/xoai" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.lyncode.com/xoai http://www.lyncode.com/xsd/xoai.xsd">
<element name="dc">
<element name="title">
<element name="none">
<field name="value">Asr based pronunciation evaluation with automatically generated competing vocabulary and classifier fusion</field>
</element>
</element>
<element name="creator">
<element name="none">
<field name="value">Vivanco-Torres, Roberto</field>
<field name="value">Becerra-Yoma, Nestor</field>
<field name="value">Wuth-Sepúlveda, Jorge</field>
<field name="value">Molina-Sánchez, Carlos</field>
</element>
</element>
<element name="description">
<element name="none">
<field name="value">In this paper, the application of automatic speech recognition (ASR) technology in computer aided pronunciation training (CAPT) is addressed. A method to automatically generate the competitive lexicon, required by an ASR engine to compare the pronunciation of a target word with its correct and wrong phonetic realizations, is proposed. In order to enable the efficient deployment of CAPT applications, the generation of this competitive lexicon does not require any human assistance or a priori information of mother language dependent error rules. Moreover, a Bayes based multi-classifier fusion approach to map ASR objective confidence scores to subjective evaluations in pronunciation assessment is presented. The method proposed here to generate a competitive lexicon given a target word leads to averaged subjective-objective score correlation equal to 0.67 and 0.82 with five and two levels of pronunciation quality, respectively. Finally, multi-classifier systems (MCS) provide a promising formal framework to combine poorly correlated scores in CAPT. When applied to ASR confidence metrics, MCS can lead to an increase of 2.4% and a reduction of 10.2% in subjective-objective score correlation and classification error, respectively, with two pronunciation quality levels. (c) 2009 Elsevier B.V. All rights reserved.</field>
</element>
</element>
<element name="publisher">
<element name="none">
<field name="value">ELSEVIER SCIENCE BV</field>
</element>
</element>
<element name="date">
<element name="none">
<field name="value">2009</field>
</element>
</element>
<element name="type">
<element name="none">
<field name="value">info:eu-repo/semantics/article</field>
<field name="value">info:eu-repo/semantics/publishedVersion</field>
</element>
</element>
<element name="identifier">
<element name="none">
<field name="value">http://hdl.handle.net/10533/197747</field>
<field name="value">doi: 10.1016/j.specom.2009.01.002</field>
<field name="value">wos: WOS:000265988800001</field>
<field name="value">issn: 0167-6393</field>
</element>
</element>
<element name="source">
<element name="none">
<field name="value">SPEECH COMMUNICATION</field>
<field name="value">reponame:Artículos CONICYT</field>
<field name="value">instname:CONICYT Chile</field>
<field name="value">instacron:CONICYT</field>
</element>
</element>
<element name="relation">
<element name="none">
<field name="value">instname: Conicyt</field>
<field name="value">reponame: Repositorio Digital RI2.0</field>
<field name="value">instname: Conicyt</field>
<field name="value">reponame: Repositorio Digital RI2.0</field>
<field name="value">info:eu-repo/grantAgreement/Fondef/D05I10243</field>
<field name="value">info:eu-repo/semantics/dataset/hdl.handle.net/10533/93477</field>
</element>
</element>
<element name="coverage">
<element name="none">
<field name="value">NLD</field>
<field name="value">AMSTERDAM</field>
</element>
</element>
<element name="rights">
<element name="none">
<field name="value">info:eu-repo/semantics/openAccess</field>
</element>
</element>
<element name="language">
<element name="none">
<field name="value">eng</field>
</element>
</element>
</element>
<element name="others">
<field name="lastModifyDate">2020-01-27T14:04:19Z</field>
</element>
</metadata>