voice text transcription, in which the voice segments cut in the previous step are transcribed into a complete meeting text; the third part is meeting-minutes extraction, which extracts meaningful key phrases and abstract sentences from the complete meeting text; the fourth part is next-work-plan extraction, in which an emotion recognition algorithm filters out negative-emotion summaries from the multiple meeting minutes, so as to reveal the meeting's next work plan. By comparing and evaluating different baseline algorithms on real-world audio meeting datasets, experiments have proven that SmartMeeting can accurately summarize meetings and analyze agreed actions.
@article{DBLP:journals/monet/LiuLWSWDLS20,
  author        = {Liu, Hui and
                   Liu, Huan and
                   Wang, Xin and
                   Shao, Wei and
                   Wang, Xiao and
                   Du, Junzhao and
                   Liono, Jonathan and
                   Salim, Flora D.},
  title         = {{SmartMeeting}: An Novel Mobile Voice Meeting Minutes Generation
                   and Analysis System},
  journal       = {Mobile Networks and Applications},
  volume        = {25},
  number        = {2},
  pages         = {521--536},
  year          = {2020},
  doi           = {10.1007/s11036-019-01310-x},
  url           = {https://doi.org/10.1007/s11036-019-01310-x},
  timestamp     = {Tue, 21 Apr 2020 01:00:00 +0200},
  biburl        = {https://dblp.org/rec/journals/monet/LiuLWSWDLS20.bib},
  bibsource     = {dblp computer science bibliography, https://dblp.org},
  internal-note = {"An Novel" is the title as published; intentionally not corrected}
}
© 2021 Flora Salim - CRUISE Research Group.