@phdthesis{MoellerPhd,
  author   = {M{\"o}ller, Andreas},
  title    = {Leveraging Mobile Interaction with Multimodal and Sensor-Driven User Interfaces},
  school   = {Technische Universit{\"a}t M{\"u}nchen},
  year     = {2015},
  month    = jun,
  abstract = {The increasing functionality of mobile devices and operating systems often entails an increment in complexity and complicatedness. This problem takes on greater significance with the opening towards new application areas (e.g., health and fitness) and new user groups (e.g., technically unversed people and the elderly). Interaction channels or modalities play here a central role: The increasingly pervasive use (in the context of Ubiquitous Computing) requires a stronger adaptation of these channels to changing usage contexts in order to ensure optimal interaction. This work investigates multimodality as an approach to leverage the user experience with mobile devices. The use of multimodality is motivated by the numerous advantages identified in prior work, such as naturalness, efficiency, robustness, and popularity with users. The design space of multimodal interaction, as well as comprehensive support of multimodality from scratch in the development process has not been investigated so far in a holistic way for mobile devices. The central research question of this dissertation is how to make multimodality usable to achieve better mobile interaction. This includes ease of operation and usability in existing scenarios, as well as opening up entirely new use cases. The dissertation focuses on two aspects. First, an improvement from a user-centric point of view shall be achieved (measurable by, e.g., efficiency, error rate, and usability metrics). Therefore, the use of selected modalities and interaction methods is outlined in exemplary use cases, leading to a profound understanding of multimodality and its advantages in heterogeneous application areas. Second, from a developer-centric point of view, the implementation of multimodal interaction methods shall be simplified, thereby stronger motivating the consideration of multimodality in application development. To this end, a rule-based model and a software framework is presented, which supports multimodal in- and output and makes it usable for application programming. Moreover, approaches for end users to define multimodal behavior, as well as awareness on active modalities, are presented and evaluated. Beyond that, the dissertation exposes suitable evaluation methods for multimodal systems and points out characteristics of multimodal systems to be respected. The work thereby highlights and discusses all fundamental steps of the software development process, from design, prototyping, implementation to evaluation, with regard to mobile multimodal interaction.},
}