Publications


2024

    [PDF] [DOI] [Bibtex]
    @inproceedings{correll2024bodydata,
    abstract = {With changing attitudes around knowledge, medicine, art, and technology, the human body has become a source of information and, ultimately, shareable and analyzable data. Centuries of illustrations and visualizations of the body occur within particular historical, social, and political contexts. These contexts are enmeshed in different so-called data cultures: ways that data, knowledge, and information are conceptualized and collected, structured and shared. In this work, we explore how information about the body was collected as well as the circulation, impact, and persuasive force of the resulting images. We show how mindfulness of data cultural influences remains crucial for today's designers, researchers, and consumers of visualizations. We conclude with a call for the field to reflect on how visualizations are not timeless and contextless mirrors on objective data, but as much a product of our time and place as the visualizations of the past.},
    author = {Correll, Michael and Garrison, Laura A.},
    booktitle = {Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems (CHI '24)},
    doi = {10.48550/arXiv.2402.05014},
    publisher = {arXiv},
    title = {When the Body Became Data: Historical Data Cultures and Anatomical Illustration},
    year = {2024},
    month = {Feb},
    pdf = {pdfs/garrisonCHI24.pdf},
    images = {images/garrisonCHI24.png},
    thumbnails = {images/garrisonCHI24.png},
    project = {VIDI}
    }

2023

    [PDF] [Bibtex]
    @misc{balaka2023MoBaExplorer,
    booktitle = {Eurographics Workshop on Visual Computing for Biology and Medicine (Posters)},
    editor = {Garrison, Laura and Linares, Mathieu},
    title = {{MoBa Explorer: Enabling the navigation of data from the Norwegian Mother, Father, and Child cohort study (MoBa)}},
    author = {Balaka, Hanna and Garrison, Laura A. and Valen, Ragnhild and Vaudel, Marc},
    year = {2023},
    howpublished = {Poster presented at VCBM 2023.},
    publisher = {The Eurographics Association},
    abstract = {Studies in public health have generated large amounts of data helping researchers to better understand human diseases and improve patient care. The Norwegian Mother, Father and Child Cohort Study (MoBa) has collected information about pregnancy and childhood to better understand this crucial time of life. However, the volume of the data and its sensitive nature make its dissemination and examination challenging. We present a work-in-progress design study and accompanying web application, the MoBa Explorer, which presents aggregated MoBa study data genotypes and phenotypes. Our research explores how to serve two distinct purposes in one application: (1) allow researchers to interactively explore MoBa data to identify variables of interest for further study and (2) provide MoBa study details to an interested general public.},
    pdf = {pdfs/balaka2023MoBaExplorer.pdf},
    images = {images/balaka2023MoBaExplorer.png},
    thumbnails = {images/balaka2023MoBaExplorer-thumb.png},
    project = {VIDI}
    }
    [PDF] [DOI] [Bibtex]
    @inproceedings {budich2023AIstories,
    booktitle = {Eurographics Workshop on Visual Computing for Biology and Medicine},
    editor = {Hansen, Christian and Procter, James and Raidou, Renata G. and Jönsson, Daniel and Höllt, Thomas},
    title = {{Reflections on AI-Assisted Character Design for Data-Driven Medical Stories}},
    author = {Budich, Beatrice and Garrison, Laura A. and Preim, Bernhard and Meuschke, Monique},
    year = {2023},
    publisher = {The Eurographics Association},
    ISSN = {2070-5786},
    ISBN = {978-3-03868-216-5},
    DOI = {10.2312/vcbm.20231216},
    abstract = {Data-driven storytelling has experienced significant growth in recent years to become a common practice in various application areas, including healthcare. Within the realm of medical narratives, characters play a pivotal role in connecting audiences with data and conveying complex medical information in an engaging manner that may influence positive behavioral and lifestyle changes on the part of the viewer. However, the process of designing characters that are both informative and engaging remains a challenge. In this paper, we propose an AI-assisted pipeline for character design in the context of data-driven medical stories. Our iterative pipeline blends design sensibilities with automation to reduce the time and artistic expertise needed to develop characters reflective of the underlying data, even when that data is time-oriented as in a cohort study.},
    pdf = {pdfs/budichAIstories.pdf},
    images = {images/budichAIstories.png},
    thumbnails = {images/budichAIstories-thumb.png},
    project = {VIDI}
    }
    [PDF] [DOI] [Bibtex]
    @article{mittenentzwei2023heros,
    journal = {Computer Graphics Forum},
    title = {{Do Disease Stories need a Hero? Effects of Human Protagonists on a Narrative Visualization about Cerebral Small Vessel Disease}},
    author = {Mittenentzwei, Sarah and Weiß, Veronika and Schreiber, Stefanie and Garrison, Laura A. and Bruckner, Stefan and Pfister, Malte and Preim, Bernhard and Meuschke, Monique},
    year = {2023},
    publisher = {The Eurographics Association and John Wiley \& Sons Ltd.},
    ISSN = {1467-8659},
    DOI = {10.1111/cgf.14817},
    abstract = {Authors use various media formats to convey disease information to a broad audience, from articles and videos to interviews or documentaries. These media often include human characters, such as patients or treating physicians, who are involved with the disease. While artistic media, such as hand-crafted illustrations and animations are used for health communication in many cases, our goal is to focus on data-driven visualizations. Over the last decade, narrative visualization has experienced increasing prominence, employing storytelling techniques to present data in an understandable way. Similar to classic storytelling formats, narrative medical visualizations may also take a human character-centered design approach. However, the impact of this form of data communication on the user is largely unexplored. This study investigates the protagonist's influence on user experience in terms of engagement, identification, self-referencing, emotional response, perceived credibility, and time spent in the story. Our experimental setup utilizes a character-driven story structure for disease stories derived from Joseph Campbell's Hero's Journey. Using this structure, we generated three conditions for a cerebral small vessel disease story that vary by their protagonist: (1) a patient, (2) a physician, and (3) a base condition with no human protagonist. These story variants formed the basis for our hypotheses on the effect of a human protagonist in disease stories, which we evaluated in an online study with 30 participants. Our findings indicate that a human protagonist exerts various influences on the story perception and that these also vary depending on the type of protagonist.},
    pdf = {pdfs/garrison-diseasestories.pdf},
    images = {images/garrison-diseasestories.png},
    thumbnails = {images/garrison-diseasestories-thumb.png},
    project = {VIDI}
    }
    [PDF] [Bibtex]
    @incollection{garrison2023narrativemedvisbook,
    title = {Current Approaches in Narrative Medical Visualization},
    author = {Garrison, Laura Ann and Meuschke, Monique and Preim, Bernhard and Bruckner, Stefan},
    year = 2023,
    booktitle = {Approaches for Science Illustration and Communication},
    publisher = {Springer},
    address = {Gewerbestrasse 11, 6330 Cham, Switzerland},
    note = {in press},
    editor = {Mark Roughley},
    chapter = 4,
    pdf = {pdfs/garrison2023narrativemedvisbook.pdf},
    images = {images/garrison2023narrativemedvisbook.png},
    thumbnails = {images/garrison2023narrativemedvisbook-thumb.png},
    project = {VIDI}
    }
    [PDF] [DOI] [Bibtex]
    @article{mittenentzwei2023investigating,
    title={Investigating user behavior in slideshows and scrollytelling as narrative genres in medical visualization},
    author={Mittenentzwei, Sarah and Garrison, Laura A and M{\"o}rth, Eric and Lawonn, Kai and Bruckner, Stefan and Preim, Bernhard and Meuschke, Monique},
    journal={Computers \& Graphics},
    year={2023},
    publisher={Elsevier},
    abstract={In this study, we explore the impact of genre and navigation on user comprehension, preferences, and behaviors when experiencing data-driven disease stories. Our between-subject study (n=85) evaluated these aspects in-the-wild, with results pointing towards some general design considerations to keep in mind when authoring data-driven disease stories. Combining storytelling with interactive new media techniques, narrative medical visualization is a promising approach to communicating topics in medicine to a general audience in an accessible manner. For patients, visual storytelling may help them to better understand medical procedures and treatment options for more informed decision-making, boost their confidence and alleviate anxiety, and promote stronger personal health advocacy. Narrative medical visualization provides the building blocks for producing data-driven disease stories, which may be presented in several visual styles. These different styles correspond to different narrative genres, e.g., a Slideshow. Narrative genres can employ different navigational approaches. For instance, a Slideshow may rely on click interactions to advance through a story, while Scrollytelling typically uses vertical scrolling for navigation. While a common goal of a narrative medical visualization is to encourage a particular behavior, e.g., quitting smoking, it is unclear to what extent the choice of genre influences subsequent user behavior. Our study opens a new research direction into choice of narrative genre on user preferences and behavior in data-driven disease stories.},
    pdf = {pdfs/mittenentzwei2023userbehavior.pdf},
    images = {images/mittenentzwei2023userbehavior.png},
    thumbnails = {images/mittenentzwei2023userbehavior-thumb.png},
    project = {VIDI},
    doi={10.1016/j.cag.2023.06.011}
    }
    [PDF] [DOI] [Bibtex]
    @article{garrison2023molaesthetics,
    author={Garrison, Laura A. and Goodsell, David S. and Bruckner, Stefan},
    journal={IEEE Computer Graphics and Applications},
    title={Changing Aesthetics in Biomolecular Graphics},
    year={2023},
    volume={43},
    number={3},
    pages={94-101},
    doi={10.1109/MCG.2023.3250680},
    abstract={Aesthetics for the visualization of biomolecular structures have evolved over the years according to technological advances, user needs, and modes of dissemination. In this article, we explore the goals, challenges, and solutions that have shaped the current landscape of biomolecular imagery from the overlapping perspectives of computer science, structural biology, and biomedical illustration. We discuss changing approaches to rendering, color, human–computer interface, and narrative in the development and presentation of biomolecular graphics. With this historical perspective on the evolving styles and trends in each of these areas, we identify opportunities and challenges for future aesthetics in biomolecular graphics that encourage continued collaboration from multiple intersecting fields.},
    pdf = {pdfs/garrison-aestheticsmol.pdf},
    images = {images/garrison-aestheticsmol.png},
    thumbnails = {images/garrison-aestheticsmol-thumb.png},
    project = {VIDI}
    }

2022

    [PDF] [Bibtex]
    @phdthesis{moerth2022thesis,
    title = {Scaling Up Medical Visualization: Multi-Modal, Multi-Patient, and Multi-Audience Approaches for Medical Data Exploration, Analysis and Communication},
    author = {Mörth, Eric},
    year = 2022,
    month = {September},
    isbn = 9788230862193,
    url = {https://hdl.handle.net/11250/3014336},
    school = {Department of Informatics, University of Bergen, Norway},
    abstract = {
    Medical visualization is one of the most application-oriented areas of visualization research. Close collaboration with medical experts is essential for interpreting medical imaging data and creating meaningful visualization techniques and visualization applications. Cancer is one of the most common causes of death, and with increasing average age in developed countries, gynecological malignancy case numbers are rising. Modern imaging techniques are an essential tool in assessing tumors and produce an increasing amount of imaging data that radiologists must interpret. Besides the number of imaging modalities, the number of patients is also rising, leading to visualization solutions that must be scaled up to address the rising complexity of multi-modal and multi-patient data. Furthermore, medical visualization is not only targeted toward medical professionals but also has the goal of informing patients, relatives, and the public about the risks of certain diseases and potential treatments. Therefore, we identify the need to scale medical visualization solutions to cope with multi-audience data.
    This thesis addresses the scaling of these dimensions in different contributions we made. First, we present our techniques to scale medical visualizations in multiple modalities. We introduced a visualization technique using small multiples to display the data of multiple modalities within one imaging slice. This allows radiologists to explore the data efficiently without having several juxtaposed windows. In the next step, we developed an analysis platform using radiomic tumor profiling on multiple imaging modalities to analyze cohort data and to find new imaging biomarkers. Imaging biomarkers are indicators based on imaging data that predict clinical outcome related variables. Radiomic tumor profiling is a technique that generates potential imaging biomarkers based on first and second-order statistical measurements. The application allows medical experts to analyze the multi-parametric imaging data to find potential correlations between clinical parameters and the radiomic tumor profiling data. This approach scales up in two dimensions, multi-modal and multi-patient. In a later version, we added features to scale the multi-audience dimension by making our application applicable to cervical and prostate cancer data and the endometrial cancer data the application was designed for. In a subsequent contribution, we focus on tumor data on another scale and enable the analysis of tumor sub-parts by using the multi-modal imaging data in a hierarchical clustering approach. Our application finds potentially interesting regions that could inform future treatment decisions. In another contribution, the digital probing interaction, we focus on multi-patient data. The imaging data of multiple patients can be compared to find interesting tumor patterns potentially linked to the aggressiveness of the tumors. Lastly, we scale the multi-audience dimension with our similarity visualization applicable to endometrial cancer research, neurological cancer imaging research, and machine learning research on the automatic segmentation of tumor data. In contrast to the previously highlighted contributions, our last contribution, ScrollyVis, focuses primarily on multi-audience communication. We enable the creation of dynamic scientific scrollytelling experiences for a specific or general audience. Such stories can be used for specific use cases such as patient-doctor communication or communicating scientific results via stories targeting the general audience in a digital museum exhibition.
    Our proposed applications and interaction techniques have been demonstrated in application use cases and evaluated with domain experts and focus groups. As a result, we brought some of our contributions to usage in practice at other research institutes. We want to evaluate their impact on other scientific fields and the general public in future work.
    },
    pdf = {pdfs/Moerth-PhD-Thesis-2022.pdf},
    images = {images/Moerth-PhD-Thesis-2022.PNG},
    thumbnails = {images/Moerth-PhD-Thesis-2022.PNG},
    project = {ttmedvis}
    }
    [PDF] [DOI] [YT] [Bibtex]
    @inproceedings {Trautner-2022-HCP,
    author = {Trautner, Thomas and Sbardellati, Maximilian and Stoppel, Sergej and Bruckner, Stefan},
    title = {{Honeycomb Plots: Visual Enhancements for Hexagonal Maps}},
    booktitle = {Proc. of VMV 2022: Vision, Modeling, and Visualization},
    editor = {Bender, Jan and Botsch, Mario and Keim, Daniel A.},
    pages = {65--73},
    year = {2022},
    publisher = {The Eurographics Association},
    ISBN = {978-3-03868-189-2},
    DOI = {10.2312/vmv.20221205},
    abstract = {Aggregation through binning is a commonly used technique for visualizing large, dense, and overplotted two-dimensional data sets. However, aggregation can hide nuanced data-distribution features and complicates the display of multiple data-dependent variables, since color mapping is the primary means of encoding. In this paper, we present novel techniques for enhancing hexplots with spatialization cues while avoiding common disadvantages of three-dimensional visualizations. In particular, we focus on techniques relying on preattentive features that exploit shading and shape cues to emphasize relative value differences. Furthermore, we introduce a novel visual encoding that conveys information about the data distributions or trends within individual tiles. Based on multiple usage examples from different domains and real-world scenarios, we generate expressive visualizations that increase the information content of classic hexplots and validate their effectiveness in a user study.},
    pdf = "pdfs/Trautner-2022-HCP.pdf",
    thumbnails = "images/Trautner-2022-HCP-thumb.png",
    images = "images/Trautner-2022-HCP-thumb.png",
    youtube = "https://youtu.be/mU7QFVP3yKQ",
    git = "https://github.com/TTrautner/HoneycombPlots"
    }
    [PDF] [Bibtex]
    @phdthesis{trautner2022thesis,
    title = {Visualization Hybridization with Spatialization Cues},
    author = {Thomas Bernhard Trautner},
    year = 2022,
    month = {November},
    isbn = 9788230855515,
    url = {https://hdl.handle.net/11250/3031041},
    school = {Department of Informatics, University of Bergen, Norway},
    abstract = {
    Visualization as a tool for visual processing of any underlying data has proven to be an accepted and legitimate part of the scientific reasoning process. Many different techniques help to gain new insights from captured phenomena, support the development or evaluation of hypotheses about collected data, reveal potential misconceptions or false assumptions, simplify communicating knowledge and novel findings, and enable a multitude of additional opportunities. The reason for this effectiveness is that the human visual system is ideally suited to capture and process visually encoded data. The development of visualization from a niche to an established scientific field has made a significant contribution to this success story. A large number of journals, conferences, seminars, and workshops regularly publish new results, evaluate presented approaches, and help to make knowledge globally accessible. However, this large number of contributions tailored to variable user groups, the underlying data, and the wide variety of tasks that could be performed with them, emphasizes the plethora of available techniques and the resulting difficulty in choosing the most suitable visualizations.
    Therefore, we investigated common data sets and analyzed typical tasks normally performed with them. Based on this, we selected well-established and most effective visualization techniques, combining them to form a hybrid representation. The goal of such a visualization hybridization was to merge advantages of individual techniques and, thereby, simultaneously eliminate their limitations. We present so-called hybrid vigors that make the underlying visualizations more widely applicable instead of either having to change required techniques sequentially, or not being able to perform certain tasks at all. Our contributions are intended to simplify the process of finding suitable visualizations for already established data sets. During our research, we focused on two-dimensional point data, depicted on the one hand as scatter plots and, on the other hand, as relationships between consecutive points, such as in line charts. Our techniques can be used especially when data sets are so large, dense, and overplotted that conventional techniques reach their limits. We show that hybrid representations are well suited for combining discrete, continuous, or aggregated forms of visual representation. Our hybridizations additionally exploit spatialization cues. Such visual cues emphasize spatiality of the underlying data through shading, without having to embed the data in 3D space including its potential disadvantages. We chose this method of encoding as we consider it the most appropriate choice, given that visualization users interact naturally and preattentively with a spatial world on a daily basis.
    },
    pdf = {pdfs/Trautner-PhD-Thesis-2022.pdf},
    images = {images/Trautner-2022-PhD.png},
    thumbnails = {images/Trautner-2022-PhD.png},
    project = {MetaVis}
    }
    [PDF] [Bibtex]
    @phdthesis{garrison2022thesis,
    title = {From Molecules to the Masses: Visual Exploration, Analysis, and Communication of Human Physiology},
    author = {Laura Ann Garrison},
    year = 2022,
    month = {September},
    isbn = 9788230841389,
    url = {https://hdl.handle.net/11250/3015990},
    school = {Department of Informatics, University of Bergen, Norway},
    abstract = {
    The overarching theme of this thesis is the cross-disciplinary application of
    medical illustration and visualization techniques to address challenges in
    exploring, analyzing, and communicating aspects of physiology to audiences
    with differing expertise.
    Describing the myriad biological processes occurring in living beings over
    time, the science of physiology is complex and critical to our understanding
    of how life works. It spans many spatio-temporal scales to combine and bridge
    the basic sciences (biology, physics, and chemistry) to medicine. Recent
    years have seen an explosion of new and finer-grained experimental and
    acquisition methods to characterize these data. The volume and complexity of
    these data necessitate effective visualizations to complement standard
    analysis practice. Visualization approaches must carefully consider and be
    adaptable to the user's main task, be it exploratory, analytical, or
    communication-oriented. This thesis contributes to the areas of theory,
    empirical findings, methods, applications, and research replicability in
    visualizing physiology. Our contributions open with a state-of-the-art report
    exploring the challenges and opportunities in visualization for physiology.
    This report is motivated by the need for visualization researchers, as well
    as researchers in various application domains, to have a centralized,
    multiscale overview of visualization tasks and techniques. Using a
    mixed-methods search approach, this is the first report of its kind to
    broadly survey the space of visualization for physiology. Our approach to
    organizing the literature in this report enables the lookup of topics of
    interest according to spatio-temporal scale. It further subdivides works
    according to any combination of three high-level visualization tasks:
    exploration, analysis, and communication. This provides an easily-navigable
    foundation for discussion and future research opportunities for audience- and
    task-appropriate visualization for physiology. From this report, we identify
    two key areas for continued research that begin narrowly and subsequently
    broaden in scope: (1) exploratory analysis of multifaceted physiology data
    for expert users, and (2) communication for experts and non-experts alike.
    Our investigation of multifaceted physiology data takes place over two
    studies. Each targets processes occurring at different spatio-temporal scales
    and includes a case study with experts to assess the applicability of our
    proposed method. At the molecular scale, we examine data from magnetic
    resonance spectroscopy (MRS), an advanced biochemical technique used to
    identify small molecules (metabolites) in living tissue that are indicative
    of metabolic pathway activity. Although highly sensitive and specific, the
    output of this modality is abstract and difficult to interpret. Our design
    study investigating the tasks and requirements for expert exploratory
    analysis of these data led to SpectraMosaic, a novel application enabling
    domain researchers to analyze any permutation of metabolites in ratio form
    for an entire cohort, or by sample region, individual, acquisition date, or
    brain activity status at the time of acquisition. A second approach considers
    the exploratory analysis of multidimensional physiological data at the
    opposite end of the spatio-temporal scale: population. An effective
    exploratory data analysis workflow critically must identify interesting
    patterns and relationships, which becomes increasingly difficult as data
    dimensionality increases. Although this can be partially addressed with
    existing dimensionality reduction techniques, the nature of these techniques
    means that subtle patterns may be lost in the process. In this approach, we
    describe DimLift, an iterative dimensionality reduction technique enabling
    user identification of interesting patterns and relationships that may lie
    subtly within a dataset through dimensional bundles. Key to this method is
    the user's ability to steer the dimensionality reduction technique to follow
    their own lines of inquiry.
    Our third question considers the crafting of visualizations for communication
    to audiences with different levels of expertise. It is natural to expect that
    experts in a topic may have different preferences and criteria to evaluate a
    visual communication relative to a non-expert audience. This impacts the
    success of an image in communicating a given scenario. Drawing from diverse
    techniques in biomedical illustration and visualization, we conducted an
    exploratory study of the criteria that audiences use when evaluating a
    biomedical process visualization targeted for communication. From this study,
    we identify opportunities for further convergence of biomedical illustration
    and visualization techniques for more targeted visual communication design.
    One opportunity that we discuss in greater depth is the development of
    semantically-consistent guidelines for the coloring of molecular scenes. The
    intent of such guidelines is to elevate the scientific literacy of non-expert
    audiences in the context of molecular visualization, which is particularly
    relevant to public health communication.
    All application code and empirical findings are open-sourced and available
    for reuse by the scientific community and public. The methods and findings
    presented in this thesis contribute to a foundation of cross-disciplinary
    biomedical illustration and visualization research, opening several
    opportunities for continued work in visualization for physiology.
    },
    pdf = {pdfs/garrison-phdthesis.pdf},
    images = {images/garrison-thesis.png},
    thumbnails = {images/garrison-thesis-thumb.png},
    project = {VIDI}
    }
    [PDF] [DOI] [Bibtex]
    @inproceedings {EichnerMoerth2022MuSIC,
    booktitle = {Eurographics Workshop on Visual Computing for Biology and Medicine},
    editor = {Renata G. Raidou and Björn Sommer and Torsten W. Kuhlen and Michael Krone and Thomas Schultz and Hsiang-Yun Wu},
    title = {{MuSIC: Multi-Sequential Interactive Co-Registration for Cancer Imaging Data based on Segmentation Masks}},
    author = {Eichner, Tanja* and Mörth, Eric* and Wagner-Larsen, Kari S. and Lura, Njål and Haldorsen, Ingfrid S. and Gröller, Eduard and Bruckner, Stefan and Smit, Noeska N.},
    note = {Best Paper Honorable Mention at VCBM 2022},
    project = {ttmedvis},
    year = {2022},
    abstract = {In gynecologic cancer imaging, multiple magnetic resonance imaging (MRI) sequences are acquired per patient to reveal different tissue characteristics. However, after image acquisition, the anatomical structures can be misaligned in the various sequences due to changing patient location in the scanner and organ movements. The co-registration process aims to align the sequences to allow for multi-sequential tumor imaging analysis. However, automatic co-registration often leads to unsatisfying results. To address this problem, we propose the web-based application MuSIC (Multi-Sequential Interactive Co-registration). The approach allows medical experts to co-register multiple sequences simultaneously based on a pre-defined segmentation mask generated for one of the sequences. Our contributions lie in our proposed workflow. First, a shape matching algorithm based on dual annealing searches for the tumor position in each sequence. The user can then interactively adapt the proposed segmentation positions if needed. During this procedure, we include a multi-modal magic lens visualization for visual quality assessment. Then, we register the volumes based on the segmentation mask positions. We allow for both rigid and deformable registration. Finally, we conducted a usability analysis with seven medical and machine learning experts to verify the utility of our approach. Our participants highly appreciate the multi-sequential setup and see themselves using MuSIC in the future.
    },
    publisher = {The Eurographics Association},
    ISSN = {2070-5786},
    ISBN = {978-3-03868-177-9},
    DOI = {10.2312/vcbm.20221190},
    pdf = {pdfs/EichnerMoerth_2022.pdf},
    thumbnails = {images/EichnerMoerth_2022.PNG},
    images = {images/EichnerMoerth_2022.PNG},
    }
    [PDF] [DOI] [Bibtex]
    @inproceedings {Kleinau2022Tornado,
    booktitle = {Eurographics Workshop on Visual Computing for Biology and Medicine},
    editor = {Renata G. Raidou and Björn Sommer and Torsten W. Kuhlen and Michael Krone and Thomas Schultz and Hsiang-Yun Wu},
    title = {{Is there a Tornado in Alex's Blood Flow? A Case Study for Narrative Medical Visualization}},
    project = {ttmedvis},
    author = {Kleinau, Anna and Stupak, Evgenia and Mörth, Eric and Garrison, Laura A. and Mittenentzwei, Sarah and Smit, Noeska N. and Lawonn, Kai and Bruckner, Stefan and Gutberlet, Matthias and Preim, Bernhard and Meuschke, Monique},
    year = {2022},
    abstract = {Narrative visualization advantageously combines storytelling with new media formats and techniques, like interactivity, to create improved learning experiences. In medicine, it has the potential to improve patient understanding of diagnostic procedures and treatment options, promote confidence, reduce anxiety, and support informed decision-making. However, limited scientific research has been conducted regarding the use of narrative visualization in medicine. To explore the value of narrative visualization in this domain, we introduce a data-driven story to inform a broad audience about the usage of measured blood flow data to diagnose and treat cardiovascular diseases. The focus of the story is on blood flow vortices in the aorta, with which imaging technique they are examined, and why they can be dangerous. In an interdisciplinary team, we define the main contents of the story and the resulting design questions. We sketch the iterative design process and implement the story based on two genres. In a between-subject study, we evaluate the suitability and understandability of the story and the influence of different navigation concepts on user experience. Finally, we discuss reusable concepts for further narrative medical visualization projects.},
    publisher = {The Eurographics Association},
    ISSN = {2070-5786},
    ISBN = {978-3-03868-177-9},
    DOI = {10.2312/vcbm.20221183},
    pdf = {pdfs/Kleinau_2022.pdf},
    thumbnails = {images/Kleinau_2022.PNG},
    images = {images/Kleinau_2022.PNG},
    }
    [PDF] [DOI] [Bibtex]
    @article{Meuschke2022narrative,
    title = {Narrative medical visualization to communicate disease data},
    author = {Meuschke, Monique and Garrison, Laura A. and Smit, Noeska N. and Bach, Benjamin and Mittenentzwei, Sarah and Wei{\ss}, Veronika and Bruckner, Stefan and Lawonn, Kai and Preim, Bernhard},
    year = 2022,
    journal = {Computers \& Graphics},
    volume = 107,
    pages = {144--157},
    doi = {10.1016/j.cag.2022.07.017},
    issn = {0097-8493},
    url = {https://www.sciencedirect.com/science/article/pii/S009784932200139X},
    abstract = {This paper explores narrative techniques combined with medical visualizations to tell data-driven stories about diseases for a general audience. The field of medical illustration uses narrative visualization through hand-crafted techniques to promote health literacy. However, data-driven narrative visualization has rarely been applied to medical data. We derived a template for creating stories about diseases and applied it to three selected diseases to demonstrate how narrative techniques could support visual communication and facilitate understanding of medical data. One of our main considerations is how interactive 3D anatomical models can be integrated into the story and whether this leads to compelling stories in which the users feel involved. A between-subject study with 90 participants suggests that the combination of a carefully designed narrative structure, the constant involvement of a specific patient, and high-quality visualizations combined with easy-to-use interactions is critical for an understandable story about diseases that would be remembered by participants.},
    pdf = {pdfs/Narrative_medical_MEUSCHKE_DOA18072022_AFV.pdf},
    thumbnails = {images/Meuschke2022narrative-thumb.png},
    images = {images/Meuschke2022narrative.png},
    project = {VIDI}
    }
    [PDF] [DOI] [Bibtex]
    @Article{Moerth2022ScrollyVis,
    author = {Mörth, Eric and Bruckner, Stefan and Smit, Noeska N.},
    title = {ScrollyVis: Interactive visual authoring of guided dynamic narratives for scientific scrollytelling},
    journal = {IEEE Transactions on Visualization and Computer Graphics},
    year = {2022},
    volume = {},
    abstract = {Visual stories are an effective and powerful tool to convey specific information to a diverse public. Scrollytelling is a recent visual storytelling technique extensively used on the web, where content appears or changes as users scroll up or down a page. By employing the familiar gesture of scrolling as its primary interaction mechanism, it provides users with a sense of control, exploration and discoverability while still offering a simple and intuitive interface. In this paper, we present a novel approach for authoring, editing, and presenting data-driven scientific narratives using scrollytelling. Our method flexibly integrates common sources such as images, text, and video, but also supports more specialized visualization techniques such as interactive maps as well as scalar field and mesh data visualizations. We show that scrolling navigation can be used to traverse dynamic narratives and demonstrate how it can be combined with interactive parameter exploration. The resulting system consists of an extensible web-based authoring tool capable of exporting stand-alone stories that can be hosted on any web server. We demonstrate the power and utility of our approach with case studies from several diverse scientific fields and with a user study including 12 participants of diverse professional backgrounds. Furthermore, an expert in creating interactive articles assessed the usefulness of our approach and the quality of the created stories.},
    project = {ttmedvis},
    pdf = {pdfs/Moerth_2022_ScrollyVis.pdf},
    thumbnails = {images/Moerth_2022_ScrollyVis.png},
    images = {images/Moerth_2022_ScrollyVis.png},
    pages={1-12},
    doi={10.1109/TVCG.2022.3205769},
    }
    [PDF] [DOI] [VID] [Bibtex]
    @article{Moerth2022ICEVis,
    title = {ICEVis: Interactive Clustering Exploration for tumor sub-region analysis in multiparametric cancer imaging},
    author = {Mörth, Eric and Eichner, Tanja and Haldorsen, Ingfrid S. and Bruckner, Stefan and Smit, Noeska N.},
    year = 2022,
    journal = {Proceedings of the International Symposium on Visual Information Communication and Interaction (VINCI'22)},
    volume = {15},
    pages = {5},
    doi = {10.1145/3554944.3554958},
    issn = {},
    url = {},
    abstract = {Tumor tissue characteristics derived from imaging data are gaining importance in clinical research. Tumor sub-regions may play a critical role in defining tumor types and may hold essential information about tumor aggressiveness. Depending on the tumor’s location within the body, such sub-regions can be easily identified and determined by physiology, but these sub-regions are not readily visible to others. Regions within a tumor are currently explored by comparing the image sequences and analyzing the tissue heterogeneity present. To improve the exploration of such tumor sub-regions, we propose a visual analytics tool called ICEVis. ICEVis supports the identification of tumor sub-regions and corresponding features combined with cluster visualizations highlighting cluster validity. It is often difficult to estimate the optimal number of clusters; we provide rich facilities to support this task, incorporating various statistical measures and interactive exploration of the results. We evaluated our tool with three clinical researchers to show the potential of our approach.
    },
    note = {Best Short Paper at VINCI 2022},
    images = "images/Moerth_2022_ICEVis.png",
    thumbnails = "images/Moerth_2022_ICEVis.png",
    pdf = {pdfs/Moerth_2022_ICEVis.pdf},
    vid = {vids/ICEVis.mp4},
    project = "ttmedvis",
    }
    [PDF] [Bibtex]
    @article{Kristiansen2022ContentDriven,
    title = {Content-Driven Layout for Visualization Design},
    author = {Kristiansen, Yngve and Garrison, Laura and Bruckner, Stefan},
    year = 2022,
    journal = {Proceedings of the International Symposium on Visual Information Communication and Interaction (to appear)},
    volume = {},
    pages = {},
    doi = {},
    issn = {},
    url = {},
    abstract = {Multi-view visualizations are typically presented in a grid layout with elements positioned according to their bounding rectangles. These rectangles often contain unused white space. In cases where Tufte’s Shrink Principle can be applied to reduce non-data-ink without impairing the communication of information, unused white space can be utilized for the placement of other elements. This is often done in manually “hand-crafted” layouts by designers. However, upon changes to individual elements, this design process has to be repeated. To reduce non-data-ink and repetitive manual design, we contribute a method for automatically turning a grid layout into a content-driven layout, where elements are positioned with respect to their contents. Existing approaches have explored the use of a force simulation in conjunction with proxy geometries to simplify collision handling for irregular shapes. Such customized force-directed layouts are usually unstable, and often require additional constraints to run properly. In addition, proxy geometries become less accurate and effective with more irregular shapes. To solve these shortcomings, we contribute an approach for identifying central elements in an original grid layout in order to set up corresponding attractive forces. Furthermore, we utilize an image-based approach for collision detection and avoidance that works accurately for highly irregular shapes. We demonstrate the utility of our approach with three case studies.},
    images = "images/Kristiansen-2022-LungsDt.PNG",
    thumbnails = "images/Kristiansen-2022-LungsDt.PNG",
    pdf = {pdfs/Kristiansen-2022-CDL.pdf},
    project = "MetaVis",
    }
    [DOI] [YT] [Bibtex]
    @article{Sugathan2022Longitudinal,
    title = {Longitudinal visualization for exploratory analysis of multiple sclerosis lesions},
    author = {Sugathan, Sherin and Bartsch, Hauke and Riemer, Frank and Gr{\"u}ner, Renate and Lawonn, Kai and Smit, Noeska},
    year = 2022,
    journal = {Computers \& Graphics},
    volume = 107,
    pages = {208--219},
    doi = {10.1016/j.cag.2022.07.023},
    issn = {0097-8493},
    url = {https://www.sciencedirect.com/science/article/pii/S0097849322001479},
    images = "images/Sugathan-2022-Longitudinal.PNG",
    thumbnails = "images/Sugathan-2022-Longitudinal.PNG",
    project = {ttmedvis},
    youtube = "https://youtu.be/uwcqSf1W-dc"
    }
    [DOI] [Bibtex]
    @article{VandenBossche2022Digital,
    title = {Digital body preservation: Technique and applications},
    author = {Vandenbossche, Vicky and Van de Velde, Joris and Avet, Stind and Willaert, Wouter and Soltvedt, Stian and Smit, Noeska and Audenaert, Emmanuel},
    year = 2022,
    journal = {Anatomical Sciences Education},
    volume = {15},
    number = {4},
    pages = {731--744},
    doi = {10.1002/ase.2199},
    url = {https://anatomypubs.onlinelibrary.wiley.com/doi/abs/10.1002/ase.2199},
    images = "images/VandenBossche-2022-Digital.PNG",
    thumbnails = "images/VandenBossche-2022-Digital.PNG",
    project = {ttmedvis}
    }
    [DOI] [Bibtex]
    @article{Wagner2022Interobserver,
    title = {Interobserver agreement and prognostic impact for {MRI}-based 2018 {FIGO} staging parameters in uterine cervical cancer},
    author = {Wagner-Larsen, Kari S and Lura, Nj{\aa}l and Salvesen, {\O}yvind and Halle, Mari Kylles{\o} and Forsse, David and Trovik, Jone and Smit, Noeska and Krakstad, Camilla and Haldorsen, Ingfrid S},
    year = 2022,
    journal = {European Radiology},
    publisher = {Springer},
    pages = {1--12},
    doi = {10.1007/s00330-022-08666-x},
    url = {https://link.springer.com/article/10.1007/s00330-022-08666-x},
    images = "images/Wagner-2022-Interobserver.PNG",
    thumbnails = "images/Wagner-2022-Interobserver.PNG",
    project = {ttmedvis}
    }
    [DOI] [Bibtex]
    @article{Hodneland2022Fully,
    title = {Fully Automatic Whole-Volume Tumor Segmentation in Cervical Cancer},
    author = {Hodneland, Erlend and Kaliyugarasan, Satheshkumar and Wagner-Larsen, Kari Str{\o}no and Lura, Nj{\aa}l and Andersen, Erling and Bartsch, Hauke and Smit, Noeska and Halle, Mari Kylles{\o} and Krakstad, Camilla and Lundervold, Alexander Selvikv{\aa}g and Haldorsen, Ingfrid S},
    year = 2022,
    journal = {Cancers},
    publisher = {MDPI},
    volume = 14,
    number = 10,
    pages = 2372,
    doi = {10.3390/cancers14102372},
    url = {https://pubmed.ncbi.nlm.nih.gov/35625977/},
    images = "images/Hodneland-2022-Fully.PNG",
    thumbnails = "images/Hodneland-2022-Fully.PNG",
    project = {ttmedvis}
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Garrison2022MolColor,
    author = "Laura A. Garrison and Stefan Bruckner",
    title = "Considering Best Practices in Color Palettes for Molecular Visualizations",
    journal = "Journal of Integrative Bioinformatics",
    year = "2022",
    abstract = "Biomedical illustration and visualization techniques provide a window into complex molecular worlds that are difficult to capture through experimental means alone. Biomedical illustrators frequently employ color to help tell a molecular story, e.g., to identify key molecules in a signaling pathway. Currently, color use for molecules is largely arbitrary and often chosen based on the client, cultural factors, or personal taste. The study of molecular dynamics is relatively young, and some stakeholders argue that color use guidelines would throttle the growth of the field. Instead, content authors have ample creative freedom to choose an aesthetic that, e.g., supports the story they want to tell. However, such creative freedom comes at a price. The color design process is challenging, particularly for those without a background in color theory. The result is a semantically inconsistent color space that reduces the interpretability and effectiveness of molecular visualizations as a whole. Our contribution in this paper is threefold. We first discuss some of the factors that contribute to this array of color palettes. Second, we provide a brief sampling of color palettes used in both industry and research sectors. Lastly, we suggest considerations for developing best practices around color palettes applied to molecular visualization.",
    images = "images/garrison-molecularcolor-full.png",
    thumbnails = "images/garrison-molecularcolor-thumb.png",
    pdf = "pdfs/garrison-molecularcolor.pdf",
    publisher = "De Gruyter",
    doi = "10.1515/jib-2022-0016",
    project = "VIDI"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Garrison2022PhysioSTAR,
    author = "Laura A. Garrison and Ivan Kolesar and Ivan Viola and Helwig Hauser and Stefan Bruckner",
    title = "Trends & Opportunities in Visualization for Physiology: A Multiscale Overview",
    journal = "Computer Graphics Forum",
    year = "2022",
    volume = "41",
    number = "3",
    publisher = "The Eurographics Association and John Wiley & Sons Ltd.",
    pages = "609-643",
    doi = "10.1111/cgf.14575",
    abstract = "Combining elements of biology, chemistry, physics, and medicine, the science of human physiology is complex and multifaceted. In this report, we offer a broad and multiscale perspective on key developments and challenges in visualization for physiology. Our literature search process combined standard methods with a state-of-the-art visual analysis search tool to identify surveys and representative individual approaches for physiology. Our resulting taxonomy sorts literature on two levels. The first level categorizes literature according to organizational complexity and ranges from molecule to organ. A second level identifies any of three high-level visualization tasks within a given work: exploration, analysis, and communication. The findings of this report may be used by visualization researchers to understand the overarching trends, challenges, and opportunities in visualization for physiology and to provide a foundation for discussion and future research directions in this area. ",
    images = "images/garrison-STAR-taxonomy.png",
    thumbnails = "images/garrison-STAR-thumb.png",
    pdf = "pdfs/Garrison_STAR_cameraready.pdf",
    publisher = "The Eurographics Association and John Wiley \& Sons Ltd.",
    project = "VIDI"
    }

2021

    [PDF] [YT] [Bibtex]
    @inproceedings{Rijken-2021-Illegible,
    title = {Illegible Semantics: Exploring the Design Space of Metal Logos},
    author = {Gerrit J. Rijken and Rene Cutura and Frank Heyen and Michael Sedlmair and Michael Correll and Jason Dykes and Noeska Smit},
    year = 2021,
    booktitle = {Proceedings of the {alt.VIS} workshop at {IEEE VIS}},
    eprint = {2109.01688},
    archiveprefix = {arXiv},
    primaryclass = {cs.HC},
    pdf = {pdfs/Rijken-2021-Illegible.pdf},
    thumbnails = {images/Rijken-2021-Illegible.png},
    images = {images/Rijken-2021-Illegible.png},
    abstract = {The logos of metal bands can be by turns gaudy, uncouth, or nearly illegible. Yet, these logos work: they communicate sophisticated notions of genre and emotional affect. In this paper we use the design considerations of metal logos to explore the space of ``illegible semantics'': the ways that text can communicate information at the cost of readability, which is not always the most important objective. In this work, drawing on formative visualization theory, professional design expertise, and empirical assessments of a corpus of metal band logos, we describe a design space of metal logos and present a tool through which logo characteristics can be explored through visualization. We investigate ways in which logo designers imbue their text with meaning and consider opportunities and implications for visualization more widely.},
    youtube = "https://youtu.be/BZOdIhU-mrA",
    }
    [PDF] [DOI] [YT] [Bibtex]
    @inproceedings{Smit-2021-DataKnitualization,
    title = {Data Knitualization: An Exploration of Knitting as a Visualization Medium},
    author = {Noeska Smit},
    year = 2021,
    booktitle = {Proceedings of the {alt.VIS} workshop at {IEEE VIS}},
    doi = {10.31219/osf.io/xahj9},
    pdf = {pdfs/Smit-2021-DataKnitualization.pdf},
    thumbnails = {images/Smit-2021-DataKnitualization.jpg},
    images = {images/Smit-2021-DataKnitualization.jpg},
    abstract = {While data visualization can be achieved in many media, from hand-drawn on paper to 3D printed via data physicalization, the ancient craft of knitting is not often considered as a visualization medium. With this work, I explore hand knitting as a potential data visualization medium based on my personal experience as a knitter and visualization researcher.},
    youtube = "https://youtu.be/K3D-M7jzbMs",
    }
    [PDF] [DOI] [Bibtex]
    @article{Gillmann-2021-Viewpoints,
    author = {C. Gillmann and N. N. Smit and E. Gröller and B. Preim and A. Vilanova and T. Wischgoll},
    journal = {IEEE Computer Graphics and Applications},
    title = {Ten Open Challenges in Medical Visualization},
    year = {2021},
    volume = {41},
    number = {05},
    issn = {1558-1756},
    pages = {7-15},
    keywords = {deep learning;uncertainty;data visualization;medical services;standardization;artificial intelligence;biomedical imaging},
    doi = {10.1109/MCG.2021.3094858},
    publisher = {IEEE Computer Society},
    address = {Los Alamitos, CA, USA},
    pdf = {pdfs/Gillmann-2021-Viewpoints.pdf},
    thumbnails = {images/Gillmann-2021-Viewpoints.png},
    images = {images/Gillmann-2021-Viewpoints.png},
    project = {ttmedvis},
    abstract = {The medical domain has been an inspiring application area in visualization research for many years already, but many open challenges remain. The driving forces of medical visualization research have been strengthened by novel developments, for example, in deep learning, the advent of affordable VR technology, and the need to provide medical visualizations for broader audiences. At IEEE VIS 2020, we hosted an Application Spotlight session to highlight recent medical visualization research topics. With this article, we provide the visualization community with ten such open challenges, primarily focused on challenges related to the visualization of medical imaging data. We first describe the unique nature of medical data in terms of data preparation, access, and standardization. Subsequently, we cover open visualization research challenges related to uncertainty, multimodal and multiscale approaches, and evaluation. Finally, we emphasize challenges related to users focusing on explainable AI, immersive visualization, P4 medicine, and narrative visualization.}
    }
    [PDF] [DOI] [Bibtex]
    @inproceedings{Hushagen-2021-VCBM,
    title = {The Role of Depth Perception in {XR} from a Neuroscience Perspective: A Primer and Survey},
    author = {Hushagen, Vetle and Tresselt, Gustav C. and Smit, Noeska N. and Specht, Karsten},
    year = 2021,
    booktitle = {Eurographics Workshop on Visual Computing for Biology and Medicine},
    publisher = {The Eurographics Association},
    doi = {10.2312/vcbm.20211344},
    isbn = {978-3-03868-140-3},
    issn = {2070-5786},
    url = {https://diglib.eg.org/handle/10.2312/vcbm20211344},
    pdf = {pdfs/Hushagen-2021-VCBM.pdf},
    thumbnails = {images/Hushagen-2021-VCBM.png},
    images = {images/Hushagen-2021-VCBM.png},
    project = {ttmedvis},
    abstract = {Augmented and virtual reality (XR) are potentially powerful tools for enhancing the efficiency of interactive visualization of complex data in biology and medicine. The benefits of visualization of digital objects in XR mainly arise from enhanced depth perception due to the stereoscopic nature of XR head mounted devices. With the added depth dimension, XR is in a prime position to convey complex information and support tasks where 3D information is important. In order to inform the development of novel XR applications in the biology and medicine domain, we present a survey which reviews the neuroscientific basis underlying the immersive features of XR. To make this literature more accessible to the visualization community, we first describe the basics of the visual system, highlighting how visual features are combined into objects and processed in higher cortical areas with a special focus on depth vision. Based on state-of-the-art findings in neuroscience literature related to depth perception, we provide several recommendations for developers and designers. Our aim is to aid development of XR applications and strengthen development of tools aimed at molecular visualization, medical education, and surgery, as well as inspire new application areas.}
    }
    [PDF] [DOI] [Bibtex]
    @inproceedings{Sugathan-2021-VCBM,
    title = {Interactive Multimodal Imaging Visualization for Multiple Sclerosis Lesion Analysis},
    author = {Sugathan, Sherin and Bartsch, Hauke and Riemer, Frank and Gr{\"u}ner, Renate and Lawonn, Kai and Smit, Noeska N},
    year = 2021,
    booktitle = {Eurographics Workshop on Visual Computing for Biology and Medicine},
    publisher = {The Eurographics Association},
    doi = {10.2312/vcbm.20211346},
    isbn = {978-3-03868-140-3},
    issn = {2070-5786},
    url = {https://diglib.eg.org/handle/10.2312/vcbm20211346},
    pdf = {pdfs/Sugathan-2021-VCBM.pdf},
    thumbnails = {images/Sugathan-2021-VCBM.png},
    images = {images/Sugathan-2021-VCBM.png},
    project = {ttmedvis},
    abstract = {Multiple Sclerosis (MS) is a brain disease that is diagnosed and monitored extensively through MRI scans. One of the criteria is the appearance of so-called brain lesions. The lesions show up on MRI scans as regions with elevated or reduced contrast compared to the surrounding healthy tissue. Understanding the complex interplay of contrast, location and shape in images from multiple modalities from 2D MRI slices is challenging. Advanced visualization of appearance- and location-related features of lesions would help researchers in defining better disease characterization through MS research. Since a permanent cure is not possible in MS and medication-based disease modification is a common treatment path, providing better visualizations would strengthen research which investigates the effect of white matter lesions. Here we present an advanced visualization solution that supports analysis from multiple imaging modalities acquired in a clinical routine examination. The solution holds potential for enabling researchers to have a more intuitive perception of lesion features. As an example for enhancing the analytic possibilities, we demonstrate the benefits of lesion projection using both Diffusion Tensor Imaging (DTI) and gradient-based techniques. This approach enables users to assess brain structures across individuals as the atlas-based analysis provides 3D anchoring and labeling of regions across a series of brain scans from the same participant and across different participants. The projections on the brain surface also enable researchers to conduct detailed studies on the relationship between cognitive disabilities and location of lesions. This allows researchers to correlate lesions to Brodmann areas and related brain functions. We realize the solutions in a prototype application that supports both DTI and structural data. A qualitative evaluation demonstrates that our approach supports MS researchers by providing new opportunities for MS research.}
    }
    [PDF] [DOI] [Bibtex]
    @article{Beiglboeck2021,
    doi = {10.1007/s11695-021-05763-6},
    year = {2021},
    month = nov,
    publisher = {Springer Science and Business Media {LLC}},
    author = {Hannes Beiglb\"{o}ck and Eric M\"{o}rth and Berthold Reichardt and Tanja Stamm and Bianca Itariu and J\"{u}rgen Harreiter and Miriam Hufgard-Leitner and Paul Fellinger and Jakob Eichelter and Gerhard Prager and Alexander Kautzky and Alexandra Kautzky-Willer and Peter Wolf and Michael Krebs},
    title = {Sex-Specific Differences in Mortality of Patients with a History of Bariatric Surgery: a Nation-Wide Population-Based Study},
    journal = {Obesity Surgery},
    abstract = {Bariatric surgery reduces mortality in patients with severe obesity and is predominantly performed in women.
    Therefore, an analysis of sex-specific differences after bariatric surgery in a population-based dataset from Austria was
    performed. The focus was on deceased patients after bariatric surgery.
    The Austrian health insurance funds cover about 98% of the Austrian population. Medical health
    claims data of all Austrians who underwent bariatric surgery from 01/2010 to 12/2018 were analyzed. In total, 19,901 patients
    with 107,806 observed years postoperative were eligible for this analysis. Comorbidities based on International Classification
    of Diseases (ICD)-codes and drug intake documented by Anatomical Therapeutical Chemical (ATC)-codes were analyzed
    in patients deceased and grouped according to clinically relevant obesity-associated comorbidities: diabetes mellitus (DM),
    cardiovascular disease (CV), psychiatric disorder (PSY), and malignancy (M).
    In total, 367 deaths were observed (1.8%) within the observation period from 01/2010 to 04/2020. The overall
    mortality rate was 0.34% per year of observation and significantly higher in men compared to women (0.64 vs. 0.24%;
    p < 0.001 (Chi-squared)). Moreover, the 30-day mortality was 0.19% and sixfold higher in men compared to women (0.48
    vs. 0.08%; p < 0.001). CV (82%) and PSY (55%) were the most common comorbidities in deceased patients with no sex-specific
    differences. Diabetes (38%) was more common in men (43 vs. 33%; p = 0.034), whereas malignant diseases (36%)
    were more frequent in women (30 vs. 41%; p = 0.025).
    After bariatric surgery, short-term mortality as well as long-term mortality was higher in men compared to
    women. In deceased patients, diabetes was more common in men, whereas malignant diseases were more common in women.},
    pdf = {pdfs/Beiglboeck2021_Article_Sex-SpecificDifferencesInMorta.pdf},
    thumbnails = {images/2021-Moerth-Diabetes-thumb.png},
    images = {images/2021-Moerth-Diabetes.png},
    keywords = {Bariatric surgery, Sex differences, Mortality, Population-based registry analysis, Comorbidities, Healthcare, research},
    }
    [PDF] [DOI] [VID] [Bibtex]
    @Article{Kristiansen-2021-SSG,
    author = {Kristiansen, Y. S. and Garrison, L. and Bruckner, S.},
    title = {Semantic Snapping for Guided Multi-View Visualization Design},
    journal = {IEEE Transactions on Visualization and Computer Graphics},
    year = {2021},
    volume = {},
    pages = {},
    abstract = {Visual information displays are typically composed of multiple visualizations that are used to facilitate an understanding of the underlying data. A common example are dashboards, which are frequently used in domains such as finance, process monitoring and business intelligence. However, users may not be aware of existing guidelines and lack expert design knowledge when composing such multi-view visualizations. In this paper, we present semantic snapping, an approach to help non-expert users design effective multi-view visualizations from sets of pre-existing views. When a particular view is placed on a canvas, it is “aligned” with the remaining views–not with respect to its geometric layout, but based on aspects of the visual encoding itself, such as how data dimensions are mapped to channels. Our method uses an on-the-fly procedure to detect and suggest resolutions for conflicting, misleading, or ambiguous designs, as well as to provide suggestions for alternative presentations. With this approach, users can be guided to avoid common pitfalls encountered when composing visualizations. Our provided examples and case studies demonstrate the usefulness and validity of our approach.},
    note = {Accepted for publication, to be presented at IEEE VIS 2021},
    project = {MetaVis,VIDI},
    pdf = {pdfs/Kristiansen-2021-SSG.pdf},
    vid = {vids/Kristiansen-2021-SSG.mp4},
    thumbnails = {images/Kristiansen-2021-SSG.png},
    images = {images/Kristiansen-2021-SSG.jpg},
    keywords = {tabular data, guidelines, mixed initiative human-machine analysis, coordinated and multiple views},
    doi = {10.1109/TVCG.2021.3114860},
    }
    [PDF] [Bibtex]
    @InProceedings{Garrison-2021-EPP,
    author = {Laura Garrison and Monique Meuschke and Jennifer Fairman and Noeska Smit and Bernhard Preim and Stefan Bruckner},
    title = {An Exploration of Practice and Preferences for the Visual Communication of Biomedical Processes},
    booktitle = {Proceedings of VCBM},
    year = {2021},
    pages = {},
    doi = {},
    abstract = {The visual communication of biomedical processes draws from diverse techniques in both visualization and biomedical illustration. However, matching these techniques to their intended audience often relies on practice-based heuristics or narrow-scope evaluations. We present an exploratory study of the criteria that audiences use when evaluating a biomedical process visualization targeted for communication. Designed over a series of expert interviews and focus groups, our study focuses on common communication scenarios of five well-known biomedical processes and their standard visual representations. We framed these scenarios in a survey with participant expertise spanning from minimal to expert knowledge of a given topic. Our results show frequent overlap in abstraction preferences between expert and non-expert audiences, with similar prioritization of clarity and the ability of an asset to meet a given communication objective. We also found that some illustrative conventions are not as clear as we thought, e.g., glows have broadly ambiguous meaning, while other approaches were unexpectedly preferred, e.g., biomedical illustrations in place of data-driven visualizations. Our findings suggest numerous opportunities for the continued convergence of visualization and biomedical illustration techniques for targeted visualization design.
    },
    note = {Best Paper Honorable Mention at VCBM 2021. Accepted for publication, to be presented at VCBM 2021},
    project = {VIDI,ttmedvis},
    pdf = {pdfs/Garrison-2021-EPP.pdf},
    thumbnails = {images/Garrison-2021-EPP.png},
    images = {images/Garrison-2021-EPP.jpg},
    url = {https://github.com/lauragarrison87/Biomedical_Process_Vis},
    keywords = {biomedical illustration, visual communication, survey},
    }
    [PDF] [DOI] [Bibtex]
    @article{brushingComparison,
    author={Fan, Chaoran and Hauser, Helwig},
    journal={IEEE Computer Graphics and Applications},
    title={On sketch-based selections from scatterplots using KDE, compared to Mahalanobis and CNN brushing},
    year={2021},
    volume={},
    number={},
    pages={1-13},
    doi={10.1109/MCG.2021.3097889},
    abstract = {"Fast and accurate brushing is crucial in visual data exploration and sketch-based solutions are successful methods. In this paper, we detail a solution, based on kernel density estimation (KDE), which computes a data subset selection in a scatterplot from a simple click-and-drag interaction. We explain, how this technique relates to two alternative approaches, i.e., Mahalanobis brushing and CNN brushing. To study this relation, we conducted two user studies and present both a quantitative three-fold comparison as well as additional details about the prevalence of all possible cases in that each technique succeeds/fails. With this, we also provide a comparison between empirical modeling and implicit modeling by deep learning in terms of accuracy, efficiency, generality and interpretability."},
    pdf = "pdfs/Fan-2021-brushingComparison.pdf",
    images = "images/Fan-2021-brushingComparison.png",
    thumbnails = "images/Fan-2021-brushingComparison.png",
    }
    [DOI] [Bibtex]
    @incollection{Smit-2021-COMULIS,
    author = {Smit, Noeska and Bühler, Katja and Vilanova, Anna and Falk, Martin},
    title = {Visualisation for correlative multimodal imaging},
    booktitle = {Imaging Modalities for Biological and Preclinical Research: A Compendium, Volume 2},
    publisher = {IOP Publishing},
    year = {2021},
    series = {2053-2563},
    type = {Book Chapter},
    pages = {III.4.e-1 to III.4.e-10},
    url = {http://dx.doi.org/10.1088/978-0-7503-3747-2ch28},
    doi = {10.1088/978-0-7503-3747-2ch28},
    isbn = {978-0-7503-3747-2},
    thumbnails = "images/Smit-2021-COMULIS.PNG",
    images = "images/Smit-2021-COMULIS.PNG",
    project = "ttmedvis",
    abstract = {The field of visualisation deals with finding appropriate visual representations of data so people can effectively carry out tasks related to data exploration, analysis, or presentation using the power of the human visual perceptual system. In the context of biomedical imaging data, interactive visualisation techniques can be employed, for example, to visually explore data, as image processing quality assurance, or in publications to communicate findings. When dealing with correlative imaging, challenges arise in how to effectively convey the information from multiple sources. In particular, the information density leads to the need for a critical reflection on the visual design with respect to which parts of the data are important to show and at what level of importance they should be visualised. In this chapter, we describe several approaches to interactive imaging data visualisation in general, highlight several strategies for visualising correlative multimodal imaging data, and provide examples and practical recommendations.}
    }
    [PDF] [DOI] [VID] [YT] [Bibtex]
    @article{Trautner-2021-LWI,
    author = {Trautner, Thomas and Bruckner, Stefan},
    title = {Line Weaver: Importance-Driven Order Enhanced Rendering of Dense Line Charts},
    journal = {Computer Graphics Forum},
    volume = {40},
    number = {3},
    pages = {399--410},
    keywords = {information visualization, visualization techniques, line charts},
    doi = {10.1111/cgf.14316},
    abstract = {Line charts are an effective and widely used technique for visualizing series of ordered two-dimensional data points. The relationship between consecutive points is indicated by connecting line segments, revealing potential trends or clusters in the underlying data. However, when dealing with an increasing number of lines, the render order substantially influences the resulting visualization. Rendering transparent lines can help but unfortunately the blending order is currently either ignored or naively used, for example, assuming it is implicitly given by the order in which the data was saved in a file. Due to the noncommutativity of classic alpha blending, this results in contradicting visualizations of the same underlying data set, so-called "hallucinators". In this paper, we therefore present line weaver, a novel visualization technique for dense line charts. Using an importance function, we developed an approach that correctly considers the blending order independently of the render order and without any prior sorting of the data. We allow for importance functions which are either explicitly given or implicitly derived from the geometric properties of the data if no external data is available. The importance can then be applied globally to entire lines, or locally per pixel which simultaneously supports various types of user interaction. Finally, we discuss the potential of our contribution based on different synthetic and real-world data sets where classic or naive approaches would fail.},
    year = {2021},
    pdf = "pdfs/Trautner-2021-LWI.pdf",
    thumbnails = "images/Trautner-2021-LWI-thumb.png",
    images = "images/Trautner-2021-LWI-thumb.png",
    vid = "vids/Trautner_2021_LineWeaver_video.mp4",
    youtube = "https://youtu.be/-hLF5XSR_ws",
    project = "MetaVis",
    git = "https://github.com/TTrautner/LineWeaver"
    }
    [PDF] [VID] [Bibtex]
    @article{Diehl-2021-HTC,
    author = {Alexandra Diehl and Rodrigo Pelorosso and Juan Ruiz and Renato Pajarola and Meister Eduard Gr\"{o}ller and Stefan Bruckner},
    title = {Hornero: Thunderstorms Characterization using Visual Analytics},
    journal = {Computer Graphics Forum},
    volume = {40},
    number = {3},
    pages = {},
    keywords = {visual analytics, weather forecasting, nowcasting},
    doi = {},
    abstract = {Analyzing the evolution of thunderstorms is critical in determining the potential for the development of severe weather events. Existing visualization systems for short-term weather forecasting (nowcasting) allow for basic analysis and prediction of storm developments. However, they lack advanced visual features for efficient decision-making. We developed a visual analytics tool for the detection of hazardous thunderstorms and their characterization, using a visual design centered on a reformulated expert task workflow that includes visual features to overview storms and quickly identify high-impact weather events, a novel storm graph visualization to inspect and analyze the storm structure, as well as a set of interactive views for efficient identification of similar storm cells (known as analogs) in historical data and their use for nowcasting. Our tool was designed with and evaluated by meteorologists and expert forecasters working in short-term operational weather forecasting of severe weather events. Results show that our solution suits the forecasters’ workflow. Our visual design is expressive, easy to use, and effective for prompt analysis and quick decision-making in the context of short-range operational weather forecasting.},
    year = {2021},
    pdf = "pdfs/Diehl-2021-HTC.pdf",
    thumbnails = "images/Diehl-2021-HTC.png",
    images = "images/Diehl-2021-HTC.jpg",
    vid = "vids/Diehl-2021-HTC.mp4",
    project = "MetaVis"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Garrison-2021-DimLift,
    author = {Garrison, Laura and M\"{u}ller, Juliane and Schreiber, Stefanie and Oeltze-Jafra, Steffen and Hauser, Helwig and Bruckner, Stefan},
    title = {DimLift: Interactive Hierarchical Data Exploration through Dimensional Bundling},
    journal={IEEE Transactions on Visualization and Computer Graphics},
    year = {2021},
    abstract = {The identification of interesting patterns and relationships is essential to exploratory data analysis. This becomes increasingly difficult in high dimensional datasets. While dimensionality reduction techniques can be utilized to reduce the analysis space, these may unintentionally bury key dimensions within a larger grouping and obfuscate meaningful patterns. With this work we introduce DimLift, a novel visual analysis method for creating and interacting with dimensional bundles. Generated through an iterative dimensionality reduction or user-driven approach, dimensional bundles are expressive groups of dimensions that contribute similarly to the variance of a dataset. Interactive exploration and reconstruction methods via a layered parallel coordinates plot allow users to lift interesting and subtle relationships to the surface, even in complex scenarios of missing and mixed data types. We exemplify the power of this technique in an expert case study on clinical cohort data alongside two additional case examples from nutrition and ecology.},
    volume = {27},
    number = {6},
    pages = {2908--2922},
    pdf = {pdfs/garrison-2021-dimlift.pdf},
    images = {images/garrison_dimlift.jpg},
    thumbnails = {images/garrison_dimlift_thumb.jpg},
    youtube = {https://youtu.be/JSZuhnDyugA},
    doi = {10.1109/TVCG.2021.3057519},
    git = {https://github.com/lauragarrison87/DimLift},
    project = {VIDI},
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Mueller-2021-IDA,
    author = {M\"{u}ller, Juliane and Garrison, Laura and Ulbrich, Philipp and Schreiber, Stefanie and Bruckner, Stefan and Hauser, Helwig and Oeltze-Jafra, Steffen},
    title = {Integrated Dual Analysis of Quantitative and Qualitative High-Dimensional Data},
    journal={IEEE Transactions on Visualization and Computer Graphics},
    year = {2021},
    abstract = {The Dual Analysis framework is a powerful enabling technology for the exploration of high dimensional quantitative data by treating data dimensions as first-class objects that can be explored in tandem with data values. In this work, we extend the Dual Analysis framework through the joint treatment of quantitative (numerical) and qualitative (categorical) dimensions. Computing common measures for all dimensions allows us to visualize both quantitative and qualitative dimensions in the same view. This enables a natural joint treatment of mixed data during interactive visual exploration and analysis. Several measures of variation for nominal qualitative data can also be applied to ordinal qualitative and quantitative data. For example, instead of measuring variability from a mean or median, other measures assess inter-data variation or average variation from a mode. In this work, we demonstrate how these measures can be integrated into the Dual Analysis framework to explore and generate hypotheses about high-dimensional mixed data. A medical case study using clinical routine data of patients suffering from Cerebral Small Vessel Disease (CSVD), conducted with a senior neurologist and a medical student, shows that a joint Dual Analysis approach for quantitative and qualitative data can rapidly lead to new insights based on which new hypotheses may be generated.},
    volume = {27},
    number = {6},
    pages = {2953--2966},
    pdf = {pdfs/Mueller_2020_IDA.pdf},
    images = {images/Mueller_2020_IDA.jpg},
    thumbnails = {images/Mueller_2020_IDA.png},
    doi = {10.1109/TVCG.2021.3056424},
    git = {https://github.com/JulianeMu/IntegratedDualAnalysisAproach_MDA},
    project = {VIDI},
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE{Palenik-2020-IsoTrotter,
    author={P\'{a}lenik, Juraj and Spengler, Thomas and Hauser, Helwig},
    journal={IEEE Transactions on Visualization and Computer Graphics},
    title={{IsoTrotter: Visually Guided Empirical Modelling of Atmospheric Convection}},
    abstract={Empirical models, fitted to data from observations, are often used in natural sciences to describe physical behaviour and support discoveries. However, with more complex models, the regression of parameters quickly becomes insufficient, requiring a visual parameter space analysis to understand and optimize the models. In this work, we present a design study for building a model describing atmospheric convection. We present a mixed-initiative approach to visually guided modelling, integrating an interactive visual parameter space analysis with partial automatic parameter optimization. Our approach includes a new, semi-automatic technique called IsoTrotting, where we optimize the procedure by navigating along isocontours of the model. We evaluate the model with unique observational data of atmospheric convection based on flight trajectories of paragliders.},
    year={2021},
    volume={27},
    number={2},
    pages={775-784},
    doi={10.1109/TVCG.2020.3030389},
    pdf={pdfs/2020-10-20-Palenik-IsoTrotter.pdf},
    images={images/IsoTrotter2020.png},
    thumbnails={images/IsoTrotter2020.png}
    }
    [PDF] [DOI] [Bibtex]
    @article{bolte2020splitstreams,
    author= {Bolte, Fabian and Nourani, Mahsan and Ragan, Eric and Bruckner, Stefan},
    journal= {IEEE Transactions on Visualization and Computer Graphics},
    title= {SplitStreams: A Visual Metaphor for Evolving Hierarchies},
    year= {2021},
    keywords= {Information Visualization, Trees, Data Structures and Data Types, Visualization Techniques and Methodologies},
    doi= {10.1109/TVCG.2020.2973564},
    url= {https://arxiv.org/pdf/2002.03891.pdf},
    volume = {27},
    number = {8},
    abstract= {The visualization of hierarchically structured data over time is an ongoing challenge and several approaches exist trying to solve it. Techniques such as animated or juxtaposed tree visualizations are not capable of providing a good overview of the time series and lack expressiveness in conveying changes over time. Nested streamgraphs provide a better understanding of the data evolution, but lack the clear outline of hierarchical structures at a given timestep. Furthermore, these approaches are often limited to static hierarchies or exclude complex hierarchical changes in the data, limiting their use cases. We propose a novel visual metaphor capable of providing a static overview of all hierarchical changes over time, as well as clearly outlining the hierarchical structure at each individual time step. Our method allows for smooth transitions between tree maps and nested streamgraphs, enabling the exploration of the trade-off between dynamic behavior and hierarchical structure. As our technique handles topological changes of all types, it is suitable for a wide range of applications. We demonstrate the utility of our method on several use cases, evaluate it with a user study, and provide its full source code.},
    pdf= {pdfs/Bolte-2020-SplitStreams.pdf},
    images= {images/Bolte-2020-SplitStreams.png},
    thumbnails= {images/Bolte-2020-SplitStreams_thumb.png},
    project = "MetaVis",
    git = "https://github.com/cadanox/SplitStreams"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @article{bolte2019visavis,
    author= {Bolte, Fabian and Bruckner, Stefan},
    journal= {IEEE Transactions on Visualization and Computer Graphics},
    title= {Vis-a-Vis: Visual Exploration of Visualization Source Code Evolution},
    year= {2021},
    keywords= {Visualization System and Toolkit Design;User Interfaces;Integrating Spatial and Non-Spatial Data Visualization;Software Visualization},
    doi= {10.1109/TVCG.2019.2963651},
    issn= {2160-9306},
    url= {https://arxiv.org/pdf/2001.02092.pdf},
    abstract= {Developing an algorithm for a visualization prototype often involves the direct comparison of different development stages and design decisions, and even minor modifications may dramatically affect the results. While existing development tools provide visualizations for gaining general insight into performance and structural aspects of the source code, they neglect the central importance of result images unique to graphical algorithms. In this paper, we present a novel approach that enables visualization programmers to simultaneously explore the evolution of their algorithm during the development phase together with its corresponding visual outcomes by providing an automatically updating meta visualization. Our interactive system allows for the direct comparison of all development states on both the visual and the source code level, by providing easy to use navigation and comparison tools. The on-the-fly construction of difference images, source code differences, and a visual representation of the source code structure further enhance the user's insight into the states' interconnected changes over time. Our solution is accessible via a web-based interface that provides GPU-accelerated live execution of C++ and GLSL code, as well as supporting a domain-specific programming language for scientific visualization.},
    pdf= {pdfs/Bolte-2019-Visavis.pdf},
    images= {images/Bolte-2019-Visavis.png},
    thumbnails= {images/Bolte-2019-Visavis_thumb.png},
    youtube= {https://www.youtube.com/watch?v=5XO6BU4j1KQ},
    volume = {27},
    number = {7},
    pages = {3153--3167},
    project = "MetaVis"
    }

2020

    [PDF] [DOI] [Bibtex]
    @article{sketchingQuery,
    author={Fan, Chaoran and Matkovic, Kresimir and Hauser, Helwig},
    journal={IEEE Transactions on Visualization and Computer Graphics},
    title={Sketch-based fast and accurate querying of time series using parameter-sharing LSTM networks},
    year={2020},
    volume={},
    number={},
    pages={1-12},
    doi={10.1109/TVCG.2020.3002950},
    abstract = {"Sketching is one common approach to query time series data for patterns of interest. Most existing solutions for matching the data with the interaction are based on an empirically modeled similarity function between the user's sketch and the time series data with limited efficiency and accuracy. In this paper, we introduce a machine learning based solution for fast and accurate querying of time series data based on a swift sketching interaction. We build on existing LSTM technology (long short-term memory) to encode both the sketch and the time series data in a network with shared parameters. We use data from a user study to let the network learn a proper similarity function. We focus our approach on perceived similarities and achieve that the learned model also includes a user-side aspect. To the best of our knowledge, this is the first data-driven solution for querying time series data in visual analytics. Besides evaluating the accuracy and efficiency directly in a quantitative way, we also compare our solution to the recently published Qetch algorithm as well as the commonly used dynamic time warping (DTW) algorithm.."},
    pdf = "pdfs/Fan-2020-sketchingQuery.pdf",
    images = "images/Fan-2020-sketchingQuery.png",
    thumbnails = "images/Fan-2020-sketchingQuery.png",
    }
    [PDF] [DOI] [Bibtex]
    @article{Garrison-2020-IVE,
    author = {Garrison, Laura and Va\v{s}\'{i}\v{c}ek, Jakub and Craven, Alex R. and Gr\"{u}ner, Renate and Smit, Noeska and Bruckner, Stefan},
    title = {Interactive Visual Exploration of Metabolite Ratios in MR Spectroscopy Studies},
    journal = {Computers \& Graphics},
    volume = {92},
    pages = {1--12},
    keywords = {medical visualization, magnetic resonance spectroscopy data, information visualization, user-centered design},
    doi = {10.1016/j.cag.2020.08.001},
    abstract = {Magnetic resonance spectroscopy (MRS) is an advanced biochemical technique used to identify metabolic compounds in living tissue. While its sensitivity and specificity to chemical imbalances render it a valuable tool in clinical assessment, the results from this modality are abstract and difficult to interpret. With this design study, we characterized and explored the tasks and requirements for evaluating these data from the perspective of an MRS research specialist. Our resulting tool, SpectraMosaic, links with upstream spectroscopy quantification software to provide a means for precise interactive visual analysis of metabolites with both single- and multi-peak spectral signatures. Using a layered visual approach, SpectraMosaic allows researchers to analyze any permutation of metabolites in ratio form for an entire cohort, or by sample region, individual, acquisition date, or brain activity status at the time of acquisition. A case study with three MRS researchers demonstrates the utility of our approach in rapid and iterative spectral data analysis.},
    year = {2020},
    pdf = "pdfs/Garrison-2020-IVE.pdf",
    thumbnails = "images/Garrison-2020-IVE.png",
    images = "images/Garrison-2020-IVE.jpg",
    project = "VIDI",
    git = "https://github.com/mmiv-center/spectramosaic-public",
    }
    [PDF] [DOI] [Bibtex]
    @article{Kristiansen-2020-VIV,
    author = {Yngve Sekse Kristiansen and Stefan Bruckner},
    title = {Visception: An Interactive Visual Framework for Nested Visualization Design},
    journal = {Computers \& Graphics},
    volume = {92},
    pages = {13--27},
    keywords = {information visualization, nested visualizations, nesting},
    doi = {10.1016/j.cag.2020.08.007},
    abstract = {Nesting is the embedding of charts into the marks of another chart. Related to principles such as Tufte’s rule of utilizing micro/macro readings, nested visualizations have been employed to increase information density, providing compact representations of multi-dimensional and multi-typed data entities. Visual authoring tools are becoming increasingly prevalent, as they make visualization technology accessible to non-expert users such as data journalists, but existing frameworks provide no or only very limited functionality related to the creation of nested visualizations. In this paper, we present an interactive visual approach for the flexible generation of nested multilayer visualizations. Based on a hierarchical representation of nesting relationships coupled with a highly customizable mechanism for specifying data mappings, we contribute a flexible framework that enables defining and editing data-driven multi-level visualizations. As a demonstration of the viability of our framework, we contribute a visual builder for exploring, customizing and switching between different designs, along with example visualizations to demonstrate the range of expression. The resulting system allows for the generation of complex nested charts with a high degree of flexibility and fluidity using a drag and drop interface.},
    year = {2020},
    pdf = "pdfs/Kristiansen-2020-VIV.pdf",
    thumbnails = "images/Kristiansen-2020-VIV.png",
    images = "images/Kristiansen-2020-VIV.jpg",
    project = "MetaVis"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @article{RadEx,
    author = {M\"{o}rth, E. and Wagner-Larsen, K. and Hodneland, E. and Krakstad, C. and Haldorsen, I. S. and Bruckner, S. and Smit, N. N.},
    title = {RadEx: Integrated Visual Exploration of Multiparametric Studies for Radiomic Tumor Profiling},
    journal = {Computer Graphics Forum},
    volume = {39},
    number = {7},
    year = {2020},
    pages = {611--622},
    abstract = {Better understanding of the complex processes driving tumor growth and metastases is critical for developing targeted treatment strategies in cancer. Radiomics extracts large amounts of features from medical images which enables radiomic tumor profiling in combination with clinical markers. However, analyzing complex imaging data in combination with clinical data is not trivial and supporting tools aiding in these exploratory analyses are presently missing. In this paper, we present an approach that aims to enable the analysis of multiparametric medical imaging data in combination with numerical, ordinal, and categorical clinical parameters to validate established and unravel novel biomarkers. We propose a hybrid approach where dimensionality reduction to a single axis is combined with multiple linked views allowing clinical experts to formulate hypotheses based on all available imaging data and clinical parameters. This may help to reveal novel tumor characteristics in relation to molecular targets for treatment, thus providing better tools for enabling more personalized targeted treatment strategies. To confirm the utility of our approach, we closely collaborate with experts from the field of gynecological cancer imaging and conducted an evaluation with six experts in this field.},
    pdf = "pdfs/Moerth-2020-RadEx.pdf",
    images = "images/Moerth-2020-RadEx.jpg",
    youtube = "https://youtu.be/zwtDzwwX790",
    thumbnails = "images/Moerth-2020-RadEx-thumb.jpg",
    project = "ttmedvis",
    doi = {10.1111/cgf.14172}
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS{Moerth-2020-CGI,
    author = "M\"{o}rth, E. and Haldorsen, I.S. and Bruckner, S. and Smit, N.N.",
    title = "ParaGlyder: Probe-driven Interactive Visual Analysis for Multiparametric Medical Imaging Data",
    booktitle = "Proceedings of Computer Graphics International",
    pages = "351--363",
    year = "2020",
    abstract = "Multiparametric medical imaging describes approaches that include multiple imaging sequences acquired within the same imaging examination, as opposed to one single imaging sequence or imaging from multiple imaging modalities. Multiparametric imaging in cancer has been shown to be useful for tumor detection and may also depict functional tumor characteristics relevant for clinical phenotypes. However, when confronted with datasets consisting of multiple values per voxel, traditional reading of the imaging series fails to capture complicated patterns. Those patterns of potentially important imaging properties of the parameter space may be critical for the analysis. Standard approaches, such as transfer functions and juxtapositioned visualizations, fail to convey the shape of the multiparametric parameter distribution in sufficient detail. For these reasons, in this paper we present an approach that aims to enable the exploration and analysis of such multiparametric studies using an interactive visual analysis application to remedy the trade-offs between details in the value domain and in spatial resolution. Interactive probing within or across subjects allows for a digital biopsy that is able to uncover multiparametric tissue properties. This may aid in the discrimination between healthy and cancerous tissue, unravel radiomic tissue features that could be linked to targetable pathogenic mechanisms, and potentially highlight metastases that evolved from the primary tumor. We conducted an evaluation with eleven domain experts from the field of gynecological cancer imaging, neurological imaging, and machine learning research to confirm the utility of our approach.",
    note= "The final authenticated version is available online at https://doi.org/10.1007/978-3-030-61864-3_29",
    pdf = "pdfs/Moerth-2020-CGI-ParaGlyder.pdf",
    images = "images/Moerth-2020-ParaGlyder.PNG",
    thumbnails = "images/Moerth-2020-ParaGlyder-thumb.png",
    youtube = "https://youtu.be/S_M4CWXKz0U",
    publisher = "LNCS by Springer",
    project = "ttmedvis",
    doi = "10.1007/978-3-030-61864-3_29"
    }
    [PDF] [DOI] [Bibtex]
    @article{StormFurru-2020-VGT,
    author = {Syver Storm-Furru and Stefan Bruckner},
    title = {VA-TRAC: Geospatial Trajectory Analysis for Monitoring, Identification, and Verification in Fishing Vessel Operations},
    journal = {Computer Graphics Forum},
    volume = {39},
    number = {3},
    pages = {101--114},
    keywords = {visual analytics, fisheries, monitoring},
    doi = {10.1111/cgf.13966},
    abstract = {In order to ensure sustainability, fishing operations are governed by many rules and regulations that restrict the use of certain techniques and equipment, specify the species and size of fish that can be harvested, and regulate commercial activities based on licensing schemes. As the world's second largest exporter of fish and seafood products, Norway invests a significant amount of effort into maintaining natural ecosystem dynamics by ensuring compliance with its constantly evolving science-based regulatory body. This paper introduces VA-TRAC, a geovisual analytics application developed in collaboration with the Norwegian Directorate of Fisheries in order to address this complex task. Our approach uses automatic methods to identify possible catch operations based on fishing vessel trajectories, embedded in an interactive web-based visual interface used to explore the results, compare them with licensing information, and incorporate the analysts' domain knowledge into the decision making process. We present a data and task analysis based on a close collaboration with domain experts, and the design and implementation of VA-TRAC to address the identified requirements.},
    year = {2020},
    pdf = "pdfs/StormFurru-2020-VGT.pdf",
    thumbnails = "images/StormFurru-2020-VGT.png",
    images = "images/StormFurru-2020-VGT.jpg",
    project = "MetaVis"
    }
    [PDF] [DOI] [VID] [YT] [Bibtex]
    @article{Trautner-2020-SunspotPlots,
    author = {Trautner, T. and Bolte, F. and Stoppel, S. and Bruckner, S.},
    title = {Sunspot Plots: Model-based Structure Enhancement for Dense Scatter Plots},
    journal = {Computer Graphics Forum},
    volume = {39},
    number = {3},
    pages = {551--563},
    keywords = {information visualization, scatterplots, kernel density estimation},
    doi = {10.1111/cgf.14001},
    abstract = {Scatter plots are a powerful and well-established technique for visualizing the relationships between two variables as a collection of discrete points. However, especially when dealing with large and dense data, scatter plots often exhibit problems such as overplotting, making the data interpretation arduous. Density plots are able to overcome these limitations in highly populated regions, but fail to provide accurate information of individual data points. This is particularly problematic in sparse regions where the density estimate may not provide a good representation of the underlying data. In this paper, we present sunspot plots, a visualization technique that communicates dense data as a continuous data distribution, while preserving the discrete nature of data samples in sparsely populated areas. We furthermore demonstrate the advantages of our approach on typical failure cases of scatter plots within synthetic and real-world data sets and validate its effectiveness in a user study.},
    year = {2020},
    pdf = "pdfs/Trautner_2020_SunspotPlots_PDF.pdf",
    thumbnails = "images/Trautner_2020_SunspotPlots_thumb.png",
    images = "images/Trautner_2020_SunspotPlots_thumb.png",
    vid = "vids/Trautner_2020_SunspotPlots_video.mp4",
    youtube = "https://youtu.be/G6l-y6YGjzQ",
    project = "MetaVis"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Bolte-2020-ONC,
    author = "Fabian Bolte and Stefan Bruckner",
    title = "Organic Narrative Charts",
    booktitle = "Proceedings of Eurographics 2020 (Short Papers)",
    year = "2020",
    pages = "93--96",
    doi = "10.2312/egs.20201026",
    month = "may",
    abstract = "Storyline visualizations display the interactions of groups and entities and their development over time. Existing approaches have successfully adopted the general layout from hand-drawn illustrations to automatically create similar depictions. Ward Shelley is the author of several diagrammatic paintings that show the timeline of art-related subjects, such as Downtown Body, a history of art scenes. His drawings include many stylistic elements that are not covered by existing storyline visualizations, like links between entities, splits and merges of streams, and tags or labels to describe the individual elements. We present a visualization method that provides a visual mapping for the complex relationships in the data, creates a layout for their display, and adopts a similar styling of elements to imitate the artistic appeal of such illustrations.We compare our results to the original drawings and provide an open-source authoring tool prototype.",
    pdf = "pdfs/Bolte-2020-ONC.pdf",
    images = "images/Bolte-2020-ONC.jpg",
    thumbnails = "images/Bolte-2020-ONC.png",
    event = "Eurographics 2020",
    keywords = "narrative charts, storylines, aesthetics",
    project = "MetaVis",
    git = "https://github.com/cadanox/orcha"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE{Palenik-2019-Splatting,
    author={J. P\'{a}lenik and J. By\v{s}ka and S. Bruckner and H. Hauser},
    journal={IEEE Transactions on Visualization and Computer Graphics},
    title={Scale-Space Splatting: Reforming Spacetime for Cross-Scale Exploration of Integral Measures in Molecular Dynamics},
    year={2020},
    volume={26},
    number={1},
    pages={643--653},
    keywords={Data visualization;Computational modeling;Time series analysis;Atmospheric measurements;Particle measurements;Analytical models;Kernel;Scale space;time-series;scientific simulation;multi-scale analysis;space-time cube;molecular dynamics},
    doi={10.1109/TVCG.2019.2934258},
    ISSN={1077-2626},
    month={},
    pdf = "pdfs/scale-space-splatting.pdf",
    images = "images/scale-space-teaser.png",
    thumbnails = "images/scale-space-teaser-thumb.png",
    abstract = "Understanding large amounts of spatiotemporal data from particle-based simulations, such as molecular dynamics, often relies on the computation and analysis of aggregate measures. These, however, by virtue of aggregation, hide structural information about the space/time localization of the studied phenomena. This leads to degenerate cases where the measures fail to capture distinct behaviour. In order to drill into these aggregate values, we propose a multi-scale visual exploration technique. Our novel representation, based on partial domain aggregation, enables the construction of a continuous scale-space for discrete datasets and the simultaneous exploration of scales in both space and time. We link these two scale-spaces in a scale-space space-time cube and model linked views as orthogonal slices through this cube, thus enabling the rapid identification of spatio-temporal patterns at multiple scales. To demonstrate the effectiveness of our approach, we showcase an advanced exploration of a protein-ligand simulation.",
    }
    [PDF] [DOI] [Bibtex]
    @incollection {Bolte-2019-MVS,
    author = {Bolte, Fabian and Bruckner, Stefan},
    title = {Measures in Visualization Space},
    booktitle = {Foundations of Data Visualization},
    chapter = {3},
    publisher = {Springer},
    year = {2020},
    pdf = {pdfs/Bolte-2019-MVS.pdf},
    images = {images/Bolte-2019-MVS.png},
    thumbnails = {images/Bolte-2019-MVS.png},
    abstract = {Measurement is an integral part of modern science, providing the fundamental means for evaluation, comparison, and prediction. In the context of visualization, several different types of measures have been proposed, ranging from approaches that evaluate particular aspects of individual visualization techniques, their perceptual characteristics, and even economic factors. Furthermore, there are approaches that attempt to provide means for measuring general properties of the visualization process as a whole. Measures can be quantitative or qualitative, and one of the primary goals is to provide objective means for reasoning about visualizations and their effectiveness. As such, they play a central role in the development of scientific theories for visualization. In this chapter, we provide an overview of the current state of the art, survey and classify different types of visualization measures, characterize their strengths and drawbacks, and provide an outline of open challenges for future research.},
    note = {This is a preprint of a chapter for a planned book that was initiated by participants of the Dagstuhl Seminar 18041 ("Foundations of Data Visualization") and that is expected to be published by Springer. The final book chapter will differ from this preprint.},
    url = {https://arxiv.org/abs/1909.05295},
    project = "MetaVis",
    isbn = {978-3-030-34443-6},
    doi = {10.1007/978-3-030-34444-3_3}
    }
    [PDF] [DOI] [Bibtex]
    @article{Solteszova-2019-MLT,
    author = {Solteszova, V. and Smit, N. N. and Stoppel, S. and Gr\"{u}ner, R. and Bruckner, S.},
    title = {Memento: Localized Time-Warping for Spatio-Temporal Selection},
    journal = {Computer Graphics Forum},
    volume = {39},
    number = {1},
    pages = {231--243},
    year = {2020},
    keywords = {interaction, temporal data, visualization, spatio-temporal projection},
    images = "images/Solteszova-2019-MLT.jpg",
    thumbnails = "images/Solteszova-2019-MLT-1.jpg",
    pdf = "pdfs/Solteszova-2019-MLT.pdf",
    doi = {10.1111/cgf.13763},
    abstract = {Interaction techniques for temporal data are often focused on affecting the spatial aspects of the data, for instance through the use of transfer functions, camera navigation or clipping planes. However, the temporal aspect of the data interaction is often neglected. The temporal component is either visualized as individual time steps, an animation or a static summary over the temporal domain. When dealing with streaming data, these techniques are unable to cope with the task of re-viewing an interesting local spatio-temporal event, while continuing to observe the rest of the feed. We propose a novel technique that allows users to interactively specify areas of interest in the spatio-temporal domain. By employing a time-warp function, we are able to slow down time, freeze time or even travel back in time, around spatio-temporal events of interest. The combination of such a (pre-defined) time-warp function and brushing directly in the data to select regions of interest allows for a detailed review of temporally and spatially localized events, while maintaining an overview of the global spatio-temporal data. We demonstrate the utility of our technique with several usage scenarios.},
    project = "MetaVis,ttmedvis,VIDI"
    }

2019

    [PDF] [DOI] [Bibtex]
    @article{Byska-2019-LongMolecularDynamicsSimulations,
    author = {Byška, J. and Trautner, T. and Marques, S.M. and Damborský, J. and Kozlíková, B. and Waldner, M.},
    title = {Analysis of Long Molecular Dynamics Simulations Using Interactive Focus+Context Visualization},
    journal = {Computer Graphics Forum},
    volume = {38},
    number = {3},
    pages = {441-453},
    keywords = {CCS Concepts, Human-centered computing -- Scientific visualization; User centered design},
    doi = {10.1111/cgf.13701},
    url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/cgf.13701},
    eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1111/cgf.13701},
    abstract = {Analyzing molecular dynamics (MD) simulations is a key aspect to understand protein dynamics and function. With increasing computational power, it is now possible to generate very long and complex simulations, which are cumbersome to explore using traditional 3D animations of protein movements. Guided by requirements derived from multiple focus groups with protein engineering experts, we designed and developed a novel interactive visual analysis approach for long and crowded MD simulations. In this approach, we link a dynamic 3D focus+context visualization with a 2D chart of time series data to guide the detection and navigation towards important spatio-temporal events. The 3D visualization renders elements of interest in more detail and increases the temporal resolution dependent on the time series data or the spatial region of interest. In case studies with different MD simulation data sets and research questions, we found that the proposed visual analysis approach facilitates exploratory analysis to generate, confirm, or reject hypotheses about causalities. Finally, we derived design guidelines for interactive visual analysis of complex MD simulation data.},
    year = {2019},
    pdf = "pdfs/AnalysisOfLongMolecularDynamicsSimulationsUsingInteractiveFocusAndContextVisualization_Trautner.pdf",
    images = "images/Byska-2019-LongMolecularDynamicsSimulations.png",
    thumbnails = "images/Byska-2019-LongMolecularDynamicsSimulations.png"
    }
    [PDF] [DOI] [Bibtex]
    @inproceedings {Bartsch-2019-MVA,
    booktitle = {Proceedings of VCBM 2019 (Short Papers)},
    title = {MedUse: A Visual Analysis Tool for Medication Use Data in the ABCD Study},
    author = {Bartsch, Hauke and Garrison, Laura and Bruckner, Stefan and Wang, Ariel and Tapert, Susan F. and Gr\"{u}ner, Renate},
    abstract = {The RxNorm vocabulary is a yearly-published biomedical resource providing normalized names for medications. It is used to capture medication use in the Adolescent Brain Cognitive Development (ABCD) study, an active and publicly available longitudinal research study following 11,800 children over 10 years. In this work, we present medUse, a visual tool allowing researchers to explore and analyze the relationship of drug category to cognitive or imaging derived measures using ABCD study data. Our tool provides position-based context for tree traversal and selection granularity of both study participants and drug category. Developed as part of the Data Exploration and Analysis Portal (DEAP), medUse is available to more than 600 ABCD researchers world-wide. By integrating medUse into an actively used research product we are able to reach a wide audience and increase the practical relevance of visualization for the biomedical field.},
    year = {2019},
    pages = {97--101},
    images = "images/Bartsch-2019-MVA.jpg",
    thumbnails = "images/Bartsch-2019-MVA.png",
    pdf = "pdfs/Bartsch-2019-MVA.pdf",
    publisher = {The Eurographics Association},
    ISSN = {2070-5786},
    ISBN = {978-3-03868-081-9},
    DOI = {10.2312/vcbm.20191236},
    project = {VIDI}
    }
    [DOI] [Bibtex]
    @article{kraima2019role,
    title={The role of the longitudinal muscle in the anal sphincter complex: Implications for the Intersphincteric Plane in Low Rectal Cancer Surgery?},
    author={Kraima, Anne C and West, Nicholas P and Roberts, Nicholas and Magee, Derek R and Smit, Noeska N and van de Velde, Cornelis JH and DeRuiter, Marco C and Rutten, Harm J and Quirke, Philip},
    journal={Clinical Anatomy},
    year={2019},
    doi="10.1002/ca.23444",
    url = "https://onlinelibrary.wiley.com/doi/full/10.1002/ca.23444",
    publisher={Wiley Online Library},
    project = "ttmedvis",
    images = {images/kraima-2019-role.png},
    thumbnails = {images/kraima-2019-role.png},
    abstract = {Intersphincteric resection (ISR) enables radical sphincter-preserving surgery in a subset of low rectal tumors impinging on the anal sphincter complex (ASC). Excellent anatomical knowledge is essential for optimal ISR. This study describes the role of the longitudinal muscle (LM) in the ASC and implications for ISR and other low rectal and anal pathologies. Six human adult en bloc cadaveric specimens (three males, three females) were obtained from the University of Leeds GIFT Research Tissue Programme. Paraffin-embedded mega blocks containing the ASC were produced and serially sectioned at 250 µm intervals. Whole mount microscopic sections were histologically stained and digitally scanned. The intersphincteric plane was shown to be potentially very variable. In some places adipose tissue is located between the external anal sphincter (EAS) and internal anal sphincter (IAS), whereas in others the LM interdigitates to obliterate the plane. Elsewhere the LM is (partly) absent with the intersphincteric plane lying on the IAS. The LM gave rise to the formation of the submucosae and corrugator ani muscles by penetrating the IAS and EAS. In four of six specimens, striated muscle fibers from the EAS curled around the distal IAS reaching the anal submucosa. The ASC formed a complex structure, varying between individuals with an inconstant LM affecting the potential location of the intersphincteric plane as well as a high degree of intermingling striated and smooth muscle fibers potentially further disrupting the plane. The complexity of identifying the correct pathological staging of low rectal cancer is also demonstrated.}
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Garrison2019SM,
    author = {Garrison, Laura and Va\v{s}\'{\i}\v{c}ek, Jakub and Gr\"{u}ner, Renate and Smit, Noeska and Bruckner, Stefan},
    title = {SpectraMosaic: An Exploratory Tool for the Interactive Visual Analysis of Magnetic Resonance Spectroscopy Data},
    journal = {Computer Graphics Forum},
    month = {sep},
    year = {2019},
    booktitle = {Proceedings of VCBM 2019},
    pages = {1--10},
    event = "VCBM 2019",
    proceedings = "Proceedings of the 9th Eurographics Workshop on Visual Computing in Biology and Medicine",
    keywords = {medical visualization, magnetic resonance spectroscopy data, information visualization, user-centered design},
    images = "images/garrison_VCBM19spectramosaic_full.PNG",
    thumbnails = "images/garrison_VCBM19spectramosaic_thumb.png",
    pdf = "pdfs/garrison_VCBM19spectramosaic.pdf",
    youtube = "https://www.youtube.com/watch?v=Rzl7sl4WvdQ",
    abstract = {Magnetic resonance spectroscopy (MRS) allows for assessment of tissue metabolite characteristics used often for early detection and treatment evaluation of brain-related pathologies. However, meaningful variations in ratios of tissue metabolites within a sample area are difficult to capture with current visualization tools. Furthermore, the learning curve to interpretation is steep and limits the more widespread adoption of MRS in clinical practice. In this design study, we collaborated with domain experts to design a novel visualization tool for the exploration of tissue metabolite concentration ratios in spectroscopy clinical and research studies. We present a data and task analysis for this domain, where MRS data attributes can be categorized into tiers of visual priority. We furthermore introduce a novel set of visual encodings for these attributes. Our result is SpectraMosaic, an interactive insight-generation tool for rapid exploration and comparison of metabolite ratios. We validate our approach with two case studies from MR spectroscopy experts, providing early qualitative evidence of the efficacy of the system for visualization of spectral data and affording deeper insights into these complex heterogeneous data.},
    git = "https://git.app.uib.no/Laura.Garrison/spectramosaic",
    doi = "0.2312/vcbm.20191225",
    project = "VIDI"
    }
    [DOI] [Bibtex]
    @incollection{Smit-2019-AtlasVis,
    title={Towards Advanced Interactive Visualization for Virtual Atlases},
    author={Smit, Noeska and Bruckner, Stefan},
    booktitle={Biomedical Visualisation},
    pages={85--96},
    year={2019},
    publisher={Springer},
    doi = {10.1007/978-3-030-19385-0_6},
    url = "http://noeskasmit.com/wp-content/uploads/2019/07/Smit_AtlasVis_2019.pdf",
    images = "images/Smit-2019-AtlasVis.png",
    thumbnails = "images/Smit-2019-AtlasVis.png",
    abstract = "An atlas is generally defined as a bound collection of tables, charts or illustrations describing a phenomenon. In an anatomical atlas for example, a collection of representative illustrations and text describes anatomy for the purpose of communicating anatomical knowledge. The atlas serves as reference frame for comparing and integrating data from different sources by spatially or semantically relating collections of drawings, imaging data, and/or text. In the field of medical image processing, atlas information is often constructed from a collection of regions of interest, which are based on medical images that are annotated by domain experts. Such an atlas may be employed for example for automatic segmentation of medical imaging data. The combination of interactive visualization techniques with atlas information opens up new possibilities for content creation, curation, and navigation in virtual atlases. With interactive visualization of atlas information, students are able to inspect and explore anatomical atlases in ways that were not possible with the traditional method of presenting anatomical atlases in book format, such as viewing the illustrations from other viewpoints. With advanced interaction techniques, it becomes possible to query the data that forms the basis for the atlas, thus empowering researchers to access a wealth of information in new ways. So far, atlasbased visualization has been employed for mainly medical education, as well as biological research. In this survey, we provide an overview of current digital biomedical atlas tasks and applications and summarize relevant visualization techniques. We discuss recent approaches for providing next-generation visual interfaces to navigate atlas data that go beyond common text-based search and hierarchical lists. Finally, we reflect on open challenges and opportunities for the next steps in interactive atlas visualization. ",
    project = "ttmedvis,MetaVis,VIDI"
    }
    [DOI] [Bibtex]
    @article{Meuschke-2019-EvalViz,
    title = {EvalViz--Surface Visualization Evaluation Wizard for Depth and Shape Perception Tasks},
    author = {Meuschke, Monique and Smit, Noeska N and Lichtenberg, Nils and Preim, Bernhard and Lawonn, Kai},
    journal = {Computers \& Graphics},
    year = {2019},
    publisher = {Elsevier},
    number = "1",
    volume = "82",
    DOI = {10.1016/j.cag.2019.05.022},
    images = "images/Meuschke_EvalViz_2019.png",
    thumbnails = "images/Meuschke_EvalViz_2019.png",
    abstract = "User studies are indispensable for visualization application papers in order to assess the value and limitations of the presented approach. Important aspects are how well depth and shape information can be perceived, as coding of these aspects is essential to enable an understandable representation of complex 3D data. In practice, there is usually little time to perform such studies, and the establishment and conduction of user studies can be labour-intensive. In addition, it can be difficult to reach enough participants to obtain expressive results regarding the quality of different visualization techniques.
    In this paper, we propose a framework that allows visualization researchers to quickly create task-based user studies on depth and shape perception for different surface visualizations and perform the resulting tasks via a web interface. With our approach, the effort for generating user studies is reduced and at the same time the web-based component allows researchers to attract more participants to their study. We demonstrate our framework by applying shape and depth evaluation tasks to visualizations of various surface representations used in many technical and biomedical applications.",
    project = "ttmedvis"
    }
    [PDF] [YT] [Bibtex]
    @MISC {Garrison2019SM_eurovis,
    title = {A Visual Encoding System for Comparative Exploration of Magnetic Resonance Spectroscopy Data},
    author = {Garrison, Laura and Va\v{s}\'{\i}\v{c}ek, Jakub and Gr\"{u}ner, Renate and Smit, Noeska and Bruckner, Stefan},
    abstract = "Magnetic resonance spectroscopy (MRS) allows for assessment of tissue metabolite characteristics used often for early detection and treatment evaluation of intracranial pathologies. In particular, this non-invasive technique is important in the study of metabolic changes related to brain tumors, strokes, seizure disorders, Alzheimer's disease, depression, as well as other diseases and disorders affecting the brain. However, meaningful variations in ratios of tissue metabolites within a sample area are difficult to capture with current visualization tools. Furthermore, the learning curve to interpretation is steep and limits the more widespread adoption of MRS in clinical practice. In this work we present a novel, tiered visual encoding system for multi-dimensional MRS data to aid in the visual exploration of metabolite concentration ratios. Our system was developed in close collaboration with domain experts including detailed data and task analyses. This visual encoding system was subsequently realized as part of an interactive insight-generation tool for rapid exploration and comparison of metabolite ratio variation for deeper insights to these complex data.",
    booktitle = {Proceedings of the EuroVis Conference - Posters (EuroVis 2019)},
    year = {2019},
    howpublished = "Poster presented at the EuroVis conference 2019",
    keywords = {medical visualization, magnetic resonance spectroscopy data, information visualization, user-centered design},
    images = "images/garrison_eurovis2019_SM_encodings.png",
    thumbnails = "images/garrison_eurovis2019_SM_encodings.png",
    pdf = "pdfs/garrison_eurovis2019_SM.pdf",
    youtube = "https://youtu.be/Rzl7sl4WvdQ",
    project = "VIDI"
    }
    [PDF] [DOI] [Bibtex]
    @inproceedings {Smit-2019-DBP,
    booktitle = {Eurographics 2019 - Dirk Bartz Prize},
    editor = {Bruckner, Stefan and Oeltze-Jafra, Steffen},
    title = {{Model-based Visualization for Medical Education and Training}},
    author = {Smit, Noeska and Lawonn, Kai and Kraima, Annelot and deRuiter, Marco and Bruckner, Stefan and Eisemann, Elmar and Vilanova, Anna},
    year = {2019},
    publisher = {The Eurographics Association},
    ISSN = {1017-4656},
    DOI = {10.2312/egm.20191033},
    pdf = "pdfs/Smit_DBPrize_2019.pdf",
    images = "images/Smit_DBPrize_2019.png",
    thumbnails = "images/Smit_DBPrize_2019.png",
    abstract = "Anatomy, or the study of the structure of the human body, is an essential component of medical education. Certain parts of human anatomy are considered to be more complex to understand than others, due to a multitude of closely related structures. Furthermore, there are many potential variations in anatomy, e.g., different topologies of vessels, and knowledge of these variations is critical for many in medical practice.
    Some aspects of individual anatomy, such as the autonomic nerves, are not visible in individuals through medical imaging techniques or even during surgery, placing these nerves at risk for damage.
    3D models and interactive visualization techniques can be used to improve understanding of this complex anatomy, in combination with traditional medical education paradigms.
    We present a framework incorporating several advanced medical visualization techniques and applications for teaching and training purposes, which is the result of an interdisciplinary project.
    In contrast to previous approaches which focus on general anatomy visualization or direct visualization of medical imaging data, we employ model-based techniques to represent variational anatomy, as well as anatomy not visible from imaging. Our framework covers the complete spectrum including general anatomy, anatomical variations, and anatomy in individual patients.
    Applications within our framework were evaluated positively with medical users, and our educational tool for general anatomy is in use in a Massive Open Online Course (MOOC) on anatomy, which had over 17000 participants worldwide in the first run.",
    project = "ttmedvis,VIDI"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Stoppel-2019-LFL,
    author = "Stoppel, Sergej and Bruckner, Stefan",
    title = "LinesLab: A Flexible Low-Cost Approach for the Generation of Physical Monochrome Art",
    journal = "Computer Graphics Forum",
    year = "2019",
    abstract = "The desire for the physical generation of computer art has seen a significant body of research that has resulted in sophisticated robots and painting machines, together with specialized algorithms mimicking particular artistic techniques. The resulting setups are often expensive and complex, making them unavailable for recreational and hobbyist use. In recent years, however, a new class of affordable low-cost plotters and cutting machines has reached the market. In this paper, we present a novel system for the physical generation of line and cut-out art based on digital images, targeted at such off-the-shelf devices. Our approach uses a meta-optimization process to generate results that represent the tonal content of a digital image while conforming to the physical and mechanical constraints of home-use devices. By flexibly combining basic sets of positional and shape encodings, we are able to recreate a wide range of artistic styles. Furthermore, our system optimizes the output in terms of visual perception based on the desired viewing distance, while remaining scalable with respect to the medium size.",
    pdf = "pdfs/Stoppel-2019-LFL.pdf",
    images = "images/Stoppel-2019-LFL.jpg",
    thumbnails = "images/Stoppel-2019-LFL.png",
    publisher = "The Eurographics Association and John Wiley and Sons Ltd.",
    doi = "10.1111/cgf.13609",
    youtube = "https://www.youtube.com/watch?v=WdZJmU6fOAY",
    project = "MetaVis"
    }
    [PDF] [Bibtex]
    @article{fan2019personalized,
    title={Personalized Sketch-Based Brushing in Scatterplots},
    author={Chaoran Fan and Helwig Hauser},
    journal={IEEE Computer Graphics and Applications},
    volume={39},
    number={4},
    pages={28--39},
    year={2019},
    publisher={IEEE},
    pdf="pdfs/personalizedBrush.pdf",
    images="images/personalizedBrush.png",
    thumbnails = "images/personalizedBrush.png",
    abstract="Brushing is at the heart of most modern visual analytics solutions and effective and efficient brushing is crucial for successful interactive data exploration and analysis. As the user plays a central role in brushing, several data-driven brushing tools have been designed that are based on predicting the user’s brushing goal. All of these general brushing models learn the users’ average brushing preference, which is not optimal for every single user. In this paper, we propose an innovative framework that offers the user opportunities to improve the brushing technique while using it. We realized this framework with a CNN-based brushing technique and the result shows that with additional data from a particular user, the model can be refined (better performance in terms of accuracy), eventually converging to a personalized model based on a moderate amount of retraining."
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Fan-2019-KDE,
    author = "Chaoran Fan and Helwig Hauser",
    title = "On KDE-based brushing in scatterplots and how it compares to CNN-based brushing",
    booktitle = "Proceedings of MLVis: Machine Learning Methods in Visualisation for Big Data",
    year = "2019",
    publisher = "Eurographics Association",
    abstract = "In this paper, we investigate to which degree the human should be involved into the model design and how good the empirical model can be with more careful design. To find out, we extended our previously published Mahalanobis brush (the best current empirical model in terms of accuracy for brushing points in a scatterplot) by further incorporating the data distribution information that is captured by the kernel density estimation (KDE). Based on this work, we then include a short discussion between the empirical model, designed in detail by an expert and the deep learning-based model that is learned from user data directly",
    pdf = "pdfs/On-KDE-based-brushing-in-scatterplotsand-how-it-compares-to-CNN-based-brushing.pdf",
    images = "images/pic-2.png",
    thumbnails = "images/pic-2.png",
    doi = "10.2312/mlvis.20191157",
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2019-DVM,
    author = "Bruckner, Stefan",
    title = "Dynamic Visibility-Driven Molecular Surfaces",
    journal = "Computer Graphics Forum",
    year = "2019",
    volume = "38",
    number = "2",
    pages = "317--329",
    abstract = "Molecular surface representations are an important tool for the visual analysis of molecular structure and function. In this paper, we present a novel method for the visualization of dynamic molecular surfaces based on the Gaussian model. In contrast to previous approaches, our technique does not rely on the construction of intermediate representations such as grids or triangulated surfaces. Instead, it operates entirely in image space, which enables us to exploit visibility information to efficiently skip unnecessary computations. With this visibility-driven approach, we can visualize dynamic high-quality surfaces for molecules consisting of millions of atoms. Our approach requires no preprocessing, allows for the interactive adjustment of all properties and parameters, and is significantly faster than previous approaches, while providing superior quality.",
    pdf = "pdfs/Bruckner-2019-DVM.pdf",
    images = "images/Bruckner-2019-DVM-1.jpg",
    thumbnails = "images/Bruckner-2019-DVM.png",
    publisher = "The Eurographics Association and John Wiley and Sons Ltd.",
    doi = "10.1111/cgf.13640",
    youtube = "https://www.youtube.com/watch?v=aZmDhTbJlAM",
    git = "https://github.com/sbruckner/dynamol.git",
    project = "MetaVis"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Bruckner-2018-MSD,
    author = "Stefan Bruckner and Tobias Isenberg and Timo Ropinski and Alexander Wiebel",
    title = "A Model of Spatial Directness in Interactive Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    volume = "25",
    number = "8",
    year = "2019",
    abstract = "We discuss the concept of directness in the context of spatial interaction with visualization. In particular, we propose a modelthat allows practitioners to analyze and describe the spatial directness of interaction techniques, ultimately to be able to better understandinteraction issues that may affect usability. To reach these goals, we distinguish between different types of directness. Each type ofdirectness depends on a particular mapping between different spaces, for which we consider the data space, the visualization space, theoutput space, the user space, the manipulation space, and the interaction space. In addition to the introduction of the model itself, we alsoshow how to apply it to several real-world interaction scenarios in visualization, and thus discuss the resulting types of spatial directness,without recommending either more direct or more indirect interaction techniques. In particular, we will demonstrate descriptive andevaluative usage of the proposed model, and also briefly discuss its generative usage.",
    pdf = "pdfs/Bruckner-2018-MSD.pdf",
    images = "images/Bruckner-2018-MSD.jpg",
    thumbnails = "images/Bruckner-2018-MSD.png",
    doi = "10.1109/TVCG.2018.2848906",
    project = "MetaVis"
    }
    [PDF] [DOI] [VID] [Bibtex]
    @ARTICLE {Stoppel-2019-FVI,
    author = "Sergej Stoppel and Magnus Paulson Erga and Stefan Bruckner",
    title = "Firefly: Virtual Illumination Drones for Interactive Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2019",
    volume = "25",
    pages = "1204-1213",
    abstract = "Light specification in three dimensional scenes is a complex problem and several approaches have been presented that aim to automate this process. However, there are many scenarios where a static light setup is insufficient, as the scene content and camera position may change. Simultaneous manual control over the camera and light position imposes a high cognitive load on the user. To address this challenge, we introduce a novel approach for automatic scene illumination with Fireflies. Fireflies are intelligent virtual light drones that illuminate the scene by traveling on a closed path. The Firefly path automatically adapts to changes in the scene based on an outcome-oriented energy function. To achieve interactive performance, we employ a parallel rendering pipeline for the light path evaluations. We provide a catalog of energy functions for various application scenarios and discuss the applicability of our method on several examples.",
    pdf = "pdfs/VIS2018-Firefly.pdf",
    vid = "vids/FinalVideo.mp4",
    images = "images/Teaser.png",
    thumbnails = "images/HeadRightCroppedThumbnail.png",
    doi = "10.1109/TVCG.2018.2864656",
    project = "MetaVis"
    }
    [PDF] [DOI] [Bibtex]
    @inproceedings {Moerth-2019-VCBM,
    booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine",
    editor = "Kozlíková, Barbora and Linsen, Lars and Vázquez, Pere-Pau and Lawonn, Kai and Raidou, Renata Georgia",
    abstract = "Three-dimensional (3D) ultrasound imaging and visualization
    is often used in medical diagnostics, especially in prenatal
    screening. Screening the development of the fetus is
    important to assess possible complications early on. State
    of the art approaches involve taking standardized
    measurements to compare them with standardized tables. The
    measurements are taken in a 2D slice view, where precise
    measurements can be difficult to acquire due to the fetal
    pose. Performing the analysis in a 3D view would enable the
    viewer to better discriminate between artefacts and
    representative information. Additionally making data
    comparable between different investigations and patients is
    a goal in medical imaging techniques and is often achieved
    by standardization. With this paper, we introduce a novel
    approach to provide a standardization method for 3D
    ultrasound fetus screenings. Our approach is called “The
    Vitruvian Baby” and incorporates a complete pipeline for
    standardized measuring in fetal 3D ultrasound. The input of
    the method is a 3D ultrasound screening of a fetus and the
    output is the fetus in a standardized T-pose. In this pose,
    taking measurements is easier and comparison of different
    fetuses is possible. In addition to the transformation of
    the 3D ultrasound data, we create an abstract representation
    of the fetus based on accurate measurements. We demonstrate
    the accuracy of our approach on simulated data where the
    ground truth is known.",
    title = "The Vitruvian Baby: Interactive Reformation of Fetal Ultrasound Data to a T-Position",
    author = "M\"{o}rth, Eric and Raidou, Renata Georgia and Viola, Ivan and Smit, Noeska",
    year = "2019",
    publisher = "The Eurographics Association",
    ISSN = "2070-5786",
    ISBN = "978-3-03868-081-9",
    DOI = "10.2312/vcbm.20191245",
    pdf = "pdfs/VCBM_TheVitruvianBaby_ShortPaper_201-205.pdf",
    images = "images/vcbmVitruvianBaby.jpg",
    thumbnails = "images/vcbmVitruvianBaby.jpg",
    url = "https://diglib.eg.org/handle/10.2312/vcbm20191245",
    project = {VIDI}
    }
    [PDF] [DOI] [Bibtex]
    @MISC {Moerth-2019-EUROVIS,
    booktitle = "EuroVis 2019 - Posters",
    editor = "Madeiras Pereira, João and Raidou, Renata Georgia",
    title = "The Vitruvian Baby: Interactive Reformation of Fetal Ultrasound Data to a T-Position",
    author = "M\"{o}rth, Eric and Raidou, Renata Georgia and Smit, Noeska and Viola, Ivan",
    year = "2019",
    abstract = "Three dimensional (3D) ultrasound is commonly used in prenatal screening, because it provides insight into the shape as well
    as the organs of the fetus. Currently, gynecologists take standardized measurements of the fetus and check for abnormalities by
    analyzing the data in a 2D slice view. The fetal pose may complicate taking precise measurements in such a view. Analyzing the
    data in a 3D view would enable the viewer to better distinguish between artefacts and representative information. Standardization
    in medical imaging techniques aims to make the data comparable between different investigations and patients. It is
    already used in different medical applications for example in magnetic resonance imaging (MRI). With this work, we introduce
    a novel approach to provide a standardization method for 3D ultrasound screenings of fetuses. The approach consists of six
    steps and is called “The Vitruvian Baby”. The input is the data of the 3D ultrasound screening of a fetus and the output shows
    the fetus in a standardized T-pose in which measurements can be made. The precision of standardized measurements compared
    to the gold standard is for the finger to finger span 91,08% and for the head to toe measurement 94,05%.",
    publisher = "The Eurographics Association",
    howpublished = "Poster presented at the EuroVis conference 2019",
    ISBN = "978-3-03868-088-8",
    DOI = "10.2312/eurp.20191147",
    pdf = "pdfs/EUROVIS_TheVitruvianBaby_Poster.pdf",
    images = "images/EUROVISTheVitruvianBabyPoster.png",
    thumbnails = "images/EUROVISTheVitruvianBabyPoster.png",
    url = "https://diglib.eg.org/handle/10.2312/eurp20191147"
    }

2018

    [Bibtex]
    @ARTICLE {PhDThesis2018Stoppel,
    author = "Stoppel, Sergej",
    title = "User-Centric Parameter Specification for Interactive Virtual and Physical Visual Representations",
    journal = "Universitetet i Bergen",
    year = "2018"
    }
    [PDF] [Bibtex]
    @INPROCEEDINGS {hauser2018foundations,
    author = "Hauser, Helwig and Rheingans, Penny and Scheuermann, Gerik",
    title = "Foundations of Data Visualization (Dagstuhl Seminar 18041)",
    booktitle = "Dagstuhl Reports",
    year = "2018",
    volume = "8",
    organization = "Schloss Dagstuhl-Leibniz-Zentrum fuer Informatik",
    abstract = "This report documents the program and the outcomes of Dagstuhl Seminar 18041 “Foundations
    of Data Visualization”. It includes a discussion of the motivation and overall organization, an
    abstract from each of the participants, and a report about each of the working groups.",
    pdf = "pdfs/foundations.pdf",
    thumbnails = "images/foundations.png",
    number = "1"
    }
    [Bibtex]
    @ARTICLE {Jurcik2018Caver,
    author = "Adam Jur\v{c}\'{i}k and David Bedn\'{a}\v{r} and Jan By\v{s}ka and Sergio M. Marques and Katar\'{i}na Furmanov\'{a} and Luk\'{a}\v{s} Daniel and Piia Kokkonen and Jan Brezovsk\'{y} and Ond\v{r}ej Strnad and Jan \v{s}\v{t}oura\v{c} and Anton\'{i}n Pavelka and Martin Ma\v{n}\'{a}k and Ji\v{r}\'{i} Damborsk\'{y} and Barbora Kozl\'{i}kov\'{a}",
    title = "CAVER Analyst 2.0: analysis and visualization of channels and tunnels in protein structures and molecular dynamics trajectories",
    journal = "Bioinformatics",
    year = "2018",
    abstract = "MOTIVATION:Studying the transport paths of ligands, solvents, or ions in transmembrane proteins and proteins with buried binding sites is fundamental to the understanding of their biological function. A detailed analysis of the structural features influencing the transport paths is also important for engineering proteins for biomedical and biotechnological applications.RESULTS:CAVER Analyst 2.0 is a software tool for quantitative analysis and real-time visualization of tunnels and channels in static and dynamic structures. This version provides the users with many new functions, including advanced techniques for intuitive visual inspection of the spatiotemporal behavior of tunnels and channels. Novel integrated algorithms allow efficient analysis and data reduction in large protein structures and molecular dynamics simulations.",
    images = "images/analyst.jpg",
    thumbnails = "images/analyst.jpg"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Meuschke2018VCBM,
    author = "Monique Meuschke and Noeska N. Smit and Nils Lichtenberg and Bernhard Preim and Kai Lawonn",
    title = "Automatic Generation of Web-Based User Studies to Evaluate Depth Perception in Vascular Surface Visualizations",
    booktitle = "Proceedings of VCBM 2018",
    year = "2018",
    editor = "Anna Puig Puig and Thomas Schultz and Anna Vilanova and Ingrid Hotz and Barbora Kozlikova and Pere-Pau Vázquez",
    pages = "033-044",
    address = "Granada, Spain",
    publisher = "Eurographics Association",
    abstract = "User studies are often required in biomedical visualization application papers in order to provide evidence for the utility of the presented approach. An important aspect is how well depth information can be perceived, as depth encoding is important to enable an understandable representation of complex data.Unfortunately, in practice there is often little time available to perform such studies, and setting up and conducting user studies may be labor-intensive. In addition, it can be challenging to reach enough participants to support the contribution claims of the paper. In this paper, we propose a system that allows biomedical visualization researchers to quickly generate perceptual task-based user studies for novel surface visualizations, and to perform the resulting experiment via a web interface. This approach helps to reduce effort in the setup of user studies themselves, and at the same time leverages a web-based approach that can help researchers attract more participants to their study. We demonstrate our system using the specific application of depth judgment tasks to evaluate vascular surface visualizations, since there is a lot of recent interest in this area.However, the system is also generally applicable for conducting other task-baseduser studies in biomedical visualization.",
    pdf = "pdfs/meuschke2018VCBM.pdf",
    images = "images/vcbm2018.png",
    thumbnails = "images/vcbm2018.png",
    youtube = "https://www.youtube.com/watch?v=8lns8GGpPJI",
    crossref = "VCBM-proc",
    doi = "10.2312/vcbm.20181227",
    project = "ttmedvis"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Magnus-2018-VPI,
    author = "Jens G. Magnus and Stefan Bruckner",
    title = "Interactive Dynamic Volume Illumination with Refraction and Caustics",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2018",
    volume = "24",
    number = "1",
    pages = "984--993",
    month = "jan",
    abstract = "In recent years, significant progress has been made indeveloping high-quality interactive methods for realistic volumeillumination. However, refraction -- despite being an important aspectof light propagation in participating media -- has so far only receivedlittle attention. In this paper, we present a novel approach forrefractive volume illumination including caustics capable of interactiveframe rates. By interleaving light and viewing ray propagation, ourtechnique avoids memory-intensive storage of illumination informationand does not require any precomputation. It is fully dynamic and allparameters such as light position and transfer function can be modifiedinteractively without a performance penalty.",
    pdf = "pdfs/Magnus-2018-IDV.pdf",
    images = "images/Magnus-2018-IDV.jpg",
    thumbnails = "images/Magnus-2018-IDV.png",
    youtube = "https://www.youtube.com/watch?v=3tn6sSXw4NQ",
    doi = "10.1109/TVCG.2017.2744438",
    event = "IEEE SciVis 2017",
    keywords = "interactive volume rendering, illumination, refraction, shadows, caustics",
    location = "Phoenix, USA",
    project = "MetaVis"
    }
    [PDF] [YT] [Bibtex]
    @ARTICLE {lichtenbergsmithansenlawonn2018,
    author = "Nils Lichtenberg and Noeska Smit and Christian Hansen and Kai Lawonn",
    title = "Real-time field aligned stripe patterns",
    journal = "Computers & Graphics",
    year = "2018",
    volume = "74",
    pages = "137-149",
    month = "aug",
    abstract = "In this paper, we present a parameterization technique that can be applied to surface meshes in real-time without time-consuming preprocessing steps. The parameterization is suitable for the display of (un-)oriented patterns and texture patches, and to sample a surface in a periodic fashion. The method is inspired by existing work that solves a global optimization problem to generate a continuous stripe pattern on the surface, from which texture coordinates can be derived. We propose a local optimization approach that is suitable for parallel execution on the GPU, which drastically reduces computation time. With this, we achieve on-the-fly texturing of 3D, medium-sized (up to 70k vertices) surface meshes. The algorithm takes a tangent vector field as input and aligns the texture coordinates to it. Our technique achieves real-time parameterization of the surface meshes by employing a parallelizable local search algorithm that converges to a local minimum in a few iterations. The calculation in real-time allows for live parameter updates and determination of varying texture coordinates. Furthermore, the method can handle non-manifold meshes. The technique is useful in various applications, e.g., biomedical visualization and flow visualization. We highlight our method\s potential by providing usage scenarios for several applications.A PDF of the accepted manuscript is available via noeskasmit.com/wp-content/uploads/2018/08/lichtenberg_2018.pdf.",
    pdf = "pdfs/lichtenberg_2018.pdf",
    images = "images/Selection_384.png",
    thumbnails = "images/1-s2.0-S0097849318300591-fx1_lrg.jpg",
    youtube = "https://www.youtube.com/watch?v=7CpkHy8KPK8",
    project = "ttmedvis"
    }
    [PDF] [Bibtex]
    @MISC {Smit18MMIV,
    author = "N. N. Smit and S. Bruckner and H. Hauser and I. Haldorsen and A. Lundervold and A. S. Lundervold and E. Hodneland and L. Oltedal and K. Specht and E. R. Gruner",
    title = "Research Agenda of the Mohn Medical Imaging and Visualization Centre in Bergen, Norway",
    howpublished = "Poster presented at the EG VCBM workshop 2018",
    month = "September",
    year = "2018",
    abstract = "The Mohn Medical Imaging and Visualization Centre (MMIV) was recently established in collaboration between the University of Bergen, Norway, and the Haukeland University Hospital in Bergen with generous financial support from the Bergen Research Foundation (BFS) to conduct cross-disciplinary research related to state-of-the-art medical imaging, including preclinical and clinical high-field MRI, CT and hybrid PET/CT/MR.The overall goal of the Centre is to research new methods in quantitative imaging and interactive visualization to predict changes in health and disease across spatial and temporal scales. This encompasses research in feature detection, feature extraction, and feature prediction, as well as on methods and techniques for the interactive visualization of spatial and abstract data related to and derived from these features.With special emphasis on the natural and medical sciences, the long-term goal of the Centre is to consolidate excellence in the interplay between medical imaging (physics, chemistry, radiography, radiology), and visualization (computer science and mathematics) and develop novel and refined imaging methods that may ultimately improve patient care. In this poster, we describe the overall research agenda of MMIV and describe the four core projects in the centre.",
    pdf = "pdfs/smit2018posterabstract.pdf",
    images = "images/MMIVPoster.png",
    thumbnails = "images/MMIVPoster.png",
    location = "Granada, Spain",
    project = "VIDI"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {cnn-brush,
    author = "Fan, Chaoran and Hauser, Helwig",
    title = "{Fast and Accurate CNN-based Brushing in Scatterplots}",
    journal = "Computer Graphics Forum (Eurovis 2018)",
    year = "2018",
    abstract = "Brushing plays a central role in most modern visual analytics solutions and effective and efficient techniques for data selection are key to establishing a successful human-computer dialogue. With this paper, we address the need for brushing techniques that are both fast, enabling a fluid interaction in visual data exploration and analysis, and also accurate, i.e., enabling the user to effectively select specific data subsets, even when their geometric delimination is non-trivial. We present a new solution for a near-perfect sketch-based brushing technique, where we exploit a convolutional neural network (CNN) for estimating the intended data selection from a fast and simple click-and-drag interaction and from the data distribution in the visualization. Our key contributions include a drastically reduced error rate-now below 3%, i.e., less than half of the so far best accuracy- and an extension to a larger variety of selected data subsets, going beyond previous limitations due to linear estimation models.",
    pdf = "pdfs/eurovis18.pdf",
    images = "images/cnn.png",
    thumbnails = "images/cnn.png",
    publisher = "The Eurographics Association and John Wiley and Sons Ltd.",
    issn = "1467-8659",
    doi = "10.1111/cgf.13405"
    }
    [Bibtex]
    @ARTICLE {Furmanova2018COZOID,
    author = "Furmanov{\'a}, Katar{\'\i}na and By{\v{s}}ka, Jan and Gr{\"o}ller, Eduard M and Viola, Ivan and Pale{\v{c}}ek, Jan J and Kozl{\'i}kov{\'a}, Barbora",
    title = "COZOID: contact zone identifier for visual analysis of protein-protein interactions",
    journal = "BMC bioinformatics",
    year = "2018",
    abstract = "BackgroundStudying the patterns of protein-protein interactions (PPIs) is fundamental for understanding the structure and function of protein complexes. The exploration of the vast space of possible mutual configurations of interacting proteins and their contact zones is very time consuming and requires the proteomic expert knowledge.ResultsIn this paper, we propose a novel tool containing a set of visual abstraction techniques for the guided exploration of PPI configuration space. It helps proteomic experts to select the most relevant configurations and explore their contact zones at different levels of detail. The system integrates a set of methods that follow and support the workflow of proteomics experts. The first visual abstraction method, the Matrix view, is based on customized interactive heat maps and provides the users with an overview of all possible residue-residue contacts in all PPI configurations and their interactive filtering. In this step, the user can traverse all input PPI configurations and obtain an overview of their interacting amino acids. Then, the models containing a particular pair of interacting amino acids can be selectively picked and traversed. Detailed information on the individual amino acids in the contact zones and their properties is presented in the Contact-Zone list-view. The list-view provides a comparative tool to rank the best models based on the similarity of their contacts to the template-structure contacts. All these techniques are interactively linked with other proposed methods, the Exploded view and the Open-Book view, which represent individual configurations in three-dimensional space. These representations solve the high overlap problem associated with many configurations. Using these views, the structural alignment of the best models can also be visually confirmed.ConclusionsWe developed a system for the exploration of large sets of protein-protein complexes in a fast and intuitive way. The usefulness of our system has been tested and verified on several docking structures covering the three major types of PPIs, including coiled-coil, pocket-string, and surface-surface interactions. Our case studies prove that our tool helps to analyse and filter protein-protein complexes in a fraction of the time compared to using previously available techniques.",
    images = "images/cozoid.jpg",
    thumbnails = "images/cozoid.jpg"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Stoppel-2018-SSW,
    author = "Sergej Stoppel and Stefan Bruckner",
    title = "Smart Surrogate Widgets for Direct Volume Manipulation",
    booktitle = "Proceedings of IEEE PacificVis 2018",
    year = "2018",
    pages = "36--45",
    month = "apr",
    abstract = "Interaction is an essential aspect in volume visualization, yet commonmanipulation tools such as bounding boxes or clipping planewidgets provide rather crude tools as they neglect the complex structureof the underlying data. In this paper, we introduce a novelvolume interaction approach based on smart widgets that are automaticallyplaced directly into the data in a visibility-driven manner.By adapting to what the user actually sees, they act as proxies thatallow for goal-oriented modifications while still providing an intuitiveset of simple operations that is easy to control. In particular, ourmethod is well-suited for direct manipulation scenarios such as touchscreens, where traditional user interface elements commonly exhibitlimited utility. To evaluate out approach we conducted a qualitativeuser study with nine participants with various backgrounds.",
    pdf = "pdfs/Stoppel-2018-SSW.pdf",
    images = "images/Stoppel-2018-SSW.jpg",
    thumbnails = "images/Stoppel-2018-SSW.png",
    youtube = "https://www.youtube.com/watch?v=wMRw-W0SrLk",
    event = "IEEE PacificVis 2018",
    keywords = "smart interfaces, volume manipulation, volume visualization",
    doi = "10.1109/PacificVis.2018.00014",
    project = "MetaVis"
    }

2017

    [Bibtex]
    @ARTICLE {UiB2017Ivan,
    author = "Kolesar, Ivan",
    title = "Partial Spatiotemporal Abstraction for Comparative Visualization of Molecular Processes",
    journal = "Universitetet i Bergen",
    year = "2017"
    }
    [PDF] [Bibtex]
    @ARTICLE {matkovic2017quantitative,
    author = "Matkovi{\'c}, Kre{\v{s}}imir and Abraham, Hrvoje and Jelovi{\'c}, Mario and Hauser, Helwig",
    title = "Quantitative externalization of visual data analysis results using local regression models",
    journal = "International Cross-Domain Conference for Machine Learning and Knowledge Extraction",
    year = "2017",
    pages = "199-218",
    abstract = "Both interactive visualization and computational analysis
    methods are useful for data studies and an integration of both approaches
    is promising to successfully combine the benefits of both methodologies.
    In interactive data exploration and analysis workflows, we need successful
    means to quantitatively externalize results from data studies, amounting
    to a particular challenge for the usually qualitative visual data analysis.
    In this paper, we propose a hybrid approach in order to quantitatively
    externalize valuable findings from interactive visual data exploration and
    analysis, based on local linear regression models. The models are built on
    user-selected subsets of the data, and we provide a way of keeping track
    of these models and comparing them. As an additional benefit, we also
    provide the user with the numeric model coefficients. Once the models are
    available, they can be used in subsequent steps of the workflow. A modelbased
    optimization can then be performed, for example, or more complex
    models can be reconstructed using an inversion of the local models. We
    study two datasets to exemplify the proposed approach, a meteorological
    data set for illustration purposes and a simulation ensemble from the
    automotive industry as an actual case study.",
    pdf = "pdfs/Matkovic2017.pdf",
    thumbnails = "images/matkovic_10.png"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Stoppel-2017-VPI,
    author = "Sergej Stoppel and Stefan Bruckner",
    title = "Vol²velle: Printable Interactive Volume Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "861--870",
    month = "jan",
    abstract = "Interaction is an indispensable aspect of data visualization. The  presentation of volumetric data, in particular, often significantly  benefits from interactive manipulation of parameters such as transfer  functions, rendering styles, or clipping planes. However, when we  want to create hardcopies of such visualizations, this essential  aspect is lost. In this paper, we present a novel approach for creating  hardcopies of volume visualizations which preserves a certain degree  of interactivity. We present a method for automatically generating  Volvelles, printable tangible wheel charts that can be manipulated  to explore different parameter settings. Our interactive system allows  the flexible mapping of arbitrary visualization parameters and supports  advanced features such as linked views. The resulting designs can  be easily reproduced using a standard printer and assembled within  a few minutes.",
    pdf = "pdfs/Stoppel-2017-VPI.pdf",
    images = "images/Stoppel-2017-VPI.jpg",
    thumbnails = "images/Stoppel-2017-VPI.png",
    youtube = "https://www.youtube.com/watch?v=Z1K8t-FCiXI",
    doi = "10.1109/TVCG.2016.2599211",
    event = "IEEE SciVis 2016",
    keywords = "physical visualization, interaction, volume visualization, illustrative visualization",
    location = "Baltimore, USA"
    }
    [PDF] [Bibtex]
    @ARTICLE {Furmanova2017Ligand,
    author = "Furmanov{\'a}, Katar{\'\i}na and Jare{\v{s}}ov{\'a}, Miroslava and By{\v{s}}ka, Jan and Jur{\v{c}}{\'i}k, Adam and Parulek, J{\'u}lius and Hauser, Helwig and Kozl{\'i}kov{\'a}, Barbora",
    title = "Interactive exploration of ligand transportation through protein tunnels",
    journal = "BMC Bioinformatics",
    year = "2017",
    volume = "18(Suppl 2)",
    number = "22",
    month = "feb",
    abstract = "Background: Protein structures and their interaction with ligands have been in the focus of biochemistry andstructural biology research for decades. The transportation of ligand into the protein active site is often complexprocess, driven by geometric and physico-chemical properties, which renders the ligand path full of jitter andimpasses. This prevents understanding of the ligand transportation and reasoning behind its behavior along the path.Results: To address the needs of the domain experts we design an explorative visualization solution based on amulti-scale simplification model. It helps to navigate the user to the most interesting parts of the ligand trajectory byexploring different attributes of the ligand and its movement, such as its distance to the active site, changes of aminoacids lining the ligand, or ligand “stuckness�. The process is supported by three linked views – 3D representation of thesimplified trajectory, scatterplot matrix, and bar charts with line representation of ligand-lining amino acids.Conclusions: The usage of our tool is demonstrated on molecular dynamics simulations provided by the domainexperts. The tool was tested by the domain experts from protein engineering and the results confirm that it helps tonavigate the user to the most interesting parts of the ligand trajectory and to understand the ligand behavior",
    pdf = "pdfs/Furmanova2017.pdf",
    images = "images/Furmanova2016Interactive.png",
    thumbnails = "images/Furmanova2016Interactive.png",
    note = "https://doi.org/10.1186/s12859-016-1448-0"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {newMahalanobisBrush,
    author = "Fan, Chaoran and Hauser, Helwig",
    title = "{User-study Based Optimization of Fast and Accurate Mahalanobis Brushing in Scatterplots}",
    booktitle = "Vision, Modeling & Visualization",
    year = "2017",
    editor = "Matthias Hullin and Reinhard Klein and Thomas Schultz and Angela Yao",
    publisher = "The Eurographics Association",
    abstract = "Brushing is at the heart of most modern visual analytics solutions with coordinated, multiple views and effective brushing is crucial for swift and efficient processes in data exploration and analysis. Given a certain data subset that the user wishes to brush in a data visualization, traditional brushes are usually either accurate (like the lasso) or fast (e.g., a simple geometry like a rectangle or circle). In this paper, we now present a new, fast and accurate brushing technique for scatterplots, based on the Mahalanobis brush, which we have extended and then optimized using data from a user study. We explain the principal, sketchbased model of our new brushing technique (based on a simple click-and-drag interaction), the details of the user study and the related parameter optimization, as well as a quantitative evaluation, considering efficiency, accuracy, and also a comparison with the original Mahalanobis brush.",
    pdf = "pdfs/vmv-final.pdf",
    images = "images/Mahalanobis.png",
    thumbnails = "images/Mahalanobis.png",
    isbn = "978-3-03868-049-9",
    doi = "10.2312/vmv.20171262"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Smit-2017-PAS,
    author = "Noeska Smit and Kai Lawonn and Annelot Kraima and Marco DeRuiter and Hessam Sokooti and Stefan Bruckner and Elmar Eisemann and Anna Vilanova",
    title = "PelVis: Atlas-based Surgical Planning for Oncological Pelvic Surgery",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "741--750",
    month = "jan",
    abstract = "Due to the intricate relationship between the pelvic organs and vital  structures, such as vessels and nerves, pelvic anatomy is often considered  to be complex to comprehend. In oncological pelvic surgery, a trade-off  has to be made between complete tumor resection and preserving function  by preventing damage to the nerves. Damage to the autonomic nerves  causes undesirable post-operative side-effects such as fecal and  urinal incontinence, as well as sexual dysfunction in up to 80 percent  of the cases. Since these autonomic nerves are not visible in pre-operative  MRI scans or during surgery, avoiding nerve damage during such a  surgical procedure becomes challenging. In this work, we present  visualization methods to represent context, target, and risk structures  for surgical planning. We employ distance-based and occlusion management  techniques in an atlas-based surgical planning tool for oncological  pelvic surgery. Patient-specific pre-operative MRI scans are registered  to an atlas model that includes nerve information. Through several  interactive linked views, the spatial relationships and distances  between the organs, tumor and risk zones are visualized to improve  understanding, while avoiding occlusion. In this way, the surgeon  can examine surgically relevant structures and plan the procedure  before going into the operating theater, thus raising awareness of  the autonomic nerve zone regions and potentially reducing post-operative  complications. Furthermore, we present the results of a domain expert  evaluation with surgical oncologists that demonstrates the advantages  of our approach.",
    pdf = "pdfs/Smit-2017-PAS.pdf",
    images = "images/Smit-2017-PAS.jpg",
    thumbnails = "images/Smit-2017-PAS.png",
    youtube = "https://www.youtube.com/watch?v=vHp05I5-hp8",
    doi = "10.1109/TVCG.2016.2598826",
    event = "IEEE SciVis 2016",
    keywords = "atlas, surgical planning, medical visualization",
    location = "Baltimore, USA"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Lind-2017-CCR,
    author = "Andreas Johnsen Lind and Stefan Bruckner",
    title = "Comparing Cross-Sections and 3D Renderings for Surface Matching Tasks using Physical Ground Truths",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "781--790",
    month = "jan",
    abstract = "Within the visualization community there are some well-known techniques  for visualizing 3D spatial data and some general assumptions about  how perception affects the performance of these techniques in practice.  However, there is a lack of empirical research backing up the possible  performance differences among the basic techniques for general tasks.  One such assumption is that 3D renderings are better for obtaining  an overview, whereas cross sectional visualizations such as the commonly  used Multi- Planar Reformation (MPR) are better for supporting detailed  analysis tasks. In the present study we investigated this common  assumption by examining the difference in performance between MPR  and 3D rendering for correctly identifying a known surface. We also  examined whether prior experience working with image data affects  the participant’s performance, and whether there was any difference  between interactive or static versions of the visualizations. Answering  this question is important because it can be used as part of a scientific  and empirical basis for determining when to use which of the two  techniques. An advantage of the present study compared to other studies  is that several factors were taken into account to compare the two  techniques. The problem was examined through an experiment with 45  participants, where physical objects were used as the known surface  (ground truth). Our findings showed that: 1. The 3D renderings largely  outperformed the cross sections; 2. Interactive visualizations were  partially more effective than static visualizations; and 3. The high  experience group did not generally outperform the low experience  group.",
    pdf = "pdfs/Lind-2017-CCR.pdf",
    images = "images/Lind-2017-CCR.jpg",
    thumbnails = "images/Lind-2017-CCR.png",
    doi = "10.1109/TVCG.2016.2598602",
    event = "IEEE SciVis 2016",
    keywords = "human-computer interaction, quantitative evaluation, volume visualization",
    location = "Baltimore, USA"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {LawonnSmit-2017-Survey,
    author = "Lawonn, K. and Smit, N.N. and B{\"u}hler, K. and Preim, B.",
    title = "A Survey on Multimodal Medical Data Visualization",
    journal = "Computer Graphics Forum",
    year = "2017",
    volume = "37",
    number = "1",
    pages = "413-438",
    abstract = "Multi-modal data of the complex human anatomy contain a wealth of information. To visualize and explore such data, techniques for emphasizing important structures and controlling visibility are essential. Such fused overview visualizations guide physicians to suspicious regions to be analysed in detail, e.g. with slice-based viewing. We give an overview of state of the art in multi-modal medical data visualization techniques. Multi-modal medical data consist of multiple scans of the same subject using various acquisition methods, often combining multiple complimentary types of information. Three-dimensional visualization techniques for multi-modal medical data can be used in diagnosis, treatment planning, doctor–patient communication as well as interdisciplinary communication. Over the years, multiple techniques have been developed in order to cope with the various associated challenges and present the relevant information from multiple sources in an insightful way. We present an overview of these techniques and analyse the specific challenges that arise in multi-modal data visualization and how recent works aimed to solve these, often using smart visibility techniques. We provide a taxonomy of these multi-modal visualization applications based on the modalities used and the visualization techniques employed. Additionally, we identify unsolved problems as potential future research directions.",
    pdf = "pdfs/LawonnSmit-2017-MULTI.pdf",
    images = "images/LawonnSmit-2017-MULTI.jpg",
    thumbnails = "images/LawonnSmit-2017-MULTI-TN.png",
    issn = "1467-8659",
    url = "http://dx.doi.org/10.1111/cgf.13306",
    doi = "10.1111/cgf.13306",
    keywords = "medical imaging, visualization, scientific visualization, visualization, volume visualization, visualization, Medical Imaging [Visualization], Scientific Visualization [Visualization], Volume Visualization [Visualization], Multimodal Medical Data"
    }
    [PDF] [Bibtex]
    @ARTICLE {Kocincova2017SS,
    author = "Kocincov{\'a}, Lucia and Jare{\v{s}}ov{\'a}, Miroslava and By{\v{s}}ka, Jan and Parulek, J{\'u}lius and Hauser, Helwig and Kozl{\'i}kov{\'a}, Barbora",
    title = "Comparative visualization of protein secondary structures",
    journal = "BMC Bioinformatics",
    year = "2017",
    volume = "18(Suppl 2)",
    number = "23",
    month = "feb",
    abstract = "Background: Protein function is determined by many factors, namely by its constitution, spatial arrangement, anddynamic behavior. Studying these factors helps the biochemists and biologists to better understand the proteinbehavior and to design proteins with modified properties. One of the most common approaches to these studies is tocompare the protein structure with other molecules and to reveal similarities and differences in their polypeptidechains.Results: We support the comparison process by proposing a new visualization technique that bridges the gapbetween traditionally used 1D and 3D representations. By introducing the information about mutual positions ofprotein chains into the 1D sequential representation the users are able to observe the spatial differences between theproteins without any occlusion commonly present in 3D view. Our representation is designed to serve namely forcomparison of multiple proteins or a set of time steps of molecular dynamics simulation.Conclusions: The novel representation is demonstrated on two usage scenarios. The first scenario aims to compare aset of proteins from the family of cytochromes P450 where the position of the secondary structures has a significantimpact on the substrate channeling. The second scenario focuses on the protein flexibility when by comparing a setof time steps our representation helps to reveal the most dynamically changing parts of the protein chain.",
    pdf = "pdfs/Kocincova2017.pdf",
    images = "images/Lucia2016Comparative.png",
    thumbnails = "images/Lucia2016Comparative.png",
    note = "https://doi.org/10.1186/s12859-016-1449-z"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Solteszova-2017-OFS,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and {\AA}smund Birkeland and Sergej Stoppel and Ivan Viola and Stefan Bruckner",
    title = "Output-Sensitive Filtering of Streaming Volume Data",
    journal = "Computer Graphics Forum",
    year = "2017",
    volume = "36",
    number = "1",
    pages = "249--262",
    month = "jan",
    abstract = "Real-time volume data acquisition poses substantial challenges for  the traditional visualization pipeline where data enhancement is  typically seen as a pre-processing step. In the case of 4D ultrasound  data, for instance, costly processing operations to reduce noise  and to remove artifacts need to be executed for every frame. To enable  the use of high quality filtering operations in such scenarios, we  propose an output-sensitive approach to the visualization of streaming  volume data. Our method evaluates the potential contribution of all  voxels to the final image, allowing us to skip expensive processing  operations that have little or no effect on the visualization. As  filtering operations modify the data values which may affect the  visibility, our main contribution is a fast scheme to predict their  maximum effect on the final image. Our approach prioritizes filtering  of voxels with high contribution to the final visualization based  on a maximal permissible error per pixel. With zero permissible error,  the optimized filtering will yield a result identical to filtering  of the entire volume. We provide a thorough technical evaluation  of the approach and demonstrate it on several typical scenarios that  require on-the-fly processing.",
    pdf = "pdfs/Solteszova-2017-OFS.pdf",
    images = "images/Solteszova-2017-OFS.jpg",
    thumbnails = "images/Solteszova-2017-OFS.png",
    youtube = "https://www.youtube.com/watch?v=xGPs560ttp0",
    doi = "10.1111/cgf.12799",
    keywords = "output-sensitive processing, volume data, filtering"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Mindek-2017-DVN,
    author = "Peter Mindek and Gabriel Mistelbauer and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Data-Sensitive Visual Navigation",
    journal = "Computers \& Graphics",
    year = "2017",
    volume = "67",
    pages = "77--85",
    month = "oct",
    abstract = "In visualization systems it is often the case that thechanges of the input parameters are not proportional to the visualchange of the generated output. In this paper, we propose a model forenabling data-sensitive navigation for user-interface elements. Thismodel is applied to normalize the user input according to the visualchange, and also to visually communicate this normalization. In thisway, the exploration of heterogeneous data using common interactionelements can be performed in an efficient way. We apply our model to thefield of medical visualization and present guided navigation tools fortraversing vascular structures and for camera rotation around 3Dvolumes. The presented examples demonstrate that the model scales touser-interface elements where multiple parameters are setsimultaneously.",
    pdf = "pdfs/Mindek-2017-DVN.pdf",
    images = "images/Mindek-2017-DVN.jpg",
    thumbnails = "images/Mindek-2017-DVN.png",
    youtube = "https://www.youtube.com/watch?v=FnhbjX7BRXI",
    note = "SCCG 2017 Best Paper Award",
    doi = "10.1016/j.cag.2017.05.012",
    event = "SCCG 2017",
    keywords = "navigation, exploration, medical visualization",
    location = "Mikulov, Czech Republic",
    project = "MetaVis"
    }
    [PDF] [Bibtex]
    @INPROCEEDINGS {vad_viktor-2017-WVE,
    author = "Viktor Vad and Jan By\v{s}ka and Adam Jur\v{c}\'{i}k and Ivan Viola and Meister Eduard Gr{\"o}ller and Helwig Hauser and Sergio M. Margues and Ji\v{r}\'{i} Damborsk\'{y} and Barbora Kozl\'{i}kov\'{a}",
    title = "Watergate: Visual Exploration of Water Trajectories in Protein Dynamics",
    booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine 2017",
    year = "2017",
    pages = "33--42",
    abstract = "The function of proteins is tightly related to their interactions with other molecules. The study of such interactions often requires to track the molecules that enter or exit specific regions of the proteins. This is investigated with molecular dynamics simulations, producing the trajectories of thousands of water molecules during hundreds of thousands of time steps. To ease the exploration of such rich spatio-temporal data, we propose a novel workflow for the analysis and visualization of large sets of water-molecule trajectories. Our solution consists of a set of visualization techniques, which help biochemists to classify, cluster, and filter the trajectories and to explore the properties and behavior of selected subsets in detail. Initially, we use an interactive histogram and a time-line visualization to give an overview of all water trajectories and select the interesting ones for further investigation. Further, we depict clusters of trajectories in a novel 2D representation illustrating the flows of water molecules. These views are interactively linked with a 3D representation where we show individual paths, including their simplification, as well as extracted statistical information displayed by isosurfaces. The proposed solution has been designed in tight collaboration with experts to support specific tasks in their scientific workflows. They also conducted several case studies to evaluate the usability and effectiveness of our new solution with respect to their research scenarios. These confirmed that our proposed solution helps in analyzing water trajectories and in extracting the essential information out of the large amount of input data.",
    pdf = "pdfs/Vad_Victor2017.pdf",
    images = "images/Watergate.png",
    thumbnails = "images/Watergate.png",
    proceedings = "In Proceedings of Eurographics Workshop on Visual Computing for Biology and Medicine",
    location = "September, 2017 Bremen, Germany",
    url = "https://www.cg.tuwien.ac.at/research/publications/2017/vad_viktor-2017-WVE/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Diehl-2017-AVA,
    author = "Alexandra Diehl and Leandro Pelorosso and Kresimir Matkovic and Juan Ruiz and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Albero: A Visual Analytics Approach for Probabilistic Weather Forecasting",
    journal = "Computer Graphics Forum",
    year = "2017",
    volume = "36",
    number = "7",
    pages = "135--144",
    month = "oct",
    abstract = "Probabilistic weather forecasts are amongst the most popularways to quantify numerical forecast uncertainties. The analogregression method can quantify uncertainties and express them asprobabilities. The method comprises the analysis of errorsfrom a large database of past forecasts generated with a specificnumerical model and observational data. Current visualizationtools based on this method are essentially automated and provide limitedanalysis capabilities. In this paper, we propose a novelapproach that breaks down the automatic process using the experience andknowledge of the users and creates a new interactivevisual workflow. Our approach allows forecasters to study probabilisticforecasts, their inner analogs and observations, theirassociated spatial errors, and additional statistical information bymeans of coordinated and linked views. We designed thepresented solution following a participatory methodology together withdomain experts. Several meteorologists with differentbackgrounds validated the approach. Two case studies illustrate thecapabilities of our solution. It successfully facilitates theanalysis of uncertainty and systematic model biases for improveddecision-making and process-quality measurements.",
    pdf = "pdfs/Diehl-2017-AVA.pdf",
    images = "images/Diehl-2017-AVA.jpg",
    thumbnails = "images/Diehl-2017-AVA.png",
    youtube = "https://www.youtube.com/watch?v=-yqoeEgkz28",
    doi = "10.1111/cgf.13279",
    keywords = "visual analytics, weather forecasting, uncertainty",
    project = "MetaVis"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Swoboda-2017-VQI,
    author = "Nicolas Swoboda and Judith Moosburner and Stefan Bruckner and Jai Y. Yu and Barry J. Dickson and Katja B{\"u}hler",
    title = "Visualization and Quantification for Interactive Analysis of Neural Connectivity in Drosophila",
    journal = "Computer Graphics Forum",
    year = "2017",
    volume = "36",
    number = "1",
    pages = "160--171",
    month = "jan",
    abstract = "Neurobiologists investigate the brain of the common fruit fly Drosophila  melanogaster to discover neural circuits and link them to complex  behavior. Formulating new hypotheses about connectivity requires  potential connectivity information between individual neurons, indicated  by overlaps of arborizations of two or more neurons. As the number  of higher order overlaps (i.e., overlaps of three or more arborizations)  increases exponentially with the number of neurons under investigation,  visualization is impeded by clutter and quantification becomes a  burden. Existing solutions are restricted to visual or quantitative  analysis of pairwise overlaps, as they rely on precomputed overlap  data. We present a novel tool that complements existing methods for  potential connectivity exploration by providing for the first time  the possibility to compute and visualize higher order arborization  overlaps on the fly and to interactively explore this information  in both its spatial anatomical context and on a quantitative level.  Qualitative evaluation by neuroscientists and non-experts demonstrated  the utility and usability of the tool",
    pdf = "pdfs/Swoboda-2017-VQI.pdf",
    images = "images/Swoboda-2017-VQI.jpg",
    thumbnails = "images/Swoboda-2017-VQI.png",
    youtube = "https://www.youtube.com/watch?v=bycWGQQpqks",
    doi = "10.1111/cgf.12792",
    keywords = "visual analysis, neurobiology"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Turkay2017VIS,
    author = "C. Turkay and E. Kaya and S. Balcisoy and H. Hauser",
    title = "Designing Progressive and Interactive Analytics Processes for High-Dimensional Data Analysis",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "PP",
    number = "99",
    pages = "1-1",
    month = "jan",
    abstract = "In interactive data analysis processes, the dialogue between the human and the computer is the enabling mechanism that can lead to actionable observations about the phenomena being investigated. It is of paramount importance that this dialogue is not interrupted by slow computational mechanisms that do not consider any known temporal human-computer interaction characteristics that prioritize the perceptual and cognitive capabilities of the users. In cases where the analysis involves an integrated computational method, for instance to reduce the dimensionality of the data or to perform clustering, such non-optimal processes are often likely. To remedy this, progressive computations, where results are iteratively improved, are getting increasing interest in visual analytics. In this paper, we present techniques and design considerations to incorporate progressive methods within interactive analysis processes that involve high-dimensional data. We define methodologies to facilitate processes that adhere to the perceptual characteristics of users and describe how online algorithms can be incorporated within these. A set of design recommendations and according methods to support analysts in accomplishing high-dimensional data analysis tasks are then presented. Our arguments and decisions here are informed by observations gathered over a series of analysis sessions with analysts from finance. We document observations and recommendations from this study and present evidence on how our approach contribute to the efficiency and productivity of interactive visual analysis sessions involving high-dimensional data.",
    pdf = "pdfs/Turkay2017VIS.pdf",
    images = "images/Turkay-2017-VIS.png",
    thumbnails = "images/Turkay-2017-VIS.png",
    doi = "10.1109/TVCG.2016.2598470",
    issn = "1077-2626"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Kolesar-2017-FCC,
    author = "Ivan Kolesar and Stefan Bruckner and Ivan Viola and Helwig Hauser",
    title = "A Fractional Cartesian Composition Model for Semi-spatial Comparative Visualization Design",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "851--860",
    month = "jan",
    abstract = "The study of spatial data ensembles leads to substantial visualization  challenges in a variety of applications. In this paper, we present  a model for comparative visualization that supports the design of  according ensemble visualization solutions by partial automation.  We focus on applications, where the user is interested in preserving  selected spatial data characteristics of the data as much as possible—even  when many ensemble members should be jointly studied using comparative  visualization. In our model, we separate the design challenge into  a minimal set of user-specified parameters and an optimization component  for the automatic configuration of the remaining design variables.  We provide an illustrated formal description of our model and exemplify  our approach in the context of several application examples from  different domains in order to demonstrate its generality within the  class of comparative visualization problems for spatial data ensembles.",
    pdf = "pdfs/Kolesar-2017-FCC.pdf",
    images = "images/Kolesar-2017-FCC.jpg",
    thumbnails = "images/Kolesar-2017-FCC.png",
    youtube = "https://www.youtube.com/watch?v=_zk67fmryok",
    doi = "10.1109/TVCG.2016.2598870",
    event = "IEEE SciVis 2016",
    keywords = "visualization models, integrating spatial and non-spatial data visualization, design methodologies",
    location = "Baltimore, USA",
    project = "physioillustration"
    }

2016

    [Bibtex]
    @MISC {moller2016winter,
    author = "Moller, Torsten and Brambilla, Andrea and Hotz, Ingrid and Gordon, Kindlmann and Schulz, Hans Jorg and Hauser, Helwig and Brodtkorb, Andre",
    title = "Geilo Winter School in eScience on Scientific Visualization",
    year = "2016",
    thumbnails = "images/winter.png",
    note = "https://www.cs.ubc.ca/~tmm/talks.html",
    journal = "Geilo Winter School of eSience"
    }
    [PDF] [Bibtex]
    @ARTICLE {preim2016visual,
    author = "Preim, Bernhard and Klemm, Paul and Hauser, Helwig and Hegenscheid, Katrin and Oeltze, Steffen and Toennies, Klaus and V{\"o}lzke, Henry",
    title = "Visual analytics of image-centric cohort studies in epidemiology",
    journal = "Visualization in Medicine and Life Sciences III, Springer",
    year = "2016",
    pages = "221-248",
    abstract = "Epidemiology characterizes the influence of causes to disease and health conditions of defined populations. Cohort studies are population-based studies involving usually large numbers of randomly selected individuals and comprising numerous attributes, ranging from self-reported interview data to results from various medical examinations, e.g., blood and urine samples. Since recently, medical imaging has been used as an additional instrument to assess risk factors and potential prognostic information. In this chapter, we discuss such studies and how the evaluation may benefit from visual analytics. Cluster analysis to define groups, reliable image analysis of organs in medical imaging data and shape space exploration to characterize anatomical shapes are among the visual analytics tools that may enable epidemiologists to fully exploit the potential of their huge and complex data. To gain acceptance, visual analytics tools need to complement more classical epidemiologic tools, primarily hypothesis-driven statistical analysis.",
    pdf = "pdfs/Preim2016_Centric.pdf",
    thumbnails = "images/Preim2016_Centric_1.png"
    }
    [PDF] [Bibtex]
    @ARTICLE {brambilla2016comparative,
    author = "Brambilla, Andrea and Angelelli, Paolo and Andreassen, yvind and Hauser, Helwig",
    title = "Comparative visualization of multiple time surfaces by planar surface reformation",
    journal = "Pacific Visualization Symposium (PacificVis), 2016 IEEE",
    year = "2016",
    pages = "88--95",
    abstract = "Comparing time surfaces at different integration time points, or
    from different seeding areas, can provide valuable insight into
    transport phenomena of fluid flows. Such a comparative study is
    challenging due to the often convoluted shapes of these surfaces.
    We propose a new approach for comparative flow visualization
    based on time surfaces, which exploits the idea of embedding the
    surfaces in a carefully designed, reformed 2D visualization space.
    Such an embedding enables new opportunities for comparative flow
    visualization. We present three different strategies for comparative
    flow visualization that take advantage of the reformation. By reforming the time surfaces, we not only mitigate occlusion issues,
    but we can devote also the third dimension of the visualization
    space to the comparative aspects of the visualization. Our approach
    is effective in a variety of flow study cases. The direct comparison
    of individual time surfaces reveals small scale differences and fine
    details about the fluid’s motion. The concurrent study of multiple
    surface families enables the identification and the comparison of
    the most prominent motion patterns. This work was developed in
    close collaboration with an expert in fluid dynamics, who assessed
    the potential usefulness of this approach in his field.",
    pdf = "pdfs/bambarilla.pdf",
    thumbnails = "images/bambarilla_1.png"
    }
    [PDF] [Bibtex]
    @ARTICLE {radovs2016towards,
    author = "Rado{\v{s}}, Sanjin and Splechtna, Rainer and Matkovi{\'c}, K and Juras, M and Gr{\"o}ller, Eduard and Hauser, Helwig",
    title = "Towards quantitative visual analytics with structured brushing and linked statistics",
    journal = "Computer Graphics Forum",
    year = "2016",
    volume = "35",
    number = "3",
    pages = "251--260",
    abstract = "Until now a lot of visual analytics predominantly delivers qualitative results—based, for example, on a continuous color map or a detailed spatial encoding. Important target applications, however, such as medical diagnosis and decision making, clearly benefit from quantitative analysis results. In this paper we propose several specific extensions to the well-established concept oflinking&brushing in order to make the analysis results more quantitative. We structure the brushing space in order to improvethe reproducibility of the brushing operation, e.g., by introducing the percentile grid. We also enhance the linked visualization with overlaid descriptive statistics to enable a more quantitative reading of the resulting focus+context visualization. Addition-ally, we introduce two novel brushing techniques: the percentile brush and the Mahalanob is brush. Both use the underlying data to support statistically meaningful interactions with the data. We illustrate the use of the new techniques in the context of two case studies, one based on meteorological data and the other one focused on data from the automotive industry where we evaluate a shaft design in the context of mechanical power transmission in cars.",
    pdf = "pdfs/Rado-_et_al-2016-Computer_Graphics_Forum.pdf",
    thumbnails = "images/Rado-_et_al-2016-Computer_Graphics_Forum_1.png"
    }
    [DOI] [Bibtex]
    @ARTICLE {Michael2016Visual,
    author = "Michael Krone and Barbora Kozlikova and Norbert Lindow and Marc Baaden and Daniel Baum, and Julius Parulek and Hans-Christian Hege and Ivan Viola",
    title = "Visual Analysis of Biomolecular Cavities: State of the Art",
    journal = "Computer Graphics Forum",
    year = "2016",
    abstract = "In this report we review and structure the branch of molecular visualization that is concerned with the visual analysis of cavities in macromolecular protein structures. First the necessary background, the domain terminology, and the goals of analytical reasoning are introduced. Based on a comprehensive collection of relevant research works, we present a novel classification for cavity detection approaches and structure them into four distinct classes: grid-based, Voronoi-based, surface-based, and probe-based methods. The subclasses are then formed by their combinations. We match these approaches with corresponding visualization technologies starting with direct 3D visualization, followed with non-spatial visualization techniques that for example abstract the interactions between structures into a relational graph, straighten the cavity of interest to see its profile in one view, or aggregate the time sequence into a single contour plot. We also discuss the current state of methods for the visual analysis of cavities in dynamic data such as molecular dynamics simulations. Finally, we give an overview of the most common tools that are actively developed and used in the structural biology and biochemistry research. Our report is concluded by an outlook on future challenges in the field.",
    images = "images/STARcavities2016.png",
    thumbnails = "images/STARcavities2016.png",
    publisher = "The Eurographics Association and John Wiley \& Sons Ltd.",
    issn = "1467-8659",
    doi = "10.1111/cgf.12928",
    project = "physioillustration"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Stoppel-2016-GIR,
    author = "Sergej Stoppel and Erlend Hodneland and Helwig Hauser and Stefan Bruckner",
    title = "Graxels: Information Rich Primitives for the Visualization of Time-Dependent Spatial Data",
    booktitle = "Proceedings of VCBM 2016",
    year = "2016",
    pages = "183--192",
    month = "sep",
    abstract = "Time-dependent volumetric data has important applications in areas  as diverse as medicine, climatology, and engineering. However, the  simultaneous quantitative assessment of spatial and temporal features  is very challenging. Common visualization techniques show either  the whole volume in one time step (for example using direct volume  rendering) or let the user select a region of interest (ROI) for  which a collection of time-intensity curves is shown. In this paper,  we propose a novel approach that dynamically embeds quantitative  detail views in a spatial layout. Inspired by the concept of small  multiples, we introduce a new primitive graxel (graph pixel). Graxels  are view dependent primitives of time-intensity graphs, generated  on-the-fly by aggregating per-ray information over time and image  regions. Our method enables the detailed feature-aligned visual analysis  of time-dependent volume data and allows interactive refinement and  filtering. Temporal behaviors like frequency relations, aperiodic  or periodic oscillations and their spatial context are easily perceived  with our method. We demonstrate the power of our approach using examples  from medicine and the natural sciences.",
    pdf = "pdfs/Stoppel-2016-GIR.pdf",
    images = "images/Stoppel-2016-GIR.jpg",
    thumbnails = "images/Stoppel-2016-GIR.png",
    youtube = "https://www.youtube.com/watch?v=UsClj3ytd0Y",
    doi = "10.2312/vcbm.20161286",
    event = "VCBM 2016",
    keywords = "time-dependent data, volume data, small multiples",
    location = "Bergen, Norway"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Labschuetz-2016-JJC,
    author = "Matthias Labsch{\"u}tz and Stefan Bruckner and Meister Eduard Gr{\"o}ller and Markus Hadwiger and Peter Rautek",
    title = "JiTTree: A Just-in-Time Compiled Sparse GPU Volume Data Structure",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2016",
    volume = "22",
    number = "1",
    pages = "1025--1034",
    month = "jan",
    abstract = "Abstract—Sparse volume data structures enable the efficient representation  of large but sparse volumes in GPU memory for com putation and visualization.  However, the choice of a specific data structure for a given data  set depends on several factors, such as the memory budget, the sparsity  of the data, and data access patterns. In general, there is no single  optimal sparse data structure, but a set of several candidates with  individual strengths and drawbacks. One solution to this problem  are hybrid data structures which locally adapt themselves to the  sparsity. However, they typically suffer from increased traversal  overhead which limits their utility in many applications. This paper  presents JiTTree, a novel sparse hybrid volume data structure that  uses just-in-time compilation to overcome these problems. By combining  multiple sparse data structures and reducing traversal overhead we  leverage their individual advantages. We demonstrate that hybrid  data structures adapt well to a large range of data sets. They are  especially superior to other sparse data structures for data sets  that locally vary in sparsity. Possible optimization criteria are  memory, performance and a combination thereof. Through just-in-time  (JIT) compilation, JiTTree reduces the traversal overhead of the  resulting optimal data structure. As a result, our hybrid volume  data structure enables efficient computations on the GPU, while being  superior in terms of memory usage when compared to non-hybrid data  structures.",
    pdf = "pdfs/Labschuetz-2016-JJC.pdf",
    images = "images/Labschuetz-2016-JJC.jpg",
    thumbnails = "images/Labschuetz-2016-JJC.png",
    doi = "10.1109/TVCG.2015.2467331",
    event = "IEEE SciVis 2015",
    keywords = "data transformation and representation, GPUs and multi-core architectures, volume rendering",
    location = "Chicago, USA"
    }
    [Bibtex]
    @INPROCEEDINGS {Kolesar2016VCBM,
    author = "Ivan Kolesar and Jan By\v{s}ka and Julius Parulek and Helwig Hauser and Barbora Kozl\'{i}kov\'{a}",
    title = "Unfolding and Interactive Exploration of Protein Tunnels andtheir Dynamics",
    booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine",
    year = "2016",
    pages = "1--10",
    month = "sep",
    abstract = "The presence of tunnels in protein structures substantially influences their reactivity with other molecules. Therefore, studying their properties and changes over time has been in the scope of biochemists for decades. In this paper we introduce a novel approach for comparative visualization and exploration of ensembles of tunnels. Our goal is to overcome occlusion problems present in traditional tunnel representations while providing users a quick way to navigate through the input dataset to identify potentially interesting tunnels. First, we unfold the input tunnels to a 2D representation enabling to observe the mutual position of amino acids forming the tunnel surface and the amount of surface they influence. These 2D images are subsequently described by image moments commonly used in image processing. This way we are able to detect similarities and outliers in the dataset, which are visualized as clusters in a scatterplot graph. The same coloring scheme is used in the linked bar chart enabling to detect the position of the cluster members over time. These views provide a way to select a subset of potentially interesting tunnels that can be further explored in detail using the 2D unfolded view and also traditional 3D representation. The usability of our approach is demonstrated on case studies conducted by the domain experts.",
    images = "images/Kolesar-2016-VCBM.png",
    thumbnails = "images/Kolesar-2016-VCBM-thumbnail.jpg",
    proceedings = "Proceedings of Eurographics Workshop on Visual Computing in Biology and Medicine",
    keywords = "unfolding, storytelling, game visualization",
    location = "Bergen, Norway",
    project = "physioillustration"
    }
    [PDF] [Bibtex]
    @INPROCEEDINGS {Smit2016SLINE,
    author = "Nils Lichtenberg and Noeska Smit and Christian Hansen and Kai Lawonn",
    title = "Sline: Seamless Line Illustration for Interactive Biomedical Visualization",
    booktitle = "Proceedings of VCBM 2016",
    year = "2016",
    month = "sep",
    abstract = "In medical visualization of surface information, problems often arise when visualizing several overlapping structures simultaneously. There is a trade-off between visualizing multiple structures in a detailed way and limiting visual clutter, in order to allow users to focus on the main structures. Illustrative visualization techniques can help alleviate these problems by defining a level of abstraction per structure. However, clinical uptake of these advanced visualization techniques so far has been limited due to the complex parameter settings required. To bring advanced medical visualization closer to clinical application, we propose a novel illustrative technique that offers a seamless transition between various levels of abstraction and detail. Using a single comprehensive parameter, users are able to quickly define a visual representation per structure that fits the visualization requirements for focus and context structures. This technique can be applied to any biomedical context in which multiple surfaces are routinely visualized, such as neurosurgery, radiotherapy planning or drug design. Additionally, we introduce a novel hatching technique, that runs in real-time and does not require texture coordinates. An informal evaluation with experts from different biomedical domains reveals that our technique allows users to design focus-and-context visualizations in a fast and intuitive manner.",
    pdf = "pdfs/Lichtenberg-2016-SLINE.pdf",
    images = "images/Smit-2016-SLINE.PNG",
    thumbnails = "images/Smit-2016-SLINE.jpg",
    proceedings = "Proceedings of Eurographics Workshop on Visual Computing in Biology and Medicine",
    event = "VCBM 2016",
    keywords = "surface rendering, medical visualization, illustrative rendering",
    location = "Bergen, Norway"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Klein-2016-TIV,
    author = "Tobias Klein and Stefan Bruckner and Meister Eduard Gr{\"o}ller and Markus Hadwiger and Peter Rautek",
    title = "Towards Interactive Visual Exploration of Parallel Programs using a Domain-Specific Language",
    booktitle = "Proceedings of the International Workshop on OpenCL 2016",
    year = "2016",
    month = "apr",
    abstract = "The use of GPUs and the massively parallel computing paradigm have  become wide-spread. We describe a framework for the interactive visualization  and visual analysis of the run-time behavior of massively parallel  programs, especially OpenCL kernels. This facilitates understanding  a program's function and structure, finding the causes of possible  slowdowns, locating program bugs, and interactively exploring and  visually comparing different code variants in order to improve performance  and correctness. Our approach enables very specific, user-centered  analysis, both in terms of the recording of the run-time behavior  and the visualization itself. Instead of having to manually write  instrumented code to record data, simple code annotations tell the  source-to-source compiler which code instrumentation to generate  automatically. The visualization part of our framework then enables  the interactive analysis of kernel run-time behavior in a way that  can be very specific to a particular problem or optimization goal,  such as analyzing the causes of memory bank conflicts or understanding  an entire parallel algorithm.",
    pdf = "pdfs/Klein-2016-TIV.pdf",
    images = "images/Klein-2016-TIV.jpg",
    thumbnails = "images/Klein-2016-TIV.png",
    doi = "10.1145/2909437.2909459",
    event = "IWOCL 2016",
    extra = "pdfs/Klein-2016-TIV-Poster.pdf",
    keywords = "domain specific languages, GPU programming, visual exploration",
    location = "Vienna, Austria",
    owner = "bruckner"
    }
    [Bibtex]
    @MISC {Stoppel2015ConfReport,
    author = "Sergej Stoppel",
    title = "Conference Report IEEE VIS 2014",
    month = "January",
    year = "2016",
    abstract = "Conference report about the IEEE VIS 2014 in Paris.",
    images = "images/Shneiderman_Gerson_Pushups.PNG",
    thumbnails = "images/Shneiderman_Gerson_Pushups.PNG",
    url = "http://www.norsigd.no/norsigd_info/nsi-1-15.pdf"
    }
    [PDF] [Bibtex]
    @ARTICLE {Byska2016AnimoAminoMiner,
    author = "Jan By{\v{s}}ka and Mathieu Le Muzic and Eduard M. Gr{\"o}ller and Ivan Viola and Barbora Kozl{\'i}kov{\'a}",
    title = "AnimoAminoMiner: Exploration of Protein Tunnels and their Properties in Molecular Dynamics",
    journal = "Visualization and Computer Graphics, IEEE Transactions on",
    year = "2016",
    volume = "22",
    number = "1",
    pages = "747--756",
    abstract = "In this paper we propose a novel method for the interactive exploration of protein tunnels. The basic principle of our approach is that we entirely abstract from the 3D/4D space the simulated phenomenon is embedded in. A complex 3D structure and its curvature information is represented only by a straightened tunnel centerline and its width profile. This representation focuses on a key aspect of the studied geometry and frees up graphical estate to key chemical and physical properties represented by surroundingamino acids. The method shows the detailed tunnel profile and its temporal aggregation. The profile is interactively linked with a visual overview of all amino acids which are lining the tunnel over time. In this overview, each amino acid is represented by a set of colored lines depicting the spatial and temporal impact of the amino acid on the corresponding tunnel. This representation clearly shows the importance of amino acids with respect to selected criteria. It helps the biochemists to select the candidate amino acids for mutation which changes the protein function in a desired way. The AnimoAminoMiner was designed in close cooperation with domain experts. Its usefulness is documented by their feedback and a case study, which are included.",
    pdf = "pdfs/2016-Byska-AnimoAminoMiner.pdf",
    images = "images/IvanViola2016.png",
    thumbnails = "images/IvanViola2016.png",
    publisher = "IEEE"
    }

2015

    [PDF] [Bibtex]
    @ARTICLE {Angelelli-2015-PQA,
    author = "Paolo Angelelli and Stefan Bruckner",
    title = "Performance and Quality Analysis of Convolution-Based Volume Illumination",
    journal = "Journal of WSCG",
    year = "2015",
    volume = "23",
    number = "2",
    pages = "131--138",
    month = "jun",
    abstract = "Convolution-based techniques for volume rendering are among the fastest  in the on-the-fly volumetric illumination category. Such methods,  however, are still considerably slower than conventional local illumination  techniques. In this paper we describe how to adapt two commonly used  strategies for reducing aliasing artifacts, namely pre-integration  and supersampling, to such techniques. These strategies can help  reduce the sampling rate of the lighting information (thus the number  of convolutions), bringing considerable performance benefits. We  present a comparative analysis of their effectiveness in offering  performance improvements. We also analyze the (negligible) differences  they introduce when comparing their output to the reference method.  These strategies can be highly beneficial in setups where direct  volume rendering of continuously streaming data is desired and continuous  recomputation of full lighting information is too expensive, or where  memory constraints make it preferable not to keep additional precomputed  volumetric data in memory. In such situations these strategies make  single pass, convolution-based volumetric illumination models viable  for a broader range of applications, and this paper provides practical  guidelines for using and tuning such strategies to specific use cases.",
    pdf = "pdfs/Angelelli-2015-PQA.pdf",
    images = "images/Angelelli-2015-PQA.jpg",
    thumbnails = "images/Angelelli-2015-PQA.png",
    keywords = "volume rendering, global illumination, scientific visualization, medical visualization"
    }
    [DOI] [Bibtex]
    @INPROCEEDINGS {eurovisstar20151112,
    author = "Kozlikova, Barbora and Krone, Michael and Lindow, Norbert and Falk, Martin and Baaden, Marc and Baum, Daniel and Viola, Ivan and Parulek, Julius and Hege, Hans-Christian",
    title = "Visualization of Biomolecular Structures: State of the Art",
    booktitle = "Eurographics Conference on Visualization (EuroVis) - STARs",
    year = "2015",
    editor = "R. Borgo and F. Ganovelli and I. Viola",
    volume = "-",
    publisher = "The Eurographics Association",
    abstract = "Structural properties of molecules are of primary concern in many fields. This report provides a comprehensiveoverview on techniques that have been developed in the fields of molecular graphics and visualization with a focuson applications in structural biology. The field heavily relies on computerized geometric and visual representationsof three-dimensional, complex, large, and time-varying molecular structures. The report presents a taxonomy thatdemonstrates which areas of molecular visualization have already been extensively investigated and where the fieldis currently heading. It discusses visualizations for molecular structures, strategies for efficient display regardingimage quality and frame rate, covers different aspects of level of detail, and reviews visualizations illustrating thedynamic aspects of molecular simulation data. The report concludes with an outlook on promising and importantresearch topics to enable further success in advancing the knowledge about interaction of molecular structures.",
    images = "images/molvis_star.png",
    thumbnails = "images/molvis_star.png",
    proceedings = "Eurographics Conference on Visualization (EuroVis) - STARs",
    doi = "10.2312/eurovisstar.20151112",
    journal = "-",
    number = "-",
    keywords = "-",
    project = "physioillustration"
    }
    [DOI] [Bibtex]
    @ARTICLE {Byska2015MC,
    author = "Jan By\v{s}ka and Adam Jur\v{c}\'{i}­k and Eduard M. Gr{\"o}ller and Ivan Viola and Barbora Kozl{\'i}kov{\'a}",
    title = "MoleCollar and Tunnel Heat Map Visualizations for Conveying Spatio-Temporo-Chemical Properties Across and Along Protein Voids",
    journal = "Computer Graphics Forum",
    year = "2015",
    volume = "34",
    number = "3",
    pages = "1--10",
    abstract = "Studying the characteristics of proteins and their inner void space, including their geometry,physico-chemical properties and dynamics are instrumental for evaluating the reactivity of theprotein with other small molecules. The analysis of long simulations of molecular dynamics produces a large number of voids which have to be further explored and evaluated. In this paper we propose three new methods: two of them convey important properties along the long axis of a selected void during molecular dynamics and one provides a comprehensive picture across the void. The first two proposed methods use a specific heat map to present two types of information: an overview of all detected tunnels in the dynamics and their bottleneck width andstability over time, and an overview of a specific tunnel in the dynamics showing the bottleneck position and changes of the tunnel length over time. These methods help to select asmall subset of tunnels, which are explored individually and in detail. For this stage we propose the third method, which shows in one static image the temporal evolvement of the shapeof the most critical tunnel part, i.e., its bottleneck. This view is enriched with abstractdepictions of different physicochemical properties of the amino acids surrounding the bottleneck. The usefulness of our newly proposed methods is demonstrated on a case study andthe feedback from the domain experts is included. The biochemists confirmed that our novel methods help to convey the information about the appearance and properties of tunnels in a very intuitive and comprehensible manner.",
    images = "images/cgf12612-fig-0001.png",
    thumbnails = "images/cgf12612-fig-0001.png",
    issn = "1467-8659",
    url = "http://dx.doi.org/10.1111/cgf.12612",
    doi = "10.1111/cgf.12612",
    keywords = "Categories and Subject Descriptors (according to ACM CCS), I.3.6 [Computer Graphics]: Picture/Image Generation—Line and curve generation"
    }
    [Bibtex]
    @INPROCEEDINGS {cellVIEW_2015,
    author = "Mathieu Le Muzic and Ludovic Autin and Julius Parulek and Ivan Viola",
    title = "cellVIEW: a Tool for Illustrative and Multi-Scale Rendering of Large Biomolecular Datasets",
    booktitle = "EG Workshop on Visual Computing for Biology and Medicine",
    year = "2015",
    month = "sep",
    abstract = "In this article we introduce cellVIEW, a new system to interactively visualize large biomolecular datasets on the atomic level. Our tool is unique and has been specifically designed to match the ambitions of our domain experts to model and interactively visualize structures comprised of several billions atom. The cellVIEW system integrates acceleration techniques to allow for real-time graphics performance of 60 Hz display rate on datasets representing large viruses and bacterial organisms. Inspired by the work of scientific illustrators, we propose a level-of-detail scheme which purpose is two-fold: accelerating the rendering and reducing visual clutter. The main part of our datasets is made out of macromolecules, but it also comprises nucleic acids strands which are stored as sets of control points. For that specific case, we extend our rendering method to support the dynamic generation of DNA strands directly on the GPU. It is noteworthy that our tool has been directly implemented inside a game engine. We chose to rely on a third party engine to reduce software development work-load and to make bleeding-edge graphics techniques more accessible to the end-users. To our knowledge cellVIEW is the only suitable solution for interactive visualization of large bimolecular landscapes on the atomic level and is freely available to use and extend.",
    images = "images/cellview2015.png",
    thumbnails = "images/cellview2015.png",
    proceedings = "Proceedings of Eurographics Workshop on Visual Computing in Biology and Medicine",
    project = "physioillustration"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Mindek-2015-ASM,
    author = "Peter Mindek and Ladislav \v{C}mol{\'i}k and Ivan Viola and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Automatized Summarization of Multiplayer Games",
    booktitle = "Proceedings of SCCG 2015",
    year = "2015",
    pages = "93--100",
    month = "apr",
    abstract = "We present a novel method for creating automatized gameplay dramatization  of multiplayer video games. The dramatization serves as a visual  form of guidance through dynamic 3D scenes with multiple foci, typical  for such games. Our goal is to convey interesting aspects of the  gameplay by animated sequences creating a summary of events which  occurred during the game. Our technique is based on processing many  cameras, which we refer to as a flock of cameras, and events captured  during the gameplay, which we organize into a so-called event graph.  Each camera has a lifespan with a certain time interval and its parameters  such as position or look-up vector are changing over time. Additionally,  during its lifespan each camera is assigned an importance function,  which is dependent on the significance of the structures that are  being captured by the camera. The images captured by the cameras  are composed into a single continuous video using a set of operators  based on cinematographic effects. The sequence of operators is selected  by traversing the event graph and looking for specific patterns corresponding  to the respective operators. In this way, a large number of cameras  can be processed to generate an informative visual story presenting  the gameplay. Our compositing approach supports insets of camera  views to account for several important cameras simultaneously. Additionally,  we create seamless transitions between individual selected camera  views in order to preserve temporal continuity, which helps the user  to follow the virtual story of the gameplay.",
    pdf = "pdfs/Mindek-2015-ASM.pdf",
    images = "images/Mindek-2015-ASM.jpg",
    thumbnails = "images/Mindek-2015-ASM.png",
    note = "SCCG 2015 Best Paper Award",
    doi = "10.1145/2788539.2788549",
    keywords = "animation, storytelling, game visualization",
    location = "Smolenice, Slovakia",
    owner = "bruckner",
    timestamp = "2015.06.08",
    url = "http://www.cg.tuwien.ac.at/research/publications/2015/mindek-2015-mc/"
    }
    [PDF] [Bibtex]
    @INPROCEEDINGS {PBVRVis2015026,
    author = "Matkovic, K and Gracanin, D and Jelovi{\'{c}}, M and Hauser, H",
    title = "Interactive Visual Analysis of Large Simulation Ensembles",
    booktitle = "Proceedings of Winter Simulation Conference (WSC 2015, to appear)",
    year = "2015",
    abstract = "Recent advancements in simulation and computing make it possible to compute large simulation ensembles. A simulation ensemble consists of multiple simulation runs of the same model with different values of control parameters. In order to cope with ensemble data, a modern analysis methodology is necessary. In this paper, we present our experience with simulation ensemble exploration and steering by means of interactive visual analysis. We describe our long-term collaboration with fuel injection experts from the automotive industry. We present how interactive visual analysis can be used to gain a deep understanding in the ensemble data, and how it can be used, in a combination with automatic methods, to steer the ensemble creation, even for very complex systems. Very positive feedback from domain experts motivated us, a team of visualization and simulation experts, to present this research to the simulation community.",
    pdf = "pdfs/matkovic_2015_winter_simConf.pdf",
    images = "images/IVA_matkovic.png",
    thumbnails = "images/IVA_matkovic.png"
    }
    [DOI] [Bibtex]
    @INPROCEEDINGS {7156384,
    author = "Le Muzic, Mathieu and Waldner, Manuela and Parulek, Julius and Viola, Ivan",
    title = "Illustrative Timelapse: A technique for illustrative visualization of particle-based simulations",
    booktitle = "Visualization Symposium (PacificVis), 2015 IEEE Pacific",
    year = "2015",
    pages = "247-254",
    month = "April",
    abstract = "Animated movies are a popular way to communicate complex phenomena in cell biology to the broadaudience. Animation artists apply sophisticated illustration techniques to communicate a story, while trying to maintain a realistic representation of a complex dynamic environment. Since suchhand-crafted animations are time-consuming and cost-intensive to create, our goal is to formalizeillustration techniques used by artists to facilitate the automatic creation of visualizations generated from mesoscale particle-based molecular simulations. Our technique Illustrative Timelapse supports visual exploration of complex biochemical processes in dynamic environments by(1) seamless temporal zooming to observe phenomena in different temporal resolutions, (2) visualabstraction of molecular trajectories to ensure that observers are able to visually follow themain actors, (3) increased visual focus on events of interest, and (4) lens effects to preserve arealistic representation of the environment in the context. Results from a first user studyindicate that visual abstraction of trajectories improves the ability to follow a story and isalso appreciated by users. Lens effects increased the perceived amount of molecular motion in theenvironment while trading off traceability of individual molecules.",
    images = "images/illustrative_timelapse.png",
    thumbnails = "images/illustrative_timelapse.png",
    proceedings = "Proceedings of IEEE Pacific Visualization",
    keywords = "Biological system modeling;Data models;Data visualization;Lenses;Trajectory;Videos;Visualization;I.3.7[COMPUTER GRAPHICS]: Three-Dimensional Graphics and Realism—Animation;I.6.3 [SIMULATION AND MODELING]: Applications—",
    doi = "10.1109/PACIFICVIS.2015.7156384",
    project = "physioillustration"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {alsallakh2015state,
    author = "Alsallakh, Bilal and Micallef, Luana and Aigner, Wolfgang and Hauser, Helwig and Miksch, Silvia and Rodgers, Peter",
    title = "The State-of-the-Art of Set Visualization",
    journal = "Computer Graphics Forum",
    year = "2015",
    abstract = "Sets comprise a generic data model that has been used in a variety of data analysis problems. Such problems involve analysing and visualizing set relations between multiple sets defined over the same collection of elements. However, visualizing sets is a non-trivial problem due to the large number of possible relations between them. We provide a systematic overview of state-of-the-art techniques for visualizing different kinds of set relations. We classify these techniques into six main categories according to the visual representations they use and the tasks they support. We compare the categories to provide guidance for choosing an appropriate technique for a given problem. Finally, we identify challenges in this area that need further research and propose possible directions to address these challenges. Further resources on set visualization are available at http://www.setviz.net.",
    pdf = "pdfs/Alsallakh_et_al-2016-Computer_Graphics_Forum.pdf",
    images = "images/ThumbNailIMG-SetVisSTAR.png",
    thumbnails = "images/ThumbNailIMG-SetVisSTAR.png",
    organization = "Wiley Online Library",
    booktitle = "Computer Graphics Forum",
    doi = "10.1111/cgf.12722"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Diehl-2015-VAS,
    author = "Alexandra Diehl and Leandro Pelorosso and Claudio Delrieux and Celeste Saulo and Juan Ruiz and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Visual Analysis of Spatio-Temporal Data: Applications in Weather Forecasting",
    journal = "Computer Graphics Forum",
    year = "2015",
    volume = "34",
    number = "3",
    pages = "381--390",
    month = "may",
    abstract = "Weather conditions affect multiple aspects of human life such as economy,  safety, security, and social activities. For this reason, weather  forecast plays a major role in society. Currently weather forecasts  are based on Numerical Weather Prediction (NWP) models that generate  a representation of the atmospheric flow. Interactive visualization  of geo-spatial data has been widely used in order to facilitate the  analysis of NWP models. This paper presents a visualization system  for the analysis of spatio-temporal patterns in short-term weather  forecasts. For this purpose, we provide an interactive visualization  interface that guides users from simple visual overviews to more  advanced visualization techniques. Our solution presents multiple  views that include a timeline with geo-referenced maps, an integrated  webmap view, a forecast operation tool, a curve-pattern selector,  spatial filters, and a linked meteogram. Two key contributions of  this work are the timeline with geo-referenced maps and the curve-pattern  selector. The latter provides novel functionality that allows users  to specify and search for meaningful patterns in the data. The visual  interface of our solution allows users to detect both possible weather  trends and errors in the weather forecast model.We illustrate the  usage of our solution with a series of case studies that were designed  and validated in collaboration with domain experts.",
    pdf = "pdfs/Diehl-2015-VAS.pdf",
    images = "images/Diehl-2015-VAS.jpg",
    thumbnails = "images/Diehl-2015-VAS.png",
    youtube = "https://www.youtube.com/watch?v=hhQwsuXpHo8",
    doi = "10.1111/cgf.12650",
    event = "EuroVis 2015",
    keywords = "weather forecasting, visual analysis, spatiotemporal data",
    location = "Cagliari, Italy",
    owner = "bruckner",
    timestamp = "2015.06.08"
    }
    [PDF] [Bibtex]
    @MISC {Hauser2015IRIS,
    author = "Helwig Hauser",
    title = "Medical Visualization Research at the VisGroup @ UiB.no/ii",
    howpublished = "Invited talk at IRIS",
    month = "November",
    year = "2015",
    abstract = "Established about eight years ago, the Visualization Research Group is the youngest of six research groups at the Department of Informatics, UiB, focusing on application-oriented basic research in visualization. Medicine and related disciplines (such as biomedicine, biology, etc.) stand for a growing number of grand visualization challenges and the vivid interdisciplinary MedViz network in Bergen gives ample opportunities for internationally recognized visualization research. In this talk, an overview of the visualization research group [1] is given, together with a short review of selected research projects in medical visualization.",
    pdf = "pdfs/2015-11-25-HH-IRIS.pdf",
    images = "images/ThumbNailIRIS.jpg",
    thumbnails = "images/ThumbNailIRIS.jpg",
    day = "25",
    location = "Bergen, Norway"
    }
    [DOI] [Bibtex]
    @ARTICLE {Brambilla15Expressive,
    author = "Andrea Brambilla and Helwig Hauser",
    title = "Expressive Seeding of Multiple Stream Surfaces for Interactive Flow Exploration",
    journal = "Computers \& Graphics",
    year = "2015",
    volume = "47",
    pages = "123--134",
    abstract = "Integral surfaces, such as stream and path surfaces, are highly effective in the context of the exploration and the analysis of the long-term behavior of three-dimensional flows. However, specifying the seeding curves that lead to an expressive set of integral surfaces is a challenging and cumbersome task. In this paper, we propose an algorithm for automatically seeding multiple stream surfaces around a user-specified location of interest. The process is guided by a streamline similarity measure. Within the resulting integral surfaces, adjacent streamlines are as similar as possible to each other. In addition, we aim at conveying different aspects of the flow behavior with each surface. This is achieved by maximizing the dissimilarity between streamlines from different stream surfaces. The capabilities of our technique are demonstrated on a number of application cases. We provide a qualitative comparison with two state-of-the-art approaches. We report from our detailed exchange with a domain expert concerning the expressiveness and usefulness of our approach. A thorough analysis of the few parameters involved is provided. ",
    images = "images/Brambilla15Expressive01.png, images/Brambilla15Expressive02.png",
    thumbnails = "images/Brambilla15Expressive01_thumb.png, images/Brambilla15Expressive02_thumb.png",
    publisher = "Elsevier",
    doi = "http://dx.doi.org/10.1016/j.cag.2015.01.002",
    url = "http://www.sciencedirect.com/science/article/pii/S0097849315000035",
    keywords = "Flow visualization; Stream surface selection; Visibility management"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {splechtna2015interactive,
    author = "Splechtna, Rainer and Matkovic, Kresimir and Gracanin, Denis and Jelovic, Mario and Hauser, Helwig",
    title = "Interactive visual steering of hierarchical simulation ensembles",
    booktitle = "Visual Analytics Science and Technology (VAST), 2015 IEEE Conference on",
    year = "2015",
    pages = "89--96",
    organization = "IEEE",
    abstract = "Multi-level simulation models, i.e., models where different components are simulated using sub-models of varying levels of complexity, belong to the current state-of-the-art in simulation. The existing analysis practice for multi-level simulation results is to manually compare results from different levels of complexity, amounting to a very tedious and error-prone, trial-and-error exploration process. In this paper, we introduce hierarchical visual steering, a new approach to the exploration and design of complex systems. Hierarchical visual steering makes it possible to explore and analyze hierarchical simulation ensembles at different levels of complexity. At each level, we deal with a dynamic simulation ensemble - the ensemble grows during the exploration process. There is at least one such ensemble per simulation level, resulting in a collection of dynamic ensembles, analyzed simultaneously. The key challenge is to map the multi-dimensional parameter space of one ensemble to the multi-dimensional parameter space of another ensemble (from another level). In order to support the interactive visual analysis of such complex data we propose a novel approach to interactive and semi-automatic parameter space segmentation and comparison. The approach combines a novel interaction technique and automatic, computational methods - clustering, concave hull computation, and concave polygon overlapping - to support the analysts in the cross-ensemble parameter space mapping. In addition to the novel parameter space segmentation we also deploy coordinated multiple views with standard plots. We describe the abstract analysis tasks, identified during a case study, i.e., the design of a variable valve actuation system of a car engine. The study is conducted in cooperation with experts from the automotive industry. Very positive feedback indicates the usefulness and efficiency of the newly proposed approach.",
    pdf = "pdfs/Splechtna_2015.pdf",
    images = "images/ThumbNailIMG-HierSteering.png",
    thumbnails = "images/ThumbNailIMG-HierSteering.png",
    doi = "10.1109/VAST.2015.7347635"
    }
    [PDF] [Bibtex]
    @MISC {Hauser2015Austria,
    author = "Helwig Hauser",
    title = "Integrating Spatial \& Non-spatial Data in Visualization",
    howpublished = "Invited talk",
    month = "October",
    year = "2015",
    abstract = "New opportunities in data science, such as the consideration of cohort study data, require new approaches to the appropriate design of an effective visualization. We need to capitalize on successful solutions from previous research, of course, but we should also explore new strategies that challenge our already established mindset in visualization. In this talk, I address the specific challenge of integrating spatial and non-spatial data in visualization, in particular, when the spatial aspect of the data is of great importance to the user---this could relate to the morphological information in a 3D medical scan or the geometrical aspects of flow features in a CFD simulation. In data visualizaiton, the actual mapping step---from data to a visual form---is certainly crucial and we should strive to optimally exploit the great opportunities that we have in designing this step. In data-intensive sciences, the study objects of interest are increasingly often represented by extensive and rich datasets (aka. big data)---while traditionally the focus of visualization was on individual, static datasets, we now face dynamic data, representing entire ensembles of study entities, etc. Visualization gets a lot harder, when facing such new 'big data' challenges---both on the designer sider as well as also on the user side. At the same time, however, also the potential for impact is increasing, which amounts to a fantastic motivation for new basic research in visualization.",
    pdf = "pdfs/2015-10-14-HHauser-InvTalk.pdf",
    images = "images/ThumbPicHHAustria2015.png",
    thumbnails = "images/ThumbPicHHAustria2015.png",
    location = "Vienna, Austria"
    }
    [PDF] [Bibtex]
    @MISC {Hauser2015VIS,
    author = "Helwig Hauser",
    title = "From Anatomy to Physiology in Medical Visualization",
    howpublished = "Tutorial talk at IEEE VIS 2015",
    month = "October",
    year = "2015",
    abstract = "Generally, medical visualization assists the diagnosis of diseases as well as the treatment of patients. Capturing the patients anatomy, which to a large degree is in the focus of traditional MedViz, certainly is one important key to the success of medical visualization. At least equally important, if not even more, is the consideration of physiology, entailing the complex of function (or malfunction) of the patient. Modern imaging modalities extend beyond the simple depiction of static anatomical snapshots to capturing temporal processes as well as to covering multiple scales of physiology eventually linking molecular biology to medicine. The visualization of human physiology complements other techniques, for example lab tests for quantifying certain physiological functions. We deem ourselves at the beginning of an interesting extension of MedViz research to increasingly capture physiology in addition to anatomy.",
    pdf = "pdfs/2015-10-25-VIS2015-TutTalkHH-print2up.pdf",
    images = "images/ThumbnailVisTut.png",
    thumbnails = "images/ThumbnailVisTut.png",
    day = "25",
    location = "Chicago, Illinois, USA"
    }
    [PDF] [Bibtex]
    @MISC {Hauser2015SUBSEA,
    author = "Helwig Hauser",
    title = "Big Data - visualization and visual analytics",
    howpublished = "Invited talk at the NCE Subsea Forum",
    month = "March",
    year = "2015",
    pdf = "pdfs/2015-03-19-NCE-BigDataVA-print2up.pdf",
    images = "images/ThumbnailBigData.jpg",
    thumbnails = "images/ThumbnailBigData.jpg",
    day = "19",
    location = "Bergen, Norway"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Karimov-2015-GVE,
    author = "Alexey Karimov and Gabriel Mistelbauer and Thomas Auzinger and Stefan Bruckner",
    title = "Guided Volume Editing based on Histogram Dissimilarity",
    journal = "Computer Graphics Forum",
    year = "2015",
    volume = "34",
    number = "3",
    pages = "91--100",
    month = "may",
    abstract = "Segmentation of volumetric data is an important part of many analysis  pipelines, but frequently requires manual inspection and correction.  While plenty of volume editing techniques exist, it remains cumbersome  and error-prone for the user to find and select appropriate regions  for editing. We propose an approach to improve volume editing by  detecting potential segmentation defects while considering the underlying  structure of the object of interest. Our method is based on a novel  histogram dissimilarity measure between individual regions, derived  from structural information extracted from the initial segmentation.  Based on this information, our interactive system guides the user  towards potential defects, provides integrated tools for their inspection,  and automatically generates suggestions for their resolution. We  demonstrate that our approach can reduce interaction effort and supports  the user in a comprehensive investigation for high-quality segmentations.",
    pdf = "pdfs/Karimov-2015-GVE.pdf",
    images = "images/Karimov-2015-GVE.jpg",
    thumbnails = "images/Karimov-2015-GVE.png",
    youtube = "https://www.youtube.com/watch?v=zjTYkXTm_dM",
    doi = "10.1111/cgf.12621",
    event = "EuroVis 2015",
    keywords = "medical visualization, segmentation, volume editing, interaction",
    location = "Cagliari, Italy",
    owner = "bruckner",
    timestamp = "2015.06.08",
    url = "http://www.cg.tuwien.ac.at/research/publications/2015/karimov-2015-HD/"
    }

2014

    [PDF] [Bibtex]
    @MISC {Hauser2014BigData,
    author = "Helwig Hauser",
    title = "Big Data - a threat or an opportunity for our modern society?",
    howpublished = "Invited talk at the Alumni Event of the University of Bergen, Norway",
    month = "May",
    year = "2014",
    abstract = "Invited talk at the Alumni Event of the University of Bergen, Norway",
    pdf = "pdfs/2014-05-10-UiB-Alumni-BigDataTalkHH-print2up.pdf",
    images = "images/2014-05-10-UiB-Alumni-BigDataTalkHH-print2up_Image_0003.jpg",
    thumbnails = "images/2014-05-10-UiB-Alumni-BigDataTalkHH-print2up_Image_0003.jpg",
    location = "Bergen, Norway"
    }
    [PDF] [Bibtex]
    @ARTICLE {Natali14Rapid,
    author = "Mattia Natali and Julius Parulek and Daniel Patel",
    title = "Rapid Modelling of Interactive Geological Illustrations with Faults and Compaction",
    journal = "Proceedings of Spring Conference on Computer Graphics (SCCG)",
    year = "2014",
    abstract = "In this paper, we propose new methods for building geological illustrations and animations. We focus on allowing geologists to create their subsurface models by means of sketches, to quickly communicate concepts and ideas rather than detailed information. The result of our sketch-based modelling approach is a layer-cake volume representing geological phenomena, where each layer is rock material which has accumulated due to a user-defined depositional event. Internal geological structures can be inspected by different visualization techniques that we employ. Faulting and compaction of rock layers are important processes in geology. They can be modelled and visualized with our technique. Our representation supports non-planar faults that a user may define by means of sketches. Real-time illustrative animations are achieved by our GPU accelerated approach.",
    pdf = "pdfs/Natali14Rapid.pdf",
    images = "images/Natali2014Rapid0.png, images/Natali2014Rapid1.png",
    thumbnails = "images/Natali2014Rapid0.png, images/Natali2014Rapid1.png",
    url = "http://dx.doi.org/10.1145/2643188.2643201",
    project = "geoillustrator"
    }
    [Bibtex]
    @ARTICLE {alsallakh2014visual,
    author = "Alsallakh, Bilal and Hanbury, Allan and Hauser, Helwig and Miksch, Silvia and Rauber, Andreas",
    title = "Visual methods for analyzing probabilistic classification data",
    journal = "Visualization and Computer Graphics, IEEE Transactions on",
    year = "2014",
    volume = "20",
    number = "12",
    pages = "1703--1712",
    abstract = "Multi-class classifiers often compute scores for the classification samples describing probabilities to belong to different classes. In order to improve the performance of such classifiers, machine learning experts need to analyze classification results for a large number of labeled samples to find possible reasons for incorrect classification. Confusion matrices are widely used for this purpose. However, they provide no information about classification scores and features computed for the samples. We propose a set of integrated visual methods for analyzing the performance of probabilistic classifiers. Our methods provide insight into different aspects of the classification results for a large number of samples. One visualization emphasizes at which probabilities these samples were classified and how these probabilities correlate with classification error in terms of false positives and false negatives. Another view emphasizes the features of these samples and ranks them by their separation power between selected true and false classifications. We demonstrate the insight gained using our technique in a benchmarking classification dataset, and show how it enables improving classification performance by interactively defining and evaluating post-classification rules.",
    images = "images/alsallakh2014visual3.jpg, images/alsallakh2014visual1.jpg, images/alsallakh2014visual2.jpg",
    thumbnails = "images/alsallakh2014visual3.jpg",
    publisher = "IEEE"
    }
    [PDF] [DOI] [Bibtex]
    @MISC {Hauser2014SIBGRAPI,
    author = "Helwig Hauser",
    title = "Interactive Visual Exploration and Analysis of Multi-Faceted Scientific Data",
    howpublished = "Invited talk at SIBGRAPI Conference on Graphics, Patterns and Images in Rio de Janeiro, Brazil",
    month = "August",
    year = "2014",
    abstract = "Invited talk at SIBGRAPI Conference on Graphics, Patterns and Images in Rio de Janeiro, Brazil",
    pdf = "pdfs/2014-08-30-Rio-SIBGRAPI-invited-talk-print-new-new-2up.pdf",
    images = "images/2014-08-30-Rio-SIBGRAPI-invited-talk-print-new-new-2up_Image_0003.jpg, images/2014-08-30-Rio-SIBGRAPI-invited-talk--print-new-new-2up_Image_0001.jpg, images/2014-08-30-Rio-SIBGRAPI-invited-talk-print-new-new-2up_Image_0001(2).jpg, images/2014-08-30-Rio-SIBGRAPI-invited-talk-print-new-new-2up_Image_0001(3).jpg, images/2014-08-30-Rio-SIBGRAPI-invited-talk-print-new-new-2up_Image_0001(4).jpg",
    thumbnails = "images/2014-08-30-Rio-SIBGRAPI-invited-talk-print-new-new-2up_Image_0003.jpg",
    location = "Rio de Janeiro, Brazil",
    doi = "10.1007/978-1-4471-6497-5_15"
    }
    [Bibtex]
    @MISC {Kingman14GenomeMaking,
    author = "Pina Kingman",
    title = "Our Resilient Genome: The Making of a Science Film",
    howpublished = "Presentation in the EG VCBM workshop 2014",
    month = "September",
    year = "2014",
    abstract = "Every single human cell has to fix 10,000 to 20,000 lesions in its DNA every day. Our cells are constantly exposed to many different types of threats that damage our genome. These lesions could cause mutations in our DNA, potentially leading to cancer and other diseases. With such continuous onslaught, how can our cells possibly protect our DNA from damage and mutations? This presentation will showcase the first public screening of a short film about DNA repair, which blends computer graphics and biology to communicate exciting up-and-coming research. This film was developed in conjunction with the Department of Informatics and the Department of Molecular Biology at the University of Bergen, and the Institute of Computer Graphics and Algorithms at the Vienna University of Technology. Along with a discussion on the visualisation process, I will also talk about the intersection between film and science that helps us communicate complex information.",
    images = "images/no_thumb.png",
    thumbnails = "images/no_thumb.png",
    location = "Wien, Austria",
    project = "physioillustration"
    }
    [PDF] [Bibtex]
    @MISC {LeMusic14Temporal,
    author = "Mathieu Le Muzic and Julius Parulek and Manuela Waldner and Ivan Viola",
    title = "Illustrative Visualization of Biochemical Processes Featuring Multiple Temporal Scales",
    howpublished = "Poster presented at the EG VCBM workshop 2014",
    month = "September",
    year = "2014",
    pdf = "pdfs/LeMusic14Temporal.pdf",
    images = "images/LeMuzic14Temporal.png",
    thumbnails = "images/LeMuzic14Temporal_thumb.png",
    location = "Wien, Austria",
    project = "physioillustration"
    }
    [DOI] [Bibtex]
    @INPROCEEDINGS {alsallakh2014visualizing,
    author = "Alsallakh, Bilal and Micallef, Luana and Aigner, Wolfgang and Hauser, Helwig and Miksch, Silvia and Rodgers, Peter",
    title = "Visualizing sets and set-typed data: State-of-the-art and future challenges",
    booktitle = "Eurographics conference on Visualization (EuroVis)--State of The Art Reports",
    year = "2014",
    pages = "1--21",
    abstract = "A variety of data analysis problems can be modelled by defining multiple sets over a collection of elements and analyzing the relations between these sets. Despite their simple concept, visualizing sets is a non-trivial problem due to the large number of possible relations between them. We provide a systematic overview of state-of-theart techniques for visualizing different kinds of set relations. We classify these techniques into 7 main categories according to the visual representations they use and the tasks they support. We compare the categories to provide guidance for choosing an appropriate technique for a given problem. Finally, we identify challenges in this area that need further research and propose possible directions to address with these challenges.",
    images = "images/img_Page_13_Image_0001.jpg",
    thumbnails = "images/img_Page_13_Image_0001.jpg",
    proceedings = "Eurographics conference on Visualization (EuroVis)--stars",
    doi = "dx.doi.org/10.2312/eurovisstar.20141170"
    }
    [Bibtex]
    @MISC {Kingman14Integrating,
    author = "Pina Kingman and Anne-Kristin Stavrum and Ivan Viola and Helwig Hauser",
    title = "Integrating 2D and 3D Animation to Comprehensively Communicate Biology",
    howpublished = "Poster presented at the VizBi conference 2014",
    month = "March",
    year = "2014",
    abstract = "As research in cellular and molecular biology advances, so does the need to educated both the science research community and the general public. The former must be aware of developments in associated fields, the latter must be able to take responsibility for their own well-being. In both cases, we have a willing and capable audience, ready to delve deeper into the biological sciences. To exploit this opportunity, we need to research new and advanced visual language techniques to further improve communication. We are therefore investigating novel visual communication techniques to advance knowledge translation methods, focusing on effectively communicating abstract functional aspects of biological systems. To this end, we are creating several short animations, each one exploring different design solutions. These design solutions incorporate 2D motion graphics, information visualization, 3D animation, and can be applied to any biological story. In addition to our short animations, this research will culminate in a short film describing NAD-dependent DNA Repair, intended for the general public and researchers interested in these molecular systems.",
    images = "images/Kingman13Integrating.png",
    thumbnails = "images/Kingman13Integrating_thumb.jpg",
    location = "Heidelberg, Germany",
    project = "physioillustration"
    }
    [PDF] [Bibtex]
    @MISC {Hauser2014USP,
    author = "Helwig Hauser",
    title = "About Visualization in Bergen and Interactive Visual Analysis",
    howpublished = "Invited talk at Institute of Computing and Mathematical Sciences, University of São Paolo, in São Carlos, Brazil",
    month = "August",
    year = "2014",
    abstract = "Invited talk at Institute of Computing and Mathematical Sciences, University of São Paolo, in São Carlos, Brazil",
    pdf = "pdfs/2014-08-26-SaoCarlos-USP-inv-talk-print2up.pdf",
    images = "images/2014-08-26-SaoCarlos-USP-invtalk-print2up_Image_0001.jpg, images/2014-08-26--SaoCarlos-USP-inv-talk-print2up_Image_0001(2).jpg, images/2014-08-26-SaoCarlos-USP-inv-talk-print2up_Image_0001(3).jpg, images/2014-08-26-SaoCarlos-USP-inv-talk-print2up_Image_0001(4).jpg, images/2014-08-26-SaoCarlos-USP-inv-talk-print2up_Image_0002.jpg, images/2014-08-26-SaoCarlos-USP-inv-talk-print2up_Image_0003.jpg, images/2014-08-26-SaoCarlos-USP-inv-talk-print2up_Image_0003(2).jpg, images/2014-08-26-SaoCarlos-USP-inv-talk-print2up_Image_0002(2).jpg, images/2014-08-26-SaoCarlos-USP-inv-talk-print2up_Image_0002(3).jpg",
    thumbnails = "images/2014-08-26-SaoCarlos-USP-invtalk-print2up_Image_0001.jpg",
    location = "São Carlos, Brazil"
    }
    [PDF] [Bibtex]
    @MISC {Hauser2014NCE,
    author = "Helwig Hauser",
    title = "About Visual Computing",
    howpublished = "Invited talk at the NCE Subsea Theme Meeting on Visual Computing in Bergen, Norway",
    month = "April",
    year = "2014",
    abstract = "Invited talk at the NCE Subsea Theme Meeting on Visual Computing in Bergen, Norway",
    pdf = "pdfs/2014-04-08-VisCompTalk-HH-print2up.pdf",
    images = "images/2014-04-08-VisCompTalk-HH-print2up_Image_0004.jpg, images/2014-04-08-VisCompTalk-HH-print2up_Image_0006.jpg, images/2014-04-08-VisCompTalk-HH-print2up_Image_0010.jpg, images/2014-04-08-VisCompTalk-HH-print2up_Image_0002.jpg",
    thumbnails = "images/2014-04-08-VisCompTalk-HH-print2up_Image_0010.jpg",
    location = "Bergen, Norway"
    }
    [Bibtex]
    @PHDTHESIS {brambilla14thesis,
    author = "Andrea Brambilla",
    title = "Visibility-oriented Visualization Design for Flow Illustration",
    school = "Department of Informatics, University of Bergen, Norway",
    year = "2014",
    month = "December",
    abstract = "Flow phenomena are ubiquitous in our world and they affect many aspects of our daily life. For this reason, they are the subject of extensive studies in several research fields. In medicine, the blood flow through our vessels can reveal important information about cardiovascular diseases. The air flow around a vehicle and the motion of fluids in a combustion engine are examples of relevant flow phenomena in engineering disciplines. Meteorologists, climatologists and oceanographers are instead concerned with winds and water currents. Thanks to the recent advancements in computational fluid dynamics and to the increasing power of modern hardware, accurate simulations of flow phenomena are feasible nowadays. The evolution of multiple flow attributes, such as velocity, temperature and pressure, can be simulated over large spatial and temporal domains (4D). The amount of data generated by this process is massive, therefore visualization techniques are often adopted in order to ease the analysis phase. The overall goal is to convey information about the phenomena of interest through a suitable representation of the data at hand. Due to the multivariate and multidimensional nature of the data, visibility issues (such as cluttering and occlusion), represent a significant challenge. Flow visualization can greatly benefit from studying and addressing visibility issues already in the design phase. In this thesis we investigate and demonstrate the effectiveness of taking visibility management into account early in the design process. We apply this principle to three characteristic flow visualization scenarios: (1) The simultaneous visualization of multiple flow attributes. (2) The visual inspection of single and multiple integral surfaces. (3) The selection of seeding curves for constructing families of integral surfaces. Our techniques result in clutter- and occlusion-free visualizations, which effectively illustrate the key aspects of the flow behavior. For demonstration purposes, we have applied our approaches to a number of application cases. Additionally, we have discussed our visualization designs with domain experts. They showed a genuine interest in our work and provided insightful suggestions for future research directions.",
    images = "images/brambilla14thesis_0.png, images/brambilla14thesis_1.png,",
    thumbnails = "images/brambilla14thesis_0_thumb.png, images/brambilla14thesis_1_thumb.png,",
    isbn = "978-82-308-2753-6",
    url = "http://hdl.handle.net/1956/8961"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Schmidt-2014-YMC,
    author = "Johanna Schmidt and Reinhold Preiner and Thomas Auzinger and Michael Wimmer and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "YMCA - Your Mesh Comparison Application",
    booktitle = "Proceedings of IEEE VAST 2014",
    year = "2014",
    pages = "153--62",
    month = "nov",
    abstract = "Polygonal meshes can be created in several different ways. In this  paper we focus on the reconstruction of meshes from point clouds,  which are sets of points in 3D. Several algorithms that tackle this  task already exist, but they have different benefits and drawbacks,  which leads to a large number of possible reconstruction results  (i.e., meshes). The evaluation of those techniques requires extensive  comparisons between different meshes which is up to now done by either  placing images of rendered meshes side-by-side, or by encoding differences  by heat maps. A major drawback of both approaches is that they do  not scale well with the number of meshes. This paper introduces a  new comparative visual analysis technique for 3D meshes which enables  the simultaneous comparison of several meshes and allows for the  interactive exploration of their differences. Our approach gives  an overview of the differences of the input meshes in a 2D view.  By selecting certain areas of interest, the user can switch to a  3D representation and explore the spatial differences in detail.  To inspect local variations, we provide a magic lens tool in 3D.  The location and size of the lens provide further information on  the variations of the reconstructions in the selected area. With  our comparative visualization approach, differences between several  mesh reconstruction algorithms can be easily localized and inspected.",
    pdf = "pdfs/Schmidt-2014-YMC.pdf",
    images = "images/Schmidt-2014-YMC.jpg",
    thumbnails = "images/Schmidt-2014-YMC.png",
    youtube = "https://www.youtube.com/watch?v=1s-AmFCQRzM",
    doi = "10.1109/VAST.2014.7042491",
    event = "IEEE VIS 2014",
    keywords = "visual analysis, comparative visualization, 3D data exploration, focus+context, mesh comparison",
    location = "Paris, France",
    proceedings = "Proceedings of IEEE VAST 2014",
    url = "http://www.cg.tuwien.ac.at/research/publications/2014/ymca/"
    }
    [DOI] [VID] [Bibtex]
    @ARTICLE {Natali14Sketch,
    author = "Mattia Natali and Tore Grane Klausen and Daniel Patel",
    title = "Sketch-Based Modelling and Visualization of Geological Deposition",
    journal = "Computers \& Geosciences",
    year = "2014",
    volume = "67C",
    pages = "40--48",
    abstract = "We propose a method for sketching and visualizing geological models by sequentially defining stratigraphic layers, where each layer represents a unique erosion or deposition event. Evolution of rivers and deltas is important for geologists when interpreting the stratigraphy of the subsurface, in particular for hydrocarbon exploration. We illustratively visualize mountains, basins, lakes, rivers and deltas, and how they change the morphology of a terrain during their evolution. We present a compact representation of the model and a novel rendering algorithm that allows us to obtain an interactive and illustrative layer-cake visualization. A user study has been performed to evaluate our method.",
    vid = "vids/Natali2014Sketch.mp4",
    images = "images/Natali2014Sketch0.png, images/Natali2014Sketch1.png",
    thumbnails = "images/Natali2014Sketch0.png, images/Natali2014Sketch1.png",
    doi = "10.1016/j.cageo.2014.02.010",
    url = "http://www.sciencedirect.com/science/article/pii/S0098300414000508",
    project = "geoillustrator"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Waldner-2014-GHI,
    author = "Manuela Waldner and Stefan Bruckner and Ivan Viola",
    title = "Graphical Histories of Information Foraging",
    booktitle = "Proceedings of NordiCHI 2014",
    year = "2014",
    pages = "295--304",
    month = "oct",
    abstract = "During information foraging, knowledge workers iteratively seek, filter,  read, and extract information. When using multiple information sources  and different applications for information processing, re-examination  of activities for validation of previous decisions or re-discovery  of previously used information sources is challenging. In this paper,  we present a novel representation of cross-application histories  to support recall of past operations and re-discovery of information  resources. Our graphical history consists of a cross-scale visualization  combining an overview node-link diagram of used desktop resources  with nested (animated) snapshot sequences, based on a recording of  the visual screen output during the users’ desktop work. This representation  makes key elements of the users’ tasks visually stand out, while  exploiting the power of visual memory to recover subtle details of  their activities. In a preliminary study, users found our graphical  history helpful to recall details of an information foraging task  and commented positively on the ability to expand overview nodes  into snapshot and video sequences.",
    pdf = "pdfs/Waldner-2014-GHI.pdf",
    images = "images/Waldner-2014-GHI.jpg",
    thumbnails = "images/Waldner-2014-GHI.png",
    doi = "10.1145/2639189.2641202",
    keywords = "interaction history, graph visualization, provenance",
    owner = "bruckner",
    timestamp = "2014.12.30",
    url = "http://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-ghi/"
    }
    [PDF] [Bibtex]
    @PHDTHESIS {natali14thesis,
    author = "Mattia Natali",
    title = "Sketch-based Modelling and Conceptual Visualization of Geomorphological Processes for Interactive Scientific Communication",
    school = "Department of Informatics, University of Bergen, Norway",
    year = "2014",
    month = "September",
    abstract = "Throughout this dissertation, solutions for rapid digitalization of ideas will be defined.More precisely, the focus is on interactive scientific sketching and communication of geology, where theresult is a digital illustrative 3D model. Results are achieved through a sketch-based modellingapproach which gives the user a more natural and intuitive modelling process, hence leading to aquicker definition of a geological illustration. To be able to quickly externalize and communicate onesideas as a digital 3D model, can be of importance. For instance, students may profit from explanationssupported by interactive illustrations. Exchange of information and hypotheses between domain expertsis also a targeted situation in our work. Furthermore, illustrative models are frequently employed in business, when decisional meetings take place for convincing the management that a project is worth to be funded. An advantage of digital models is that they can be saved and they are easy to distribute. In contrast to 2D images or paper sketches, one can interact with digital 3D models, and they can be transferred on portable devices for easy access (for instance during geological field studies). Another advantage, compared to standard geological illustrations, is that if a model has been created with internal structures, it can be arbitrarily cut and inspected. Different solutions for different aspects of subsurface geology are presented in this dissertation. To express folding and faulting processes, a first modelling approach based on cross-sectional sketches is introduced. User defined textures can be associated to each layer, and can then be deformed with sketch strokes, for communicating layer properties such as rock type and grain size. A following contribution includes a simple and compact representation to model and visualize 3D stratigraphic models. With this representation, erosion and deposition offluvial systems are easy to specify and display. Ancient river channels and other geological features, which are present in the subsurface, can be accessed by means of a volumetric representation. Geological models are obtained and visualized by sequentially defining stratigraphic layers, where each layer represents a unique erosion or deposition event. Evolution of rivers and deltas is important for geologists when interpreting the stratigraphy of the subsurface, in particular because it changes the landscape morphology and because river deposits are potential hydrocarbon reservoirs. Time plays a fundamental role in geological processes. Animations are well suited for communicating temporal change and a contribution in this direction is also given. With the techniques developed in this thesis, it becomes possible to produce a range of geological scenarios. The focus is on enabling geologists tocreate their subsurface models by means of sketches, to quickly communicate concepts and ideasrather than detailed information. Although the proposed techniques are simple to use and requirelittle design effort, complex models can be realized. ",
    pdf = "pdfs/natali14thesis.pdf",
    images = "images/Natali2014Rapid0.png, images/Natali2014Sketch0.png,",
    thumbnails = "images/Natali2014Rapid0.png, images/Natali2014Sketch0.png,",
    isbn = "?? ",
    url = "https://bora.uib.no/handle/1956/8570",
    project = "geoillustrator"
    }
    [Bibtex]
    @MISC {Kingman14ResilientGenome,
    author = "Pina Kingman",
    title = "Our Resilient Genome",
    howpublished = "Talk in the Forshkningsdagene UNG 2014",
    month = "September",
    year = "2014",
    abstract = "Motivation: Make science research accessible to the public through film. Inspire and instil an interest in science and molecular biology. Story: The short animated film will describe the molecular pathways involved in single strand break DNA repair. Every single human cell has to repair an estimated 10,000-20,000 DNA lesion every day. DNA is constantly exposed to a variety of genotoxic events, leading to many different types of lesions. If the damage is not repaired, these lesions may lead to mutations that in turn lead to cancer and ageing. Your cells, however, have fine tuned mechanisms that maintain the integrity of our genome. This film describes one of those mechanisms. Length: About 3 minutes. Audience: We are aiming for the type of person who would attend a science film festival. We are thus assuming an interest in biology and medicine, at least a high-school degree (with the high probability of intending to continue to higher education), and a basic understanding of biology. Timeline: The film is currently in production and will be finished mid/late summer.",
    images = "images/no_thumb.png",
    thumbnails = "images/no_thumb.png",
    location = "Bergen, Norway",
    project = "physioillustration"
    }
    [DOI] [Bibtex]
    @INCOLLECTION {RobertLaramee2014HSH,
    author = "Robert Laramee and Hamish Carr and Min Chen and Helwig Hauser and Lars Linsen and Klaus Mueller and Vijay Natarajan and Harald Obermaier and Ronald Peikert and Eugene Zhang.",
    title = "Future Challenges and Unsolved Problems in Multi-field Visualization",
    booktitle = "Scientific Visualization: Uncertainty, Multifield, Biomedical, and  Scalable Visualization",
    publisher = "Springer",
    year = "2014",
    editor = "Min Chen and Hans Hagen and Charles D. Hansen and Christopher R.  Johnson and Arie E. Kaufman",
    series = "Mathematics and Visualization",
    chapter = "19",
    pages = "205-211",
    month = "sep",
    images = "images/no_thumb.png",
    thumbnails = "images/no_thumb.png",
    doi = "10.1007/978-1-4471-6497-5_19",
    keywords = "uncertainty, heuristics, problem solving",
    owner = "hausser",
    timestamp = "2015.02.06",
    isbn = "978-1-4471-6496-8",
    url = "http://www.springer.com/mathematics/computational+science+%26+engineering/book/978-1-4471-6496-8"
    }
    [PDF] [VID] [Bibtex]
    @INPROCEEDINGS {Kolesar-2014-IPT,
    author = "Ivan Kolesar and Julius Parulek and Ivan Viola and Stefan Bruckner and Anne-Kristin Stavrum and Helwig Hauser",
    title = "Illustrating Polymerization using Three-level Model Fusion",
    booktitle = "Proceedings of IEEE BioVis 2014",
    year = "2014",
    month = "aug",
    abstract = "Research in cell biology is steadily contributing new knowledge about  many different aspects of physiological processes like polymerization,  both with respect to the involved molecular structures as well as  their related function. Illustrations of the spatio-temporal development  of such processes are not only used in biomedical education, but  also can serve scientists as an additional platform for in-silico  experiments. In this paper, we contribute a new, three-level modeling  approach to illustrate physiological processes from the class of  polymerization at different time scales. We integrate physical and  empirical modeling, according to which approach suits the different  involved levels of detail best, and we additionally enable a simple  form of interactive steering while the process is illustrated. We  demonstrate the suitability of our approach in the context of several  polymerization processes and report from a first evaluation with  domain experts.",
    pdf = "pdfs/Kolesar-2014-IPT.pdf",
    vid = "vids/Kolesar14Polymers.mp4",
    images = "images/Kolesar-2014-IPT.jpg",
    thumbnails = "images/Kolesar-2014-IPT.png",
    keywords = "biochemical visualization, L-system modeling, multi-agent modeling, visualization of physiology, polymerization",
    owner = "bruckner",
    project = "physioillustration",
    timestamp = "2014.12.29"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Parulek-2014-CLV,
    author = "Julius Parulek and Daniel J{\"o}nsson and Timo Ropinski and Stefan Bruckner and Anders Ynnerman and Ivan Viola",
    title = "Continuous Levels-of-Detail and Visual Abstraction for Seamless Molecular Visualization",
    journal = "Computer Graphics Forum",
    year = "2014",
    volume = "33",
    number = "6",
    pages = "276--287",
    month = "sep",
    abstract = "Molecular visualization is often challenged with rendering of large  molecular structures in real time. We introduce a novel approach  that enables us to show even large protein complexes. Our method  is based on the level-of-detail concept, where we exploit three different  abstractions combined in one visualization. Firstly, molecular surface  abstraction exploits three different surfaces, solvent-excluded surface  (SES), Gaussian kernels and van der Waals spheres, combined as one  surface by linear interpolation. Secondly, we introduce three shading  abstraction levels and a method for creating seamless transitions  between these representations. The SES representation with full shading  and added contours stands in focus while on the other side a sphere  representation of a cluster of atoms with constant shading and without  contours provide the context. Thirdly, we propose a hierarchical  abstraction based on a set of clusters formed on molecular atoms.  All three abstraction models are driven by one importance function  classifying the scene into the near-, mid- and far-field. Moreover,  we introduce a methodology to render the entire molecule directly  using the A-buffer technique, which further improves the performance.  The rendering performance is evaluated on series of molecules of  varying atom counts.",
    pdf = "pdfs/Parulek-2014-CLV.pdf",
    images = "images/Parulek-2014-CLV.jpg",
    thumbnails = "images/Parulek-2014-CLV.png",
    issn = "1467-8659",
    doi = "10.1111/cgf.12349",
    keywords = "level of detail algorithms, implicit surfaces, clustering, scientific visualization",
    project = "physioillustration"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Swoboda-2014-VQA,
    author = "Nicolas Swoboda and Judith Moosburner and Stefan Bruckner and Jai Y. Yu and Barry J. Dickson and Katja B{\"u}hler",
    title = "Visual and Quantitative Analysis of Higher Order Arborization Overlaps for Neural Circuit Research",
    booktitle = "Proceedings of VCBM 2014",
    year = "2014",
    pages = "107--116",
    month = "sep",
    abstract = "Neuroscientists investigate neural circuits in the brain of the common  fruit fly Drosophila melanogaster to discover how complex behavior  is generated. Hypothesis building on potential connections between  individual neurons is an essential step in the discovery of circuits  that govern a specific behavior. Overlaps of arborizations of two  or more neurons indicate a potential anatomical connection, i.e.  the presence of joint synapses responsible for signal transmission  between neurons. Obviously, the number of higher order overlaps (i.e.  overlaps of three and more arborizations) increases exponentially  with the number of neurons under investigation making it almost impossible  to precompute quantitative information for all possible combinations.  Thus, existing solutions are restricted to pairwise comparison of  overlaps as they are relying on precomputed overlap quantification.  Analyzing overlaps by visual inspection of more than two arborizations  in 2D sections or in 3D is impeded by visual clutter or occlusion.  This work contributes a novel tool that complements existing methods  for potential connectivity exploration by providing for the first  time the possibility to compute and visualize higher order arborization  overlaps on the fly and to interactively explore this information  in its spatial anatomical context and on a quantitative level. Qualitative  evaluation with neuroscientists and non-expert users demonstrated  the utility and usability of the tool.",
    pdf = "pdfs/Swoboda-2014-VQA.pdf",
    images = "images/Swoboda-2014-VQA.jpg",
    thumbnails = "images/Swoboda-2014-VQA.png",
    youtube = "https://www.youtube.com/watch?v=iW2iVppPnsE",
    note = "VCBM 2014 Best Paper Honorable Mention",
    doi = "10.2312/vcbm.20141189",
    event = "VCBM 2014",
    keywords = "visual analysis, neurobiology",
    location = "Vienna, Austria"
    }
    [Bibtex]
    @MISC {Brambilla14Video,
    author = "Andrea Brambilla",
    title = "Video Visualization: An Overview",
    howpublished = "Trial lecture, University of Bergen",
    month = "November",
    year = "2014",
    abstract = "Videos are one of the most widespread media for collecting, communicating and archiving information. Nowadays, acquiring videos is a relatively straightforward process, and this explains their success in the context of entertainment, surveillance, sport events, and so on. On the other hand, watching and extracting information from a video stream is a lengthy process. Automatic techniques are only partially sucessful because of the intrinsic complexity of this kind of data. Video visualization is a growing research field which aims at easying the study of video data. It relies on both automatic techniques and user interaction, exploiting the best of both worlds. In this talk, I will introduce this field, focusing on its evolution from computer vision. I will discuss the main challenges and present an overview of the state-of-the-art. The talk will conclude with a discussion of the open problems and the expected future developments.",
    images = "images/Brambilla14Video.png",
    thumbnails = "images/Brambilla14Video.png",
    location = "Bergen, Norway",
    pres = "pdfs/Brambilla14Video.pptx"
    }
    [PDF] [DOI] [Bibtex]
    @INCOLLECTION {Amirkhanov-2014-HSH,
    author = "Artem Amirkhanov and Stefan Bruckner and Christoph Heinzl and Meister Eduard Gr{\"o}ller",
    title = "The Haunted Swamps of Heuristics: Uncertainty in Problem Solving",
    booktitle = "Scientific Visualization: Uncertainty, Multifield, Biomedical, and Scalable Visualization",
    publisher = "Springer",
    year = "2014",
    editor = "Min Chen and Hans Hagen and Charles D. Hansen and Christopher R. Johnson and Arie E. Kaufman",
    series = "Mathematics and Visualization",
    chapter = "5",
    pages = "51--60",
    month = "sep",
    abstract = "In scientific visualization the key task of research is the provision  of insight into a problem. Finding the solution to a problem may  be seen as finding a path through some rugged terrain which contains  mountains, chasms, swamps, and few flatlands. This path - an algorithm  discovered by the researcher - helps users to easily move around  this unknown area. If this way is a wide road paved with stones it  will be used for a long time by many travelers. However, a narrow  footpath leading through deep forests and deadly swamps will attract  only a few adventure seekers. There are many different paths with  different levels of comfort, length, and stability, which are uncertain  during the research process. Finding a systematic way to deal with  this uncertainty can greatly assist the search for a safe path which  is in our case the development of a suitable visualization algorithm  for a specific problem. In this work we will analyze the sources  of uncertainty in heuristically solving visualization problems and  will propose directions to handle these uncertainties.",
    pdf = "pdfs/Amirkhanov-2014-HSH.pdf",
    images = "images/Amirkhanov-2014-HSH.jpg",
    thumbnails = "images/Amirkhanov-2014-HSH.png",
    doi = "10.1007/978-1-4471-6497-5_5",
    keywords = "uncertainty, heuristics, problem solving",
    owner = "bruckner",
    timestamp = "2014.12.30",
    url = "http://www.springer.com/mathematics/computational+science+%26+engineering/book/978-1-4471-6496-8"
    }
    [DOI] [Bibtex]
    @INCOLLECTION {turkay2014computationally,
    author = "Turkay, Cagatay and Jeanquartier, Fleur and Holzinger, Andreas and Hauser, Helwig",
    title = "On computationally-enhanced visual analysis of heterogeneous data and its application in biomedical informatics",
    booktitle = "Interactive Knowledge Discovery and Data Mining in Biomedical Informatics",
    publisher = "Springer",
    year = "2014",
    pages = "117--140",
    abstract = "With the advance of new data acquisition and generation technologies, the biomedical domain is becoming increasingly data-driven. Thus, understanding the information in large and complex data sets has been in the focus of several research fields such as statistics, data mining, machine learning, and visualization. While the first three fields predominantly rely on computational power, visualization relies mainly on human perceptual and cognitive capabilities for extracting information. Data visualization, similar to Human–Computer Interaction, attempts an appropriate interaction between human and data to interactively exploit data sets. Specifically within the analysis of complex data sets, visualization researchers have integrated computational methods to enhance the interactive processes. In this state-of-the-art report, we investigate how such an integration is carried out. We study the related literature with respect to the underlying analytical tasks and methods of integration. In addition, we focus on how such methods are applied to the biomedical domain and present a concise overview within our taxonomy. Finally, we discuss some open problems and future challenges.",
    images = "images/img_Page_12_Image_0001.jpg, images/img_Page_12_Image_0002.jpg, images/img_Page_12_Image_0003.jpg",
    thumbnails = "images/img_Page_12_Image_0001.jpg",
    doi = "10.1007/978-3-662-43968-5_7)"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Sedlmair-2014-VPS,
    author = "Michael Sedlmair and Christoph Heinzl and Stefan Bruckner and Harald Piringer and Torsten M{\"o}ller",
    title = "Visual Parameter Space Analysis: A Conceptual Framework",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2014",
    volume = "20",
    number = "12",
    pages = "2161--2170",
    month = "dec",
    abstract = "Various case studies in different application domains have shown the  great potential of visual parameter space analysis to support validating  and using simulation models. In order to guide and systematize research  endeavors in this area, we provide a conceptual framework for visual  parameter space analysis problems. The framework is based on our  own experience and a structured analysis of the visualization literature.  It contains three major components: (1) a data flow model that helps  to abstractly describe visual parameter space analysis problems independent  of their application domain; (2) a set of four navigation strategies  of how parameter space analysis can be supported by visualization  tools; and (3) a characterization of six analysis tasks. Based on  our framework, we analyze and classify the current body of literature,  and identify three open research gaps in visual parameter space analysis.  The framework and its discussion are meant to support visualization  designers and researchers in characterizing parameter space analysis  problems and to guide their design and evaluation processes.",
    pdf = "pdfs/Sedlmair-2014-VPS.pdf",
    images = "images/Sedlmair-2014-VPS.jpg",
    thumbnails = "images/Sedlmair-2014-VPS.png",
    doi = "10.1109/TVCG.2014.2346321",
    event = "IEEE VIS 2014",
    keywords = "parameter space analysis, input-output model, simulation, task characterization, literature analysis",
    location = "Paris, France"
    }
    [PDF] [Bibtex]
    @MISC {Hauser2014Dagstuhl,
    author = "Helwig Hauser",
    title = "Semi-abstract visualization of rich scientific data",
    howpublished = "Invited talk at the Dagstuhl 14231 Seminar on Scientific Visualization, Dagstuhl, Germany",
    month = "June",
    year = "2014",
    abstract = "Invited talk at the Dagstuhl 14231 Seminar on Scientific Visualization, Dagstuhl, Germany",
    pdf = "pdfs/2014-06-06-Dagstuhl-SemiAbstractSciVis-print2up.pdf",
    images = "images/2014-06-06-Dagstuhl-SemiAbstractSciVis-print2up_Image_0002(3).jpg, images/2014-06-06-Dagstuhl-SemiAbstractSciVis-print2up_Image_0002(2).jpg, images/2014-06-06-Dagstuhl-SemiAbstractSciVis-print2up_Image_0002.jpg, images/2014-06-06-Dagstuhl-SemiAbstractSciVis-print2up_Image_0008.jpg",
    thumbnails = "images/2014-06-06-Dagstuhl-SemiAbstractSciVis-print2up_Image_0002(3).jpg",
    location = "Dagstuhl, Germany"
    }