Publications

Authors: Type:

2021

    [PDF] [VID] [Bibtex]
    @article{Kristiansen-2021-SSG,
    author = {Kristiansen, Y. S. and Garrison, L. and Bruckner, S.},
    title = {Semantic Snapping for Guided Multi-View Visualization Design},
    journal = {IEEE Transactions on Visualization and Computer Graphics},
    year = {2021},
    abstract = {Visual information displays are typically composed of multiple visualizations that are used to facilitate an understanding of the underlying data. A common example are dashboards, which are frequently used in domains such as finance, process monitoring and business intelligence. However, users may not be aware of existing guidelines and lack expert design knowledge when composing such multi-view visualizations. In this paper, we present semantic snapping, an approach to help non-expert users design effective multi-view visualizations from sets of pre-existing views. When a particular view is placed on a canvas, it is ``aligned'' with the remaining views--not with respect to its geometric layout, but based on aspects of the visual encoding itself, such as how data dimensions are mapped to channels. Our method uses an on-the-fly procedure to detect and suggest resolutions for conflicting, misleading, or ambiguous designs, as well as to provide suggestions for alternative presentations. With this approach, users can be guided to avoid common pitfalls encountered when composing visualizations. Our provided examples and case studies demonstrate the usefulness and validity of our approach.},
    note = {Accepted for publication, to be presented at IEEE VIS 2021},
    project = {MetaVis,VIDI},
    pdf = {pdfs/Kristiansen-2021-SSG.pdf},
    vid = {vids/Kristiansen-2021-SSG.mp4},
    thumbnails = {images/Kristiansen-2021-SSG.png},
    images = {images/Kristiansen-2021-SSG.jpg},
    keywords = {tabular data, guidelines, mixed initiative human-machine analysis, coordinated and multiple views},
    }
    [PDF] [Bibtex]
    @inproceedings{Garrison-2021-EPP,
    author = {Garrison, Laura and Meuschke, Monique and Fairman, Jennifer and Smit, Noeska and Preim, Bernhard and Bruckner, Stefan},
    title = {An Exploration of Practice and Preferences for the Visual Communication of Biomedical Processes},
    booktitle = {Proceedings of {VCBM}},
    year = {2021},
    abstract = {The visual communication of biomedical processes draws from diverse techniques in both visualization and biomedical illustration. However, matching these techniques to their intended audience often relies on practice-based heuristics or narrow-scope evaluations. We present an exploratory study of the criteria that audiences use when evaluating a biomedical process visualization targeted for communication. Designed over a series of expert interviews and focus groups, our study focuses on common communication scenarios of five well-known biomedical processes and their standard visual representations. We framed these scenarios in a survey with participant expertise spanning from minimal to expert knowledge of a given topic. Our results show frequent overlap in abstraction preferences between expert and non-expert audiences, with similar prioritization of clarity and the ability of an asset to meet a given communication objective. We also found that some illustrative conventions are not as clear as we thought, e.g., glows have broadly ambiguous meaning, while other approaches were unexpectedly preferred, e.g., biomedical illustrations in place of data-driven visualizations. Our findings suggest numerous opportunities for the continued convergence of visualization and biomedical illustration techniques for targeted visualization design.},
    note = {Accepted for publication, to be presented at VCBM 2021},
    project = {VIDI,ttmedvis},
    pdf = {pdfs/Garrison-2021-EPP.pdf},
    thumbnails = {images/Garrison-2021-EPP.png},
    images = {images/Garrison-2021-EPP.jpg},
    url = {https://github.com/lauragarrison87/Biomedical_Process_Vis},
    keywords = {biomedical illustration, visual communication, survey},
    }
    [PDF] [DOI] [Bibtex]
    @article{brushingComparison,
    author = {Fan, Chaoran and Hauser, Helwig},
    journal = {IEEE Computer Graphics and Applications},
    title = {On sketch-based selections from scatterplots using {KDE}, compared to {Mahalanobis} and {CNN} brushing},
    year = {2021},
    pages = {1--13},
    doi = {10.1109/MCG.2021.3097889},
    abstract = {Fast and accurate brushing is crucial in visual data exploration and sketch-based solutions are successful methods. In this paper, we detail a solution, based on kernel density estimation (KDE), which computes a data subset selection in a scatterplot from a simple click-and-drag interaction. We explain, how this technique relates to two alternative approaches, i.e., Mahalanobis brushing and CNN brushing. To study this relation, we conducted two user studies and present both a quantitative three-fold comparison as well as additional details about the prevalence of all possible cases in that each technique succeeds/fails. With this, we also provide a comparison between empirical modeling and implicit modeling by deep learning in terms of accuracy, efficiency, generality and interpretability.},
    pdf = {pdfs/Fan-2021-brushingComparison.pdf},
    images = {images/Fan-2021-brushingComparison.png},
    thumbnails = {images/Fan-2021-brushingComparison.png},
    }
    [DOI] [Bibtex]
    @incollection{Smit-2021-COMULIS,
    author = {Smit, Noeska and B{\"u}hler, Katja and Vilanova, Anna and Falk, Martin},
    title = {Visualisation for correlative multimodal imaging},
    booktitle = {Imaging Modalities for Biological and Preclinical Research: A Compendium, Volume 2},
    publisher = {IOP Publishing},
    year = {2021},
    series = {2053-2563},
    type = {Book Chapter},
    pages = {III.4.e-1 to III.4.e-10},
    doi = {10.1088/978-0-7503-3747-2ch28},
    isbn = {978-0-7503-3747-2},
    thumbnails = {images/Smit-2021-COMULIS.PNG},
    images = {images/Smit-2021-COMULIS.PNG},
    project = {ttmedvis},
    abstract = {The field of visualisation deals with finding appropriate visual representations of data so people can effectively carry out tasks related to data exploration, analysis, or presentation using the power of the human visual perceptual system. In the context of biomedical imaging data, interactive visualisation techniques can be employed, for example, to visually explore data, as image processing quality assurance, or in publications to communicate findings. When dealing with correlative imaging, challenges arise in how to effectively convey the information from multiple sources. In particular, the information density leads to the need for a critical reflection on the visual design with respect to which parts of the data are important to show and at what level of importance they should be visualised. In this chapter, we describe several approaches to interactive imaging data visualisation in general, highlight several strategies for visualising correlative multimodal imaging data, and provide examples and practical recommendations.}
    }
    [PDF] [DOI] [VID] [YT] [Bibtex]
    @article{Trautner-2021-LWI,
    author = {Trautner, Thomas and Bruckner, Stefan},
    title = {Line Weaver: Importance-Driven Order Enhanced Rendering of Dense Line Charts},
    journal = {Computer Graphics Forum},
    year = {2021},
    volume = {40},
    number = {3},
    pages = {399--410},
    doi = {10.1111/cgf.14316},
    keywords = {information visualization, visualization techniques, line charts},
    abstract = {Line charts are an effective and widely used technique for visualizing series of ordered two-dimensional data points. The relationship between consecutive points is indicated by connecting line segments, revealing potential trends or clusters in the underlying data. However, when dealing with an increasing number of lines, the render order substantially influences the resulting visualization. Rendering transparent lines can help but unfortunately the blending order is currently either ignored or naively used, for example, assuming it is implicitly given by the order in which the data was saved in a file. Due to the noncommutativity of classic alpha blending, this results in contradicting visualizations of the same underlying data set, so-called "hallucinators". In this paper, we therefore present line weaver, a novel visualization technique for dense line charts. Using an importance function, we developed an approach that correctly considers the blending order independently of the render order and without any prior sorting of the data. We allow for importance functions which are either explicitly given or implicitly derived from the geometric properties of the data if no external data is available. The importance can then be applied globally to entire lines, or locally per pixel which simultaneously supports various types of user interaction. Finally, we discuss the potential of our contribution based on different synthetic and real-world data sets where classic or naive approaches would fail.},
    pdf = {pdfs/Trautner-2021-LWI.pdf},
    thumbnails = {images/Trautner-2021-LWI-thumb.png},
    images = {images/Trautner-2021-LWI-thumb.png},
    vid = {vids/Trautner_2021_LineWeaver_video.mp4},
    youtube = {https://youtu.be/-hLF5XSR_ws},
    git = {https://github.com/TTrautner/LineWeaver},
    project = {MetaVis},
    }
    [PDF] [VID] [Bibtex]
    @article{Diehl-2021-HTC,
    author = {Diehl, Alexandra and Pelorosso, Rodrigo and Ruiz, Juan and Pajarola, Renato and Gr{\"o}ller, Meister Eduard and Bruckner, Stefan},
    title = {Hornero: Thunderstorms Characterization using Visual Analytics},
    journal = {Computer Graphics Forum},
    year = {2021},
    volume = {40},
    number = {3},
    keywords = {visual analytics, weather forecasting, nowcasting},
    abstract = {Analyzing the evolution of thunderstorms is critical in determining the potential for the development of severe weather events. Existing visualization systems for short-term weather forecasting (nowcasting) allow for basic analysis and prediction of storm developments. However, they lack advanced visual features for efficient decision-making. We developed a visual analytics tool for the detection of hazardous thunderstorms and their characterization, using a visual design centered on a reformulated expert task workflow that includes visual features to overview storms and quickly identify high-impact weather events, a novel storm graph visualization to inspect and analyze the storm structure, as well as a set of interactive views for efficient identification of similar storm cells (known as analogs) in historical data and their use for nowcasting. Our tool was designed with and evaluated by meteorologists and expert forecasters working in short-term operational weather forecasting of severe weather events. Results show that our solution suits the forecasters' workflow. Our visual design is expressive, easy to use, and effective for prompt analysis and quick decision-making in the context of short-range operational weather forecasting.},
    pdf = {pdfs/Diehl-2021-HTC.pdf},
    thumbnails = {images/Diehl-2021-HTC.png},
    images = {images/Diehl-2021-HTC.jpg},
    vid = {vids/Diehl-2021-HTC.mp4},
    project = {MetaVis},
    }
    [PDF] [DOI] [YT] [Bibtex]
    @article{Garrison-2021-DimLift,
    author = {Garrison, Laura and M{\"u}ller, Juliane and Schreiber, Stefanie and Oeltze-Jafra, Steffen and Hauser, Helwig and Bruckner, Stefan},
    title = {DimLift: Interactive Hierarchical Data Exploration through Dimensional Bundling},
    journal = {IEEE Transactions on Visualization and Computer Graphics},
    year = {2021},
    volume = {27},
    number = {6},
    pages = {2908--2922},
    abstract = {The identification of interesting patterns and relationships is essential to exploratory data analysis. This becomes increasingly difficult in high dimensional datasets. While dimensionality reduction techniques can be utilized to reduce the analysis space, these may unintentionally bury key dimensions within a larger grouping and obfuscate meaningful patterns. With this work we introduce DimLift, a novel visual analysis method for creating and interacting with dimensional bundles. Generated through an iterative dimensionality reduction or user-driven approach, dimensional bundles are expressive groups of dimensions that contribute similarly to the variance of a dataset. Interactive exploration and reconstruction methods via a layered parallel coordinates plot allow users to lift interesting and subtle relationships to the surface, even in complex scenarios of missing and mixed data types. We exemplify the power of this technique in an expert case study on clinical cohort data alongside two additional case examples from nutrition and ecology.},
    pdf = {pdfs/garrison-2021-dimlift.pdf},
    images = {images/garrison_dimlift.jpg},
    thumbnails = {images/garrison_dimlift_thumb.jpg},
    youtube = {https://youtu.be/JSZuhnDyugA},
    doi = {10.1109/TVCG.2021.3057519},
    git = {https://github.com/lauragarrison87/DimLift},
    project = {VIDI},
    }
    [PDF] [DOI] [Bibtex]
    @article{Mueller-2021-IDA,
    author = {M{\"u}ller, Juliane and Garrison, Laura and Ulbrich, Philipp and Schreiber, Stefanie and Bruckner, Stefan and Hauser, Helwig and Oeltze-Jafra, Steffen},
    title = {Integrated Dual Analysis of Quantitative and Qualitative High-Dimensional Data},
    journal = {IEEE Transactions on Visualization and Computer Graphics},
    year = {2021},
    volume = {27},
    number = {6},
    pages = {2953--2966},
    abstract = {The Dual Analysis framework is a powerful enabling technology for the exploration of high dimensional quantitative data by treating data dimensions as first-class objects that can be explored in tandem with data values. In this work, we extend the Dual Analysis framework through the joint treatment of quantitative (numerical) and qualitative (categorical) dimensions. Computing common measures for all dimensions allows us to visualize both quantitative and qualitative dimensions in the same view. This enables a natural joint treatment of mixed data during interactive visual exploration and analysis. Several measures of variation for nominal qualitative data can also be applied to ordinal qualitative and quantitative data. For example, instead of measuring variability from a mean or median, other measures assess inter-data variation or average variation from a mode. In this work, we demonstrate how these measures can be integrated into the Dual Analysis framework to explore and generate hypotheses about high-dimensional mixed data. A medical case study using clinical routine data of patients suffering from Cerebral Small Vessel Disease (CSVD), conducted with a senior neurologist and a medical student, shows that a joint Dual Analysis approach for quantitative and qualitative data can rapidly lead to new insights based on which new hypotheses may be generated.},
    pdf = {pdfs/Mueller_2020_IDA.pdf},
    images = {images/Mueller_2020_IDA.jpg},
    thumbnails = {images/Mueller_2020_IDA.png},
    doi = {10.1109/TVCG.2021.3056424},
    git = {https://github.com/JulianeMu/IntegratedDualAnalysisAproach_MDA},
    project = {VIDI},
    }
    [PDF] [DOI] [Bibtex]
    @article{Palenik-2020-IsoTrotter,
    author = {P{\'a}lenik, Juraj and Spengler, Thomas and Hauser, Helwig},
    journal = {IEEE Transactions on Visualization and Computer Graphics},
    title = {{IsoTrotter}: Visually Guided Empirical Modelling of Atmospheric Convection},
    abstract = {Empirical models, fitted to data from observations, are often used in natural sciences to describe physical behaviour and support discoveries. However, with more complex models, the regression of parameters quickly becomes insufficient, requiring a visual parameter space analysis to understand and optimize the models. In this work, we present a design study for building a model describing atmospheric convection. We present a mixed-initiative approach to visually guided modelling, integrating an interactive visual parameter space analysis with partial automatic parameter optimization. Our approach includes a new, semi-automatic technique called IsoTrotting, where we optimize the procedure by navigating along isocontours of the model. We evaluate the model with unique observational data of atmospheric convection based on flight trajectories of paragliders.},
    year = {2021},
    volume = {27},
    number = {2},
    pages = {775--784},
    doi = {10.1109/TVCG.2020.3030389},
    pdf = {pdfs/2020-10-20-Palenik-IsoTrotter.pdf},
    images = {images/IsoTrotter2020.png},
    thumbnails = {images/IsoTrotter2020.png}
    }
    [PDF] [DOI] [Bibtex]
    @article{bolte2020splitstreams,
    author = {Bolte, Fabian and Nourani, Mahsan and Ragan, Eric and Bruckner, Stefan},
    journal = {IEEE Transactions on Visualization and Computer Graphics},
    title = {SplitStreams: A Visual Metaphor for Evolving Hierarchies},
    year = {2021},
    volume = {27},
    number = {8},
    keywords = {Information Visualization, Trees, Data Structures and Data Types, Visualization Techniques and Methodologies},
    doi = {10.1109/TVCG.2020.2973564},
    url = {https://arxiv.org/pdf/2002.03891.pdf},
    abstract = {The visualization of hierarchically structured data over time is an ongoing challenge and several approaches exist trying to solve it. Techniques such as animated or juxtaposed tree visualizations are not capable of providing a good overview of the time series and lack expressiveness in conveying changes over time. Nested streamgraphs provide a better understanding of the data evolution, but lack the clear outline of hierarchical structures at a given timestep. Furthermore, these approaches are often limited to static hierarchies or exclude complex hierarchical changes in the data, limiting their use cases. We propose a novel visual metaphor capable of providing a static overview of all hierarchical changes over time, as well as clearly outlining the hierarchical structure at each individual time step. Our method allows for smooth transitions between tree maps and nested streamgraphs, enabling the exploration of the trade-off between dynamic behavior and hierarchical structure. As our technique handles topological changes of all types, it is suitable for a wide range of applications. We demonstrate the utility of our method on several use cases, evaluate it with a user study, and provide its full source code.},
    pdf = {pdfs/Bolte-2020-SplitStreams.pdf},
    images = {images/Bolte-2020-SplitStreams.png},
    thumbnails = {images/Bolte-2020-SplitStreams_thumb.png},
    project = {MetaVis},
    git = {https://github.com/cadanox/SplitStreams}
    }
    [PDF] [DOI] [YT] [Bibtex]
    @article{bolte2019visavis,
    author = {Bolte, Fabian and Bruckner, Stefan},
    journal = {IEEE Transactions on Visualization and Computer Graphics},
    title = {Vis-a-Vis: Visual Exploration of Visualization Source Code Evolution},
    year = {2021},
    volume = {27},
    number = {7},
    pages = {3153--3167},
    keywords = {Visualization System and Toolkit Design, User Interfaces, Integrating Spatial and Non-Spatial Data Visualization, Software Visualization},
    doi = {10.1109/TVCG.2019.2963651},
    issn = {2160-9306},
    url = {https://arxiv.org/pdf/2001.02092.pdf},
    abstract = {Developing an algorithm for a visualization prototype often involves the direct comparison of different development stages and design decisions, and even minor modifications may dramatically affect the results. While existing development tools provide visualizations for gaining general insight into performance and structural aspects of the source code, they neglect the central importance of result images unique to graphical algorithms. In this paper, we present a novel approach that enables visualization programmers to simultaneously explore the evolution of their algorithm during the development phase together with its corresponding visual outcomes by providing an automatically updating meta visualization. Our interactive system allows for the direct comparison of all development states on both the visual and the source code level, by providing easy to use navigation and comparison tools. The on-the-fly construction of difference images, source code differences, and a visual representation of the source code structure further enhance the user's insight into the states' interconnected changes over time. Our solution is accessible via a web-based interface that provides GPU-accelerated live execution of C++ and GLSL code, as well as supporting a domain-specific programming language for scientific visualization.},
    pdf = {pdfs/Bolte-2019-Visavis.pdf},
    images = {images/Bolte-2019-Visavis.png},
    thumbnails = {images/Bolte-2019-Visavis_thumb.png},
    youtube = {https://www.youtube.com/watch?v=5XO6BU4j1KQ},
    project = {MetaVis}
    }

2020

    [PDF] [DOI] [Bibtex]
    @article{sketchingQuery,
    author = {Fan, Chaoran and Matkovic, Kresimir and Hauser, Helwig},
    journal = {IEEE Transactions on Visualization and Computer Graphics},
    title = {Sketch-based fast and accurate querying of time series using parameter-sharing {LSTM} networks},
    year = {2020},
    pages = {1--12},
    doi = {10.1109/TVCG.2020.3002950},
    abstract = {Sketching is one common approach to query time series data for patterns of interest. Most existing solutions for matching the data with the interaction are based on an empirically modeled similarity function between the user's sketch and the time series data with limited efficiency and accuracy. In this paper, we introduce a machine learning based solution for fast and accurate querying of time series data based on a swift sketching interaction. We build on existing LSTM technology (long short-term memory) to encode both the sketch and the time series data in a network with shared parameters. We use data from a user study to let the network learn a proper similarity function. We focus our approach on perceived similarities and achieve that the learned model also includes a user-side aspect. To the best of our knowledge, this is the first data-driven solution for querying time series data in visual analytics. Besides evaluating the accuracy and efficiency directly in a quantitative way, we also compare our solution to the recently published Qetch algorithm as well as the commonly used dynamic time warping (DTW) algorithm.},
    pdf = {pdfs/Fan-2020-sketchingQuery.pdf},
    images = {images/Fan-2020-sketchingQuery.png},
    thumbnails = {images/Fan-2020-sketchingQuery.png},
    }
    [PDF] [DOI] [Bibtex]
    @article{Garrison-2020-IVE,
    author = {Garrison, Laura and Va{\v{s}}{\'\i}{\v{c}}ek, Jakub and Craven, Alex R. and Gr{\"u}ner, Renate and Smit, Noeska and Bruckner, Stefan},
    title = {Interactive Visual Exploration of Metabolite Ratios in {MR} Spectroscopy Studies},
    journal = {Computers \& Graphics},
    year = {2020},
    volume = {92},
    pages = {1--12},
    keywords = {medical visualization, magnetic resonance spectroscopy data, information visualization, user-centered design},
    doi = {10.1016/j.cag.2020.08.001},
    abstract = {Magnetic resonance spectroscopy (MRS) is an advanced biochemical technique used to identify metabolic compounds in living tissue. While its sensitivity and specificity to chemical imbalances render it a valuable tool in clinical assessment, the results from this modality are abstract and difficult to interpret. With this design study we characterized and explored the tasks and requirements for evaluating these data from the perspective of a MRS research specialist. Our resulting tool, SpectraMosaic, links with upstream spectroscopy quantification software to provide a means for precise interactive visual analysis of metabolites with both single- and multi-peak spectral signatures. Using a layered visual approach, SpectraMosaic allows researchers to analyze any permutation of metabolites in ratio form for an entire cohort, or by sample region, individual, acquisition date, or brain activity status at the time of acquisition. A case study with three MRS researchers demonstrates the utility of our approach in rapid and iterative spectral data analysis.},
    pdf = {pdfs/Garrison-2020-IVE.pdf},
    thumbnails = {images/Garrison-2020-IVE.png},
    images = {images/Garrison-2020-IVE.jpg},
    project = {VIDI},
    git = {https://github.com/mmiv-center/spectramosaic-public},
    }
    [PDF] [DOI] [Bibtex]
    @article{Kristiansen-2020-VIV,
    author = {Kristiansen, Yngve Sekse and Bruckner, Stefan},
    title = {Visception: An Interactive Visual Framework for Nested Visualization Design},
    journal = {Computers \& Graphics},
    year = {2020},
    volume = {92},
    pages = {13--27},
    keywords = {information visualization, nested visualizations, nesting},
    doi = {10.1016/j.cag.2020.08.007},
    abstract = {Nesting is the embedding of charts into the marks of another chart. Related to principles such as Tufte's rule of utilizing micro/macro readings, nested visualizations have been employed to increase information density, providing compact representations of multi-dimensional and multi-typed data entities. Visual authoring tools are becoming increasingly prevalent, as they make visualization technology accessible to non-expert users such as data journalists, but existing frameworks provide no or only very limited functionality related to the creation of nested visualizations. In this paper, we present an interactive visual approach for the flexible generation of nested multilayer visualizations. Based on a hierarchical representation of nesting relationships coupled with a highly customizable mechanism for specifying data mappings, we contribute a flexible framework that enables defining and editing data-driven multi-level visualizations. As a demonstration of the viability of our framework, we contribute a visual builder for exploring, customizing and switching between different designs, along with example visualizations to demonstrate the range of expression. The resulting system allows for the generation of complex nested charts with a high degree of flexibility and fluidity using a drag and drop interface.},
    pdf = {pdfs/Kristiansen-2020-VIV.pdf},
    thumbnails = {images/Kristiansen-2020-VIV.png},
    images = {images/Kristiansen-2020-VIV.jpg},
    project = {MetaVis}
    }
    [PDF] [DOI] [Bibtex]
    @article{RadEx,
    author = {M{\"o}rth, E. and Wagner-Larsen, K. and Hodneland, E. and Krakstad, C. and Haldorsen, I. S. and Bruckner, S. and Smit, N. N.},
    title = {RadEx: Integrated Visual Exploration of Multiparametric Studies for Radiomic Tumor Profiling},
    journal = {Computer Graphics Forum},
    year = {2020},
    volume = {39},
    number = {7},
    pages = {611--622},
    abstract = {Better understanding of the complex processes driving tumor growth and metastases is critical for developing targeted treatment strategies in cancer. Radiomics extracts large amounts of features from medical images which enables radiomic tumor profiling in combination with clinical markers. However, analyzing complex imaging data in combination with clinical data is not trivial and supporting tools aiding in these exploratory analyses are presently missing. In this paper, we present an approach that aims to enable the analysis of multiparametric medical imaging data in combination with numerical, ordinal, and categorical clinical parameters to validate established and unravel novel biomarkers. We propose a hybrid approach where dimensionality reduction to a single axis is combined with multiple linked views allowing clinical experts to formulate hypotheses based on all available imaging data and clinical parameters. This may help to reveal novel tumor characteristics in relation to molecular targets for treatment, thus providing better tools for enabling more personalized targeted treatment strategies. To confirm the utility of our approach, we closely collaborate with experts from the field of gynecological cancer imaging and conducted an evaluation with six experts in this field.},
    pdf = {pdfs/Moerth-2020-RadEx.pdf},
    images = {images/Moerth-2020-RadEx.jpg},
    thumbnails = {images/Moerth-2020-RadEx-thumb.jpg},
    project = {ttmedvis},
    doi = {10.1111/cgf.14172}
    }
    [PDF] [DOI] [YT] [Bibtex]
    @inproceedings{Moerth-2020-CGI,
    author = {M{\"o}rth, E. and Haldorsen, I. S. and Bruckner, S. and Smit, N. N.},
    title = {ParaGlyder: Probe-driven Interactive Visual Analysis for Multiparametric Medical Imaging Data},
    booktitle = {Proceedings of Computer Graphics International},
    year = {2020},
    pages = {351--363},
    abstract = {Multiparametric medical imaging describes approaches that include multiple imaging sequences acquired within the same imaging examination, as opposed to one single imaging sequence or imaging from multiple imaging modalities. Multiparametric imaging in cancer has been shown to be useful for tumor detection and may also depict functional tumor characteristics relevant for clinical phenotypes. However, when confronted with datasets consisting of multiple values per voxel, traditional reading of the imaging series fails to capture complicated patterns. Those patterns of potentially important imaging properties of the parameter space may be critical for the analysis. Standard approaches, such as transfer functions and juxtapositioned visualizations, fail to convey the shape of the multiparametric parameter distribution in sufficient detail. For these reasons, in this paper we present an approach that aims to enable the exploration and analysis of such multiparametric studies using an interactive visual analysis application to remedy the trade-offs between details in the value domain and in spatial resolution. Interactive probing within or across subjects allows for a digital biopsy that is able to uncover multiparametric tissue properties. This may aid in the discrimination between healthy and cancerous tissue, unravel radiomic tissue features that could be linked to targetable pathogenic mechanisms, and potentially highlight metastases that evolved from the primary tumor. We conducted an evaluation with eleven domain experts from the field of gynecological cancer imaging, neurological imaging, and machine learning research to confirm the utility of our approach.},
    note = {The final authenticated version is available online at https://doi.org/10.1007/978-3-030-61864-3_29},
    pdf = {pdfs/Moerth-2020-CGI-ParaGlyder.pdf},
    images = {images/Moerth-2020-ParaGlyder.PNG},
    thumbnails = {images/Moerth-2020-ParaGlyder-thumb.png},
    youtube = {https://youtu.be/S_M4CWXKz0U},
    publisher = {LNCS by Springer},
    project = {ttmedvis},
    doi = {10.1007/978-3-030-61864-3_29}
    }
    [PDF] [DOI] [Bibtex]
    @article{StormFurru-2020-VGT,
    author = {Storm-Furru, Syver and Bruckner, Stefan},
    title = {{VA-TRAC}: Geospatial Trajectory Analysis for Monitoring, Identification, and Verification in Fishing Vessel Operations},
    journal = {Computer Graphics Forum},
    year = {2020},
    volume = {39},
    number = {3},
    pages = {101--114},
    keywords = {visual analytics, fisheries, monitoring},
    doi = {10.1111/cgf.13966},
    abstract = {In order to ensure sustainability, fishing operations are governed by many rules and regulations that restrict the use of certain techniques and equipment, specify the species and size of fish that can be harvested, and regulate commercial activities based on licensing schemes. As the world's second largest exporter of fish and seafood products, Norway invests a significant amount of effort into maintaining natural ecosystem dynamics by ensuring compliance with its constantly evolving science-based regulatory body. This paper introduces VA-TRAC, a geovisual analytics application developed in collaboration with the Norwegian Directorate of Fisheries in order to address this complex task. Our approach uses automatic methods to identify possible catch operations based on fishing vessel trajectories, embedded in an interactive web-based visual interface used to explore the results, compare them with licensing information, and incorporate the analysts' domain knowledge into the decision making process. We present a data and task analysis based on a close collaboration with domain experts, and the design and implementation of VA-TRAC to address the identified requirements.},
    pdf = {pdfs/StormFurru-2020-VGT.pdf},
    thumbnails = {images/StormFurru-2020-VGT.png},
    images = {images/StormFurru-2020-VGT.jpg},
    project = {MetaVis}
    }
    [PDF] [DOI] [VID] [YT] [Bibtex]
    @article{Trautner-2020-SunspotPlots,
    author = {Trautner, T. and Bolte, F. and Stoppel, S. and Bruckner, S.},
    title = {Sunspot Plots: Model-based Structure Enhancement for Dense Scatter Plots},
    journal = {Computer Graphics Forum},
    volume = {39},
    number = {3},
    pages = {551--563},
    keywords = {information visualization, scatterplots, kernel density estimation},
    doi = {10.1111/cgf.14001},
    abstract = {Scatter plots are a powerful and well-established technique for visualizing the relationships between two variables as a collection of discrete points. However, especially when dealing with large and dense data, scatter plots often exhibit problems such as overplotting, making the data interpretation arduous. Density plots are able to overcome these limitations in highly populated regions, but fail to provide accurate information of individual data points. This is particularly problematic in sparse regions where the density estimate may not provide a good representation of the underlying data. In this paper, we present sunspot plots, a visualization technique that communicates dense data as a continuous data distribution, while preserving the discrete nature of data samples in sparsely populated areas. We furthermore demonstrate the advantages of our approach on typical failure cases of scatter plots within synthetic and real-world data sets and validate its effectiveness in a user study.},
    year = {2020},
    pdf = {pdfs/Trautner_2020_SunspotPlots_PDF.pdf},
    thumbnails = {images/Trautner_2020_SunspotPlots_thumb.png},
    images = {images/Trautner_2020_SunspotPlots_thumb.png},
    internal-note = {NOTE(review): images and thumbnails reference the same thumbnail file -- verify whether a full-size teaser image exists},
    vid = {vids/Trautner_2020_SunspotPlots_video.mp4},
    youtube = {https://youtu.be/G6l-y6YGjzQ},
    project = {MetaVis}
    }
    [PDF] [DOI] [Bibtex]
    @inproceedings{Bolte-2020-ONC,
    author = {Bolte, Fabian and Bruckner, Stefan},
    title = {Organic Narrative Charts},
    booktitle = {Proceedings of Eurographics 2020 (Short Papers)},
    year = {2020},
    pages = {93--96},
    doi = {10.2312/egs.20201026},
    month = may,
    abstract = {Storyline visualizations display the interactions of groups and entities and their development over time. Existing approaches have successfully adopted the general layout from hand-drawn illustrations to automatically create similar depictions. Ward Shelley is the author of several diagrammatic paintings that show the timeline of art-related subjects, such as Downtown Body, a history of art scenes. His drawings include many stylistic elements that are not covered by existing storyline visualizations, like links between entities, splits and merges of streams, and tags or labels to describe the individual elements. We present a visualization method that provides a visual mapping for the complex relationships in the data, creates a layout for their display, and adopts a similar styling of elements to imitate the artistic appeal of such illustrations. We compare our results to the original drawings and provide an open-source authoring tool prototype.},
    pdf = {pdfs/Bolte-2020-ONC.pdf},
    images = {images/Bolte-2020-ONC.jpg},
    thumbnails = {images/Bolte-2020-ONC.png},
    event = {Eurographics 2020},
    keywords = {narrative charts, storylines, aesthetics},
    project = {MetaVis},
    git = {https://github.com/cadanox/orcha}
    }
    [PDF] [DOI] [Bibtex]
    @article{Palenik-2019-Splatting,
    author = {P{\'a}lenik, J. and By{\v{s}}ka, J. and Bruckner, S. and Hauser, H.},
    journal = {IEEE Transactions on Visualization and Computer Graphics},
    title = {Scale-Space Splatting: Reforming Spacetime for Cross-Scale Exploration of Integral Measures in Molecular Dynamics},
    year = {2020},
    volume = {26},
    number = {1},
    pages = {643--653},
    keywords = {scale space, time-series, scientific simulation, multi-scale analysis, space-time cube, molecular dynamics},
    doi = {10.1109/TVCG.2019.2934258},
    issn = {1077-2626},
    pdf = {pdfs/scale-space-splatting.pdf},
    images = {images/scale-space-teaser.png},
    thumbnails = {images/scale-space-teaser-thumb.png},
    abstract = {Understanding large amounts of spatiotemporal data from particle-based simulations, such as molecular dynamics, often relies on the computation and analysis of aggregate measures. These, however, by virtue of aggregation, hide structural information about the space/time localization of the studied phenomena. This leads to degenerate cases where the measures fail to capture distinct behaviour. In order to drill into these aggregate values, we propose a multi-scale visual exploration technique. Our novel representation, based on partial domain aggregation, enables the construction of a continuous scale-space for discrete datasets and the simultaneous exploration of scales in both space and time. We link these two scale-spaces in a scale-space space-time cube and model linked views as orthogonal slices through this cube, thus enabling the rapid identification of spatio-temporal patterns at multiple scales. To demonstrate the effectiveness of our approach, we showcase an advanced exploration of a protein-ligand simulation.}
    }
    [PDF] [DOI] [Bibtex]
    @incollection{Bolte-2019-MVS,
    author = {Bolte, Fabian and Bruckner, Stefan},
    title = {Measures in Visualization Space},
    booktitle = {Foundations of Data Visualization},
    chapter = {3},
    publisher = {Springer},
    year = {2020},
    pdf = {pdfs/Bolte-2019-MVS.pdf},
    images = {images/Bolte-2019-MVS.png},
    thumbnails = {images/Bolte-2019-MVS.png},
    abstract = {Measurement is an integral part of modern science, providing the fundamental means for evaluation, comparison, and prediction. In the context of visualization, several different types of measures have been proposed, ranging from approaches that evaluate particular aspects of individual visualization techniques, their perceptual characteristics, and even economic factors. Furthermore, there are approaches that attempt to provide means for measuring general properties of the visualization process as a whole. Measures can be quantitative or qualitative, and one of the primary goals is to provide objective means for reasoning about visualizations and their effectiveness. As such, they play a central role in the development of scientific theories for visualization. In this chapter, we provide an overview of the current state of the art, survey and classify different types of visualization measures, characterize their strengths and drawbacks, and provide an outline of open challenges for future research.},
    note = {This is a preprint of a chapter for a planned book that was initiated by participants of the Dagstuhl Seminar 18041 ("Foundations of Data Visualization") and that is expected to be published by Springer. The final book chapter will differ from this preprint.},
    internal-note = {NOTE(review): the preprint note above contradicts the final ISBN/DOI below -- confirm whether the note should be dropped now that the chapter is published},
    url = {https://arxiv.org/abs/1909.05295},
    project = {MetaVis},
    isbn = {978-3-030-34443-6},
    doi = {10.1007/978-3-030-34444-3_3}
    }
    [PDF] [DOI] [Bibtex]
    @article{Solteszova-2019-MLT,
    author = {Solteszova, V. and Smit, N. N. and Stoppel, S. and Gr{\"u}ner, R. and Bruckner, S.},
    title = {Memento: Localized Time-Warping for Spatio-Temporal Selection},
    journal = {Computer Graphics Forum},
    volume = {39},
    number = {1},
    pages = {231--243},
    year = {2020},
    keywords = {interaction, temporal data, visualization, spatio-temporal projection},
    images = {images/Solteszova-2019-MLT.jpg},
    thumbnails = {images/Solteszova-2019-MLT-1.jpg},
    pdf = {pdfs/Solteszova-2019-MLT.pdf},
    doi = {10.1111/cgf.13763},
    abstract = {Interaction techniques for temporal data are often focused on affecting the spatial aspects of the data, for instance through the use of transfer functions, camera navigation or clipping planes. However, the temporal aspect of the data interaction is often neglected. The temporal component is either visualized as individual time steps, an animation or a static summary over the temporal domain. When dealing with streaming data, these techniques are unable to cope with the task of re-viewing an interesting local spatio-temporal event, while continuing to observe the rest of the feed. We propose a novel technique that allows users to interactively specify areas of interest in the spatio-temporal domain. By employing a time-warp function, we are able to slow down time, freeze time or even travel back in time, around spatio-temporal events of interest. The combination of such a (pre-defined) time-warp function and brushing directly in the data to select regions of interest allows for a detailed review of temporally and spatially localized events, while maintaining an overview of the global spatio-temporal data. We demonstrate the utility of our technique with several usage scenarios.},
    project = {MetaVis,ttmedvis,VIDI}
    }

2019

    [PDF] [DOI] [Bibtex]
    @article{Byska-2019-LongMolecularDynamicsSimulations,
    author = {By{\v{s}}ka, J. and Trautner, T. and Marques, S. M. and Damborsk{\'y}, J. and Kozl{\'\i}kov{\'a}, B. and Waldner, M.},
    title = {Analysis of Long Molecular Dynamics Simulations Using Interactive Focus+Context Visualization},
    journal = {Computer Graphics Forum},
    volume = {38},
    number = {3},
    pages = {441--453},
    keywords = {scientific visualization, user-centered design},
    doi = {10.1111/cgf.13701},
    url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/cgf.13701},
    eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1111/cgf.13701},
    abstract = {Analyzing molecular dynamics (MD) simulations is a key aspect to understand protein dynamics and function. With increasing computational power, it is now possible to generate very long and complex simulations, which are cumbersome to explore using traditional 3D animations of protein movements. Guided by requirements derived from multiple focus groups with protein engineering experts, we designed and developed a novel interactive visual analysis approach for long and crowded MD simulations. In this approach, we link a dynamic 3D focus+context visualization with a 2D chart of time series data to guide the detection and navigation towards important spatio-temporal events. The 3D visualization renders elements of interest in more detail and increases the temporal resolution dependent on the time series data or the spatial region of interest. In case studies with different MD simulation data sets and research questions, we found that the proposed visual analysis approach facilitates exploratory analysis to generate, confirm, or reject hypotheses about causalities. Finally, we derived design guidelines for interactive visual analysis of complex MD simulation data.},
    year = {2019},
    pdf = {pdfs/AnalysisOfLongMolecularDynamicsSimulationsUsingInteractiveFocusAndContextVisualization_Trautner.pdf},
    images = {images/Byska-2019-LongMolecularDynamicsSimulations.png},
    thumbnails = {images/Byska-2019-LongMolecularDynamicsSimulations.png}
    }
    [PDF] [DOI] [Bibtex]
    @inproceedings{Bartsch-2019-MVA,
    author = {Bartsch, Hauke and Garrison, Laura and Bruckner, Stefan and Wang, Ariel and Tapert, Susan F. and Gr{\"u}ner, Renate},
    title = {{MedUse}: A Visual Analysis Tool for Medication Use Data in the {ABCD} Study},
    booktitle = {Proceedings of VCBM 2019 (Short Papers)},
    abstract = {The RxNorm vocabulary is a yearly-published biomedical resource providing normalized names for medications. It is used to capture medication use in the Adolescent Brain Cognitive Development (ABCD) study, an active and publicly available longitudinal research study following 11,800 children over 10 years. In this work, we present medUse, a visual tool allowing researchers to explore and analyze the relationship of drug category to cognitive or imaging derived measures using ABCD study data. Our tool provides position-based context for tree traversal and selection granularity of both study participants and drug category. Developed as part of the Data Exploration and Analysis Portal (DEAP), medUse is available to more than 600 ABCD researchers world-wide. By integrating medUse into an actively used research product we are able to reach a wide audience and increase the practical relevance of visualization for the biomedical field.},
    year = {2019},
    pages = {97--101},
    images = {images/Bartsch-2019-MVA.jpg},
    thumbnails = {images/Bartsch-2019-MVA.png},
    pdf = {pdfs/Bartsch-2019-MVA.pdf},
    publisher = {The Eurographics Association},
    issn = {2070-5786},
    isbn = {978-3-03868-081-9},
    doi = {10.2312/vcbm.20191236},
    project = {VIDI}
    }
    [DOI] [Bibtex]
    @article{kraima2019role,
    title = {The Role of the Longitudinal Muscle in the Anal Sphincter Complex: Implications for the Intersphincteric Plane in Low Rectal Cancer Surgery?},
    author = {Kraima, Anne C. and West, Nicholas P. and Roberts, Nicholas and Magee, Derek R. and Smit, Noeska N. and van de Velde, Cornelis J. H. and DeRuiter, Marco C. and Rutten, Harm J. and Quirke, Philip},
    journal = {Clinical Anatomy},
    year = {2019},
    doi = {10.1002/ca.23444},
    url = {https://onlinelibrary.wiley.com/doi/full/10.1002/ca.23444},
    publisher = {Wiley Online Library},
    project = {ttmedvis},
    images = {images/kraima-2019-role.png},
    thumbnails = {images/kraima-2019-role.png},
    abstract = {Intersphincteric resection (ISR) enables radical sphincter-preserving surgery in a subset of low rectal tumors impinging on the anal sphincter complex (ASC). Excellent anatomical knowledge is essential for optimal ISR. This study describes the role of the longitudinal muscle (LM) in the ASC and implications for ISR and other low rectal and anal pathologies. Six human adult en bloc cadaveric specimens (three males, three females) were obtained from the University of Leeds GIFT Research Tissue Programme. Paraffin-embedded mega blocks containing the ASC were produced and serially sectioned at 250~$\mu$m intervals. Whole mount microscopic sections were histologically stained and digitally scanned. The intersphincteric plane was shown to be potentially very variable. In some places adipose tissue is located between the external anal sphincter (EAS) and internal anal sphincter (IAS), whereas in others the LM interdigitates to obliterate the plane. Elsewhere the LM is (partly) absent with the intersphincteric plane lying on the IAS. The LM gave rise to the formation of the submucosae and corrugator ani muscles by penetrating the IAS and EAS. In four of six specimens, striated muscle fibers from the EAS curled around the distal IAS reaching the anal submucosa. The ASC formed a complex structure, varying between individuals with an inconstant LM affecting the potential location of the intersphincteric plane as well as a high degree of intermingling striated and smooth muscle fibers potentially further disrupting the plane. The complexity of identifying the correct pathological staging of low rectal cancer is also demonstrated.}
    }
    [PDF] [DOI] [YT] [Bibtex]
    @inproceedings{Garrison2019SM,
    author = {Garrison, Laura and Va\v{s}\'{\i}\v{c}ek, Jakub and Gr\"{u}ner, Renate and Smit, Noeska and Bruckner, Stefan},
    title = {{SpectraMosaic}: An Exploratory Tool for the Interactive Visual Analysis of Magnetic Resonance Spectroscopy Data},
    booktitle = {Proceedings of VCBM 2019},
    month = sep,
    year = {2019},
    pages = {1--10},
    event = {VCBM 2019},
    proceedings = {Proceedings of the 9th Eurographics Workshop on Visual Computing in Biology and Medicine},
    keywords = {medical visualization, magnetic resonance spectroscopy data, information visualization, user-centered design},
    images = {images/garrison_VCBM19spectramosaic_full.PNG},
    thumbnails = {images/garrison_VCBM19spectramosaic_thumb.png},
    pdf = {pdfs/garrison_VCBM19spectramosaic.pdf},
    youtube = {https://www.youtube.com/watch?v=Rzl7sl4WvdQ},
    abstract = {Magnetic resonance spectroscopy (MRS) allows for assessment of tissue metabolite characteristics used often for early detection and treatment evaluation of brain-related pathologies. However, meaningful variations in ratios of tissue metabolites within a sample area are difficult to capture with current visualization tools. Furthermore, the learning curve to interpretation is steep and limits the more widespread adoption of MRS in clinical practice. In this design study, we collaborated with domain experts to design a novel visualization tool for the exploration of tissue metabolite concentration ratios in spectroscopy clinical and research studies. We present a data and task analysis for this domain, where MRS data attributes can be categorized into tiers of visual priority. We furthermore introduce a novel set of visual encodings for these attributes. Our result is SpectraMosaic, an interactive insight-generation tool for rapid exploration and comparison of metabolite ratios. We validate our approach with two case studies from MR spectroscopy experts, providing early qualitative evidence of the efficacy of the system for visualization of spectral data and affording deeper insights into these complex heterogeneous data.},
    git = {https://git.app.uib.no/Laura.Garrison/spectramosaic},
    doi = {10.2312/vcbm.20191225},
    project = {VIDI}
    }
    [DOI] [Bibtex]
    @incollection{Smit-2019-AtlasVis,
    title = {Towards Advanced Interactive Visualization for Virtual Atlases},
    author = {Smit, Noeska and Bruckner, Stefan},
    booktitle = {Biomedical Visualisation},
    pages = {85--96},
    year = {2019},
    publisher = {Springer},
    doi = {10.1007/978-3-030-19385-0_6},
    url = {http://noeskasmit.com/wp-content/uploads/2019/07/Smit_AtlasVis_2019.pdf},
    images = {images/Smit-2019-AtlasVis.png},
    thumbnails = {images/Smit-2019-AtlasVis.png},
    abstract = {An atlas is generally defined as a bound collection of tables, charts or illustrations describing a phenomenon. In an anatomical atlas for example, a collection of representative illustrations and text describes anatomy for the purpose of communicating anatomical knowledge. The atlas serves as reference frame for comparing and integrating data from different sources by spatially or semantically relating collections of drawings, imaging data, and/or text. In the field of medical image processing, atlas information is often constructed from a collection of regions of interest, which are based on medical images that are annotated by domain experts. Such an atlas may be employed for example for automatic segmentation of medical imaging data. The combination of interactive visualization techniques with atlas information opens up new possibilities for content creation, curation, and navigation in virtual atlases. With interactive visualization of atlas information, students are able to inspect and explore anatomical atlases in ways that were not possible with the traditional method of presenting anatomical atlases in book format, such as viewing the illustrations from other viewpoints. With advanced interaction techniques, it becomes possible to query the data that forms the basis for the atlas, thus empowering researchers to access a wealth of information in new ways. So far, atlas-based visualization has been employed for mainly medical education, as well as biological research. In this survey, we provide an overview of current digital biomedical atlas tasks and applications and summarize relevant visualization techniques. We discuss recent approaches for providing next-generation visual interfaces to navigate atlas data that go beyond common text-based search and hierarchical lists. Finally, we reflect on open challenges and opportunities for the next steps in interactive atlas visualization.},
    project = {ttmedvis,MetaVis,VIDI}
    }
    [DOI] [Bibtex]
    @article{Meuschke-2019-EvalViz,
    title = {{EvalViz}--Surface Visualization Evaluation Wizard for Depth and Shape Perception Tasks},
    author = {Meuschke, Monique and Smit, Noeska N. and Lichtenberg, Nils and Preim, Bernhard and Lawonn, Kai},
    journal = {Computers \& Graphics},
    year = {2019},
    publisher = {Elsevier},
    number = {1},
    volume = {82},
    doi = {10.1016/j.cag.2019.05.022},
    images = {images/Meuschke_EvalViz_2019.png},
    thumbnails = {images/Meuschke_EvalViz_2019.png},
    abstract = {User studies are indispensable for visualization application papers in order to assess the value and limitations of the presented approach. Important aspects are how well depth and shape information can be perceived, as coding of these aspects is essential to enable an understandable representation of complex 3D data. In practice, there is usually little time to perform such studies, and the establishment and conduction of user studies can be labour-intensive. In addition, it can be difficult to reach enough participants to obtain expressive results regarding the quality of different visualization techniques.
    In this paper, we propose a framework that allows visualization researchers to quickly create task-based user studies on depth and shape perception for different surface visualizations and perform the resulting tasks via a web interface. With our approach, the effort for generating user studies is reduced and at the same time the web-based component allows researchers to attract more participants to their study. We demonstrate our framework by applying shape and depth evaluation tasks to visualizations of various surface representations used in many technical and biomedical applications.},
    project = {ttmedvis}
    }
    [PDF] [YT] [Bibtex]
    @misc{Garrison2019SM_eurovis,
    title = {A Visual Encoding System for Comparative Exploration of Magnetic Resonance Spectroscopy Data},
    author = {Garrison, Laura and Va\v{s}\'{\i}\v{c}ek, Jakub and Gr\"{u}ner, Renate and Smit, Noeska and Bruckner, Stefan},
    abstract = {Magnetic resonance spectroscopy (MRS) allows for assessment of tissue metabolite characteristics used often for early detection and treatment evaluation of intracranial pathologies. In particular, this non-invasive technique is important in the study of metabolic changes related to brain tumors, strokes, seizure disorders, Alzheimer's disease, depression, as well as other diseases and disorders affecting the brain. However, meaningful variations in ratios of tissue metabolites within a sample area are difficult to capture with current visualization tools. Furthermore, the learning curve to interpretation is steep and limits the more widespread adoption of MRS in clinical practice. In this work we present a novel, tiered visual encoding system for multi-dimensional MRS data to aid in the visual exploration of metabolite concentration ratios. Our system was developed in close collaboration with domain experts including detailed data and task analyses. This visual encoding system was subsequently realized as part of an interactive insight-generation tool for rapid exploration and comparison of metabolite ratio variation for deeper insights to these complex data.},
    booktitle = {Proceedings of the EuroVis Conference - Posters (EuroVis '19)},
    year = {2019},
    howpublished = {Poster presented at the EuroVis conference 2019},
    keywords = {medical visualization, magnetic resonance spectroscopy data, information visualization, user-centered design},
    images = {images/garrison_eurovis2019_SM_encodings.png},
    thumbnails = {images/garrison_eurovis2019_SM_encodings.png},
    pdf = {pdfs/garrison_eurovis2019_SM.pdf},
    youtube = {https://youtu.be/Rzl7sl4WvdQ},
    project = {VIDI}
    }
    [PDF] [DOI] [Bibtex]
    @inproceedings{Smit-2019-DBP,
    booktitle = {Eurographics 2019 - Dirk Bartz Prize},
    editor = {Bruckner, Stefan and Oeltze-Jafra, Steffen},
    title = {Model-based Visualization for Medical Education and Training},
    author = {Smit, Noeska and Lawonn, Kai and Kraima, Annelot and deRuiter, Marco and Bruckner, Stefan and Eisemann, Elmar and Vilanova, Anna},
    year = {2019},
    publisher = {The Eurographics Association},
    issn = {1017-4656},
    doi = {10.2312/egm.20191033},
    pdf = {pdfs/Smit_DBPrize_2019.pdf},
    images = {images/Smit_DBPrize_2019.png},
    thumbnails = {images/Smit_DBPrize_2019.png},
    abstract = {Anatomy, or the study of the structure of the human body, is an essential component of medical education. Certain parts of human anatomy are considered to be more complex to understand than others, due to a multitude of closely related structures. Furthermore, there are many potential variations in anatomy, e.g., different topologies of vessels, and knowledge of these variations is critical for many in medical practice.
    Some aspects of individual anatomy, such as the autonomic nerves, are not visible in individuals through medical imaging techniques or even during surgery, placing these nerves at risk for damage.
    3D models and interactive visualization techniques can be used to improve understanding of this complex anatomy, in combination with traditional medical education paradigms.
    We present a framework incorporating several advanced medical visualization techniques and applications for teaching and training purposes, which is the result of an interdisciplinary project.
    In contrast to previous approaches which focus on general anatomy visualization or direct visualization of medical imaging data, we employ model-based techniques to represent variational anatomy, as well as anatomy not visible from imaging. Our framework covers the complete spectrum including general anatomy, anatomical variations, and anatomy in individual patients.
    Applications within our framework were evaluated positively with medical users, and our educational tool for general anatomy is in use in a Massive Open Online Course (MOOC) on anatomy, which had over 17000 participants worldwide in the first run.},
    project = {ttmedvis,VIDI}
    }
    [PDF] [DOI] [YT] [Bibtex]
    @article{Stoppel-2019-LFL,
    author = {Stoppel, Sergej and Bruckner, Stefan},
    title = {LinesLab: A Flexible Low-Cost Approach for the Generation of Physical Monochrome Art},
    journal = {Computer Graphics Forum},
    year = {2019},
    publisher = {The Eurographics Association and John Wiley and Sons Ltd.},
    doi = {10.1111/cgf.13609},
    abstract = {The desire for the physical generation of computer art has seen a significant body of research that has resulted in sophisticated robots and painting machines, together with specialized algorithms mimicking particular artistic techniques. The resulting setups are often expensive and complex, making them unavailable for recreational and hobbyist use. In recent years, however, a new class of affordable low-cost plotters and cutting machines has reached the market. In this paper, we present a novel system for the physical generation of line and cut-out art based on digital images, targeted at such off-the-shelf devices. Our approach uses a meta-optimization process to generate results that represent the tonal content of a digital image while conforming to the physical and mechanical constraints of home-use devices. By flexibly combining basic sets of positional and shape encodings, we are able to recreate a wide range of artistic styles. Furthermore, our system optimizes the output in terms of visual perception based on the desired viewing distance, while remaining scalable with respect to the medium size.},
    pdf = {pdfs/Stoppel-2019-LFL.pdf},
    images = {images/Stoppel-2019-LFL.jpg},
    thumbnails = {images/Stoppel-2019-LFL.png},
    youtube = {https://www.youtube.com/watch?v=WdZJmU6fOAY},
    project = {MetaVis}
    }
    [PDF] [Bibtex]
    @article{fan2019personalized,
    title = {Personalized Sketch-Based Brushing in Scatterplots},
    author = {Fan, Chaoran and Hauser, Helwig},
    journal = {IEEE Computer Graphics and Applications},
    volume = {39},
    number = {4},
    pages = {28--39},
    year = {2019},
    publisher = {IEEE},
    pdf = {pdfs/personalizedBrush.pdf},
    images = {images/personalizedBrush.png},
    thumbnails = {images/personalizedBrush.png},
    abstract = {Brushing is at the heart of most modern visual analytics solutions and effective and efficient brushing is crucial for successful interactive data exploration and analysis. As the user plays a central role in brushing, several data-driven brushing tools have been designed that are based on predicting the user's brushing goal. All of these general brushing models learn the users' average brushing preference, which is not optimal for every single user. In this paper, we propose an innovative framework that offers the user opportunities to improve the brushing technique while using it. We realized this framework with a CNN-based brushing technique and the result shows that with additional data from a particular user, the model can be refined (better performance in terms of accuracy), eventually converging to a personalized model based on a moderate amount of retraining.}
    }
    [PDF] [DOI] [Bibtex]
    @inproceedings{Fan-2019-KDE,
    author = {Fan, Chaoran and Hauser, Helwig},
    title = {On {KDE}-based brushing in scatterplots and how it compares to {CNN}-based brushing},
    booktitle = {Proceedings of MLVis: Machine Learning Methods in Visualisation for Big Data},
    year = {2019},
    publisher = {Eurographics Association},
    abstract = {In this paper, we investigate to which degree the human should be involved into the model design and how good the empirical model can be with more careful design. To find out, we extended our previously published Mahalanobis brush (the best current empirical model in terms of accuracy for brushing points in a scatterplot) by further incorporating the data distribution information that is captured by the kernel density estimation (KDE). Based on this work, we then include a short discussion between the empirical model, designed in detail by an expert and the deep learning-based model that is learned from user data directly.},
    pdf = {pdfs/On-KDE-based-brushing-in-scatterplotsand-how-it-compares-to-CNN-based-brushing.pdf},
    images = {images/pic-2.png},
    thumbnails = {images/pic-2.png},
    doi = {10.2312/mlvis.20191157}
    }
    [PDF] [DOI] [YT] [Bibtex]
    @article{Bruckner-2019-DVM,
    author = {Bruckner, Stefan},
    title = {Dynamic Visibility-Driven Molecular Surfaces},
    journal = {Computer Graphics Forum},
    year = {2019},
    volume = {38},
    number = {2},
    pages = {317--329},
    publisher = {The Eurographics Association and John Wiley and Sons Ltd.},
    doi = {10.1111/cgf.13640},
    abstract = {Molecular surface representations are an important tool for the visual analysis of molecular structure and function. In this paper, we present a novel method for the visualization of dynamic molecular surfaces based on the Gaussian model. In contrast to previous approaches, our technique does not rely on the construction of intermediate representations such as grids or triangulated surfaces. Instead, it operates entirely in image space, which enables us to exploit visibility information to efficiently skip unnecessary computations. With this visibility-driven approach, we can visualize dynamic high-quality surfaces for molecules consisting of millions of atoms. Our approach requires no preprocessing, allows for the interactive adjustment of all properties and parameters, and is significantly faster than previous approaches, while providing superior quality.},
    pdf = {pdfs/Bruckner-2019-DVM.pdf},
    images = {images/Bruckner-2019-DVM-1.jpg},
    thumbnails = {images/Bruckner-2019-DVM.png},
    youtube = {https://www.youtube.com/watch?v=aZmDhTbJlAM},
    git = {https://github.com/sbruckner/dynamol.git},
    project = {MetaVis}
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Bruckner-2018-MSD,
    author = "Stefan Bruckner and Tobias Isenberg and Timo Ropinski and Alexander Wiebel",
    title = "A Model of Spatial Directness in Interactive Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    volume = "25",
    number = "8",
    year = "2019",
    abstract = "We discuss the concept of directness in the context of spatial interaction with visualization. In particular, we propose a model that allows practitioners to analyze and describe the spatial directness of interaction techniques, ultimately to be able to better understand interaction issues that may affect usability. To reach these goals, we distinguish between different types of directness. Each type of directness depends on a particular mapping between different spaces, for which we consider the data space, the visualization space, the output space, the user space, the manipulation space, and the interaction space. In addition to the introduction of the model itself, we also show how to apply it to several real-world interaction scenarios in visualization, and thus discuss the resulting types of spatial directness, without recommending either more direct or more indirect interaction techniques. In particular, we will demonstrate descriptive and evaluative usage of the proposed model, and also briefly discuss its generative usage.",
    pdf = "pdfs/Bruckner-2018-MSD.pdf",
    images = "images/Bruckner-2018-MSD.jpg",
    thumbnails = "images/Bruckner-2018-MSD.png",
    doi = "10.1109/TVCG.2018.2848906",
    project = "MetaVis"
    }
    [PDF] [DOI] [VID] [Bibtex]
    @ARTICLE {Stoppel-2019-FVI,
    author = "Sergej Stoppel and Magnus Paulson Erga and Stefan Bruckner",
    title = "Firefly: Virtual Illumination Drones for Interactive Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2019",
    volume = "25",
    pages = "1204--1213",
    abstract = "Light specification in three dimensional scenes is a complex problem and several approaches have been presented that aim to automate this process. However, there are many scenarios where a static light setup is insufficient, as the scene content and camera position may change. Simultaneous manual control over the camera and light position imposes a high cognitive load on the user. To address this challenge, we introduce a novel approach for automatic scene illumination with Fireflies. Fireflies are intelligent virtual light drones that illuminate the scene by traveling on a closed path. The Firefly path automatically adapts to changes in the scene based on an outcome-oriented energy function. To achieve interactive performance, we employ a parallel rendering pipeline for the light path evaluations. We provide a catalog of energy functions for various application scenarios and discuss the applicability of our method on several examples.",
    pdf = "pdfs/VIS2018-Firefly.pdf",
    vid = "vids/FinalVideo.mp4",
    images = "images/Teaser.png",
    thumbnails = "images/HeadRightCroppedThumbnail.png",
    doi = "10.1109/TVCG.2018.2864656",
    project = "MetaVis"
    }
    [PDF] [DOI] [Bibtex]
    @inproceedings {Moerth-2019-VCBM,
    booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine",
    editor = "Kozlíková, Barbora and Linsen, Lars and Vázquez, Pere-Pau and Lawonn, Kai and Raidou, Renata Georgia",
    abstract = "Three-dimensional (3D) ultrasound imaging and visualization
    is often used in medical diagnostics, especially in prenatal
    screening. Screening the development of the fetus is
    important to assess possible complications early on. State
    of the art approaches involve taking standardized
    measurements to compare them with standardized tables. The
    measurements are taken in a 2D slice view, where precise
    measurements can be difficult to acquire due to the fetal
    pose. Performing the analysis in a 3D view would enable the
    viewer to better discriminate between artefacts and
    representative information. Additionally making data
    comparable between different investigations and patients is
    a goal in medical imaging techniques and is often achieved
    by standardization. With this paper, we introduce a novel
    approach to provide a standardization method for 3D
    ultrasound fetus screenings. Our approach is called “The
    Vitruvian Baby” and incorporates a complete pipeline for
    standardized measuring in fetal 3D ultrasound. The input of
    the method is a 3D ultrasound screening of a fetus and the
    output is the fetus in a standardized T-pose. In this pose,
    taking measurements is easier and comparison of different
    fetuses is possible. In addition to the transformation of
    the 3D ultrasound data, we create an abstract representation
    of the fetus based on accurate measurements. We demonstrate
    the accuracy of our approach on simulated data where the
    ground truth is known.",
    title = "The Vitruvian Baby: Interactive Reformation of Fetal Ultrasound Data to a T-Position",
    author = "Mörth, Eric and Raidou, Renata Georgia and Viola, Ivan and Smit, Noeska",
    year = "2019",
    publisher = "The Eurographics Association",
    ISSN = "2070-5786",
    ISBN = "978-3-03868-081-9",
    DOI = "10.2312/vcbm.20191245",
    pdf = "pdfs/VCBM_TheVitruvianBaby_ShortPaper_201-205.pdf",
    images = "images/vcbmVitruvianBaby.jpg",
    thumbnails = "images/vcbmVitruvianBaby.jpg",
    url = "https://diglib.eg.org/handle/10.2312/vcbm20191245",
    project = "VIDI"
    }
    [PDF] [DOI] [Bibtex]
    @MISC {Moerth-2019-EUROVIS,
    booktitle = "EuroVis 2019 - Posters",
    editor = "Madeiras Pereira, João and Raidou, Renata Georgia",
    title = "The Vitruvian Baby: Interactive Reformation of Fetal Ultrasound Data to a T-Position",
    author = "Mörth, Eric and Raidou, Renata Georgia and Smit, Noeska and Viola, Ivan",
    year = "2019",
    abstract = "Three dimensional (3D) ultrasound is commonly used in prenatal screening, because it provides insight into the shape as well
    as the organs of the fetus. Currently, gynecologists take standardized measurements of the fetus and check for abnormalities by
    analyzing the data in a 2D slice view. The fetal pose may complicate taking precise measurements in such a view. Analyzing the
    data in a 3D view would enable the viewer to better distinguish between artefacts and representative information. Standardization
    in medical imaging techniques aims to make the data comparable between different investigations and patients. It is
    already used in different medical applications for example in magnetic resonance imaging (MRI). With this work, we introduce
    a novel approach to provide a standardization method for 3D ultrasound screenings of fetuses. The approach consists of six
    steps and is called “The Vitruvian Baby”. The input is the data of the 3D ultrasound screening of a fetus and the output shows
    the fetus in a standardized T-pose in which measurements can be made. The precision of standardized measurements compared
    to the gold standard is for the finger to finger span 91,08\% and for the head to toe measurement 94,05\%.",
    publisher = "The Eurographics Association",
    howpublished = "Poster presented at the EuroVis conference 2019",
    ISBN = "978-3-03868-088-8",
    DOI = "10.2312/eurp.20191147",
    pdf = "pdfs/EUROVIS_TheVitruvianBaby_Poster.pdf",
    images = "images/EUROVISTheVitruvianBabyPoster.png",
    thumbnails = "images/EUROVISTheVitruvianBabyPoster.png",
    url = "https://diglib.eg.org/handle/10.2312/eurp20191147"
    }

2018

    [Bibtex]
    @PHDTHESIS {PhDThesis2018Stoppel,
    author = "Stoppel, Sergej",
    title = "User-Centric Parameter Specification for Interactive Virtual and Physical Visual Representations",
    school = "Universitetet i Bergen",
    year = "2018"
    }
    [PDF] [Bibtex]
    @inproceedings{hauser2018foundations,
    author = "Hauser, Helwig and Rheingans, Penny and Scheuermann, Gerik",
    title = "Foundations of Data Visualization (Dagstuhl Seminar 18041)",
    booktitle = "Dagstuhl Reports",
    volume = "8",
    number = "1",
    year = "2018",
    organization = "Schloss Dagstuhl-Leibniz-Zentrum fuer Informatik",
    abstract = "This report documents the program and the outcomes of Dagstuhl Seminar 18041 “Foundations
    of Data Visualization”. It includes a discussion of the motivation and overall organization, an
    abstract from each of the participants, and a report about each of the working groups.",
    pdf = "pdfs/foundations.pdf",
    thumbnails = "images/foundations.png"
    }
    [Bibtex]
    @ARTICLE {Jurcik2018Caver,
    author = "Adam Jur\v{c}\'{i}k and David Bedn\'{a}\v{r} and Jan By\v{s}ka and Sergio M. Marques and Katar\'{i}na Furmanov\'{a} and Luk\'{a}\v{s} Daniel and Piia Kokkonen and Jan Brezovsk\'{y} and Ond\v{r}ej Strnad and Jan \v{S}toura\v{c} and Anton\'{i}n Pavelka and Martin Ma\v{n}\'{a}k and Ji\v{r}\'{i} Damborsk\'{y} and Barbora Kozl\'{i}kov\'{a}",
    title = "CAVER Analyst 2.0: analysis and visualization of channels and tunnels in protein structures and molecular dynamics trajectories",
    journal = "Bioinformatics",
    year = "2018",
    abstract = "MOTIVATION: Studying the transport paths of ligands, solvents, or ions in transmembrane proteins and proteins with buried binding sites is fundamental to the understanding of their biological function. A detailed analysis of the structural features influencing the transport paths is also important for engineering proteins for biomedical and biotechnological applications. RESULTS: CAVER Analyst 2.0 is a software tool for quantitative analysis and real-time visualization of tunnels and channels in static and dynamic structures. This version provides the users with many new functions, including advanced techniques for intuitive visual inspection of the spatiotemporal behavior of tunnels and channels. Novel integrated algorithms allow efficient analysis and data reduction in large protein structures and molecular dynamics simulations.",
    images = "images/analyst.jpg",
    thumbnails = "images/analyst.jpg"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Meuschke2018VCBM,
    author = "Monique Meuschke and Noeska N. Smit and Nils Lichtenberg and Bernhard Preim and Kai Lawonn",
    title = "Automatic Generation of Web-Based User Studies to Evaluate Depth Perception in Vascular Surface Visualizations",
    booktitle = "Proceedings of VCBM 2018",
    year = "2018",
    editor = "Anna Puig Puig and Thomas Schultz and Anna Vilanova and Ingrid Hotz and Barbora Kozlikova and Pere-Pau Vázquez",
    pages = "033--044",
    address = "Granada, Spain",
    publisher = "Eurographics Association",
    abstract = "User studies are often required in biomedical visualization application papers in order to provide evidence for the utility of the presented approach. An important aspect is how well depth information can be perceived, as depth encoding is important to enable an understandable representation of complex data. Unfortunately, in practice there is often little time available to perform such studies, and setting up and conducting user studies may be labor-intensive. In addition, it can be challenging to reach enough participants to support the contribution claims of the paper. In this paper, we propose a system that allows biomedical visualization researchers to quickly generate perceptual task-based user studies for novel surface visualizations, and to perform the resulting experiment via a web interface. This approach helps to reduce effort in the setup of user studies themselves, and at the same time leverages a web-based approach that can help researchers attract more participants to their study. We demonstrate our system using the specific application of depth judgment tasks to evaluate vascular surface visualizations, since there is a lot of recent interest in this area. However, the system is also generally applicable for conducting other task-based user studies in biomedical visualization.",
    pdf = "pdfs/meuschke2018VCBM.pdf",
    images = "images/vcbm2018.png",
    thumbnails = "images/vcbm2018.png",
    youtube = "https://www.youtube.com/watch?v=8lns8GGpPJI",
    crossref = "VCBM-proc",
    doi = "10.2312/vcbm.20181227",
    project = "ttmedvis"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Magnus-2018-VPI,
    author = "Jens G. Magnus and Stefan Bruckner",
    title = "Interactive Dynamic Volume Illumination with Refraction and Caustics",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2018",
    volume = "24",
    number = "1",
    pages = "984--993",
    month = "jan",
    abstract = "In recent years, significant progress has been made in developing high-quality interactive methods for realistic volume illumination. However, refraction -- despite being an important aspect of light propagation in participating media -- has so far only received little attention. In this paper, we present a novel approach for refractive volume illumination including caustics capable of interactive frame rates. By interleaving light and viewing ray propagation, our technique avoids memory-intensive storage of illumination information and does not require any precomputation. It is fully dynamic and all parameters such as light position and transfer function can be modified interactively without a performance penalty.",
    pdf = "pdfs/Magnus-2018-IDV.pdf",
    images = "images/Magnus-2018-IDV.jpg",
    thumbnails = "images/Magnus-2018-IDV.png",
    youtube = "https://www.youtube.com/watch?v=3tn6sSXw4NQ",
    doi = "10.1109/TVCG.2017.2744438",
    event = "IEEE SciVis 2017",
    keywords = "interactive volume rendering, illumination, refraction, shadows, caustics",
    location = "Phoenix, USA",
    project = "MetaVis"
    }
    [PDF] [YT] [Bibtex]
    @ARTICLE {lichtenbergsmithansenlawonn2018,
    author = "Nils Lichtenberg and Noeska Smit and Christian Hansen and Kai Lawonn",
    title = "Real-time field aligned stripe patterns",
    journal = "Computers & Graphics",
    year = "2018",
    volume = "74",
    pages = "137--149",
    month = "aug",
    abstract = "In this paper, we present a parameterization technique that can be applied to surface meshes in real-time without time-consuming preprocessing steps. The parameterization is suitable for the display of (un-)oriented patterns and texture patches, and to sample a surface in a periodic fashion. The method is inspired by existing work that solves a global optimization problem to generate a continuous stripe pattern on the surface, from which texture coordinates can be derived. We propose a local optimization approach that is suitable for parallel execution on the GPU, which drastically reduces computation time. With this, we achieve on-the-fly texturing of 3D, medium-sized (up to 70k vertices) surface meshes. The algorithm takes a tangent vector field as input and aligns the texture coordinates to it. Our technique achieves real-time parameterization of the surface meshes by employing a parallelizable local search algorithm that converges to a local minimum in a few iterations. The calculation in real-time allows for live parameter updates and determination of varying texture coordinates. Furthermore, the method can handle non-manifold meshes. The technique is useful in various applications, e.g., biomedical visualization and flow visualization. We highlight our method's potential by providing usage scenarios for several applications. A PDF of the accepted manuscript is available via noeskasmit.com/wp-content/uploads/2018/08/lichtenberg_2018.pdf.",
    pdf = "pdfs/lichtenberg_2018.pdf",
    images = "images/Selection_384.png",
    thumbnails = "images/1-s2.0-S0097849318300591-fx1_lrg.jpg",
    youtube = "https://www.youtube.com/watch?v=7CpkHy8KPK8",
    project = "ttmedvis"
    }
    [PDF] [Bibtex]
    @MISC {Smit18MMIV,
    author = "N. N. Smit and S. Bruckner and H. Hauser and I. Haldorsen and A. Lundervold and A. S. Lundervold and E. Hodneland and L. Oltedal and K. Specht and E. R. Gruner",
    title = "Research Agenda of the Mohn Medical Imaging and Visualization Centre in Bergen, Norway",
    howpublished = "Poster presented at the EG VCBM workshop 2018",
    month = "September",
    year = "2018",
    abstract = "The Mohn Medical Imaging and Visualization Centre (MMIV) was recently established in collaboration between the University of Bergen, Norway, and the Haukeland University Hospital in Bergen with generous financial support from the Bergen Research Foundation (BFS) to conduct cross-disciplinary research related to state-of-the-art medical imaging, including preclinical and clinical high-field MRI, CT and hybrid PET/CT/MR. The overall goal of the Centre is to research new methods in quantitative imaging and interactive visualization to predict changes in health and disease across spatial and temporal scales. This encompasses research in feature detection, feature extraction, and feature prediction, as well as on methods and techniques for the interactive visualization of spatial and abstract data related to and derived from these features. With special emphasis on the natural and medical sciences, the long-term goal of the Centre is to consolidate excellence in the interplay between medical imaging (physics, chemistry, radiography, radiology), and visualization (computer science and mathematics) and develop novel and refined imaging methods that may ultimately improve patient care. In this poster, we describe the overall research agenda of MMIV and describe the four core projects in the centre.",
    pdf = "pdfs/smit2018posterabstract.pdf",
    images = "images/MMIVPoster.png",
    thumbnails = "images/MMIVPoster.png",
    location = "Granada, Spain",
    project = "VIDI"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {cnn-brush,
    author = "Fan, Chaoran and Hauser, Helwig",
    title = "Fast and Accurate {CNN}-based Brushing in Scatterplots",
    journal = "Computer Graphics Forum",
    event = "EuroVis 2018",
    year = "2018",
    abstract = "Brushing plays a central role in most modern visual analytics solutions and effective and efficient techniques for data selection are key to establishing a successful human-computer dialogue. With this paper, we address the need for brushing techniques that are both fast, enabling a fluid interaction in visual data exploration and analysis, and also accurate, i.e., enabling the user to effectively select specific data subsets, even when their geometric delimination is non-trivial. We present a new solution for a near-perfect sketch-based brushing technique, where we exploit a convolutional neural network (CNN) for estimating the intended data selection from a fast and simple click-and-drag interaction and from the data distribution in the visualization. Our key contributions include a drastically reduced error rate -- now below 3\%, i.e., less than half of the so far best accuracy -- and an extension to a larger variety of selected data subsets, going beyond previous limitations due to linear estimation models.",
    pdf = "pdfs/eurovis18.pdf",
    images = "images/cnn.png",
    thumbnails = "images/cnn.png",
    publisher = "The Eurographics Association and John Wiley and Sons Ltd.",
    issn = "1467-8659",
    doi = "10.1111/cgf.13405"
    }
    [Bibtex]
    @ARTICLE {Furmanova2018COZOID,
    author = "Furmanov{\'a}, Katar{\'\i}na and By{\v{s}}ka, Jan and Gr{\"o}ller, Eduard M and Viola, Ivan and Pale{\v{c}}ek, Jan J and Kozl{\'i}kov{\'a}, Barbora",
    title = "COZOID: contact zone identifier for visual analysis of protein-protein interactions",
    journal = "BMC Bioinformatics",
    year = "2018",
    abstract = "Background: Studying the patterns of protein-protein interactions (PPIs) is fundamental for understanding the structure and function of protein complexes. The exploration of the vast space of possible mutual configurations of interacting proteins and their contact zones is very time consuming and requires the proteomic expert knowledge. Results: In this paper, we propose a novel tool containing a set of visual abstraction techniques for the guided exploration of PPI configuration space. It helps proteomic experts to select the most relevant configurations and explore their contact zones at different levels of detail. The system integrates a set of methods that follow and support the workflow of proteomics experts. The first visual abstraction method, the Matrix view, is based on customized interactive heat maps and provides the users with an overview of all possible residue-residue contacts in all PPI configurations and their interactive filtering. In this step, the user can traverse all input PPI configurations and obtain an overview of their interacting amino acids. Then, the models containing a particular pair of interacting amino acids can be selectively picked and traversed. Detailed information on the individual amino acids in the contact zones and their properties is presented in the Contact-Zone list-view. The list-view provides a comparative tool to rank the best models based on the similarity of their contacts to the template-structure contacts. All these techniques are interactively linked with other proposed methods, the Exploded view and the Open-Book view, which represent individual configurations in three-dimensional space. These representations solve the high overlap problem associated with many configurations. Using these views, the structural alignment of the best models can also be visually confirmed. Conclusions: We developed a system for the exploration of large sets of protein-protein complexes in a fast and intuitive way. The usefulness of our system has been tested and verified on several docking structures covering the three major types of PPIs, including coiled-coil, pocket-string, and surface-surface interactions. Our case studies prove that our tool helps to analyse and filter protein-protein complexes in a fraction of the time compared to using previously available techniques.",
    images = "images/cozoid.jpg",
    thumbnails = "images/cozoid.jpg"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Stoppel-2018-SSW,
    author = "Sergej Stoppel and Stefan Bruckner",
    title = "Smart Surrogate Widgets for Direct Volume Manipulation",
    booktitle = "Proceedings of IEEE PacificVis 2018",
    year = "2018",
    pages = "36--45",
    month = "apr",
    abstract = "Interaction is an essential aspect in volume visualization, yet common manipulation tools such as bounding boxes or clipping plane widgets provide rather crude tools as they neglect the complex structure of the underlying data. In this paper, we introduce a novel volume interaction approach based on smart widgets that are automatically placed directly into the data in a visibility-driven manner. By adapting to what the user actually sees, they act as proxies that allow for goal-oriented modifications while still providing an intuitive set of simple operations that is easy to control. In particular, our method is well-suited for direct manipulation scenarios such as touch screens, where traditional user interface elements commonly exhibit limited utility. To evaluate our approach we conducted a qualitative user study with nine participants with various backgrounds.",
    pdf = "pdfs/Stoppel-2018-SSW.pdf",
    images = "images/Stoppel-2018-SSW.jpg",
    thumbnails = "images/Stoppel-2018-SSW.png",
    youtube = "https://www.youtube.com/watch?v=wMRw-W0SrLk",
    event = "IEEE PacificVis 2018",
    keywords = "smart interfaces, volume manipulation, volume visualization",
    doi = "10.1109/PacificVis.2018.00014",
    project = "MetaVis"
    }

2017

    [Bibtex]
    @PHDTHESIS {UiB2017Ivan,
    author = "Kolesar, Ivan",
    title = "Partial Spatiotemporal Abstraction for Comparative Visualization of Molecular Processes",
    school = "Universitetet i Bergen",
    year = "2017"
    }
    [PDF] [Bibtex]
    @INPROCEEDINGS {matkovic2017quantitative,
    author = "Matkovi{\'c}, Kre{\v{s}}imir and Abraham, Hrvoje and Jelovi{\'c}, Mario and Hauser, Helwig",
    title = "Quantitative externalization of visual data analysis results using local regression models",
    booktitle = "International Cross-Domain Conference for Machine Learning and Knowledge Extraction",
    year = "2017",
    pages = "199--218",
    abstract = "Both interactive visualization and computational analysis
    methods are useful for data studies and an integration of both approaches
    is promising to successfully combine the benefits of both methodologies.
    In interactive data exploration and analysis workflows, we need successful
    means to quantitatively externalize results from data studies, amounting
    to a particular challenge for the usually qualitative visual data analysis.
    In this paper, we propose a hybrid approach in order to quantitatively
    externalize valuable findings from interactive visual data exploration and
    analysis, based on local linear regression models. The models are built on
    user-selected subsets of the data, and we provide a way of keeping track
    of these models and comparing them. As an additional benefit, we also
    provide the user with the numeric model coefficients. Once the models are
    available, they can be used in subsequent steps of the workflow. A model-based
    optimization can then be performed, for example, or more complex
    models can be reconstructed using an inversion of the local models. We
    study two datasets to exemplify the proposed approach, a meteorological
    data set for illustration purposes and a simulation ensemble from the
    automotive industry as an actual case study.",
    pdf = "pdfs/Matkovic2017.pdf",
    thumbnails = "images/matkovic_10.png"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @article{Stoppel-2017-VPI,
    author = "Sergej Stoppel and Stefan Bruckner",
    title = "Vol²velle: Printable Interactive Volume Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "861--870",
    month = "jan",
    doi = "10.1109/TVCG.2016.2599211",
    event = "IEEE SciVis 2016",
    location = "Baltimore, USA",
    keywords = "physical visualization, interaction, volume visualization, illustrative visualization",
    abstract = "Interaction is an indispensable aspect of data visualization. The  presentation of volumetric data, in particular, often significantly  benefits from interactive manipulation of parameters such as transfer  functions, rendering styles, or clipping planes. However, when we  want to create hardcopies of such visualizations, this essential  aspect is lost. In this paper, we present a novel approach for creating  hardcopies of volume visualizations which preserves a certain degree  of interactivity. We present a method for automatically generating  Volvelles, printable tangible wheel charts that can be manipulated  to explore different parameter settings. Our interactive system allows  the flexible mapping of arbitrary visualization parameters and supports  advanced features such as linked views. The resulting designs can  be easily reproduced using a standard printer and assembled within  a few minutes.",
    pdf = "pdfs/Stoppel-2017-VPI.pdf",
    images = "images/Stoppel-2017-VPI.jpg",
    thumbnails = "images/Stoppel-2017-VPI.png",
    youtube = "https://www.youtube.com/watch?v=Z1K8t-FCiXI"
    }
    [PDF] [Bibtex]
    @ARTICLE {Furmanova2017Ligand,
    author = "Furmanov{\'a}, Katar{\'\i}na and Jare{\v{s}}ov{\'a}, Miroslava and By{\v{s}}ka, Jan and Jur{\v{c}}{\'i}k, Adam and Parulek, J{\'u}lius and Hauser, Helwig and Kozl{\'i}kov{\'a}, Barbora",
    title = "Interactive exploration of ligand transportation through protein tunnels",
    journal = "BMC Bioinformatics",
    year = "2017",
    volume = "18(Suppl 2)",
    number = "22",
    month = "feb",
    abstract = "Background: Protein structures and their interaction with ligands have been in the focus of biochemistry and structural biology research for decades. The transportation of ligand into the protein active site is often complex process, driven by geometric and physico-chemical properties, which renders the ligand path full of jitter and impasses. This prevents understanding of the ligand transportation and reasoning behind its behavior along the path. Results: To address the needs of the domain experts we design an explorative visualization solution based on a multi-scale simplification model. It helps to navigate the user to the most interesting parts of the ligand trajectory by exploring different attributes of the ligand and its movement, such as its distance to the active site, changes of amino acids lining the ligand, or ligand “stuckness”. The process is supported by three linked views – 3D representation of the simplified trajectory, scatterplot matrix, and bar charts with line representation of ligand-lining amino acids. Conclusions: The usage of our tool is demonstrated on molecular dynamics simulations provided by the domain experts. The tool was tested by the domain experts from protein engineering and the results confirm that it helps to navigate the user to the most interesting parts of the ligand trajectory and to understand the ligand behavior.",
    pdf = "pdfs/Furmanova2017.pdf",
    images = "images/Furmanova2016Interactive.png",
    thumbnails = "images/Furmanova2016Interactive.png",
    doi = "10.1186/s12859-016-1448-0"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {newMahalanobisBrush,
    author = "Fan, Chaoran and Hauser, Helwig",
    title = "User-study Based Optimization of Fast and Accurate {Mahalanobis} Brushing in Scatterplots",
    booktitle = "Vision, Modeling & Visualization",
    year = "2017",
    editor = "Matthias Hullin and Reinhard Klein and Thomas Schultz and Angela Yao",
    publisher = "The Eurographics Association",
    abstract = "Brushing is at the heart of most modern visual analytics solutions with coordinated, multiple views and effective brushing is crucial for swift and efficient processes in data exploration and analysis. Given a certain data subset that the user wishes to brush in a data visualization, traditional brushes are usually either accurate (like the lasso) or fast (e.g., a simple geometry like a rectangle or circle). In this paper, we now present a new, fast and accurate brushing technique for scatterplots, based on the Mahalanobis brush, which we have extended and then optimized using data from a user study. We explain the principal, sketch-based model of our new brushing technique (based on a simple click-and-drag interaction), the details of the user study and the related parameter optimization, as well as a quantitative evaluation, considering efficiency, accuracy, and also a comparison with the original Mahalanobis brush.",
    pdf = "pdfs/vmv-final.pdf",
    images = "images/Mahalanobis.png",
    thumbnails = "images/Mahalanobis.png",
    isbn = "978-3-03868-049-9",
    doi = "10.2312/vmv.20171262"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @article{Smit-2017-PAS,
    author = "Noeska Smit and Kai Lawonn and Annelot Kraima and Marco DeRuiter and Hessam Sokooti and Stefan Bruckner and Elmar Eisemann and Anna Vilanova",
    title = "PelVis: Atlas-based Surgical Planning for Oncological Pelvic Surgery",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "741--750",
    month = "jan",
    doi = "10.1109/TVCG.2016.2598826",
    event = "IEEE SciVis 2016",
    location = "Baltimore, USA",
    keywords = "atlas, surgical planning, medical visualization",
    abstract = "Due to the intricate relationship between the pelvic organs and vital  structures, such as vessels and nerves, pelvic anatomy is often considered  to be complex to comprehend. In oncological pelvic surgery, a trade-off  has to be made between complete tumor resection and preserving function  by preventing damage to the nerves. Damage to the autonomic nerves  causes undesirable post-operative side-effects such as fecal and  urinal incontinence, as well as sexual dysfunction in up to 80 percent  of the cases. Since these autonomic nerves are not visible in pre-operative  MRI scans or during surgery, avoiding nerve damage during such a  surgical procedure becomes challenging. In this work, we present  visualization methods to represent context, target, and risk structures  for surgical planning. We employ distance-based and occlusion management  techniques in an atlas-based surgical planning tool for oncological  pelvic surgery. Patient-specific pre-operative MRI scans are registered  to an atlas model that includes nerve information. Through several  interactive linked views, the spatial relationships and distances  between the organs, tumor and risk zones are visualized to improve  understanding, while avoiding occlusion. In this way, the surgeon  can examine surgically relevant structures and plan the procedure  before going into the operating theater, thus raising awareness of  the autonomic nerve zone regions and potentially reducing post-operative  complications. Furthermore, we present the results of a domain expert  evaluation with surgical oncologists that demonstrates the advantages  of our approach.",
    pdf = "pdfs/Smit-2017-PAS.pdf",
    images = "images/Smit-2017-PAS.jpg",
    thumbnails = "images/Smit-2017-PAS.png",
    youtube = "https://www.youtube.com/watch?v=vHp05I5-hp8"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Lind-2017-CCR,
    author = "Andreas Johnsen Lind and Stefan Bruckner",
    title = "Comparing Cross-Sections and 3D Renderings for Surface Matching Tasks using Physical Ground Truths",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "781--790",
    month = "jan",
    abstract = "Within the visualization community there are some well-known techniques  for visualizing 3D spatial data and some general assumptions about  how perception affects the performance of these techniques in practice.  However, there is a lack of empirical research backing up the possible  performance differences among the basic techniques for general tasks.  One such assumption is that 3D renderings are better for obtaining  an overview, whereas cross sectional visualizations such as the commonly  used Multi- Planar Reformation (MPR) are better for supporting detailed  analysis tasks. In the present study we investigated this common  assumption by examining the difference in performance between MPR  and 3D rendering for correctly identifying a known surface. We also  examined whether prior experience working with image data affects  the participant’s performance, and whether there was any difference  between interactive or static versions of the visualizations. Answering  this question is important because it can be used as part of a scientific  and empirical basis for determining when to use which of the two  techniques. An advantage of the present study compared to other studies  is that several factors were taken into account to compare the two  techniques. The problem was examined through an experiment with 45  participants, where physical objects were used as the known surface  (ground truth). Our findings showed that: 1. The 3D renderings largely  outperformed the cross sections; 2. Interactive visualizations were  partially more effective than static visualizations; and 3. The high  experience group did not generally outperform the low experience  group.",
    pdf = "pdfs/Lind-2017-CCR.pdf",
    images = "images/Lind-2017-CCR.jpg",
    thumbnails = "images/Lind-2017-CCR.png",
    doi = "10.1109/TVCG.2016.2598602",
    event = "IEEE SciVis 2016",
    keywords = "human-computer interaction, quantitative evaluation, volume visualization",
    location = "Baltimore, USA"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {LawonnSmit-2017-Survey,
    author = "Lawonn, K. and Smit, N. N. and B{\"u}hler, K. and Preim, B.",
    title = "A Survey on Multimodal Medical Data Visualization",
    journal = "Computer Graphics Forum",
    year = "2017",
    volume = "37",
    number = "1",
    pages = "413--438",
    abstract = "Multi-modal data of the complex human anatomy contain a wealth of information. To visualize and explore such data, techniques for emphasizing important structures and controlling visibility are essential. Such fused overview visualizations guide physicians to suspicious regions to be analysed in detail, e.g. with slice-based viewing. We give an overview of the state of the art in multi-modal medical data visualization techniques. Multi-modal medical data consist of multiple scans of the same subject using various acquisition methods, often combining multiple complementary types of information. Three-dimensional visualization techniques for multi-modal medical data can be used in diagnosis, treatment planning, doctor–patient communication as well as interdisciplinary communication. Over the years, multiple techniques have been developed in order to cope with the various associated challenges and present the relevant information from multiple sources in an insightful way. We present an overview of these techniques and analyse the specific challenges that arise in multi-modal data visualization and how recent works aimed to solve these, often using smart visibility techniques. We provide a taxonomy of these multi-modal visualization applications based on the modalities used and the visualization techniques employed. Additionally, we identify unsolved problems as potential future research directions.",
    pdf = "pdfs/LawonnSmit-2017-MULTI.pdf",
    images = "images/LawonnSmit-2017-MULTI.jpg",
    thumbnails = "images/LawonnSmit-2017-MULTI-TN.png",
    issn = "1467-8659",
    url = "http://dx.doi.org/10.1111/cgf.13306",
    doi = "10.1111/cgf.13306",
    keywords = "medical imaging, visualization, scientific visualization, volume visualization, Medical Imaging [Visualization], Scientific Visualization [Visualization], Volume Visualization [Visualization], Multimodal Medical Data"
    }
    [PDF] [Bibtex]
    @ARTICLE {Kocincova2017SS,
    author = "Kocincov{\'a}, Lucia and Jare{\v{s}}ov{\'a}, Miroslava and By{\v{s}}ka, Jan and Parulek, J{\'u}lius and Hauser, Helwig and Kozl{\'i}kov{\'a}, Barbora",
    title = "Comparative visualization of protein secondary structures",
    journal = "BMC Bioinformatics",
    year = "2017",
    volume = "18(Suppl 2)",
    number = "23",
    month = "feb",
    abstract = "Background: Protein function is determined by many factors, namely by its constitution, spatial arrangement, and dynamic behavior. Studying these factors helps the biochemists and biologists to better understand the protein behavior and to design proteins with modified properties. One of the most common approaches to these studies is to compare the protein structure with other molecules and to reveal similarities and differences in their polypeptide chains. Results: We support the comparison process by proposing a new visualization technique that bridges the gap between traditionally used 1D and 3D representations. By introducing the information about mutual positions of protein chains into the 1D sequential representation the users are able to observe the spatial differences between the proteins without any occlusion commonly present in 3D view. Our representation is designed to serve namely for comparison of multiple proteins or a set of time steps of molecular dynamics simulation. Conclusions: The novel representation is demonstrated on two usage scenarios. The first scenario aims to compare a set of proteins from the family of cytochromes P450 where the position of the secondary structures has a significant impact on the substrate channeling. The second scenario focuses on the protein flexibility when by comparing a set of time steps our representation helps to reveal the most dynamically changing parts of the protein chain.",
    pdf = "pdfs/Kocincova2017.pdf",
    images = "images/Lucia2016Comparative.png",
    thumbnails = "images/Lucia2016Comparative.png",
    doi = "10.1186/s12859-016-1449-z"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Solteszova-2017-OFS,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and {\AA}smund Birkeland and Sergej Stoppel and Ivan Viola and Stefan Bruckner",
    title = "Output-Sensitive Filtering of Streaming Volume Data",
    journal = "Computer Graphics Forum",
    year = "2017",
    volume = "36",
    number = "1",
    pages = "249--262",
    month = "jan",
    abstract = "Real-time volume data acquisition poses substantial challenges for  the traditional visualization pipeline where data enhancement is  typically seen as a pre-processing step. In the case of 4D ultrasound  data, for instance, costly processing operations to reduce noise  and to remove artifacts need to be executed for every frame. To enable  the use of high quality filtering operations in such scenarios, we  propose an output-sensitive approach to the visualization of streaming  volume data. Our method evaluates the potential contribution of all  voxels to the final image, allowing us to skip expensive processing  operations that have little or no effect on the visualization. As  filtering operations modify the data values which may affect the  visibility, our main contribution is a fast scheme to predict their  maximum effect on the final image. Our approach prioritizes filtering  of voxels with high contribution to the final visualization based  on a maximal permissible error per pixel. With zero permissible error,  the optimized filtering will yield a result identical to filtering  of the entire volume. We provide a thorough technical evaluation  of the approach and demonstrate it on several typical scenarios that  require on-the-fly processing.",
    pdf = "pdfs/Solteszova-2017-OFS.pdf",
    images = "images/Solteszova-2017-OFS.jpg",
    thumbnails = "images/Solteszova-2017-OFS.png",
    youtube = "https://www.youtube.com/watch?v=xGPs560ttp0",
    doi = "10.1111/cgf.12799",
    keywords = "output-sensitive processing, volume data, filtering"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Mindek-2017-DVN,
    author = "Peter Mindek and Gabriel Mistelbauer and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Data-Sensitive Visual Navigation",
    journal = "Computers \& Graphics",
    year = "2017",
    volume = "67",
    pages = "77--85",
    month = "oct",
    abstract = "In visualization systems it is often the case that the changes of the input parameters are not proportional to the visual change of the generated output. In this paper, we propose a model for enabling data-sensitive navigation for user-interface elements. This model is applied to normalize the user input according to the visual change, and also to visually communicate this normalization. In this way, the exploration of heterogeneous data using common interaction elements can be performed in an efficient way. We apply our model to the field of medical visualization and present guided navigation tools for traversing vascular structures and for camera rotation around 3D volumes. The presented examples demonstrate that the model scales to user-interface elements where multiple parameters are set simultaneously.",
    pdf = "pdfs/Mindek-2017-DVN.pdf",
    images = "images/Mindek-2017-DVN.jpg",
    thumbnails = "images/Mindek-2017-DVN.png",
    youtube = "https://www.youtube.com/watch?v=FnhbjX7BRXI",
    note = "SCCG 2017 Best Paper Award",
    doi = "10.1016/j.cag.2017.05.012",
    event = "SCCG 2017",
    keywords = "navigation, exploration, medical visualization",
    location = "Mikulov, Czech Republic",
    project = "MetaVis"
    }
    [PDF] [Bibtex]
    @INPROCEEDINGS {vad_viktor-2017-WVE,
    author = "Viktor Vad and Jan By\v{s}ka and Adam Jur\v{c}\'{i}k and Ivan Viola and Meister Eduard Gr{\"o}ller and Helwig Hauser and Sergio M. Margues and Ji\v{r}\'{i} Damborsk\'{y} and Barbora Kozl\'{i}kov\'{a}",
    title = "Watergate: Visual Exploration of Water Trajectories in Protein Dynamics",
    booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine 2017",
    year = "2017",
    pages = "33--42",
    abstract = "The function of proteins is tightly related to their interactions with other molecules. The study of such interactions often requires to track the molecules that enter or exit specific regions of the proteins. This is investigated with molecular dynamics simulations, producing the trajectories of thousands of water molecules during hundreds of thousands of time steps. To ease the exploration of such rich spatio-temporal data, we propose a novel workflow for the analysis and visualization of large sets of water-molecule trajectories. Our solution consists of a set of visualization techniques, which help biochemists to classify, cluster, and filter the trajectories and to explore the properties and behavior of selected subsets in detail. Initially, we use an interactive histogram and a time-line visualization to give an overview of all water trajectories and select the interesting ones for further investigation. Further, we depict clusters of trajectories in a novel 2D representation illustrating the flows of water molecules. These views are interactively linked with a 3D representation where we show individual paths, including their simplification, as well as extracted statistical information displayed by isosurfaces. The proposed solution has been designed in tight collaboration with experts to support specific tasks in their scientific workflows. They also conducted several case studies to evaluate the usability and effectiveness of our new solution with respect to their research scenarios. These confirmed that our proposed solution helps in analyzing water trajectories and in extracting the essential information out of the large amount of input data.",
    pdf = "pdfs/Vad_Victor2017.pdf",
    images = "images/Watergate.png",
    thumbnails = "images/Watergate.png",
    proceedings = "In Proceedings of Eurographics Workshop on Visual Computing for Biology and Medicine",
    month = "sep",
    location = "Bremen, Germany",
    url = "https://www.cg.tuwien.ac.at/research/publications/2017/vad_viktor-2017-WVE/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Diehl-2017-AVA,
    author = "Alexandra Diehl and Leandro Pelorosso and Kresimir Matkovic and Juan Ruiz and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Albero: A Visual Analytics Approach for Probabilistic Weather Forecasting",
    journal = "Computer Graphics Forum",
    year = "2017",
    volume = "36",
    number = "7",
    pages = "135--144",
    month = "oct",
    abstract = "Probabilistic weather forecasts are amongst the most popular ways to quantify numerical forecast uncertainties. The analog regression method can quantify uncertainties and express them as probabilities. The method comprises the analysis of errors from a large database of past forecasts generated with a specific numerical model and observational data. Current visualization tools based on this method are essentially automated and provide limited analysis capabilities. In this paper, we propose a novel approach that breaks down the automatic process using the experience and knowledge of the users and creates a new interactive visual workflow. Our approach allows forecasters to study probabilistic forecasts, their inner analogs and observations, their associated spatial errors, and additional statistical information by means of coordinated and linked views. We designed the presented solution following a participatory methodology together with domain experts. Several meteorologists with different backgrounds validated the approach. Two case studies illustrate the capabilities of our solution. It successfully facilitates the analysis of uncertainty and systematic model biases for improved decision-making and process-quality measurements.",
    pdf = "pdfs/Diehl-2017-AVA.pdf",
    images = "images/Diehl-2017-AVA.jpg",
    thumbnails = "images/Diehl-2017-AVA.png",
    youtube = "https://www.youtube.com/watch?v=-yqoeEgkz28",
    doi = "10.1111/cgf.13279",
    keywords = "visual analytics, weather forecasting, uncertainty",
    project = "MetaVis"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Swoboda-2017-VQI,
    author = "Nicolas Swoboda and Judith Moosburner and Stefan Bruckner and Jai Y. Yu and Barry J. Dickson and Katja B{\"u}hler",
    title = "Visualization and Quantification for Interactive Analysis of Neural Connectivity in Drosophila",
    journal = "Computer Graphics Forum",
    year = "2017",
    volume = "36",
    number = "1",
    pages = "160--171",
    month = "jan",
    abstract = "Neurobiologists investigate the brain of the common fruit fly Drosophila  melanogaster to discover neural circuits and link them to complex  behavior. Formulating new hypotheses about connectivity requires  potential connectivity information between individual neurons, indicated  by overlaps of arborizations of two or more neurons. As the number  of higher order overlaps (i.e., overlaps of three or more arborizations)  increases exponentially with the number of neurons under investigation,  visualization is impeded by clutter and quantification becomes a  burden. Existing solutions are restricted to visual or quantitative  analysis of pairwise overlaps, as they rely on precomputed overlap  data. We present a novel tool that complements existing methods for  potential connectivity exploration by providing for the first time  the possibility to compute and visualize higher order arborization  overlaps on the fly and to interactively explore this information  in both its spatial anatomical context and on a quantitative level.  Qualitative evaluation by neuroscientists and non-experts demonstrated  the utility and usability of the tool",
    pdf = "pdfs/Swoboda-2017-VQI.pdf",
    images = "images/Swoboda-2017-VQI.jpg",
    thumbnails = "images/Swoboda-2017-VQI.png",
    youtube = "https://www.youtube.com/watch?v=bycWGQQpqks",
    doi = "10.1111/cgf.12792",
    keywords = "visual analysis, neurobiology"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Turkay2017VIS,
    author = "C. Turkay and E. Kaya and S. Balcisoy and H. Hauser",
    title = "Designing Progressive and Interactive Analytics Processes for High-Dimensional Data Analysis",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "PP",
    number = "99",
    pages = "1-1",
    month = "jan",
    abstract = "In interactive data analysis processes, the dialogue between the human and the computer is the enabling mechanism that can lead to actionable observations about the phenomena being investigated. It is of paramount importance that this dialogue is not interrupted by slow computational mechanisms that do not consider any known temporal human-computer interaction characteristics that prioritize the perceptual and cognitive capabilities of the users. In cases where the analysis involves an integrated computational method, for instance to reduce the dimensionality of the data or to perform clustering, such non-optimal processes are often likely. To remedy this, progressive computations, where results are iteratively improved, are getting increasing interest in visual analytics. In this paper, we present techniques and design considerations to incorporate progressive methods within interactive analysis processes that involve high-dimensional data. We define methodologies to facilitate processes that adhere to the perceptual characteristics of users and describe how online algorithms can be incorporated within these. A set of design recommendations and according methods to support analysts in accomplishing high-dimensional data analysis tasks are then presented. Our arguments and decisions here are informed by observations gathered over a series of analysis sessions with analysts from finance. We document observations and recommendations from this study and present evidence on how our approach contribute to the efficiency and productivity of interactive visual analysis sessions involving high-dimensional data.",
    pdf = "pdfs/Turkay2017VIS.pdf",
    images = "images/Turkay-2017-VIS.png",
    thumbnails = "images/Turkay-2017-VIS.png",
    doi = "10.1109/TVCG.2016.2598470",
    issn = "1077-2626"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Kolesar-2017-FCC,
    author = "Ivan Kolesar and Stefan Bruckner and Ivan Viola and Helwig Hauser",
    title = "A Fractional Cartesian Composition Model for Semi-spatial Comparative Visualization Design",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "851--860",
    month = "jan",
    abstract = "The study of spatial data ensembles leads to substantial visualization  challenges in a variety of applications. In this paper, we present  a model for comparative visualization that supports the design of  according ensemble visualization solutions by partial automation.  We focus on applications, where the user is interested in preserving  selected spatial data characteristics of the data as much as possible—even  when many ensemble members should be jointly studied using comparative  visualization. In our model, we separate the design challenge into  a minimal set of user-specified parameters and an optimization component  for the automatic configuration of the remaining design variables.  We provide an illustrated formal description of our model and exemplify  our approach in the context of several application examples from  different domains in order to demonstrate its generality within the  class of comparative visualization problems for spatial data ensembles.",
    pdf = "pdfs/Kolesar-2017-FCC.pdf",
    images = "images/Kolesar-2017-FCC.jpg",
    thumbnails = "images/Kolesar-2017-FCC.png",
    youtube = "https://www.youtube.com/watch?v=_zk67fmryok",
    doi = "10.1109/TVCG.2016.2598870",
    event = "IEEE SciVis 2016",
    keywords = "visualization models, integrating spatial and non-spatial data visualization, design methodologies",
    location = "Baltimore, USA",
    project = "physioillustration"
    }

2016

    [Bibtex]
    @MISC {moller2016winter,
    author = "M{\"o}ller, Torsten and Brambilla, Andrea and Hotz, Ingrid and Kindlmann, Gordon and Schulz, Hans-J{\"o}rg and Hauser, Helwig and Brodtkorb, Andr{\'e}",
    title = "Geilo Winter School in eScience on Scientific Visualization",
    year = "2016",
    thumbnails = "images/winter.png",
    url = "https://www.cs.ubc.ca/~tmm/talks.html",
    howpublished = "Geilo Winter School in eScience"
    }
    [PDF] [Bibtex]
    @INCOLLECTION {preim2016visual,
    author = "Preim, Bernhard and Klemm, Paul and Hauser, Helwig and Hegenscheid, Katrin and Oeltze, Steffen and Toennies, Klaus and V{\"o}lzke, Henry",
    title = "Visual analytics of image-centric cohort studies in epidemiology",
    booktitle = "Visualization in Medicine and Life Sciences III",
    publisher = "Springer",
    year = "2016",
    pages = "221--248",
    abstract = "Epidemiology characterizes the influence of causes to disease and health conditions of defined populations. Cohort studies are population-based studies involving usually large numbers of randomly selected individuals and comprising numerous attributes, ranging from self-reported interview data to results from various medical examinations, e.g., blood and urine samples. Since recently, medical imaging has been used as an additional instrument to assess risk factors and potential prognostic information. In this chapter, we discuss such studies and how the evaluation may benefit from visual analytics. Cluster analysis to define groups, reliable image analysis of organs in medical imaging data and shape space exploration to characterize anatomical shapes are among the visual analytics tools that may enable epidemiologists to fully exploit the potential of their huge and complex data. To gain acceptance, visual analytics tools need to complement more classical epidemiologic tools, primarily hypothesis-driven statistical analysis.",
    pdf = "pdfs/Preim2016_Centric.pdf",
    thumbnails = "images/Preim2016_Centric_1.png"
    }
    [PDF] [Bibtex]
    @INPROCEEDINGS {brambilla2016comparative,
    author = "Brambilla, Andrea and Angelelli, Paolo and Andreassen, {\O}yvind and Hauser, Helwig",
    title = "Comparative visualization of multiple time surfaces by planar surface reformation",
    booktitle = "Proceedings of the IEEE Pacific Visualization Symposium (PacificVis 2016)",
    year = "2016",
    pages = "88--95",
    abstract = "Comparing time surfaces at different integration time points, or
    from different seeding areas, can provide valuable insight into
    transport phenomena of fluid flows. Such a comparative study is
    challenging due to the often convoluted shapes of these surfaces.
    We propose a new approach for comparative flow visualization
    based on time surfaces, which exploits the idea of embedding the
    surfaces in a carefully designed, reformed 2D visualization space.
    Such an embedding enables new opportunities for comparative flow
    visualization. We present three different strategies for comparative
    flow visualization that take advantage of the reformation. By reforming the time surfaces, we not only mitigate occlusion issues,
    but we can devote also the third dimension of the visualization
    space to the comparative aspects of the visualization. Our approach
    is effective in a variety of flow study cases. The direct comparison
    of individual time surfaces reveals small scale differences and fine
    details about the fluid’s motion. The concurrent study of multiple
    surface families enables the identification and the comparison of
    the most prominent motion patterns. This work was developed in
    close collaboration with an expert in fluid dynamics, who assessed
    the potential usefulness of this approach in his field.",
    pdf = "pdfs/bambarilla.pdf",
    thumbnails = "images/bambarilla_1.png"
    }
    [PDF] [Bibtex]
    @ARTICLE {radovs2016towards,
    author = "Rado{\v{s}}, Sanjin and Splechtna, Rainer and Matkovi{\'c}, K. and Juras, M. and Gr{\"o}ller, Eduard and Hauser, Helwig",
    title = "Towards quantitative visual analytics with structured brushing and linked statistics",
    journal = "Computer Graphics Forum",
    year = "2016",
    volume = "35",
    number = "3",
    pages = "251--260",
    abstract = "Until now a lot of visual analytics predominantly delivers qualitative results—based, for example, on a continuous color map or a detailed spatial encoding. Important target applications, however, such as medical diagnosis and decision making, clearly benefit from quantitative analysis results. In this paper we propose several specific extensions to the well-established concept of linking \& brushing in order to make the analysis results more quantitative. We structure the brushing space in order to improve the reproducibility of the brushing operation, e.g., by introducing the percentile grid. We also enhance the linked visualization with overlaid descriptive statistics to enable a more quantitative reading of the resulting focus+context visualization. Additionally, we introduce two novel brushing techniques: the percentile brush and the Mahalanobis brush. Both use the underlying data to support statistically meaningful interactions with the data. We illustrate the use of the new techniques in the context of two case studies, one based on meteorological data and the other one focused on data from the automotive industry where we evaluate a shaft design in the context of mechanical power transmission in cars.",
    pdf = "pdfs/Rado-_et_al-2016-Computer_Graphics_Forum.pdf",
    thumbnails = "images/Rado-_et_al-2016-Computer_Graphics_Forum_1.png"
    }
    [DOI] [Bibtex]
    @ARTICLE {Michael2016Visual,
    author = "Michael Krone and Barbora Kozlikova and Norbert Lindow and Marc Baaden and Daniel Baum and Julius Parulek and Hans-Christian Hege and Ivan Viola",
    title = "Visual Analysis of Biomolecular Cavities: State of the Art",
    journal = "Computer Graphics Forum",
    year = "2016",
    abstract = "In this report we review and structure the branch of molecular visualization that is concerned with the visual analysis of cavities in macromolecular protein structures. First the necessary background, the domain terminology, and the goals of analytical reasoning are introduced. Based on a comprehensive collection of relevant research works, we present a novel classification for cavity detection approaches and structure them into four distinct classes: grid-based, Voronoi-based, surface-based, and probe-based methods. The subclasses are then formed by their combinations. We match these approaches with corresponding visualization technologies starting with direct 3D visualization, followed with non-spatial visualization techniques that for example abstract the interactions between structures into a relational graph, straighten the cavity of interest to see its profile in one view, or aggregate the time sequence into a single contour plot. We also discuss the current state of methods for the visual analysis of cavities in dynamic data such as molecular dynamics simulations. Finally, we give an overview of the most common tools that are actively developed and used in the structural biology and biochemistry research. Our report is concluded by an outlook on future challenges in the field.",
    images = "images/STARcavities2016.png",
    thumbnails = "images/STARcavities2016.png",
    publisher = "The Eurographics Association and John Wiley \& Sons Ltd.",
    issn = "1467-8659",
    doi = "10.1111/cgf.12928",
    project = "physioillustration"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Stoppel-2016-GIR,
    author = "Sergej Stoppel and Erlend Hodneland and Helwig Hauser and Stefan Bruckner",
    title = "Graxels: Information Rich Primitives for the Visualization of Time-Dependent Spatial Data",
    booktitle = "Proceedings of VCBM 2016",
    year = "2016",
    pages = "183--192",
    month = "sep",
    abstract = "Time-dependent volumetric data has important applications in areas  as diverse as medicine, climatology, and engineering. However, the  simultaneous quantitative assessment of spatial and temporal features  is very challenging. Common visualization techniques show either  the whole volume in one time step (for example using direct volume  rendering) or let the user select a region of interest (ROI) for  which a collection of time-intensity curves is shown. In this paper,  we propose a novel approach that dynamically embeds quantitative  detail views in a spatial layout. Inspired by the concept of small  multiples, we introduce a new primitive graxel (graph pixel). Graxels  are view dependent primitives of time-intensity graphs, generated  on-the-fly by aggregating per-ray information over time and image  regions. Our method enables the detailed feature-aligned visual analysis  of time-dependent volume data and allows interactive refinement and  filtering. Temporal behaviors like frequency relations, aperiodic  or periodic oscillations and their spatial context are easily perceived  with our method. We demonstrate the power of our approach using examples  from medicine and the natural sciences.",
    pdf = "pdfs/Stoppel-2016-GIR.pdf",
    images = "images/Stoppel-2016-GIR.jpg",
    thumbnails = "images/Stoppel-2016-GIR.png",
    youtube = "https://www.youtube.com/watch?v=UsClj3ytd0Y",
    doi = "10.2312/vcbm.20161286",
    event = "VCBM 2016",
    keywords = "time-dependent data, volume data, small multiples",
    location = "Bergen, Norway"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Labschuetz-2016-JJC,
    author = "Matthias Labsch{\"u}tz and Stefan Bruckner and Meister Eduard Gr{\"o}ller and Markus Hadwiger and Peter Rautek",
    title = "JiTTree: A Just-in-Time Compiled Sparse GPU Volume Data Structure",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2016",
    volume = "22",
    number = "1",
    pages = "1025--1034",
    month = "jan",
    abstract = "Sparse volume data structures enable the efficient representation  of large but sparse volumes in GPU memory for computation and visualization.  However, the choice of a specific data structure for a given data  set depends on several factors, such as the memory budget, the sparsity  of the data, and data access patterns. In general, there is no single  optimal sparse data structure, but a set of several candidates with  individual strengths and drawbacks. One solution to this problem  are hybrid data structures which locally adapt themselves to the  sparsity. However, they typically suffer from increased traversal  overhead which limits their utility in many applications. This paper  presents JiTTree, a novel sparse hybrid volume data structure that  uses just-in-time compilation to overcome these problems. By combining  multiple sparse data structures and reducing traversal overhead we  leverage their individual advantages. We demonstrate that hybrid  data structures adapt well to a large range of data sets. They are  especially superior to other sparse data structures for data sets  that locally vary in sparsity. Possible optimization criteria are  memory, performance and a combination thereof. Through just-in-time  (JIT) compilation, JiTTree reduces the traversal overhead of the  resulting optimal data structure. As a result, our hybrid volume  data structure enables efficient computations on the GPU, while being  superior in terms of memory usage when compared to non-hybrid data  structures.",
    pdf = "pdfs/Labschuetz-2016-JJC.pdf",
    images = "images/Labschuetz-2016-JJC.jpg",
    thumbnails = "images/Labschuetz-2016-JJC.png",
    doi = "10.1109/TVCG.2015.2467331",
    event = "IEEE SciVis 2015",
    keywords = "data transformation and representation, GPUs and multi-core architectures, volume rendering",
    location = "Chicago, USA"
    }
    [Bibtex]
    @INPROCEEDINGS {Kolesar2016VCBM,
    author = "Ivan Kolesar and Jan By{\v{s}}ka and Julius Parulek and Helwig Hauser and Barbora Kozl{\'i}kov{\'a}",
    title = "Unfolding and Interactive Exploration of Protein Tunnels and their Dynamics",
    booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine",
    year = "2016",
    pages = "1--10",
    month = "sep",
    abstract = "The presence of tunnels in protein structures substantially influences their reactivity with other molecules. Therefore, studying their properties and changes over time has been in the scope of biochemists for decades. In this paper we introduce a novel approach for comparative visualization and exploration of ensembles of tunnels. Our goal is to overcome occlusion problems present in traditional tunnel representations while providing users a quick way to navigate through the input dataset to identify potentially interesting tunnels. First, we unfold the input tunnels to a 2D representation enabling to observe the mutual position of amino acids forming the tunnel surface and the amount of surface they influence. These 2D images are subsequently described by image moments commonly used in image processing. This way we are able to detect similarities and outliers in the dataset, which are visualized as clusters in a scatterplot graph. The same coloring scheme is used in the linked bar chart enabling to detect the position of the cluster members over time. These views provide a way to select a subset of potentially interesting tunnels that can be further explored in detail using the 2D unfolded view and also traditional 3D representation. The usability of our approach is demonstrated on case studies conducted by the domain experts.",
    images = "images/Kolesar-2016-VCBM.png",
    thumbnails = "images/Kolesar-2016-VCBM-thumbnail.jpg",
    proceedings = "Proceedings of Eurographics Workshop on Visual Computing in Biology and Medicine",
    keywords = "unfolding, storytelling, game visualization",
    location = "Bergen, Norway",
    project = "physioillustration"
    }
    [PDF] [Bibtex]
    @INPROCEEDINGS {Smit2016SLINE,
    author = "Nils Lichtenberg and Noeska Smit and Christian Hansen and Kai Lawonn",
    title = "Sline: Seamless Line Illustration for Interactive Biomedical Visualization",
    booktitle = "Proceedings of VCBM 2016",
    year = "2016",
    month = "sep",
    abstract = "In medical visualization of surface information, problems often arise when visualizing several overlapping structures simultaneously. There is a trade-off between visualizing multiple structures in a detailed way and limiting visual clutter, in order to allow users to focus on the main structures. Illustrative visualization techniques can help alleviate these problems by defining a level of abstraction per structure. However, clinical uptake of these advanced visualization techniques so far has been limited due to the complex parameter settings required. To bring advanced medical visualization closer to clinical application, we propose a novel illustrative technique that offers a seamless transition between various levels of abstraction and detail. Using a single comprehensive parameter, users are able to quickly define a visual representation per structure that fits the visualization requirements for focus and context structures. This technique can be applied to any biomedical context in which multiple surfaces are routinely visualized, such as neurosurgery, radiotherapy planning or drug design. Additionally, we introduce a novel hatching technique, that runs in real-time and does not require texture coordinates. An informal evaluation with experts from different biomedical domains reveals that our technique allows users to design focus-and-context visualizations in a fast and intuitive manner.",
    pdf = "pdfs/Lichtenberg-2016-SLINE.pdf",
    images = "images/Smit-2016-SLINE.PNG",
    thumbnails = "images/Smit-2016-SLINE.jpg",
    proceedings = "Proceedings of Eurographics Workshop on Visual Computing in Biology and Medicine",
    event = "VCBM 2016",
    keywords = "surface rendering, medical visualization, illustrative rendering",
    location = "Bergen, Norway"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Klein-2016-TIV,
    author = "Tobias Klein and Stefan Bruckner and Meister Eduard Gr{\"o}ller and Markus Hadwiger and Peter Rautek",
    title = "Towards Interactive Visual Exploration of Parallel Programs using a Domain-Specific Language",
    booktitle = "Proceedings of the International Workshop on OpenCL 2016",
    year = "2016",
    month = "apr",
    abstract = "The use of GPUs and the massively parallel computing paradigm have  become wide-spread. We describe a framework for the interactive visualization  and visual analysis of the run-time behavior of massively parallel  programs, especially OpenCL kernels. This facilitates understanding  a program's function and structure, finding the causes of possible  slowdowns, locating program bugs, and interactively exploring and  visually comparing different code variants in order to improve performance  and correctness. Our approach enables very specific, user-centered  analysis, both in terms of the recording of the run-time behavior  and the visualization itself. Instead of having to manually write  instrumented code to record data, simple code annotations tell the  source-to-source compiler which code instrumentation to generate  automatically. The visualization part of our framework then enables  the interactive analysis of kernel run-time behavior in a way that  can be very specific to a particular problem or optimization goal,  such as analyzing the causes of memory bank conflicts or understanding  an entire parallel algorithm.",
    pdf = "pdfs/Klein-2016-TIV.pdf",
    images = "images/Klein-2016-TIV.jpg",
    thumbnails = "images/Klein-2016-TIV.png",
    doi = "10.1145/2909437.2909459",
    event = "IWOCL 2016",
    extra = "pdfs/Klein-2016-TIV-Poster.pdf",
    keywords = "domain specific languages, GPU programming, visual exploration",
    location = "Vienna, Austria",
    owner = "bruckner"
    }
    [Bibtex]
    @MISC {Stoppel2015ConfReport,
    author = "Sergej Stoppel",
    title = "Conference Report IEEE VIS 2014",
    month = "jan",
    year = "2016",
    abstract = "Conference report about the IEEE VIS 2014 in Paris.",
    images = "images/Shneiderman_Gerson_Pushups.PNG",
    thumbnails = "images/Shneiderman_Gerson_Pushups.PNG",
    url = "http://www.norsigd.no/norsigd_info/nsi-1-15.pdf"
    }
    [PDF] [Bibtex]
    @ARTICLE {Byska2016AnimoAminoMiner,
    author = "Jan By{\v{s}}ka and Mathieu Le Muzic and Eduard M. Gr{\"o}ller and Ivan Viola and Barbora Kozl{\'i}kov{\'a}",
    title = "AnimoAminoMiner: Exploration of Protein Tunnels and their Properties in Molecular Dynamics",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2016",
    volume = "22",
    number = "1",
    pages = "747--756",
    abstract = "In this paper we propose a novel method for the interactive exploration of protein tunnels. The basic principle of our approach is that we entirely abstract from the 3D/4D space the simulated phenomenon is embedded in. A complex 3D structure and its curvature information is represented only by a straightened tunnel centerline and its width profile. This representation focuses on a key aspect of the studied geometry and frees up graphical estate to key chemical and physical properties represented by surrounding amino acids. The method shows the detailed tunnel profile and its temporal aggregation. The profile is interactively linked with a visual overview of all amino acids which are lining the tunnel over time. In this overview, each amino acid is represented by a set of colored lines depicting the spatial and temporal impact of the amino acid on the corresponding tunnel. This representation clearly shows the importance of amino acids with respect to selected criteria. It helps the biochemists to select the candidate amino acids for mutation which changes the protein function in a desired way. The AnimoAminoMiner was designed in close cooperation with domain experts. Its usefulness is documented by their feedback and a case study, which are included.",
    pdf = "pdfs/2016-Byska-AnimoAminoMiner.pdf",
    images = "images/IvanViola2016.png",
    thumbnails = "images/IvanViola2016.png",
    publisher = "IEEE"
    }

2015

    [PDF] [Bibtex]
    @ARTICLE {Angelelli-2015-PQA,
    author = "Paolo Angelelli and Stefan Bruckner",
    title = "Performance and Quality Analysis of Convolution-Based Volume Illumination",
    journal = "Journal of WSCG",
    year = "2015",
    volume = "23",
    number = "2",
    pages = "131--138",
    month = "jun",
    abstract = "Convolution-based techniques for volume rendering are among the fastest  in the on-the-fly volumetric illumination category. Such methods,  however, are still considerably slower than conventional local illumination  techniques. In this paper we describe how to adapt two commonly used  strategies for reducing aliasing artifacts, namely pre-integration  and supersampling, to such techniques. These strategies can help  reduce the sampling rate of the lighting information (thus the number  of convolutions), bringing considerable performance benefits. We  present a comparative analysis of their effectiveness in offering  performance improvements. We also analyze the (negligible) differences  they introduce when comparing their output to the reference method.  These strategies can be highly beneficial in setups where direct  volume rendering of continuously streaming data is desired and continuous  recomputation of full lighting information is too expensive, or where  memory constraints make it preferable not to keep additional precomputed  volumetric data in memory. In such situations these strategies make  single pass, convolution-based volumetric illumination models viable  for a broader range of applications, and this paper provides practical  guidelines for using and tuning such strategies to specific use cases.",
    pdf = "pdfs/Angelelli-2015-PQA.pdf",
    images = "images/Angelelli-2015-PQA.jpg",
    thumbnails = "images/Angelelli-2015-PQA.png",
    keywords = "volume rendering, global illumination, scientific visualization, medical visualization"
    }
    [DOI] [Bibtex]
    @INPROCEEDINGS {eurovisstar20151112,
    author = "Kozlikova, Barbora and Krone, Michael and Lindow, Norbert and Falk, Martin and Baaden, Marc and Baum, Daniel and Viola, Ivan and Parulek, Julius and Hege, Hans-Christian",
    title = "Visualization of Biomolecular Structures: State of the Art",
    booktitle = "Eurographics Conference on Visualization (EuroVis) - STARs",
    year = "2015",
    editor = "R. Borgo and F. Ganovelli and I. Viola",
    volume = "-",
    publisher = "The Eurographics Association",
    abstract = "Structural properties of molecules are of primary concern in many fields. This report provides a comprehensive overview on techniques that have been developed in the fields of molecular graphics and visualization with a focus on applications in structural biology. The field heavily relies on computerized geometric and visual representations of three-dimensional, complex, large, and time-varying molecular structures. The report presents a taxonomy that demonstrates which areas of molecular visualization have already been extensively investigated and where the field is currently heading. It discusses visualizations for molecular structures, strategies for efficient display regarding image quality and frame rate, covers different aspects of level of detail, and reviews visualizations illustrating the dynamic aspects of molecular simulation data. The report concludes with an outlook on promising and important research topics to enable further success in advancing the knowledge about interaction of molecular structures.",
    images = "images/molvis_star.png",
    thumbnails = "images/molvis_star.png",
    proceedings = "Eurographics Conference on Visualization (EuroVis) - STARs",
    doi = "10.2312/eurovisstar.20151112",
    journal = "-",
    number = "-",
    keywords = "-",
    project = "physioillustration"
    }
    [DOI] [Bibtex]
    @ARTICLE {Byska2015MC,
    author = "Jan By{\v{s}}ka and Adam Jur{\v{c}}{\'i}k and Eduard M. Gr{\"o}ller and Ivan Viola and Barbora Kozl{\'i}kov{\'a}",
    title = "MoleCollar and Tunnel Heat Map Visualizations for Conveying Spatio-Temporo-Chemical Properties Across and Along Protein Voids",
    journal = "Computer Graphics Forum",
    year = "2015",
    volume = "34",
    number = "3",
    pages = "1--10",
    abstract = "Studying the characteristics of proteins and their inner void space, including their geometry, physico-chemical properties and dynamics are instrumental for evaluating the reactivity of the protein with other small molecules. The analysis of long simulations of molecular dynamics produces a large number of voids which have to be further explored and evaluated. In this paper we propose three new methods: two of them convey important properties along the long axis of a selected void during molecular dynamics and one provides a comprehensive picture across the void. The first two proposed methods use a specific heat map to present two types of information: an overview of all detected tunnels in the dynamics and their bottleneck width and stability over time, and an overview of a specific tunnel in the dynamics showing the bottleneck position and changes of the tunnel length over time. These methods help to select a small subset of tunnels, which are explored individually and in detail. For this stage we propose the third method, which shows in one static image the temporal evolvement of the shape of the most critical tunnel part, i.e., its bottleneck. This view is enriched with abstract depictions of different physicochemical properties of the amino acids surrounding the bottleneck. The usefulness of our newly proposed methods is demonstrated on a case study and the feedback from the domain experts is included. The biochemists confirmed that our novel methods help to convey the information about the appearance and properties of tunnels in a very intuitive and comprehensible manner.",
    images = "images/cgf12612-fig-0001.png",
    thumbnails = "images/cgf12612-fig-0001.png",
    issn = "1467-8659",
    url = "http://dx.doi.org/10.1111/cgf.12612",
    doi = "10.1111/cgf.12612",
    keywords = "Categories and Subject Descriptors (according to ACM CCS), I.3.6 [Computer Graphics]: Picture/Image Generation—Line and curve generation"
    }
    [Bibtex]
    @INPROCEEDINGS {cellVIEW_2015,
    author = "Mathieu Le Muzic and Ludovic Autin and Julius Parulek and Ivan Viola",
    title = "cellVIEW: a Tool for Illustrative and Multi-Scale Rendering of Large Biomolecular Datasets",
    booktitle = "EG Workshop on Visual Computing for Biology and Medicine",
    year = "2015",
    month = "sep",
    abstract = "In this article we introduce cellVIEW, a new system to interactively visualize large biomolecular datasets on the atomic level. Our tool is unique and has been specifically designed to match the ambitions of our domain experts to model and interactively visualize structures comprised of several billions atom. The cellVIEW system integrates acceleration techniques to allow for real-time graphics performance of 60 Hz display rate on datasets representing large viruses and bacterial organisms. Inspired by the work of scientific illustrators, we propose a level-of-detail scheme which purpose is two-fold: accelerating the rendering and reducing visual clutter. The main part of our datasets is made out of macromolecules, but it also comprises nucleic acids strands which are stored as sets of control points. For that specific case, we extend our rendering method to support the dynamic generation of DNA strands directly on the GPU. It is noteworthy that our tool has been directly implemented inside a game engine. We chose to rely on a third party engine to reduce software development work-load and to make bleeding-edge graphics techniques more accessible to the end-users. To our knowledge cellVIEW is the only suitable solution for interactive visualization of large bimolecular landscapes on the atomic level and is freely available to use and extend.",
    images = "images/cellview2015.png",
    thumbnails = "images/cellview2015.png",
    proceedings = "Proceedings of Eurographics Workshop on Visual Computing in Biology and Medicine",
    project = "physioillustration"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Mindek-2015-ASM,
    author = "Peter Mindek and Ladislav \v{C}mol{\'i}k and Ivan Viola and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Automatized Summarization of Multiplayer Games",
    booktitle = "Proceedings of SCCG 2015",
    year = "2015",
    pages = "93--100",
    month = "apr",
    abstract = "We present a novel method for creating automatized gameplay dramatization  of multiplayer video games. The dramatization serves as a visual  form of guidance through dynamic 3D scenes with multiple foci, typical  for such games. Our goal is to convey interesting aspects of the  gameplay by animated sequences creating a summary of events which  occurred during the game. Our technique is based on processing many  cameras, which we refer to as a flock of cameras, and events captured  during the gameplay, which we organize into a so-called event graph.  Each camera has a lifespan with a certain time interval and its parameters  such as position or look-up vector are changing over time. Additionally,  during its lifespan each camera is assigned an importance function,  which is dependent on the significance of the structures that are  being captured by the camera. The images captured by the cameras  are composed into a single continuous video using a set of operators  based on cinematographic effects. The sequence of operators is selected  by traversing the event graph and looking for specific patterns corresponding  to the respective operators. In this way, a large number of cameras  can be processed to generate an informative visual story presenting  the gameplay. Our compositing approach supports insets of camera  views to account for several important cameras simultaneously. Additionally,  we create seamless transitions between individual selected camera  views in order to preserve temporal continuity, which helps the user  to follow the virtual story of the gameplay.",
    pdf = "pdfs/Mindek-2015-ASM.pdf",
    images = "images/Mindek-2015-ASM.jpg",
    thumbnails = "images/Mindek-2015-ASM.png",
    note = "SCCG 2015 Best Paper Award",
    doi = "10.1145/2788539.2788549",
    keywords = "animation, storytelling, game visualization",
    location = "Smolenice, Slovakia",
    owner = "bruckner",
    timestamp = "2015.06.08",
    url = "http://www.cg.tuwien.ac.at/research/publications/2015/mindek-2015-mc/"
    }
    [PDF] [Bibtex]
    @INPROCEEDINGS {PBVRVis2015026,
    author = "Matkovic, Kresimir and Gracanin, Denis and Jelovi{\'c}, Mario and Hauser, Helwig",
    title = "Interactive Visual Analysis of Large Simulation Ensembles",
    booktitle = "Proceedings of Winter Simulation Conference (WSC 2015, to appear)",
    year = "2015",
    abstract = "Recent advancements in simulation and computing make it possible to compute large simulation ensembles. A simulation ensemble consists of multiple simulation runs of the same model with different values of control parameters. In order to cope with ensemble data, a modern analysis methodology is necessary. In this paper, we present our experience with simulation ensemble exploration and steering by means of interactive visual analysis. We describe our long-term collaboration with fuel injection experts from the automotive industry. We present how interactive visual analysis can be used to gain a deep understanding in the ensemble data, and how it can be used, in a combination with automatic methods, to steer the ensemble creation, even for very complex systems. Very positive feedback from domain experts motivated us, a team of visualization and simulation experts, to present this research to the simulation community.",
    pdf = "pdfs/matkovic_2015_winter_simConf.pdf",
    images = "images/IVA_matkovic.png",
    thumbnails = "images/IVA_matkovic.png"
    }
    [DOI] [Bibtex]
    @INPROCEEDINGS {7156384,
    author = "Le Muzic, Mathieu and Waldner, Manuela and Parulek, Julius and Viola, Ivan",
    title = "Illustrative Timelapse: A technique for illustrative visualization of particle-based simulations",
    booktitle = "Visualization Symposium (PacificVis), 2015 IEEE Pacific",
    year = "2015",
    pages = "247--254",
    month = "apr",
    abstract = "Animated movies are a popular way to communicate complex phenomena in cell biology to the broad audience. Animation artists apply sophisticated illustration techniques to communicate a story, while trying to maintain a realistic representation of a complex dynamic environment. Since such hand-crafted animations are time-consuming and cost-intensive to create, our goal is to formalize illustration techniques used by artists to facilitate the automatic creation of visualizations generated from mesoscale particle-based molecular simulations. Our technique Illustrative Timelapse supports visual exploration of complex biochemical processes in dynamic environments by (1) seamless temporal zooming to observe phenomena in different temporal resolutions, (2) visual abstraction of molecular trajectories to ensure that observers are able to visually follow the main actors, (3) increased visual focus on events of interest, and (4) lens effects to preserve a realistic representation of the environment in the context. Results from a first user study indicate that visual abstraction of trajectories improves the ability to follow a story and is also appreciated by users. Lens effects increased the perceived amount of molecular motion in the environment while trading off traceability of individual molecules.",
    images = "images/illustrative_timelapse.png",
    thumbnails = "images/illustrative_timelapse.png",
    proceedings = "Proceedings of IEEE Pacific Visualization",
    keywords = "Biological system modeling;Data models;Data visualization;Lenses;Trajectory;Videos;Visualization;I.3.7[COMPUTER GRAPHICS]: Three-Dimensional Graphics and Realism—Animation;I.6.3 [SIMULATION AND MODELING]: Applications—",
    doi = "10.1109/PACIFICVIS.2015.7156384",
    project = "physioillustration"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {alsallakh2015state,
    author = "Alsallakh, Bilal and Micallef, Luana and Aigner, Wolfgang and Hauser, Helwig and Miksch, Silvia and Rodgers, Peter",
    title = "The State-of-the-Art of Set Visualization",
    journal = "Computer Graphics Forum",
    year = "2015",
    abstract = "Sets comprise a generic data model that has been used in a variety of data analysis problems. Such problems involve analysing and visualizing set relations between multiple sets defined over the same collection of elements. However, visualizing sets is a non-trivial problem due to the large number of possible relations between them. We provide a systematic overview of state-of-the-art techniques for visualizing different kinds of set relations. We classify these techniques into six main categories according to the visual representations they use and the tasks they support. We compare the categories to provide guidance for choosing an appropriate technique for a given problem. Finally, we identify challenges in this area that need further research and propose possible directions to address these challenges. Further resources on set visualization are available at http://www.setviz.net.",
    pdf = "pdfs/Alsallakh_et_al-2016-Computer_Graphics_Forum.pdf",
    images = "images/ThumbNailIMG-SetVisSTAR.png",
    thumbnails = "images/ThumbNailIMG-SetVisSTAR.png",
    organization = "Wiley Online Library",
    booktitle = "Computer Graphics Forum",
    doi = "10.1111/cgf.12722"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Diehl-2015-VAS,
    author = "Alexandra Diehl and Leandro Pelorosso and Claudio Delrieux and Celeste Saulo and Juan Ruiz and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Visual Analysis of Spatio-Temporal Data: Applications in Weather Forecasting",
    journal = "Computer Graphics Forum",
    year = "2015",
    volume = "34",
    number = "3",
    pages = "381--390",
    month = "may",
    abstract = "Weather conditions affect multiple aspects of human life such as economy,  safety, security, and social activities. For this reason, weather  forecast plays a major role in society. Currently weather forecasts  are based on Numerical Weather Prediction (NWP) models that generate  a representation of the atmospheric flow. Interactive visualization  of geo-spatial data has been widely used in order to facilitate the  analysis of NWP models. This paper presents a visualization system  for the analysis of spatio-temporal patterns in short-term weather  forecasts. For this purpose, we provide an interactive visualization  interface that guides users from simple visual overviews to more  advanced visualization techniques. Our solution presents multiple  views that include a timeline with geo-referenced maps, an integrated  webmap view, a forecast operation tool, a curve-pattern selector,  spatial filters, and a linked meteogram. Two key contributions of  this work are the timeline with geo-referenced maps and the curve-pattern  selector. The latter provides novel functionality that allows users  to specify and search for meaningful patterns in the data. The visual  interface of our solution allows users to detect both possible weather  trends and errors in the weather forecast model.We illustrate the  usage of our solution with a series of case studies that were designed  and validated in collaboration with domain experts.",
    pdf = "pdfs/Diehl-2015-VAS.pdf",
    images = "images/Diehl-2015-VAS.jpg",
    thumbnails = "images/Diehl-2015-VAS.png",
    youtube = "https://www.youtube.com/watch?v=hhQwsuXpHo8",
    doi = "10.1111/cgf.12650",
    event = "EuroVis 2015",
    keywords = "weather forecasting, visual analysis, spatiotemporal data",
    location = "Cagliari, Italy",
    owner = "bruckner",
    timestamp = "2015.06.08"
    }
    [PDF] [Bibtex]
    @MISC {Hauser2015IRIS,
    author = "Helwig Hauser",
    title = "Medical Visualization Research at the VisGroup @ UiB.no/ii",
    howpublished = "Invited talk at IRIS",
    month = "nov",
    year = "2015",
    abstract = "Established about eight years ago, the Visualization Research Group is the youngest of six research groups at the Department of Informatics, UiB, focusing on application-oriented basic research in visualization. Medicine and related disciplines (such as biomedicine, biology, etc.) stand for a growing number of grand visualization challenges and the vivid interdisciplinary MedViz network in Bergen gives ample opportunities for internationally recognized visualization research. In this talk, an overview of the visualization research group [1] is given, together with a short review of selected research projects in medical visualization.",
    pdf = "pdfs/2015-11-25-HH-IRIS.pdf",
    images = "images/ThumbNailIRIS.jpg",
    thumbnails = "images/ThumbNailIRIS.jpg",
    day = "25",
    location = "Bergen, Norway"
    }
    [DOI] [Bibtex]
    @ARTICLE {Brambilla15Expressive,
    author = "Andrea Brambilla and Helwig Hauser",
    title = "Expressive Seeding of Multiple Stream Surfaces for Interactive Flow Exploration",
    journal = "Computers \& Graphics",
    year = "2015",
    volume = "47",
    pages = "123--134",
    abstract = "Integral surfaces, such as stream and path surfaces, are highly effective in the context of the exploration and the analysis of the long-term behavior of three-dimensional flows. However, specifying the seeding curves that lead to an expressive set of integral surfaces is a challenging and cumbersome task. In this paper, we propose an algorithm for automatically seeding multiple stream surfaces around a user-specified location of interest. The process is guided by a streamline similarity measure. Within the resulting integral surfaces, adjacent streamlines are as similar as possible to each other. In addition, we aim at conveying different aspects of the flow behavior with each surface. This is achieved by maximizing the dissimilarity between streamlines from different stream surfaces. The capabilities of our technique are demonstrated on a number of application cases. We provide a qualitative comparison with two state-of-the-art approaches. We report from our detailed exchange with a domain expert concerning the expressiveness and usefulness of our approach. A thorough analysis of the few parameters involved is provided. ",
    images = "images/Brambilla15Expressive01.png, images/Brambilla15Expressive02.png",
    thumbnails = "images/Brambilla15Expressive01_thumb.png, images/Brambilla15Expressive02_thumb.png",
    publisher = "Elsevier",
    doi = "10.1016/j.cag.2015.01.002",
    url = "http://www.sciencedirect.com/science/article/pii/S0097849315000035",
    keywords = "Flow visualization; Stream surface selection; Visibility management"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {splechtna2015interactive,
    author = "Splechtna, Rainer and Matkovic, Kresimir and Gracanin, Denis and Jelovic, Mario and Hauser, Helwig",
    title = "Interactive visual steering of hierarchical simulation ensembles",
    booktitle = "Visual Analytics Science and Technology (VAST), 2015 IEEE Conference on",
    year = "2015",
    pages = "89--96",
    organization = "IEEE",
    abstract = "Multi-level simulation models, i.e., models where different components are simulated using sub-models of varying levels of complexity, belong to the current state-of-the-art in simulation. The existing analysis practice for multi-level simulation results is to manually compare results from different levels of complexity, amounting to a very tedious and error-prone, trial-and-error exploration process. In this paper, we introduce hierarchical visual steering, a new approach to the exploration and design of complex systems. Hierarchical visual steering makes it possible to explore and analyze hierarchical simulation ensembles at different levels of complexity. At each level, we deal with a dynamic simulation ensemble - the ensemble grows during the exploration process. There is at least one such ensemble per simulation level, resulting in a collection of dynamic ensembles, analyzed simultaneously. The key challenge is to map the multi-dimensional parameter space of one ensemble to the multi-dimensional parameter space of another ensemble (from another level). In order to support the interactive visual analysis of such complex data we propose a novel approach to interactive and semi-automatic parameter space segmentation and comparison. The approach combines a novel interaction technique and automatic, computational methods - clustering, concave hull computation, and concave polygon overlapping - to support the analysts in the cross-ensemble parameter space mapping. In addition to the novel parameter space segmentation we also deploy coordinated multiple views with standard plots. We describe the abstract analysis tasks, identified during a case study, i.e., the design of a variable valve actuation system of a car engine. The study is conducted in cooperation with experts from the automotive industry. Very positive feedback indicates the usefulness and efficiency of the newly proposed approach.",
    pdf = "pdfs/Splechtna_2015.pdf",
    images = "images/ThumbNailIMG-HierSteering.png",
    thumbnails = "images/ThumbNailIMG-HierSteering.png",
    doi = "10.1109/VAST.2015.7347635"
    }
    [PDF] [Bibtex]
    @MISC {Hauser2015Austria,
    author = "Helwig Hauser",
    title = "Integrating Spatial \& Non-spatial Data in Visualization",
    howpublished = "Invited talk",
    month = "October",
    year = "2015",
    abstract = "New opportunities in data science, such as the consideration of cohort study data, require new approaches to the appropriate design of an effective visualization. We need to capitalize on successful solutions from previous research, of course, but we should also explore new strategies that challenge our already established mindset in visualization. In this talk, I address the specific challenge of integrating spatial and non-spatial data in visualization, in particular, when the spatial aspect of the data is of great importance to the user---this could relate to the morphological information in a 3D medical scan or the geometrical aspects of flow features in a CFD simulation. In data visualization, the actual mapping step---from data to a visual form---is certainly crucial and we should strive to optimally exploit the great opportunities that we have in designing this step. In data-intensive sciences, the study objects of interest are increasingly often represented by extensive and rich datasets (aka. big data)---while traditionally the focus of visualization was on individual, static datasets, we now face dynamic data, representing entire ensembles of study entities, etc. Visualization gets a lot harder, when facing such new 'big data' challenges---both on the designer side as well as also on the user side. At the same time, however, also the potential for impact is increasing, which amounts to a fantastic motivation for new basic research in visualization.",
    pdf = "pdfs/2015-10-14-HHauser-InvTalk.pdf",
    images = "images/ThumbPicHHAustria2015.png",
    thumbnails = "images/ThumbPicHHAustria2015.png",
    location = "Vienna, Austria"
    }
    [PDF] [Bibtex]
    @MISC {Hauser2015VIS,
    author = "Helwig Hauser",
    title = "From Anatomy to Physiology in Medical Visualization",
    howpublished = "Tutorial talk at IEEE VIS 2015",
    month = "October",
    year = "2015",
    abstract = "Generally, medical visualization assists the diagnosis of diseases as well as the treatment of patients. Capturing the patient's anatomy, which to a large degree is in the focus of traditional MedViz, certainly is one important key to the success of medical visualization. At least equally important, if not even more, is the consideration of physiology, entailing the complex of function (or malfunction) of the patient. Modern imaging modalities extend beyond the simple depiction of static anatomical snapshots to capturing temporal processes as well as to covering multiple scales of physiology eventually linking molecular biology to medicine. The visualization of human physiology complements other techniques, for example lab tests for quantifying certain physiological functions. We deem ourselves at the beginning of an interesting extension of MedViz research to increasingly capture physiology in addition to anatomy.",
    pdf = "pdfs/2015-10-25-VIS2015-TutTalkHH-print2up.pdf",
    images = "images/ThumbnailVisTut.png",
    thumbnails = "images/ThumbnailVisTut.png",
    day = "25",
    location = "Chicago, Illinois, USA"
    }
    [PDF] [Bibtex]
    @MISC {Hauser2015SUBSEA,
    author = "Helwig Hauser",
    title = "Big Data - visualization and visual analytics",
    howpublished = "Invited talk at the NCE Subsea Forum",
    month = "March",
    year = "2015",
    pdf = "pdfs/2015-03-19-NCE-BigDataVA-print2up.pdf",
    images = "images/ThumbnailBigData.jpg",
    thumbnails = "images/ThumbnailBigData.jpg",
    day = "19",
    location = "Bergen, Norway"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Karimov-2015-GVE,
    author = "Alexey Karimov and Gabriel Mistelbauer and Thomas Auzinger and Stefan Bruckner",
    title = "Guided Volume Editing based on Histogram Dissimilarity",
    journal = "Computer Graphics Forum",
    year = "2015",
    volume = "34",
    number = "3",
    pages = "91--100",
    month = "may",
    abstract = "Segmentation of volumetric data is an important part of many analysis  pipelines, but frequently requires manual inspection and correction.  While plenty of volume editing techniques exist, it remains cumbersome  and error-prone for the user to find and select appropriate regions  for editing. We propose an approach to improve volume editing by  detecting potential segmentation defects while considering the underlying  structure of the object of interest. Our method is based on a novel  histogram dissimilarity measure between individual regions, derived  from structural information extracted from the initial segmentation.  Based on this information, our interactive system guides the user  towards potential defects, provides integrated tools for their inspection,  and automatically generates suggestions for their resolution. We  demonstrate that our approach can reduce interaction effort and supports  the user in a comprehensive investigation for high-quality segmentations.",
    pdf = "pdfs/Karimov-2015-GVE.pdf",
    images = "images/Karimov-2015-GVE.jpg",
    thumbnails = "images/Karimov-2015-GVE.png",
    youtube = "https://www.youtube.com/watch?v=zjTYkXTm_dM",
    doi = "10.1111/cgf.12621",
    event = "EuroVis 2015",
    keywords = "medical visualization, segmentation, volume editing, interaction",
    location = "Cagliari, Italy",
    owner = "bruckner",
    timestamp = "2015.06.08",
    url = "http://www.cg.tuwien.ac.at/research/publications/2015/karimov-2015-HD/"
    }

2014

    [PDF] [Bibtex]
    @MISC {Hauser2014BigData,
    author = "Helwig Hauser",
    title = "Big Data - a threat or an opportunity for our modern society?",
    howpublished = "Invited talk at the Alumni Event of the University of Bergen, Norway",
    month = "May",
    year = "2014",
    abstract = "Invited talk at the Alumni Event of the University of Bergen, Norway",
    pdf = "pdfs/2014-05-10-UiB-Alumni-BigDataTalkHH-print2up.pdf",
    images = "images/2014-05-10-UiB-Alumni-BigDataTalkHH-print2up_Image_0003.jpg",
    thumbnails = "images/2014-05-10-UiB-Alumni-BigDataTalkHH-print2up_Image_0003.jpg",
    location = "Bergen, Norway"
    }
    [PDF] [Bibtex]
    @INPROCEEDINGS {Natali14Rapid,
    author = "Mattia Natali and Julius Parulek and Daniel Patel",
    title = "Rapid Modelling of Interactive Geological Illustrations with Faults and Compaction",
    booktitle = "Proceedings of Spring Conference on Computer Graphics (SCCG)",
    year = "2014",
    abstract = "In this paper, we propose new methods for building geological illustrations and animations. We focus on allowing geologists to create their subsurface models by means of sketches, to quickly communicate concepts and ideas rather than detailed information. The result of our sketch-based modelling approach is a layer-cake volume representing geological phenomena, where each layer is rock material which has accumulated due to a user-defined depositional event. Internal geological structures can be inspected by different visualization techniques that we employ. Faulting and compaction of rock layers are important processes in geology. They can be modelled and visualized with our technique. Our representation supports non-planar faults that a user may define by means of sketches. Real-time illustrative animations are achieved by our GPU accelerated approach.",
    pdf = "pdfs/Natali14Rapid.pdf",
    images = "images/Natali2014Rapid0.png, images/Natali2014Rapid1.png",
    thumbnails = "images/Natali2014Rapid0.png, images/Natali2014Rapid1.png",
    url = "http://dx.doi.org/10.1145/2643188.2643201",
    project = "geoillustrator"
    }
    [Bibtex]
    @ARTICLE {alsallakh2014visual,
    author = "Alsallakh, Bilal and Hanbury, Allan and Hauser, Helwig and Miksch, Silvia and Rauber, Andreas",
    title = "Visual methods for analyzing probabilistic classification data",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2014",
    volume = "20",
    number = "12",
    pages = "1703--1712",
    abstract = "Multi-class classifiers often compute scores for the classification samples describing probabilities to belong to different classes. In order to improve the performance of such classifiers, machine learning experts need to analyze classification results for a large number of labeled samples to find possible reasons for incorrect classification. Confusion matrices are widely used for this purpose. However, they provide no information about classification scores and features computed for the samples. We propose a set of integrated visual methods for analyzing the performance of probabilistic classifiers. Our methods provide insight into different aspects of the classification results for a large number of samples. One visualization emphasizes at which probabilities these samples were classified and how these probabilities correlate with classification error in terms of false positives and false negatives. Another view emphasizes the features of these samples and ranks them by their separation power between selected true and false classifications. We demonstrate the insight gained using our technique in a benchmarking classification dataset, and show how it enables improving classification performance by interactively defining and evaluating post-classification rules.",
    images = "images/alsallakh2014visual3.jpg, images/alsallakh2014visual1.jpg, images/alsallakh2014visual2.jpg",
    thumbnails = "images/alsallakh2014visual3.jpg",
    publisher = "IEEE"
    }
    [PDF] [DOI] [Bibtex]
    @MISC {Hauser2014SIBGRAPI,
    author = "Helwig Hauser",
    title = "Interactive Visual Exploration and Analysis of Multi-Faceted Scientific Data",
    howpublished = "Invited talk at SIBGRAPI Conference on Graphics, Patterns and Images in Rio de Janeiro, Brazil",
    month = "August",
    year = "2014",
    abstract = "Invited talk at SIBGRAPI Conference on Graphics, Patterns and Images in Rio de Janeiro, Brazil",
    pdf = "pdfs/2014-08-30-Rio-SIBGRAPI-invited-talk-print-new-new-2up.pdf",
    images = "images/2014-08-30-Rio-SIBGRAPI-invited-talk-print-new-new-2up_Image_0003.jpg, images/2014-08-30-Rio-SIBGRAPI-invited-talk--print-new-new-2up_Image_0001.jpg, images/2014-08-30-Rio-SIBGRAPI-invited-talk-print-new-new-2up_Image_0001(2).jpg, images/2014-08-30-Rio-SIBGRAPI-invited-talk-print-new-new-2up_Image_0001(3).jpg, images/2014-08-30-Rio-SIBGRAPI-invited-talk-print-new-new-2up_Image_0001(4).jpg",
    thumbnails = "images/2014-08-30-Rio-SIBGRAPI-invited-talk-print-new-new-2up_Image_0003.jpg",
    location = "Rio de Janeiro, Brazil",
    doi = "10.1007/978-1-4471-6497-5_15"
    }
    [Bibtex]
    @MISC {Kingman14GenomeMaking,
    author = "Pina Kingman",
    title = "Our Resilient Genome: The Making of a Science Film",
    howpublished = "Presentation in the EG VCBM workshop 2014",
    month = "September",
    year = "2014",
    abstract = "Every single human cell has to fix 10,000 to 20,000 lesions in its DNA every day. Our cells are constantly exposed to many different types of threats that damage our genome. These lesions could cause mutations in our DNA, potentially leading to cancer and other diseases. With such continuous onslaught, how can our cells possibly protect our DNA from damage and mutations? This presentation will showcase the first public screening of a short film about DNA repair, which blends computer graphics and biology to communicate exciting up-and-coming research. This film was developed in conjunction with the Department of Informatics and the Department of Molecular Biology at the University of Bergen, and the Institute of Computer Graphics and Algorithms at the Vienna University of Technology. Along with a discussion on the visualisation process, I will also talk about the intersection between film and science that helps us communicate complex information.",
    images = "images/no_thumb.png",
    thumbnails = "images/no_thumb.png",
    location = "Wien, Austria",
    project = "physioillustration"
    }
    [PDF] [Bibtex]
    @MISC {LeMusic14Temporal,
    author = "Mathieu Le Muzic and Julius Parulek and Manuela Waldner and Ivan Viola",
    title = "Illustrative Visualization of Biochemical Processes Featuring Multiple Temporal Scales",
    howpublished = "Poster presented at the EG VCBM workshop 2014",
    month = "September",
    year = "2014",
    pdf = "pdfs/LeMusic14Temporal.pdf",
    images = "images/LeMuzic14Temporal.png",
    thumbnails = "images/LeMuzic14Temporal_thumb.png",
    location = "Wien, Austria",
    project = "physioillustration"
    }
    [DOI] [Bibtex]
    @INPROCEEDINGS {alsallakh2014visualizing,
    author = "Alsallakh, Bilal and Micallef, Luana and Aigner, Wolfgang and Hauser, Helwig and Miksch, Silvia and Rodgers, Peter",
    title = "Visualizing sets and set-typed data: State-of-the-art and future challenges",
    booktitle = "Eurographics conference on Visualization (EuroVis)--State of The Art Reports",
    year = "2014",
    pages = "1--21",
    abstract = "A variety of data analysis problems can be modelled by defining multiple sets over a collection of elements and analyzing the relations between these sets. Despite their simple concept, visualizing sets is a non-trivial problem due to the large number of possible relations between them. We provide a systematic overview of state-of-theart techniques for visualizing different kinds of set relations. We classify these techniques into 7 main categories according to the visual representations they use and the tasks they support. We compare the categories to provide guidance for choosing an appropriate technique for a given problem. Finally, we identify challenges in this area that need further research and propose possible directions to address with these challenges.",
    images = "images/img_Page_13_Image_0001.jpg",
    thumbnails = "images/img_Page_13_Image_0001.jpg",
    proceedings = "Eurographics conference on Visualization (EuroVis)--stars",
    doi = "10.2312/eurovisstar.20141170"
    }
    [Bibtex]
    @MISC {Kingman14Integrating,
    author = "Pina Kingman and Anne-Kristin Stavrum and Ivan Viola and Helwig Hauser",
    title = "Integrating 2D and 3D Animation to Comprehensively Communicate Biology",
    howpublished = "Poster presented at the VizBi conference 2014",
    month = "March",
    year = "2014",
    abstract = "As research in cellular and molecular biology advances, so does the need to educate both the science research community and the general public. The former must be aware of developments in associated fields, the latter must be able to take responsibility for their own well-being. In both cases, we have a willing and capable audience, ready to delve deeper into the biological sciences. To exploit this opportunity, we need to research new and advanced visual language techniques to further improve communication. We are therefore investigating novel visual communication techniques to advance knowledge translation methods, focusing on effectively communicating abstract functional aspects of biological systems. To this end, we are creating several short animations, each one exploring different design solutions. These design solutions incorporate 2D motion graphics, information visualization, 3D animation, and can be applied to any biological story. In addition to our short animations, this research will culminate in a short film describing NAD-dependent DNA Repair, intended for the general public and researchers interested in these molecular systems.",
    images = "images/Kingman13Integrating.png",
    thumbnails = "images/Kingman13Integrating_thumb.jpg",
    location = "Heidelberg, Germany",
    project = "physioillustration"
    }
    [PDF] [Bibtex]
    @MISC {Hauser2014USP,
    author = "Helwig Hauser",
    title = "About Visualization in Bergen and Interactive Visual Analysis",
    howpublished = "Invited talk at Institute of Computing and Mathematical Sciences, University of São Paolo, in São Carlos, Brazil",
    month = "August",
    year = "2014",
    abstract = "Invited talk at Institute of Computing and Mathematical Sciences, University of São Paolo, in São Carlos, Brazil",
    pdf = "pdfs/2014-08-26-SaoCarlos-USP-inv-talk-print2up.pdf",
    images = "images/2014-08-26-SaoCarlos-USP-invtalk-print2up_Image_0001.jpg, images/2014-08-26--SaoCarlos-USP-inv-talk-print2up_Image_0001(2).jpg, images/2014-08-26-SaoCarlos-USP-inv-talk-print2up_Image_0001(3).jpg, images/2014-08-26-SaoCarlos-USP-inv-talk-print2up_Image_0001(4).jpg, images/2014-08-26-SaoCarlos-USP-inv-talk-print2up_Image_0002.jpg, images/2014-08-26-SaoCarlos-USP-inv-talk-print2up_Image_0003.jpg, images/2014-08-26-SaoCarlos-USP-inv-talk-print2up_Image_0003(2).jpg, images/2014-08-26-SaoCarlos-USP-inv-talk-print2up_Image_0002(2).jpg, images/2014-08-26-SaoCarlos-USP-inv-talk-print2up_Image_0002(3).jpg",
    thumbnails = "images/2014-08-26-SaoCarlos-USP-invtalk-print2up_Image_0001.jpg",
    location = "São Carlos, Brazil"
    }
    [PDF] [Bibtex]
    @MISC {Hauser2014NCE,
    author = "Helwig Hauser",
    title = "About Visual Computing",
    howpublished = "Invited talk at the NCE Subsea Theme Meeting on Visual Computing in Bergen, Norway",
    month = "April",
    year = "2014",
    abstract = "Invited talk at the NCE Subsea Theme Meeting on Visual Computing in Bergen, Norway",
    pdf = "pdfs/2014-04-08-VisCompTalk-HH-print2up.pdf",
    images = "images/2014-04-08-VisCompTalk-HH-print2up_Image_0004.jpg, images/2014-04-08-VisCompTalk-HH-print2up_Image_0006.jpg, images/2014-04-08-VisCompTalk-HH-print2up_Image_0010.jpg, images/2014-04-08-VisCompTalk-HH-print2up_Image_0002.jpg",
    thumbnails = "images/2014-04-08-VisCompTalk-HH-print2up_Image_0010.jpg",
    location = "Bergen, Norway"
    }
    [Bibtex]
    @PHDTHESIS {brambilla14thesis,
    author = "Andrea Brambilla",
    title = "Visibility-oriented Visualization Design for Flow Illustration",
    school = "Department of Informatics, University of Bergen, Norway",
    year = "2014",
    month = "December",
    abstract = "Flow phenomena are ubiquitous in our world and they affect many aspects of our daily life. For this reason, they are the subject of extensive studies in several research fields. In medicine, the blood flow through our vessels can reveal important information about cardiovascular diseases. The air flow around a vehicle and the motion of fluids in a combustion engine are examples of relevant flow phenomena in engineering disciplines. Meteorologists, climatologists and oceanographers are instead concerned with winds and water currents. Thanks to the recent advancements in computational fluid dynamics and to the increasing power of modern hardware, accurate simulations of flow phenomena are feasible nowadays. The evolution of multiple flow attributes, such as velocity, temperature and pressure, can be simulated over large spatial and temporal domains (4D). The amount of data generated by this process is massive, therefore visualization techniques are often adopted in order to ease the analysis phase. The overall goal is to convey information about the phenomena of interest through a suitable representation of the data at hand. Due to the multivariate and multidimensional nature of the data, visibility issues (such as cluttering and occlusion), represent a significant challenge. Flow visualization can greatly benefit from studying and addressing visibility issues already in the design phase. In this thesis we investigate and demonstrate the effectiveness of taking visibility management into account early in the design process. We apply this principle to three characteristic flow visualization scenarios: (1) The simultaneous visualization of multiple flow attributes. (2) The visual inspection of single and multiple integral surfaces. (3) The selection of seeding curves for constructing families of integral surfaces. Our techniques result in clutter- and occlusion-free visualizations, which effectively illustrate the key aspects of the flow behavior. 
For demonstration purposes, we have applied our approaches to a number of application cases. Additionally, we have discussed our visualization designs with domain experts. They showed a genuine interest in our work and provided insightful suggestions for future research directions.",
    images = "images/brambilla14thesis_0.png, images/brambilla14thesis_1.png",
    thumbnails = "images/brambilla14thesis_0_thumb.png, images/brambilla14thesis_1_thumb.png",
    isbn = "978-82-308-2753-6",
    url = "http://hdl.handle.net/1956/8961"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Schmidt-2014-YMC,
    author = "Johanna Schmidt and Reinhold Preiner and Thomas Auzinger and Michael Wimmer and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "YMCA - Your Mesh Comparison Application",
    booktitle = "Proceedings of IEEE VAST 2014",
    year = "2014",
    pages = "153--162",
    month = "nov",
    abstract = "Polygonal meshes can be created in several different ways. In this  paper we focus on the reconstruction of meshes from point clouds,  which are sets of points in 3D. Several algorithms that tackle this  task already exist, but they have different benefits and drawbacks,  which leads to a large number of possible reconstruction results  (i.e., meshes). The evaluation of those techniques requires extensive  comparisons between different meshes which is up to now done by either  placing images of rendered meshes side-by-side, or by encoding differences  by heat maps. A major drawback of both approaches is that they do  not scale well with the number of meshes. This paper introduces a  new comparative visual analysis technique for 3D meshes which enables  the simultaneous comparison of several meshes and allows for the  interactive exploration of their differences. Our approach gives  an overview of the differences of the input meshes in a 2D view.  By selecting certain areas of interest, the user can switch to a  3D representation and explore the spatial differences in detail.  To inspect local variations, we provide a magic lens tool in 3D.  The location and size of the lens provide further information on  the variations of the reconstructions in the selected area. With  our comparative visualization approach, differences between several  mesh reconstruction algorithms can be easily localized and inspected.",
    pdf = "pdfs/Schmidt-2014-YMC.pdf",
    images = "images/Schmidt-2014-YMC.jpg",
    thumbnails = "images/Schmidt-2014-YMC.png",
    youtube = "https://www.youtube.com/watch?v=1s-AmFCQRzM",
    doi = "10.1109/VAST.2014.7042491",
    event = "IEEE VIS 2014",
    keywords = "visual analysis, comparative visualization, 3D data exploration, focus+context, mesh comparison",
    location = "Paris, France",
    proceedings = "Proceedings of IEEE VAST 2014",
    url = "http://www.cg.tuwien.ac.at/research/publications/2014/ymca/"
    }
    [DOI] [VID] [Bibtex]
    @ARTICLE {Natali14Sketch,
    author = "Mattia Natali and Tore Grane Klausen and Daniel Patel",
    title = "Sketch-Based Modelling and Visualization of Geological Deposition",
    journal = "Computers \& Geosciences",
    year = "2014",
    volume = "67C",
    pages = "40--48",
    abstract = "We propose a method for sketching and visualizing geological models by sequentially defining stratigraphic layers, where each layer represents a unique erosion or deposition event. Evolution of rivers and deltas is important for geologists when interpreting the stratigraphy of the subsurface, in particular for hydrocarbon exploration. We illustratively visualize mountains, basins, lakes, rivers and deltas, and how they change the morphology of a terrain during their evolution. We present a compact representation of the model and a novel rendering algorithm that allows us to obtain an interactive and illustrative layer-cake visualization. A user study has been performed to evaluate our method.",
    vid = "vids/Natali2014Sketch.mp4",
    images = "images/Natali2014Sketch0.png, images/Natali2014Sketch1.png",
    thumbnails = "images/Natali2014Sketch0.png, images/Natali2014Sketch1.png",
    doi = "10.1016/j.cageo.2014.02.010",
    url = "http://www.sciencedirect.com/science/article/pii/S0098300414000508",
    project = "geoillustrator"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Waldner-2014-GHI,
    author = "Manuela Waldner and Stefan Bruckner and Ivan Viola",
    title = "Graphical Histories of Information Foraging",
    booktitle = "Proceedings of NordiCHI 2014",
    year = "2014",
    pages = "295--304",
    month = "oct",
    abstract = "During information foraging, knowledge workers iteratively seek, filter,  read, and extract information. When using multiple information sources  and different applications for information processing, re-examination  of activities for validation of previous decisions or re-discovery  of previously used information sources is challenging. In this paper,  we present a novel representation of cross-application histories  to support recall of past operations and re-discovery of information  resources. Our graphical history consists of a cross-scale visualization  combining an overview node-link diagram of used desktop resources  with nested (animated) snapshot sequences, based on a recording of  the visual screen output during the users’ desktop work. This representation  makes key elements of the users’ tasks visually stand out, while  exploiting the power of visual memory to recover subtle details of  their activities. In a preliminary study, users found our graphical  history helpful to recall details of an information foraging task  and commented positively on the ability to expand overview nodes  into snapshot and video sequences.",
    pdf = "pdfs/Waldner-2014-GHI.pdf",
    images = "images/Waldner-2014-GHI.jpg",
    thumbnails = "images/Waldner-2014-GHI.png",
    doi = "10.1145/2639189.2641202",
    keywords = "interaction history, graph visualization, provenance",
    owner = "bruckner",
    timestamp = "2014.12.30",
    url = "http://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-ghi/"
    }
    [PDF] [Bibtex]
    @PHDTHESIS {natali14thesis,
    author = "Mattia Natali",
    title = "Sketch-based Modelling and Conceptual Visualization of Geomorphological Processes for Interactive Scientific Communication",
    school = "Department of Informatics, University of Bergen, Norway",
    year = "2014",
    month = "September",
    abstract = "Throughout this dissertation, solutions for rapid digitalization of ideas will be defined. More precisely, the focus is on interactive scientific sketching and communication of geology, where the result is a digital illustrative 3D model. Results are achieved through a sketch-based modelling approach which gives the user a more natural and intuitive modelling process, hence leading to a quicker definition of a geological illustration. To be able to quickly externalize and communicate one's ideas as a digital 3D model, can be of importance. For instance, students may profit from explanations supported by interactive illustrations. Exchange of information and hypotheses between domain experts is also a targeted situation in our work. Furthermore, illustrative models are frequently employed in business, when decisional meetings take place for convincing the management that a project is worth to be funded. An advantage of digital models is that they can be saved and they are easy to distribute. In contrast to 2D images or paper sketches, one can interact with digital 3D models, and they can be transferred on portable devices for easy access (for instance during geological field studies). Another advantage, compared to standard geological illustrations, is that if a model has been created with internal structures, it can be arbitrarily cut and inspected. Different solutions for different aspects of subsurface geology are presented in this dissertation. To express folding and faulting processes, a first modelling approach based on cross-sectional sketches is introduced. User defined textures can be associated to each layer, and can then be deformed with sketch strokes, for communicating layer properties such as rock type and grain size. A following contribution includes a simple and compact representation to model and visualize 3D stratigraphic models. With this representation, erosion and deposition of fluvial systems are easy to specify and display. 
Ancient river channels and other geological features, which are present in the subsurface, can be accessed by means of a volumetric representation. Geological models are obtained and visualized by sequentially defining stratigraphic layers, where each layer represents a unique erosion or deposition event. Evolution of rivers and deltas is important for geologists when interpreting the stratigraphy of the subsurface, in particular because it changes the landscape morphology and because river deposits are potential hydrocarbon reservoirs. Time plays a fundamental role in geological processes. Animations are well suited for communicating temporal change and a contribution in this direction is also given. With the techniques developed in this thesis, it becomes possible to produce a range of geological scenarios. The focus is on enabling geologists to create their subsurface models by means of sketches, to quickly communicate concepts and ideas rather than detailed information. Although the proposed techniques are simple to use and require little design effort, complex models can be realized. ",
    pdf = "pdfs/natali14thesis.pdf",
    images = "images/Natali2014Rapid0.png, images/Natali2014Sketch0.png",
    thumbnails = "images/Natali2014Rapid0.png, images/Natali2014Sketch0.png",
    url = "https://bora.uib.no/handle/1956/8570",
    project = "geoillustrator"
    }
    [Bibtex]
    @MISC {Kingman14ResilientGenome,
    author = "Pina Kingman",
    title = "Our Resilient Genome",
    howpublished = "Talk in the Forskningsdagene UNG 2014",
    month = "September",
    year = "2014",
    abstract = "Motivation: Make science research accessible to the public through film. Inspire and instil an interest in science and molecular biology. Story: The short animated film will describe the molecular pathways involved in single strand break DNA repair. Every single human cell has to repair an estimated 10,000-20,000 DNA lesion every day. DNA is constantly exposed to a variety of genotoxic events, leading to many different types of lesions. If the damage is not repaired, these lesions may lead to mutations that in turn lead to cancer and ageing. Your cells, however, have fine tuned mechanisms that maintain the integrity of our genome. This film describes one of those mechanisms. Length: About 3 minutes. Audience: We are aiming for the type of person who would attend a science film festival. We are thus assuming an interest in biology and medicine, at least a high-school degree (with the high probability of intending to continue to higher education), and a basic understanding of biology. Timeline: The film is currently in production and will be finished mid/late summer.",
    images = "images/no_thumb.png",
    thumbnails = "images/no_thumb.png",
    location = "Bergen, Norway",
    project = "physioillustration"
    }
    [DOI] [Bibtex]
    @INCOLLECTION {RobertLaramee2014HSH,
    author = "Robert Laramee and Hamish Carr and Min Chen and Helwig Hauser and Lars Linsen and Klaus Mueller and Vijay Natarajan and Harald Obermaier and Ronald Peikert and Eugene Zhang",
    title = "Future Challenges and Unsolved Problems in Multi-field Visualization",
    booktitle = "Scientific Visualization: Uncertainty, Multifield, Biomedical, and Scalable Visualization",
    publisher = "Springer",
    year = "2014",
    editor = "Min Chen and Hans Hagen and Charles D. Hansen and Christopher R. Johnson and Arie E. Kaufman",
    series = "Mathematics and Visualization",
    chapter = "19",
    pages = "205--211",
    month = sep,
    images = "images/no_thumb.png",
    thumbnails = "images/no_thumb.png",
    doi = "10.1007/978-1-4471-6497-5_19",
    keywords = "uncertainty, heuristics, problem solving",
    owner = "hausser",
    timestamp = "2015.02.06",
    isbn = "978-1-4471-6496-8",
    url = "http://www.springer.com/mathematics/computational+science+%26+engineering/book/978-1-4471-6496-8"
    }
    [PDF] [VID] [Bibtex]
    @INPROCEEDINGS {Kolesar-2014-IPT,
    author = "Ivan Kolesar and Julius Parulek and Ivan Viola and Stefan Bruckner and Anne-Kristin Stavrum and Helwig Hauser",
    title = "Illustrating Polymerization using Three-level Model Fusion",
    booktitle = "Proceedings of IEEE BioVis 2014",
    year = "2014",
    month = aug,
    abstract = "Research in cell biology is steadily contributing new knowledge about  many different aspects of physiological processes like polymerization,  both with respect to the involved molecular structures as well as  their related function. Illustrations of the spatio-temporal development  of such processes are not only used in biomedical education, but  also can serve scientists as an additional platform for in-silico  experiments. In this paper, we contribute a new, three-level modeling  approach to illustrate physiological processes from the class of  polymerization at different time scales. We integrate physical and  empirical modeling, according to which approach suits the different  involved levels of detail best, and we additionally enable a simple  form of interactive steering while the process is illustrated. We  demonstrate the suitability of our approach in the context of several  polymerization processes and report from a first evaluation with  domain experts.",
    pdf = "pdfs/Kolesar-2014-IPT.pdf",
    vid = "vids/Kolesar14Polymers.mp4",
    images = "images/Kolesar-2014-IPT.jpg",
    thumbnails = "images/Kolesar-2014-IPT.png",
    keywords = "biochemical visualization, L-system modeling, multi-agent modeling, visualization of physiology, polymerization",
    owner = "bruckner",
    project = "physioillustration",
    timestamp = "2014.12.29"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Parulek-2014-CLV,
    author = "Julius Parulek and Daniel J{\"o}nsson and Timo Ropinski and Stefan Bruckner and Anders Ynnerman and Ivan Viola",
    title = "Continuous Levels-of-Detail and Visual Abstraction for Seamless Molecular Visualization",
    journal = "Computer Graphics Forum",
    year = "2014",
    volume = "33",
    number = "6",
    pages = "276--287",
    month = sep,
    abstract = "Molecular visualization is often challenged with rendering of large  molecular structures in real time. We introduce a novel approach  that enables us to show even large protein complexes. Our method  is based on the level-of-detail concept, where we exploit three different  abstractions combined in one visualization. Firstly, molecular surface  abstraction exploits three different surfaces, solvent-excluded surface  (SES), Gaussian kernels and van der Waals spheres, combined as one  surface by linear interpolation. Secondly, we introduce three shading  abstraction levels and a method for creating seamless transitions  between these representations. The SES representation with full shading  and added contours stands in focus while on the other side a sphere  representation of a cluster of atoms with constant shading and without  contours provide the context. Thirdly, we propose a hierarchical  abstraction based on a set of clusters formed on molecular atoms.  All three abstraction models are driven by one importance function  classifying the scene into the near-, mid- and far-field. Moreover,  we introduce a methodology to render the entire molecule directly  using the A-buffer technique, which further improves the performance.  The rendering performance is evaluated on series of molecules of  varying atom counts.",
    pdf = "pdfs/Parulek-2014-CLV.pdf",
    images = "images/Parulek-2014-CLV.jpg",
    thumbnails = "images/Parulek-2014-CLV.png",
    issn = "1467-8659",
    doi = "10.1111/cgf.12349",
    keywords = "level of detail algorithms, implicit surfaces, clustering, scientific visualization",
    project = "physioillustration"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Swoboda-2014-VQA,
    author = "Nicolas Swoboda and Judith Moosburner and Stefan Bruckner and Jai Y. Yu and Barry J. Dickson and Katja B{\"u}hler",
    title = "Visual and Quantitative Analysis of Higher Order Arborization Overlaps for Neural Circuit Research",
    booktitle = "Proceedings of VCBM 2014",
    year = "2014",
    pages = "107--116",
    month = sep,
    abstract = "Neuroscientists investigate neural circuits in the brain of the common  fruit fly Drosophila melanogaster to discover how complex behavior  is generated. Hypothesis building on potential connections between  individual neurons is an essential step in the discovery of circuits  that govern a specific behavior. Overlaps of arborizations of two  or more neurons indicate a potential anatomical connection, i.e.  the presence of joint synapses responsible for signal transmission  between neurons. Obviously, the number of higher order overlaps (i.e.  overlaps of three and more arborizations) increases exponentially  with the number of neurons under investigation making it almost impossible  to precompute quantitative information for all possible combinations.  Thus, existing solutions are restricted to pairwise comparison of  overlaps as they are relying on precomputed overlap quantification.  Analyzing overlaps by visual inspection of more than two arborizations  in 2D sections or in 3D is impeded by visual clutter or occlusion.  This work contributes a novel tool that complements existing methods  for potential connectivity exploration by providing for the first  time the possibility to compute and visualize higher order arborization  overlaps on the fly and to interactively explore this information  in its spatial anatomical context and on a quantitative level. Qualitative  evaluation with neuroscientists and non-expert users demonstrated  the utility and usability of the tool.",
    pdf = "pdfs/Swoboda-2014-VQA.pdf",
    images = "images/Swoboda-2014-VQA.jpg",
    thumbnails = "images/Swoboda-2014-VQA.png",
    youtube = "https://www.youtube.com/watch?v=iW2iVppPnsE",
    note = "VCBM 2014 Best Paper Honorable Mention",
    doi = "10.2312/vcbm.20141189",
    event = "VCBM 2014",
    keywords = "visual analysis, neurobiology",
    location = "Vienna, Austria"
    }
    [Bibtex]
    @MISC {Brambilla14Video,
    author = "Andrea Brambilla",
    title = "Video Visualization: An Overview",
    howpublished = "Trial lecture, University of Bergen",
    month = nov,
    year = "2014",
    abstract = "Videos are one of the most widespread media for collecting, communicating and archiving information. Nowadays, acquiring videos is a relatively straightforward process, and this explains their success in the context of entertainment, surveillance, sport events, and so on. On the other hand, watching and extracting information from a video stream is a lengthy process. Automatic techniques are only partially successful because of the intrinsic complexity of this kind of data. Video visualization is a growing research field which aims at easing the study of video data. It relies on both automatic techniques and user interaction, exploiting the best of both worlds. In this talk, I will introduce this field, focusing on its evolution from computer vision. I will discuss the main challenges and present an overview of the state-of-the-art. The talk will conclude with a discussion of the open problems and the expected future developments.",
    images = "images/Brambilla14Video.png",
    thumbnails = "images/Brambilla14Video.png",
    location = "Bergen, Norway",
    pres = "pdfs/Brambilla14Video.pptx"
    }
    [PDF] [DOI] [Bibtex]
    @INCOLLECTION {Amirkhanov-2014-HSH,
    author = "Artem Amirkhanov and Stefan Bruckner and Christoph Heinzl and Meister Eduard Gr{\"o}ller",
    title = "The Haunted Swamps of Heuristics: Uncertainty in Problem Solving",
    booktitle = "Scientific Visualization: Uncertainty, Multifield, Biomedical, and Scalable Visualization",
    publisher = "Springer",
    year = "2014",
    editor = "Min Chen and Hans Hagen and Charles D. Hansen and Christopher R. Johnson and Arie E. Kaufman",
    series = "Mathematics and Visualization",
    chapter = "5",
    pages = "51--60",
    month = sep,
    abstract = "In scientific visualization the key task of research is the provision  of insight into a problem. Finding the solution to a problem may  be seen as finding a path through some rugged terrain which contains  mountains, chasms, swamps, and few flatlands. This path - an algorithm  discovered by the researcher - helps users to easily move around  this unknown area. If this way is a wide road paved with stones it  will be used for a long time by many travelers. However, a narrow  footpath leading through deep forests and deadly swamps will attract  only a few adventure seekers. There are many different paths with  different levels of comfort, length, and stability, which are uncertain  during the research process. Finding a systematic way to deal with  this uncertainty can greatly assist the search for a safe path which  is in our case the development of a suitable visualization algorithm  for a specific problem. In this work we will analyze the sources  of uncertainty in heuristically solving visualization problems and  will propose directions to handle these uncertainties.",
    pdf = "pdfs/Amirkhanov-2014-HSH.pdf",
    images = "images/Amirkhanov-2014-HSH.jpg",
    thumbnails = "images/Amirkhanov-2014-HSH.png",
    doi = "10.1007/978-1-4471-6497-5_5",
    keywords = "uncertainty, heuristics, problem solving",
    owner = "bruckner",
    timestamp = "2014.12.30",
    url = "http://www.springer.com/mathematics/computational+science+%26+engineering/book/978-1-4471-6496-8"
    }
    [DOI] [Bibtex]
    @INCOLLECTION {turkay2014computationally,
    author = "Turkay, Cagatay and Jeanquartier, Fleur and Holzinger, Andreas and Hauser, Helwig",
    title = "On computationally-enhanced visual analysis of heterogeneous data and its application in biomedical informatics",
    booktitle = "Interactive Knowledge Discovery and Data Mining in Biomedical Informatics",
    publisher = "Springer",
    year = "2014",
    pages = "117--140",
    abstract = "With the advance of new data acquisition and generation technologies, the biomedical domain is becoming increasingly data-driven. Thus, understanding the information in large and complex data sets has been in the focus of several research fields such as statistics, data mining, machine learning, and visualization. While the first three fields predominantly rely on computational power, visualization relies mainly on human perceptual and cognitive capabilities for extracting information. Data visualization, similar to Human–Computer Interaction, attempts an appropriate interaction between human and data to interactively exploit data sets. Specifically within the analysis of complex data sets, visualization researchers have integrated computational methods to enhance the interactive processes. In this state-of-the-art report, we investigate how such an integration is carried out. We study the related literature with respect to the underlying analytical tasks and methods of integration. In addition, we focus on how such methods are applied to the biomedical domain and present a concise overview within our taxonomy. Finally, we discuss some open problems and future challenges.",
    images = "images/img_Page_12_Image_0001.jpg, images/img_Page_12_Image_0002.jpg, images/img_Page_12_Image_0003.jpg",
    thumbnails = "images/img_Page_12_Image_0001.jpg",
    doi = "10.1007/978-3-662-43968-5_7"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Sedlmair-2014-VPS,
    author = "Michael Sedlmair and Christoph Heinzl and Stefan Bruckner and Harald Piringer and Torsten M{\"o}ller",
    title = "Visual Parameter Space Analysis: A Conceptual Framework",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2014",
    volume = "20",
    number = "12",
    pages = "2161--2170",
    month = dec,
    abstract = "Various case studies in different application domains have shown the  great potential of visual parameter space analysis to support validating  and using simulation models. In order to guide and systematize research  endeavors in this area, we provide a conceptual framework for visual  parameter space analysis problems. The framework is based on our  own experience and a structured analysis of the visualization literature.  It contains three major components: (1) a data flow model that helps  to abstractly describe visual parameter space analysis problems independent  of their application domain; (2) a set of four navigation strategies  of how parameter space analysis can be supported by visualization  tools; and (3) a characterization of six analysis tasks. Based on  our framework, we analyze and classify the current body of literature,  and identify three open research gaps in visual parameter space analysis.  The framework and its discussion are meant to support visualization  designers and researchers in characterizing parameter space analysis  problems and to guide their design and evaluation processes.",
    pdf = "pdfs/Sedlmair-2014-VPS.pdf",
    images = "images/Sedlmair-2014-VPS.jpg",
    thumbnails = "images/Sedlmair-2014-VPS.png",
    doi = "10.1109/TVCG.2014.2346321",
    event = "IEEE VIS 2014",
    keywords = "parameter space analysis, input-output model, simulation, task characterization, literature analysis",
    location = "Paris, France"
    }
    [PDF] [Bibtex]
    @MISC {Hauser2014Dagstuhl,
    author = "Helwig Hauser",
    title = "Semi-abstract visualization of rich scientific data",
    howpublished = "Invited talk at the Dagstuhl 14231 Seminar on Scientific Visualization, Dagstuhl, Germany",
    month = jun,
    year = "2014",
    abstract = "Invited talk at the Dagstuhl 14231 Seminar on Scientific Visualization, Dagstuhl, Germany",
    pdf = "pdfs/2014-06-06-Dagstuhl-SemiAbstractSciVis-print2up.pdf",
    images = "images/2014-06-06-Dagstuhl-SemiAbstractSciVis-print2up_Image_0002(3).jpg, images/2014-06-06-Dagstuhl-SemiAbstractSciVis-print2up_Image_0002(2).jpg, images/2014-06-06-Dagstuhl-SemiAbstractSciVis-print2up_Image_0002.jpg, images/2014-06-06-Dagstuhl-SemiAbstractSciVis-print2up_Image_0008.jpg",
    thumbnails = "images/2014-06-06-Dagstuhl-SemiAbstractSciVis-print2up_Image_0002(3).jpg",
    location = "Dagstuhl, Germany"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Rautek-2014-VSI,
    author = "Peter Rautek and Stefan Bruckner and Meister Eduard Gr{\"o}ller and Markus Hadwiger",
    title = "{ViSlang}: A System for Interpreted Domain-Specific Languages for Scientific Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2014",
    volume = "20",
    number = "12",
    pages = "2388--2396",
    month = dec,
    abstract = "Researchers from many domains use scientific visualization in their  daily practice. Existing implementations of algorithms usually come  with a graphical user interface (high-level interface), or as software  library or source code (low-level interface). In this paper we present  a system that integrates domain-specific languages (DSLs) and facilitates  the creation of new DSLs. DSLs provide an effective interface for  domain scientists avoiding the difficulties involved with low-level  interfaces and at the same time offering more flexibility than high-level  interfaces. We describe the design and implementation of ViSlang,  an interpreted language specifically tailored for scientific visualization.  A major contribution of our design is the extensibility of the ViSlang  language. Novel DSLs that are tailored to the problems of the domain  can be created and integrated into ViSlang. We show that our approach  can be added to existing user interfaces to increase the flexibility  for expert users on demand, but at the same time does not interfere  with the user experience of novice users. To demonstrate the flexibility  of our approach we present new DSLs for volume processing, querying  and visualization. We report the implementation effort for new DSLs  and compare our approach with Matlab and Python implementations in  terms of run-time performance.",
    pdf = "pdfs/Rautek-2014-VSI.pdf",
    images = "images/Rautek-2014-VSI.jpg",
    thumbnails = "images/Rautek-2014-VSI.png",
    youtube = "https://www.youtube.com/watch?v=DbWazwyMRNw",
    doi = "10.1109/TVCG.2014.2346318",
    event = "IEEE VIS 2014",
    keywords = "domain-specific languages, volume visualization, volume visualization framework",
    location = "Paris, France",
    url = "http://vcc.kaust.edu.sa/Pages/Pub-ViSlang-Sys-Int-Dom-Spe-Lang-SC.aspx"
    }
    [PDF] [DOI] [Bibtex]
    @INCOLLECTION {Pfister-2014-VIC,
    author = "Hanspeter Pfister and Verena Kaynig and Charl P. Botha and Stefan Bruckner and Vincent J. Dercksen and Hans-Christian Hege and Jos B.T.M. Roerdink",
    title = "Visualization in Connectomics",
    booktitle = "Scientific Visualization: Uncertainty, Multifield, Biomedical, and Scalable Visualization",
    publisher = "Springer",
    year = "2014",
    editor = "Min Chen and Hans Hagen and Charles D. Hansen and Christopher R. Johnson and Arie E. Kaufman",
    series = "Mathematics and Visualization",
    chapter = "21",
    pages = "221--245",
    month = sep,
    abstract = "Connectomics is a branch of neuroscience that attempts to create a  connectome, i.e., a completemap of the neuronal system and all connections  between neuronal structures. This representation can be used to understand  how functional brain states emerge from their underlying anatomical  structures and how dysfunction and neuronal diseases arise. We review  the current state-of-the-art of visualization and image processing  techniques in the field of connectomics and describe a number of  challenges. After a brief summary of the biological background and  an overview of relevant imaging modalities, we review current techniques  to extract connectivit",
    pdf = "pdfs/Pfister-2014-VIC.pdf",
    images = "images/Pfister-2014-VIC.jpg",
    thumbnails = "images/Pfister-2014-VIC.png",
    doi = "10.1007/978-1-4471-6497-5_21",
    keywords = "connectomics, neuroscience, visualization, imaging",
    owner = "bruckner",
    timestamp = "2014.12.30",
    url = "http://www.springer.com/mathematics/computational+science+%26+engineering/book/978-1-4471-6496-8"
    }
    [DOI] [Bibtex]
    @ARTICLE {turkay2014characterizing,
    author = "Turkay, Cagatay and Lex, Alexander and Streit, Marc and Pfister, Hanspeter and Hauser, Helwig",
    title = "Characterizing Cancer Subtypes using Dual Analysis in {Caleydo StratomeX}",
    journal = "IEEE Computer Graphics and Applications",
    year = "2014",
    volume = "34",
    number = "2",
    pages = "38--47",
    abstract = "Dual analysis uses statistics to describe both the dimensions and rows of a high-dimensional dataset. Researchers have integrated it into StratomeX, a Caleydo view for cancer subtype analysis. In addition, significant-difference plots show the elements of a candidate subtype that differ significantly from other subtypes, thus letting analysts characterize subtypes. Analysts can also investigate how data samples relate to their assigned subtype and other groups. This approach lets them create well-defined subtypes based on statistical properties. Three case studies demonstrate the approach's utility, showing how it reproduced findings from a published subtype characterization.",
    images = "images/img_Page_08_Image_0001.jpg, images/img_Page_04_Image_0001.jpg",
    thumbnails = "images/img_Page_08_Image_0001.jpg",
    publisher = "IEEE",
    doi = "10.1109/MCG.2014.1"
    }
    [Bibtex]
    @MISC {Brambilla14Visualizing,
    author = "Andrea Brambilla",
    title = "Visualizing the Long-term Behavior of 3D Fluid Flows",
    howpublished = "Presentation at the Konversatorium, Institute for Computer Graphics and Algorithm, TU Wien",
    month = apr,
    year = "2014",
    abstract = "The study of the long-term behavior of 3D fluid flows can provide useful insights into transport phenomena, which are of central importance in many fields, such as medicine and engineering. Such a study is normally based on the integration of the flow field. The resulting integral structures are indeed highly expressive, but their direct visualization often suffers from cluttering and occlusion issues. In this talk I will present my recent (and future) work addressing this problem. Three projects will be covered: (1) A seeding strategy for families of integral surfaces, which captures the predominant aspects of the long-term flow behavior. (2) A technique for easing the investigation and comparison of surface families, based on a 2D reformation process. (3) The work I am carrying out here at the TU, aimed at quantifying transport phenomena and depicting them using Sankey diagrams.",
    images = "images/Brambilla14Visualizing.png",
    thumbnails = "images/Brambilla14Visualizing.png",
    location = "Wien, Austria",
    url = "http://www.cg.tuwien.ac.at/courses/konversatorium/2014-04-25",
    pres = "pdfs/Brambilla14Visualizing.pdf"
    }
    [DOI] [Bibtex]
    @incollection{peikert2014comparison,
      author     = {Ronald Peikert and Armin Pobitzer and Filip Sadlo and Benjamin Schindler},
      title      = {A Comparison of Finite-Time and Finite-Size Lyapunov Exponents},
      booktitle  = {Topological Methods in Data Analysis and Visualization III},
      publisher  = {Springer International Publishing},
      year       = {2014},
      editor     = {Peer-Timo Bremer and Ingrid Hotz and Valerio Pascucci and Ronald Peikert},
      series     = {Mathematics and Visualization},
      pages      = {187--200},
      images     = {images/peikert2014comparison.png},
      thumbnails = {images/peikert2014comparison_thumb.png},
      doi        = {10.1007/978-3-319-04099-8_12},
      url        = {http://dx.doi.org/10.1007/978-3-319-04099-8_12},
      isbn       = {978-3-319-04098-1}
    }
    [Bibtex]
    @ARTICLE {lemuzic2014ivm,
    author = "Mathieu Le Muzic and Julius Parulek and Anne-Kristin Stavrum and Ivan Viola",
    title = "Illustrative Visualization of Molecular Reactions using Omniscient Intelligence and Passive Agents",
    journal = "Computer Graphics Forum",
    year = "2014",
    volume = "33",
    number = "3",
    pages = "141--150",
    month = jun,
    abstract = "In this paper we propose a new type of a particle systems, tailored for illustrative visualization purposes, in particular for visualizing molecular reactions in biological networks. Previous visualizations of biochemical processes were exploiting the results of agent-based modeling. Such modeling aims at reproducing accurately the stochastic nature of molecular interactions. However, it is impossible to expect events of interest happening at a certain time and location, which is impractical for storytelling. To obtain the means of controlling molecular interactions, we propose to govern passive agents with an omniscient intelligence, instead of giving to the agents the freedom of initiating reaction autonomously. This makes it possible to generate illustrative animated stories that communicate the functioning of the molecular machinery. The rendering performance delivers for interactive framerates of massive amounts of data, based on the dynamic tessellation capabilities of modern graphics cards. Finally, we report an informal expert feedback we obtained from the potential users.",
    images = "images/Lemuzic14Illustrative.png, images/Lemuzic14Illustrative.png",
    thumbnails = "images/Lemuzic14Illustrative0_thumb.png, images/Lemuzic14Illustrative1_thumb.png",
    event = "EuroVis",
    url = "http://www.cg.tuwien.ac.at/research/publications/2014/lemuzic-2014-ivm/",
    project = "physioillustration"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Angelelli-2014-LUP,
    author = "Paolo Angelelli and Sten Roar Snare and Siri Ann Nyrnes and Stefan Bruckner and Helwig Hauser and Lasse L{\o}vstakken",
    title = "Live Ultrasound-based Particle Visualization of Blood Flow in the Heart",
    booktitle = "Proceedings of SCCG 2014",
    year = "2014",
    pages = "42--49",
    month = may,
    abstract = "We introduce an integrated method for the acquisition, processing  and visualization of live, in-vivo blood flow in the heart. The method  is based on ultrasound imaging, using a plane wave acquisition  protocol, which produces high frame rate ensemble data that are efficiently  processed to extract directional flow information not previously  available based on conventional Doppler imaging. These data are then  visualized using a tailored pathlet-based visualization approach,  to convey the slice-contained dynamic movement of the blood in the  heart. This is especially important when imaging patients with possible  congenital heart diseases, who typically exhibit complex flow patterns  that are challenging to interpret. With this approach, it now is  possible for the first time to achieve a real-time integration-based  visualization of 2D blood flow aspects based on ultrasonic imaging.  We demonstrate our solution in the context of selected cases of congenital  heart diseases in neonates, showing how our technique allows for  a more accurate and intuitive visualization of shunt flow and vortices.",
    pdf = "pdfs/Angelelli-2014-LUP.pdf",
    images = "images/Angelelli-2014-LUP.jpg",
    thumbnails = "images/Angelelli-2014-LUP.png",
    doi = "10.1145/2643188.2643200",
    keywords = "ultrasound medical visualization, real-time visualization, blood flow visualization",
    url = "http://dx.doi.org/10.1145/2643188.2643200"
    }
    [PDF] [Bibtex]
    @MISC {Hauser2014HiB,
    author = "Helwig Hauser",
    title = "Interactive Visual Analysis of Rich Scientific Data",
    howpublished = "Invited talk at the Bergen University College in Bergen, Norway",
    month = nov,
    year = "2014",
    abstract = "Invited talk at the Bergen University College in Bergen, Norway",
    pdf = "pdfs/2014-11-25-BergenHIB-InvitedTalk-print2up-web.pdf",
    images = "images/2014-11-25-BergenHIB-InvitedTalk-print2up-web_Image_0002.jpg, images/2014-11-25-BergenHIB-InvitedTalk-print2up-web_Image_0003.jpg, images/2014-11-25-BergenHIB-InvitedTalk-print2up-web_Image_0005.jpg, images/2014-11-25-BergenHIB-InvitedTalk-print2up-web_Image_0002(2).jpg, images/2014-11-25-BergenHIB-InvitedTalk-print2up-web_Image_0009.jpg, images/2014-11-25-BergenHIB-InvitedTalk-print2up-web_Image_0002(3).jpg, images/2014-11-25-BergenHIB-InvitedTalk-print2up-web_Image_0009(2).jpg, images/2014-11-25-BergenHIB-InvitedTalk-print2up-web_Image_0003(2).jpg, images/2014-11-25-BergenHIB-InvitedTalk-print2u--web_Image_0007.jpg, images/2014-11-25-BergenHIB-InvitedTalk-print2up-web_Image_0005(2).jpg",
    thumbnails = "images/2014-11-25-BergenHIB-InvitedTalk-print2up-web_Image_0002.jpg",
    location = "Bergen, Norway"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Kolesar-2014-IIP,
    author = "Ivan Kolesar and Julius Parulek and Ivan Viola and Stefan Bruckner and Anne-Kristin Stavrum and Helwig Hauser",
    title = "Interactively Illustrating Polymerization using Three-level Model Fusion",
    journal = "BMC Bioinformatics",
    year = "2014",
    volume = "15",
    pages = "345",
    month = oct,
    abstract = "Research in cell biology is steadily contributing new knowledge about  many aspects of physiological processes, both with respect to the  involved molecular structures as well as their related function.  Illustrations of the spatio-temporal development of such processes  are not only used in biomedical education, but also can serve scientists  as an additional platform for in-silico experiments. Results In this  paper, we contribute a new, three-level modeling approach to illustrate  physiological processes from the class of polymerization at different  time scales. We integrate physical and empirical modeling, according  to which approach best suits the different involved levels of detail,  and we additionally enable a form of interactive steering, while  the process is illustrated. We demonstrate the suitability of our  approach in the context of several polymerization processes and report  from a first evaluation with domain experts. Conclusion We conclude  that our approach provides a new, hybrid modeling approach for illustrating  the process of emergence in physiology, embedded in a densely filled  environment. Our approach of a complementary fusion of three systems  combines the strong points from the different modeling approaches  and is capable to bridge different spatial and temporal scales.",
    pdf = "pdfs/Kolesar-2014-IIP.pdf",
    images = "images/Kolesar-2014-IIP.jpg",
    thumbnails = "images/Kolesar-2014-IIP.png",
    youtube = "https://www.youtube.com/watch?v=iMl5nDicmhg",
    doi = "10.1186/1471-2105-15-345",
    keywords = "biochemical visualization, L-system modeling, multi-agent modeling, visualization of physiology, polymerization",
    owner = "bruckner",
    project = "physioillustration",
    timestamp = "2014.12.29",
    url = "http://www.ii.uib.no/vis/projects/physioillustration/research/interactive-molecular-illustration.html"
    }
    [Bibtex]
    @MISC {Kingman14PARP1,
    author = "Pina Kingman and Anne-Kristin Stavrum and Ivan Viola and Helwig Hauser",
    title = "{PARP-1} Binds Damaged {DNA}",
    howpublished = "Poster presented at the VizBi conference 2014",
    month = mar,
    year = "2014",
    abstract = "This image is an excerpt from the animation entitled Negative charge and poly(ADP)-ribosylation: a scientific animation. The molecules where uploaded from the Protein Data Bank using the Embedded Python Molecular Viewer plug-in for Autodesk Maya (Johnson et al. 2001; Sanner et al. 1996). The scene was rendered using Maxon Cinema 4D and composited in Adobe Photoshop. Subsurface Scattering was chosen to give the molecules a translucent appearance. Two PARP-1 molecules are shown bound to damaged DNA (Coquelle and Glover 2012). This work has been carried out within the PhysioIllustration project (funded by NFR, project #218023).",
    images = "images/Kingman13PARP1.jpg",
    thumbnails = "images/Kingman13PARP1_thumb.jpg",
    location = "Heidelberg, Germany",
    project = "physioillustration"
    }
    [Bibtex]
    @ARTICLE {matkovic2014visual,
    author = "Matkovic, Kresimir and Gracanin, Denis and Splechtna, Rainer and Jelovic, Mario and Stehno, Benedikt and Hauser, Helwig and Purgathofer, Werner",
    title = "Visual analytics for complex engineering systems: Hybrid visual steering of simulation ensembles",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2014",
    volume = "20",
    number = "12",
    pages = "1803--1812",
    abstract = "In this paper we propose a novel approach to hybrid visual steering of simulation ensembles. A simulation ensemble is a collection of simulation runs of the same simulation model using different sets of control parameters. Complex engineering systems have very large parameter spaces so a naive sampling can result in prohibitively large simulation ensembles. Interactive steering of simulation ensembles provides the means to select relevant points in a multi-dimensional parameter space (design of experiment). Interactive steering efficiently reduces the number of simulation runs needed by coupling simulation and visualization and allowing a user to request new simulations on the fly. As system complexity grows, a pure interactive solution is not always sufficient. The new approach of hybrid steering combines interactive visual steering with automatic optimization. Hybrid steering allows a domain expert to interactively (in a visualization) select data points in an iterative manner, approximate the values in a continuous region of the simulation space (by regression) and automatically find the ``best'' points in this continuous region based on the specified constraints and objectives (by optimization). We argue that with the full spectrum of optimization options, the steering process can be improved substantially. We describe an integrated system consisting of a simulation, a visualization, and an optimization component. We also describe typical tasks and propose an interactive analysis workflow for complex engineering systems. We demonstrate our approach on a case study from automotive industry, the optimization of a hydraulic circuit in a high pressure common rail Diesel injection system.",
    images = "images/matkovic2014visual1.jpg, images/matkovic2014visual2.jpg",
    thumbnails = "images/matkovic2014visual1.jpg",
    publisher = "IEEE"
    }
    [Bibtex]
    @MISC {Brambilla14Visibility,
    author = "Andrea Brambilla",
    title = "Visibility Management in Integration-based Flow Visualization",
    howpublished = "Talk at the Department of Informatics, Systems and Communication, University of Milano - Bicocca",
    month = jun,
    year = "2014",
    abstract = "The analysis of flow phenomena holds an important role in several fields, such as engineering and medicine. Flow visualization techniques aim at easing the investigation process by depicting the flow data through graphical entities. Specifically, integration-based methods employ lines and surfaces in order to visualize the long-term behavior of fluid particles. In this talk, I will give a brief introduction to integration-based flow visualization, present its advantages and discuss its main limitations, i.e., cluttering and occlusion. I will then present my recent work addressing these limitations. Two projects will be described: (1) A selection strategy for integral surfaces, which aims at detecting a small number of highly informative integral surfaces. (2) A technique for easing the investigation and comparison of surface families, based on a 2D reformation process.",
    images = "images/Brambilla14Visualizing.png",
    thumbnails = "images/Brambilla14Visualizing.png",
    location = "Milan, Italy",
    pres = "pdfs/Brambilla14Visibility.pdf"
    }
    [DOI] [VID] [Bibtex]
    @ARTICLE {Angelelli14Interactive,
    author = "Paolo Angelelli and Steffen Oeltze and Cagatay Turkay and Judit Haasz and Erlend Hodneland and Arvid Lundervold and Astri Johansen Lundervold and Bernhard Preim and Helwig Hauser",
    title = "Interactive Visual Analysis of Heterogeneous Cohort Study Data",
    journal = "IEEE Computer Graphics and Applications",
    year = "2014",
    volume = "PP",
    number = "99",
    pages = "1--1",
    abstract = "Cohort studies are used in medicine to enable the study of medical hypotheses in large samples. Often, a large amount of heterogeneous data is acquired from many subjects. The analysis is usually hypothesis-driven, i.e., a specific subset of such data is studied to confirm or reject specific hypotheses. In this paper, we demonstrate how we enable the interactive visual exploration and analysis of such data, helping with the generation of new hypotheses and contributing to the process of validating them. We propose a data-cube based model which allows to handle partially overlapping data subsets during the interactive visualization. This model enables the seamless integration of the heterogeneous data, as well as the linking of spatial and non-spatial views on these data. We implemented this model in an application prototype, and used it to analyze data acquired in the context of a cohort study on cognitive aging. In this paper we present a case-study analysis of selected aspects of brain connectivity by using a prototype implementation of the presented model, to demonstrate its potential and flexibility.",
    vid = "vids/angelelli14CohortExplorer.wmv",
    images = "images/angelelli14Cohort.png",
    thumbnails = "images/angelelli14Cohort.png",
    doi = "10.1109/MCG.2014.40",
    url = "http://dx.doi.org/10.1109/MCG.2014.40"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Mindek-2014-MSS,
    author = "Peter Mindek and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Managing Spatial Selections with Contextual Snapshots",
    journal = "Computer Graphics Forum",
    year = "2014",
    volume = "33",
    number = "8",
    pages = "132--144",
    month = dec,
    abstract = "Spatial selections are a ubiquitous concept in visualization. By localizing  particular features, they can be analysed and compared in different  views. However, the semantics of such selections often depend on  specific parameter settings and it can be difficult to reconstruct  them without additional information. In this paper, we present the  concept of contextual snapshots as an effective means for managing  spatial selections in visualized data. The selections are automatically  associated with the context in which they have been created. Contextual  snapshots can also be used as the basis for interactive integrated  and linked views, which enable in-place investigation and comparison  of multiple visual representations of data. Our approach is implemented  as a flexible toolkit with well-defined interfaces for integration  into existing systems. We demonstrate the power and generality of  our techniques by applying them to several distinct scenarios such  as the visualization of simulation data, the analysis of historical  documents and the display of anatomical data.",
    pdf = "pdfs/Mindek-2014-MSS.pdf",
    images = "images/Mindek-2014-MSS.jpg",
    thumbnails = "images/Mindek-2014-MSS.png",
    youtube = "https://www.youtube.com/watch?v=rxEf-Okp8Xo",
    doi = "10.1111/cgf.12406",
    keywords = "interaction, visual analytics, spatial selections, annotations",
    url = "http://www.cg.tuwien.ac.at/downloads/csl/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Solteszova-2014-VPS,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and {\AA}smund Birkeland and Ivan Viola and Stefan Bruckner",
    title = "Visibility-Driven Processing of Streaming Volume Data",
    booktitle = "Proceedings of VCBM 2014",
    year = "2014",
    pages = "127--136",
    month = sep,
    abstract = "In real-time volume data acquisition, such as 4D ultrasound, the raw  data is challenging to visualize directly without additional processing.  Noise removal and feature detection are common operations, but many  methods are too costly to compute over the whole volume when dealing  with live streamed data. In this paper, we propose a visibility-driven  processing scheme for handling costly on-the-fly processing of volumetric  data in real-time. In contrast to the traditional visualization pipeline,  our scheme utilizes a fast computation of the potentially visible  subset of voxels which significantly reduces the amount of data required  to process. As filtering operations modify the data values which  may affect their visibility, our method for visibility-mask generation  ensures that the set of elements deemed visible does not change after  processing. Our approach also exploits the visibility information  for the storage of intermediate values when multiple operations are  performed in sequence, and can therefore significantly reduce the  memory overhead of longer filter pipelines. We provide a thorough  technical evaluation of the approach and demonstrate it on several  typical scenarios where on-the-fly processing is required.",
    pdf = "pdfs/Solteszova-2014-VPS.pdf",
    images = "images/Solteszova-2014-VPS.jpg",
    thumbnails = "images/Solteszova-2014-VPS.png",
    youtube = "https://www.youtube.com/watch?v=WJgc6BX1qig",
    note = "VCBM 2014 Best Paper Award",
    doi = "10.2312/vcbm.20141198",
    event = "VCBM 2014",
    keywords = "ultrasound, visibility-driven processing, filtering",
    location = "Vienna, Austria"
    }
    [DOI] [Bibtex]
    @ARTICLE {turkay2014attribute,
    author = "Turkay, Cagatay and Slingsby, Aidan and Hauser, Helwig and Wood, Jo and Dykes, Jason",
    title = "Attribute signatures: Dynamic visual summaries for analyzing multivariate geographical data",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2014",
    volume = "20",
    number = "12",
    pages = "2033--2042",
    abstract = "The visual analysis of geographically referenced datasets with a large number of attributes is challenging due to the fact that the characteristics of the attributes are highly dependent upon the locations at which they are focussed, and the scale and time at which they are measured. Specialized interactive visual methods are required to help analysts in understanding the characteristics of the attributes when these multiple aspects are considered concurrently. Here, we develop attribute signatures-interactively crafted graphics that show the geographic variability of statistics of attributes through which the extent of dependency between the attributes and geography can be visually explored. We compute a number of statistical measures, which can also account for variations in time and scale, and use them as a basis for our visualizations. We then employ different graphical configurations to show and compare both continuous and discrete variation of location and scale. Our methods allow variation in multiple statistical summaries of multiple attributes to be considered concurrently and geographically, as evidenced by examples in which the census geography of London and the wider UK are explored.",
    images = "images/img_Page_06_Image_0003.jpg, images/img_Page_01_Image_0002.jpg, images/img_Page_01_Image_0005.jpg, images/img_Page_07_Image_0003.jpg",
    thumbnails = "images/img_Page_06_Image_0003.jpg",
    publisher = "IEEE",
    doi = "10.1109/TVCG.2014.2346265"
    }

2013

    [DOI] [Bibtex]
    @ARTICLE {Lidal13Geological,
    author = "Endre M. Lidal and Mattia Natali and Daniel Patel and Helwig Hauser and Ivan Viola",
    title = "Geological storytelling",
    journal = "Computers \& Graphics",
    year = "2013",
    volume = "37",
    number = "5",
    pages = "445--459",
    abstract = "Developing structural geological models from exploratory subsea imaging is difficult and an ill-posed process. The structural geological processes that take place in the subsurface are both complex and time-dependent. We present Geological Storytelling, a novel graphical system for performing rapid and expressive geomodeling. Geologists can convey geological stories that externalize both their model and the reasoning process behind it through our simple, yet expressive sketch-based, flip-over canvases. This rapid modeling interface makes it easy to construct a large variety of geological stories, and our story tree concept facilitates easy management and the exploration of these alternatives. The stories are then animated and the geologists can examine and compare them to identify the most plausible models. Finally, the geological stories can be presented as illustrative animations of automatically synthesized 3D models, which efficiently communicate the complex geological evolution to non-experts and decision makers. Geological storytelling provides a complete pipeline from the ideas and knowledge in the mind of the geologist, through externalized artifacts specialized for discussion and knowledge dissemination among peer-experts, to automatically rendered illustrative 3D animations for communication to lay audience. We have developed geological storytelling in collaboration with domain experts that work with the modeling challenges on a daily basis. For evaluation, we have developed a geological storytelling prototype and presented it to experts and academics from the geosciences. In their feedback, they acknowledge that the rapid and expressive sketching of stories can make them explore more alternatives and that the 3D illustrative animations assist in communicating their models.",
    images = "images/Lidal13Geological01.jpg, images/Lidal13Geological02.png",
    thumbnails = "images/Lidal13Geological01.jpg, images/Lidal13Geological02.png",
    issn = "0097-8493",
    doi = "10.1016/j.cag.2013.01.010",
    url = "http://www.sciencedirect.com/science/article/pii/S0097849313000125",
    keywords = "Sketch-based modeling; Externalization of mental processes; Storytelling; 3D model synthesis; Animation; Alternatives exploration; Geology; Structural geological models",
    project = "geoillustrator"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Natali13Modeling,
    author = "Mattia Natali and Endre M. Lidal and Julius Parulek and Ivan Viola and Daniel Patel",
    title = "Modeling Terrains and Subsurface Geology",
    booktitle = "EuroGraphics 2013 State of the Art Reports (STARs)",
    year = "2013",
    pages = "155--173",
    doi = "10.2312/conf/EG2013/stars/155-173",
    url = "http://diglib.eg.org/EG/DL/conf/EG2013/stars/155-173.pdf",
    location = "Girona, Spain",
    proceedings = "EuroGraphics 2013 State of the Art Reports (STARs)",
    abstract = "The process of creating terrain and landscape models is important in a variety of computer graphics and visualization applications, from films and computer games, via flight simulators and landscape planning, to scientific visualization and subsurface modelling. Interestingly, the modelling techniques used in this large range of application areas have started to meet in the last years. In this state-of-the-art report, we present two taxonomies of different modelling methods. Firstly we present a data oriented taxonomy, where we divide modelling into three different scenarios: the data-free, the sparse-data and the dense-data scenario. Then we present a workflow oriented taxonomy, where we divide modelling into the separate stages necessary for creating a geological model. We start the report by showing that the new trends in geological modelling are approaching the modelling methods that have been developed in computer graphics. We then give an introduction to the process of geological modelling followed by our two taxonomies with descriptions and comparisons of selected methods. Finally we discuss the challenges and trends in geological modelling.",
    pdf = "pdfs/Natali13Modeling.pdf",
    images = "images/Natali13Modeling.png",
    thumbnails = "images/Natali13Modeling.png",
    project = "geoillustrator"
    }
    [PDF] [Bibtex]
    @MISC {Hauser2013SouthCHI,
    author = "Helwig Hauser",
    title = "Integrating Interactive and Computational Analysis in Visualization",
    howpublished = "Keynote talk at SouthCHI 2013 in Maribor, Slovenia.",
    month = jun,
    year = "2013",
    abstract = "Keynote talk at SouthCHI 2013 in Maribor, Slovenia.",
    pdf = "pdfs/2013-07-02-Maribor-SouthCHI-Keynote-IVA-print2up.pdf",
    images = "images/2013-07-02-Maribor-SouthCHI-Keynote-IVA-print2up_Image_0001(6).jpg, images/2013-07-02-Maribor-SouthCHI-Keynote-IVA-print2up_Image_0001(5).jpg, images/2013-07-02-Maribor-SouthCHI-Keynote-IVA-print2up_Image_0001(4).jpg, images/2013-07-02-Maribor-SouthCHI-Keynote-IVA-print2up_Image_0001(3).jpg, images/2013-07-02-Maribor-SouthCHI-Keynote-IVA-print2up_Image_0001(2).jpg, images/2013-07-02-Maribor-SouthCHI-Keynote-IVA-print2up_Image_0001.jpg, images/2013-07-02-Maribor-SouthCHI-Keynote-IVA-print2up_Image_0003.jpg",
    thumbnails = "images/2013-07-02-Maribor-SouthCHI-Keynote-IVA-print2up_Image_0001(6).jpg"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Mindek-2013-CSE,
    author = "Peter Mindek and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Contextual Snapshots: Enriched Visualization with Interactive Spatial Annotations",
    booktitle = "Proceedings of SCCG 2013",
    year = "2013",
    pages = "59--66",
    month = may,
    abstract = "Spatial selections are a ubiquitous concept in visualization. By localizing  particular features, they can be analyzed and compared in different  views. However, the semantics of such selections are often dependent  on other parameter settings and it can be difficult to reconstruct  them without additional information. In this paper, we present the  concept of contextual snapshots as an effective means for managing  spatial selections in visualized data. The selections are automatically  associated with the context in which they have been created. Contextual  snapshots can be also used as the basis for interactive integrated  and linked views, which enable in-place investigation and comparison  of multiple visual representations of data. Our approach is implemented  as a flexible toolkit with welldefined interfaces for integration  into existing systems. We demonstrate the power and generality of  our techniques by applying them to several distinct scenarios such  as the visualization of simulation data and the analysis of historical  documents.",
    pdf = "pdfs/Mindek-2013-CSE.pdf",
    images = "images/Mindek-2013-CSE.jpg",
    thumbnails = "images/Mindek-2013-CSE.png",
    youtube = "https://www.youtube.com/watch?v=djuqJgixUCs",
    note = "SCCG 2013 Best Paper Award",
    doi = "10.1145/2508244.2508251",
    keywords = "spatial selections, annotations, linked views, provenance",
    location = "Smolenice, Slovakia",
    url = "http://www.cg.tuwien.ac.at/research/publications/2013/mindek-2013-csl/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Auzinger-2013-VVC,
    author = "Thomas Auzinger and Gabriel Mistelbauer and Ivan Baclija and R{\"u}diger Schernthaner and Arnold K{\"o}chl and Michael Wimmer and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Vessel Visualization using Curved Surface Reformation",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2013",
    volume = "19",
    number = "12",
    pages = "2858--2867",
    month = dec,
    abstract = "Visualizations of vascular structures are frequently used in radiological  investigations to detect and analyze vascular diseases. Obstructions  of the blood flow through a vessel are one of the main interests  of physicians, and several methods have been proposed to aid the  visual assessment of calcifications on vessel walls. Curved Planar  Reformation (CPR) is a wide-spread method that is designed for peripheral  arteries which exhibit one dominant direction. To analyze the lumen  of arbitrarily oriented vessels, Centerline Reformation (CR) has  been proposed. Both methods project the vascular structures into  2D image space in order to reconstruct the vessel lumen. In this  paper, we propose Curved Surface Reformation (CSR), a technique that  computes the vessel lumen fully in 3D. This offers high-quality interactive  visualizations of vessel lumina and does not suffer from problems  of earlier methods such as ambiguous visibility cues or premature  discretization of centerline data. Our method maintains exact visibility  information until the final query of the 3D lumina data. We also  present feedback from several domain experts.",
    pdf = "pdfs/Auzinger-2013-VVC.pdf",
    images = "images/Auzinger-2013-VVC.jpg",
    thumbnails = "images/Auzinger-2013-VVC.png",
    youtube = "https://www.youtube.com/watch?v=rESIFaO_-Gs",
    doi = "10.1109/TVCG.2013.215",
    event = "IEEE VIS 2013",
    keywords = "volume rendering, reformation, vessel, surface approximation",
    url = "http://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_Mistelbauer_2013_CSR/"
    }
    [Bibtex]
    @MISC {Kingman13ScienceFilm,
    author = "Pina Kingman",
    title = "Animating Biology: The making of a science film",
    howpublished = "Presentation in the VisBio 2013",
    month = sep,
    year = "2013",
    abstract = "Biology is complicated. Understanding cellular and molecular biology is particularly difficult. Amongst the many effective communication tools at our disposal, animated film is at the forefront. Animation brings biological stories to life, and thus aids in our understanding of biological structure and function. Whether used to inform patients, to teach undergraduate biology, or to disseminate advances in research, animated film is a tool worth employing. But what goes into an animation? I will explain the step-by-step process of creating animations and show a lot of inspiring examples.",
    images = "images/no_thumb.png",
    thumbnails = "images/no_thumb.png",
    location = "Bergen, Norway",
    project = "physioillustration"
    }
    [PDF] [Bibtex]
    @INPROCEEDINGS {Parulek13Seamless,
    author = "Julius Parulek and Timo Ropinski and Ivan Viola",
    title = "Seamless Abstraction of Molecular Surfaces",
    booktitle = "Proceedings of the 29th Spring Conference on Computer Graphics",
    series = "SCCG '13",
    year = "2013",
    pages = "120--127",
    isbn = "978-80-223-3377-1",
    location = "Smolenice, Slovak Republic",
    numpages = "8",
    proceedings = "Proceedings of the 29th Spring Conference on Computer Graphics",
    abstract = "Molecular visualization is often challenged with rendering of large sequences of molecular simulations in real time. We introduce a novel approach that enables us to show even large protein complexes over time in real-time. Our method is based on the level-ofdetail concept, where we exploit three different molecular surface models, solvent excluded surface (SES), Gaussian kernels and van der Waals spheres combined in one visualization. We introduce three shading levels that correspond to their geometric counterparts and a method for creating seamless transition between these representations. The SES representation with full shading and added contours stands in focus while on the other side a sphere representation with constant shading and without contours provide the context. Moreover, we introduce a methodology to render the entire molecule directly using the A-buffer technique, which further improves the performance. The rendering performance is evaluated on series of molecules of varying atom counts. ",
    pdf = "pdfs/Parulek13Seamless.pdf",
    images = "images/Parulek13Seamless01.png, images/Parulek13Seamless02.png",
    thumbnails = "images/Parulek13Seamless01_thumb.png, images/Parulek13Seamless02.png",
    project = "physioillustration"
    }
    [Bibtex]
    @MISC {Kolesar13HumanPhysiology,
    author = "Ivan Kolesar",
    title = "Approaches for Visualizing Human Physiology",
    howpublished = "Presentation in the VisBio 2013",
    month = sep,
    year = "2013",
    abstract = "Physiology is scientific study of function in living systems. All in all, we presents several visual abstractions used to communicate physiological processes through different functional human systems in different scales from molecules to whole human body. However there are still several challenges for visualizing multi-scale physiological processes.",
    images = "images/no_thumb.png",
    thumbnails = "images/Kolesar13HumanPhysiology.jpg",
    location = "Bergen, Norway",
    project = "physioillustration"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Brambilla13Integrated,
    author = "Andrea Brambilla and {\O}yvind Andreassen and Helwig Hauser",
    title = "Integrated Multi-aspect visualization of 3D Fluid Flows",
    booktitle = "Proc. of VMV 2013: Vision, Modeling \& Visualization",
    year = "2013",
    pages = "1--9",
    month = sep,
    abstract = "The motion of a fluid is affected by several intertwined flow aspects. Analyzing one aspect at a time can only yield partial information about the flow behavior. More details can be revealed by studying their interactions. Our approach enables the investigation of these interactions by simultaneously visualizing meaningful flow aspects, such as swirling motion and shear strain. We adopt the notions of relevance and coherency. Relevance identifies locations where a certain flow aspect is deemed particularly important. The related piece of information is visualized by a specific visual entity, placed at the corresponding location. Coherency instead represents the homogeneity of a flow property in a local neighborhood. It is exploited in order to avoid visual redundancy and to reduce occlusion and cluttering. We have applied our approach to three CFD datasets, obtaining meaningful insights.",
    pdf = "pdfs/Brambilla13Integrated.pdf",
    images = "images/Brambilla13Integrated_00.png, images/Brambilla13Integrated_01.png",
    thumbnails = "images/Brambilla13Integrated_thumb00.png, images/Brambilla13Integrated_thumb01.png",
    proceedings = "Proc. of VMV 2013: Vision, Modeling \& Visualization",
    url = "http://diglib.eg.org/EG/DL/PE/VMV/VMV13/001-009.pdf",
    doi = "10.2312/PE.VMV.VMV13.001-009",
    location = "Lugano, Switzerland",
    pres = "pdfs/Brambilla13Integrated.pptx",
    extra = "extra/Brambilla13Integrated_extra.pdf"
    }
    [DOI] [Bibtex]
    @ARTICLE {Parulek13Visual,
    author = "Julius Parulek and Cagatay Turkay and Nathalie Reuter and Ivan Viola",
    title = "Visual cavity analysis in molecular simulations",
    journal = "BMC Bioinformatics",
    year = "2013",
    volume = "14",
    number = "Suppl 19",
    pages = "S4",
    month = nov,
    abstract = "Molecular surfaces provide a useful mean for analyzing interactions between biomolecules; such as identification and characterization of ligand binding sites to a host macromolecule. We present a novel technique, which extracts potential binding sites, represented by cavities, and characterize them by 3D graphs and by amino acids. The binding sites are extracted using an implicit function sampling and graph algorithms. We propose an advanced cavity exploration technique based on the graph parameters and associated amino acids. Additionally, we interactively visualize the graphs in the context of the molecular surface. We apply our method to the analysis of MD simulations of Proteinase 3, where we verify the previously described cavities and suggest a new potential cavity to be studied.",
    images = "images/Parulek13Visual01.png, images/Parulek13Visual02.png",
    thumbnails = "images/Parulek13Visual01_thumb.png, images/Parulek13Visual02_thumb.png",
    url = "http://www.biomedcentral.com/1471-2105/14/S19/S4",
    doi = "10.1186/1471-2105-14-S19-S4",
    issn = "1471-2105",
    project = "physioillustration"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Patel-2013-ICS,
    author = "Daniel Patel and Veronika \v{S}olt{\'e}szov{\'a} and Jan Martin Nordbotten and Stefan Bruckner",
    title = "Instant Convolution Shadows for Volumetric Detail Mapping",
    journal = "ACM Transactions on Graphics",
    year = "2013",
    volume = "32",
    number = "5",
    pages = "154:1--154:18",
    month = sep,
    abstract = "In this article, we present a method for rendering dynamic scenes  featuring translucent procedural volumetric detail with all-frequency  soft shadows being cast from objects residing inside the view frustum.  Our approach is based on an approximation of physically correct shadows  from distant Gaussian area light sources positioned behind the view  plane, using iterative convolution. We present a theoretical and  empirical analysis of this model and propose an efficient class of  convolution kernels which provide high quality at interactive frame  rates. Our GPU-based implementation supports arbitrary volumetric  detail maps, requires no precomputation, and therefore allows for  real-time modification of all rendering parameters.",
    pdf = "pdfs/Patel-2013-ICS.pdf",
    images = "images/Patel-2013-ICS.jpg",
    thumbnails = "images/Patel-2013-ICS.png",
    youtube = "https://www.youtube.com/watch?v=lhGWgew3HXY,https://www.youtube.com/watch?v=XrhYjgQxfb0",
    doi = "10.1145/2492684",
    keywords = "shadows, volumetric effects, procedural texturing, filtering",
    project = "geoillustrator",
    url = "http://dl.acm.org/citation.cfm?id=2492684"
    }
    [PDF] [Bibtex]
    @MISC {Parulek13Importance,
    author = "Julius Parulek and Timo Ropinski and Ivan Viola",
    title = "Importance Driven Visualization of Molecular Surfaces",
    howpublished = "Poster presented at the BioVis conference 2013",
    month = oct,
    year = "2013",
    pdf = "pdfs/Parulek13Importance.pdf",
    images = "images/Parulek13Importance.png",
    thumbnails = "images/Parulek13Importance.png",
    location = "Atlanta (GA)",
    project = "physioillustration"
    }
    [Bibtex]
    @MISC {Parulek13Analysis,
    author = "Julius Parulek",
    title = "Interactive Visual Exploration and Analysis of High-Dimensional, Temporal, and Heterogeneous Biological Data",
    howpublished = "Presentation in the VisBio 2013",
    month = sep,
    year = "2013",
    abstract = "High-dimensional data (hundreds of dimensions, or more) and temporal data (thousands of time frames) pose substantial challenges for both computational and interactive analysis. To reveal relevant intrinsic relations between items or dimensions, the utilization of only computational methods or standard visualization techniques is not enough. In this talk, we introduce the concept of interactive visual analysis (IVA) that enables us to combine computational methods with the user knowledge through a system of multiple linked views on the data and advanced interaction mechanisms. Our approach allows us to interact with the data on the level of individual items and also on the level of dimensions, exploiting a number of useful statistical methods in addition. To improve the understanding of temporal data, we utilize clustering methods, where the user is provided means to understand the internal cluster structure. Moreover, we also showcase how IVA can be beneficial when analyzing molecular dynamics.",
    images = "images/no_thumb.png",
    thumbnails = "images/no_thumb.png",
    location = "Bergen, Norway",
    project = "physioillustration"
    }
    [PDF] [Bibtex]
    @PHDTHESIS {lidal13thesis,
    author = "Endre M. Lidal",
    title = "Sketch-based Storytelling for Cognitive Problem Solving",
    school = "Department of Informatics, University of Bergen, Norway",
    year = "2013",
    month = jun,
    abstract = "Problem solving is an important part of all engineering and scientific activities. It is present, for instance, when experts want to develop more fuel-efficient cars or when they are searching for oil and gas in the subsurface. Many alternatives have to be examined and evaluated before the optimal solution is found. Solving such problems is not only performed inside the mind of the scientist, but it is also an interaction between mind and scribbles, sketches, or visualizations on papers, on blackboards, and on computers. For problem solving in expert teams, this externalization through sketches and visualizations also plays an important communicative role. This dissertation presents research for assisting the problem- solving process on the computer, through novel technological advances in the fields of illustrative visualization and sketch-based modeling. Specifically, it targets problems that are related to evolutionary processes. Firstly, inspired by storytelling, the domain experts can express their ideas for solution as stories. These stories are based on sketches that the experts draw, utilizing a novel temporal-sketching interface inspired by a flip-over canvas metaphor. Further, the dissertation describes a set of sketching proxy geometries, such as the box-proxy geometry, that the experts can take advantage of when drawing three-dimensional (3D) sketches. These proxy geometries support the task of mapping a two-dimensional input (2D), e.g., a mouse or a digitizer tablet, to a 3D sketch. Solving difficult problems require that many different solutions are evaluated to identify the most optimal one. This dissertation introduces the story-tree, a tree-graph data structure and visualization, which manages and provides access to an ensemble of alternative stories. The story- tree also provides an interface where the stories can be evaluated and compared. This playback of the stories is done through automatic animations of the 2D sketches. 
The third challenge addressed in this dissertation is to communicate the optimal solution to decision-makers and laymen. By combining the animated 2D story sketches with illustrative visualization techniques it is possible to automatically synthesize and animate 3D models. These animations can be combined with new cutaway visualization techniques to reveal features hidden inside such 3D models. All of these contributions have been investigated in the context of the problem-solving tasks relevant to the early phase of petroleum exploration. This phase is characterized by having very little ground-through data available. Thus, a large solution space needs to be explored. Even so, the geologists need to produce models that can predict if petroleum is present. In addition to working with few data, the geologists also work under heavy time constraints because of the competition between the oil companies exploring the same area. The contributions from this dissertation have created enthusiasm among the domain experts and already, a new research initiative has materialized from the work described in this dissertation. Based on the feedback from the domain experts, we can conclude that the contributions presented in this dissertation form a valuable step towards better tools for problem solving, involving the computer, for the domain investigated here.",
    pdf = "pdfs/lidal13thesis.pdf",
    images = "images/lidal13thesis.png",
    thumbnails = "images/lidal13thesis.png",
    isbn = "978-82-308-2330-9",
    project = "geoillustrator"
    }
    [DOI] [Bibtex]
    @ARTICLE {Alsallakh13Radial,
    author = "Alsallakh, B. and Aigner, W. and Miksch, S. and Hauser, H.",
    title = "Radial Sets: Interactive Visual Analysis of Large Overlapping Sets",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2013",
    volume = "19",
    number = "12",
    pages = "2496--2505",
    abstract = "In many applications, data tables contain multi-valued attributes that often store the memberships of the table entities to multiple sets such as which languages a person masters, which skills an applicant documents, or which features a product comes with. With a growing number of entities, the resulting element-set membership matrix becomes very rich of information about how these sets overlap. Many analysis tasks targeted at set-typed data are concerned with these overlaps as salient features of such data. This paper presents Radial Sets, a novel visual technique to analyze set memberships for a large number of elements. Our technique uses frequency-based representations to enable quickly finding and analyzing different kinds of overlaps between the sets, and relating these overlaps to other attributes of the table entities. Furthermore, it enables various interactions to select elements of interest, find out if they are over-represented in specific sets or overlaps, and if they exhibit a different distribution for a specific attribute compared to the rest of the elements. These interactions allow formulating highly-expressive visual queries on the elements in terms of their set memberships and attribute values. As we demonstrate via two usage scenarios, Radial Sets enable revealing and analyzing a multitude of overlapping patterns between large sets, beyond the limits of state-of-the-art techniques.",
    images = "images/Alsallakh13Radial_3.jpg, images/Alsallakh13Radial_1.jpg, images/Alsallakh13Radial_2.jpg",
    thumbnails = "images/Alsallakh13Radial_3_thumb.png, images/Alsallakh13Radial_1_thumb.png, images/Alsallakh13Radial_2_thumb.png",
    url = "http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6634104",
    doi = "10.1109/TVCG.2013.184",
    issn = "1077-2626"
    }
    [PDF] [Bibtex]
    @ARTICLE {Parulek13Fast,
    author = "Parulek, Julius and Brambilla, Andrea",
    title = "Fast Blending Scheme for Molecular Surface Representation",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2013",
    volume = "19",
    number = "12",
    pages = "2653--2662",
    month = dec,
    abstract = "Representation of molecular surfaces is a well established way to study the interaction of molecules. The state-of-the-art molecular representation is the SES model, which provides a detailed surface visualization. Nevertheless, it is computationally expensive, so the less accurate Gaussian model is traditionally preferred. We introduce a novel surface representation that resembles the SES and approaches the rendering performance of the Gaussian model. Our technique is based on the iterative blending of implicit functions and avoids any pre-computation. Additionally, we propose a GPU-based ray-casting algorithm that efficiently visualize our molecular representation. A qualitative and quantitative comparison of our model with respect to the Gaussian and SES models is presented. As showcased in the paper, our technique is a valid and appealing alternative to the Gaussian representation. This is especially relevant in all the applications where the cost of the SES is prohibitive.",
    pdf = "pdfs/Parulek13Fast.pdf",
    images = "images/Parulek13Fast01.png, images/Parulek13Fast02.png",
    thumbnails = "images/Parulek13Fast01_thumb.png, images/Parulek13Fast02_thumb.png",
    event = "Vis2013",
    project = "physioillustration",
    extra = "extra/Parulek13Fast_code.pdf"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Matkovic13Interactive,
    author = "Matkovic, Kresimir and Duras, Mario and Gracanin, Denis and Splechtna, Rainer and Stehno, Benedikt and Hauser, Helwig",
    title = "Interactive Visual Analysis in the Concept Stage of a Hybrid-Vehicle Design",
    booktitle = "EuroVis Workshop on Visual Analytics",
    year = "2013",
    pages = "61--65",
    address = "Leipzig, Germany",
    publisher = "Eurographics Association",
    abstract = "The design of modern, hybrid vehicles is an active area of research. As the whole field is new, engineers need intuitive and powerful support tools. In this application paper, we illustrate an application of interactive visual analysis in the concept phase of a hybrid-vehicle design. We exploit coordinated multiple views to explore and analyze a simulation ensemble - a set of simulation runs of the same simulation model. Once we reduce the ensemble to a single run we use a detailed view, including an energy flow graph and a vehicle drive animation. Very positive feedback from domain experts and opportunities for additional improvements encourage further research.",
    pdf = "pdfs/Matkovic13Interactive.pdf",
    images = "images/Matkovic13Interactive_0.jpg, images/Matkovic13Interactive_1.jpg, images/Matkovic13Interactive_2.jpg, images/Matkovic13Interactive_3.jpg",
    thumbnails = "images/Matkovic13Interactive_0_thumb.jpg, images/Matkovic13Interactive_1.jpg, images/Matkovic13Interactive_2.jpg, images/Matkovic13Interactive_3.jpg",
    url = "http://diglib.eg.org/EG/DL/PE/EuroVAST/EuroVA13/061-065.pdf",
    doi = "10.2312/PE.EuroVAST.EuroVA13.061-065",
    isbn = "978-3-905674-55-2"
    }