Stefan Bruckner

Stefan Bruckner is a full professor in Visualization at the Department of Informatics of the University of Bergen, Norway. He received his master's degree (2004) and Ph.D. (2008), both in Computer Science, from the TU Wien, Austria, and was awarded the habilitation (venia docendi) in Practical Computer Science in 2012. Before his appointment in Bergen in 2013, he was an assistant professor at the Institute of Computer Graphics and Algorithms of the TU Wien.

His research interests include all aspects of data visualization, with a particular focus on interactive techniques for the exploration and analysis of spatial data. He has made significant contributions to areas such as illustrative visualization, volume rendering, smart visual interfaces, biomedical data visualization, and visual parameter space exploration. In addition to his contributions to basic research, he has successfully led industry collaborations with major companies such as GE Healthcare and Agfa HealthCare, and holds six granted patents.

He is a recipient of the Eurographics Young Researcher Award and the Karl-Heinz-Höhne Award for Medical Visualization, and his research has received 8 best paper awards and honorable mentions at international events. He has served as program co-chair of EuroVis, PacificVis, and the Eurographics Workshop on Visual Computing for Biology and Medicine, as well as co-chair of the Eurographics Medical Prize, and serves on the editorial board of Computers & Graphics. He is an ACM Distinguished Speaker and a member of ACM SIGGRAPH, Eurographics, and the IEEE Computer Society.

Please note that we are moving to new webpages, and not all content (e.g., publications) has been transferred yet. Please see our old pages for more complete information:

http://www.ii.uib.no/vis_old/team/bruckner/

Publications

2018

    [PDF] [Bibtex]
    @ARTICLE {Bruckner-2018-MSD,
    author = "Bruckner, Stefan and Isenberg, Tobias and Ropinski, Timo and Wiebel, Alexander",
    title = "A Model of Spatial Directness in Interactive Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics (accepted for publication, to appear in an upcoming issue)",
    year = "2018",
    abstract = "We discuss the concept of directness in the context of spatial interaction with visualization. In particular, we propose a modelthat allows practitioners to analyze and describe the spatial directness of interaction techniques, ultimately to be able to better understandinteraction issues that may affect usability. To reach these goals, we distinguish between different types of directness. Each type ofdirectness depends on a particular mapping between different spaces, for which we consider the data space, the visualization space, theoutput space, the user space, the manipulation space, and the interaction space. In addition to the introduction of the model itself, we alsoshow how to apply it to several real-world interaction scenarios in visualization, and thus discuss the resulting types of spatial directness,without recommending either more direct or more indirect interaction techniques. In particular, we will demonstrate descriptive andevaluative usage of the proposed model, and also briefly discuss its generative usage.",
    pdf = "pdfs/Bruckner-2018-MSD.pdf",
    images = "images/Bruckner-2018-MSD.jpg",
    thumbnails = "images/Bruckner-2018-MSD.png"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Magnus-2018-IDV,
    author = "Magnus, Jens G. and Bruckner, Stefan",
    title = "Interactive Dynamic Volume Illumination with Refraction and Caustics",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2018",
    volume = "24",
    number = "1",
    pages = "984--993",
    month = "jan",
    abstract = "In recent years, significant progress has been made indeveloping high-quality interactive methods for realistic volumeillumination. However, refraction -- despite being an important aspectof light propagation in participating media -- has so far only receivedlittle attention. In this paper, we present a novel approach forrefractive volume illumination including caustics capable of interactiveframe rates. By interleaving light and viewing ray propagation, ourtechnique avoids memory-intensive storage of illumination informationand does not require any precomputation. It is fully dynamic and allparameters such as light position and transfer function can be modifiedinteractively without a performance penalty.",
    pdf = "pdfs/Magnus-2018-IDV.pdf",
    images = "images/Magnus-2018-IDV.jpg",
    thumbnails = "images/Magnus-2018-IDV.png",
    youtube = "https://www.youtube.com/watch?v=3tn6sSXw4NQ",
    doi = "10.1109/TVCG.2017.2744438",
    event = "IEEE SciVis 2017",
    keywords = "interactive volume rendering, illumination, refraction, shadows, caustics",
    location = "Phoenix, USA"
    }
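    As a rough illustration of the refraction component only (not the paper's interleaved light/view propagation scheme), the sketch below bends a ray direction at a medium boundary using Snell's law; the function name and the numpy dependency are assumptions for this example. In a volume renderer, the normal would typically come from the normalized gradient of the refractive-index field.

        import numpy as np

        def refract(direction, normal, n1, n2):
            """Bend a unit ray at an interface between refractive indices n1
            and n2 using Snell's law; returns None on total internal reflection."""
            d = direction / np.linalg.norm(direction)
            n = normal / np.linalg.norm(normal)
            cos_i = -np.dot(n, d)
            if cos_i < 0.0:                     # ray hits the back side: flip the normal
                n, cos_i = -n, -cos_i
            eta = n1 / n2
            k = 1.0 - eta * eta * (1.0 - cos_i * cos_i)
            if k < 0.0:
                return None                     # total internal reflection
            return eta * d + (eta * cos_i - np.sqrt(k)) * n
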
    [PDF] [Bibtex]
    @MISC {Smit-2018-MMIV,
    author = "N. N. Smit and S. Bruckner and H. Hauser and I. Haldorsen and A. Lundervold and A. S. Lundervold and E. Hodneland and L. Oltedal and K. Specht and E. R. Gruner",
    title = "Research Agenda of the Mohn Medical Imaging and Visualization Centre in Bergen, Norway",
    howpublished = "Poster presented at the EG VCBM workshop 2018",
    month = "September",
    year = "2018",
    abstract = "The Mohn Medical Imaging and Visualization Centre (MMIV) was recently established in collaboration between the University of Bergen, Norway, and the Haukeland University Hospital in Bergen with generous financial support from the Bergen Research Foundation (BFS) to conduct cross-disciplinary research related to state-of-the-art medical imaging, including preclinical and clinical high-field MRI, CT and hybrid PET/CT/MR.The overall goal of the Centre is to research new methods in quantitative imaging and interactive visualization to predict changes in health and disease across spatial and temporal scales. This encompasses research in feature detection, feature extraction, and feature prediction, as well as on methods and techniques for the interactive visualization of spatial and abstract data related to and derived from these features.With special emphasis on the natural and medical sciences, the long-term goal of the Centre is to consolidate excellence in the interplay between medical imaging (physics, chemistry, radiography, radiology), and visualization (computer science and mathematics) and develop novel and refined imaging methods that may ultimately improve patient care. In this poster, we describe the overall research agenda of MMIV and describe the four core projects in the centre.",
    pdf = "pdfs/smit2018posterabstract.pdf",
    images = "images/MMIVPoster.png",
    thumbnails = "images/MMIVPoster.png",
    location = "Granada, Spain",
    project = "VIDI"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Stoppel-2018-SSW,
    author = "Stoppel, Sergej and Bruckner, Stefan",
    title = "Smart Surrogate Widgets for Direct Volume Manipulation",
    booktitle = "Proceedings of IEEE PacificVis 2018",
    year = "2018",
    pages = "36--45",
    month = "apr",
    abstract = "Interaction is an essential aspect in volume visualization, yet commonmanipulation tools such as bounding boxes or clipping planewidgets provide rather crude tools as they neglect the complex structureof the underlying data. In this paper, we introduce a novelvolume interaction approach based on smart widgets that are automaticallyplaced directly into the data in a visibility-driven manner.By adapting to what the user actually sees, they act as proxies thatallow for goal-oriented modifications while still providing an intuitiveset of simple operations that is easy to control. In particular, ourmethod is well-suited for direct manipulation scenarios such as touchscreens, where traditional user interface elements commonly exhibitlimited utility. To evaluate out approach we conducted a qualitativeuser study with nine participants with various backgrounds.",
    pdf = "pdfs/Stoppel-2018-SSW.pdf",
    images = "images/Stoppel-2018-SSW.jpg",
    thumbnails = "images/Stoppel-2018-SSW.png",
    youtube = "https://www.youtube.com/watch?v=wMRw-W0SrLk",
    event = "IEEE PacificVis 2018",
    keywords = "smart interfaces, volume manipulation, volume visualization",
    doi = "10.1109/PacificVis.2018.00014"
    }
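    The visibility-driven placement can be pictured with a tiny sketch: given a per-voxel visibility estimate (e.g., opacity-weighted contributions accumulated during rendering), anchor a widget at the most visible voxel. This is a simplified stand-in for the paper's method, assuming a numpy array as input.

        import numpy as np

        def widget_anchor(visibility):
            """Return the (z, y, x) index of the voxel contributing most to
            the current view, as a candidate anchor for a surrogate widget."""
            return np.unravel_index(np.argmax(visibility), visibility.shape)
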
    [PDF] [DOI] [VID] [Bibtex]
    @ARTICLE {Stoppel-2018-FVI,
    author = "Stoppel, Sergej and Erga, Magnus Paulson and Bruckner, Stefan",
    title = "Firefly: Virtual Illumination Drones for Interactive Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics (to appear)",
    year = "2018",
    month = "oct",
    abstract = "Light specification in three dimensional scenes is a complex problem and several approaches have been presented that aim to automate this process. However, there are many scenarios where a static light setup is insufficient, as the scene content and camera position may change. Simultaneous manual control over the camera and light position imposes a high cognitive load on the user. To address this challenge, we introduce a novel approach for automatic scene illumination with Fireflies. Fireflies are intelligent virtual light drones that illuminate the scene by traveling on a closed path. The Firefly path automatically adapts to changes in the scene based on an outcome-oriented energy function. To achieve interactive performance, we employ a parallel rendering pipeline for the light path evaluations. We provide a catalog of energy functions for various application scenarios and discuss the applicability of our method on several examples.",
    pdf = "pdfs/VIS2018-Firefly.pdf",
    vid = "vids/FinalVideo.mp4",
    images = "images/Teaser.png",
    thumbnails = "images/HeadRightCroppedThumbnail.png",
    doi = "10.1109/TVCG.2018.2864656"
    }
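    A minimal sketch of the outcome-oriented selection idea (not the authors' closed-path optimization): evaluate a user-supplied energy function for candidate light positions and keep the best one. The candidate sampling and the energy signature are assumptions for illustration.

        import numpy as np

        def best_light_position(candidates, energy):
            """Return the candidate light position with minimal energy, where
            energy(p) scores the rendering outcome for a light placed at p."""
            scores = [energy(p) for p in candidates]
            return candidates[int(np.argmin(scores))]

        # Hypothetical usage: candidates sampled on a circle around the scene,
        # with a view-dependent energy function supplied by the renderer.
        theta = np.linspace(0.0, 2.0 * np.pi, 64, endpoint=False)
        ring = 3.0 * np.stack([np.cos(theta), np.sin(theta), np.ones_like(theta)], axis=1)
        # best = best_light_position(ring, energy)
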

2017

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Stoppel-2017-VPI,
    author = "Stoppel, Sergej and Bruckner, Stefan",
    title = "Vol²velle: Printable Interactive Volume Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "861--870",
    month = "jan",
    abstract = "Interaction is an indispensable aspect of data visualization. The  presentation of volumetric data, in particular, often significantly  benefits from interactive manipulation of parameters such as transfer  functions, rendering styles, or clipping planes. However, when we  want to create hardcopies of such visualizations, this essential  aspect is lost. In this paper, we present a novel approach for creating  hardcopies of volume visualizations which preserves a certain degree  of interactivity. We present a method for automatically generating  Volvelles, printable tangible wheel charts that can be manipulated  to explore different parameter settings. Our interactive system allows  the flexible mapping of arbitrary visualization parameters and supports  advanced features such as linked views. The resulting designs can  be easily reproduced using a standard printer and assembled within  a few minutes.",
    pdf = "pdfs/Stoppel-2017-VPI.pdf",
    images = "images/Stoppel-2017-VPI.jpg",
    thumbnails = "images/Stoppel-2017-VPI.png",
    youtube = "https://www.youtube.com/watch?v=Z1K8t-FCiXI",
    doi = "10.1109/TVCG.2016.2599211",
    event = "IEEE SciVis 2016",
    keywords = "physical visualization, interaction, volume visualization, illustrative visualization",
    location = "Baltimore, USA"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Smit-2017-PAS,
    author = "Smit, Noeska and Lawonn, Kai and Kraima, Annelot and DeRuiter, Marco and Sokooti, Hessam and Bruckner, Stefan and Eisemann, Elmar and Vilanova, Anna",
    title = "PelVis: Atlas-based Surgical Planning for Oncological Pelvic Surgery",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "741--750",
    month = "jan",
    abstract = "Due to the intricate relationship between the pelvic organs and vital  structures, such as vessels and nerves, pelvic anatomy is often considered  to be complex to comprehend. In oncological pelvic surgery, a trade-off  has to be made between complete tumor resection and preserving function  by preventing damage to the nerves. Damage to the autonomic nerves  causes undesirable post-operative side-effects such as fecal and  urinal incontinence, as well as sexual dysfunction in up to 80 percent  of the cases. Since these autonomic nerves are not visible in pre-operative  MRI scans or during surgery, avoiding nerve damage during such a  surgical procedure becomes challenging. In this work, we present  visualization methods to represent context, target, and risk structures  for surgical planning. We employ distance-based and occlusion management  techniques in an atlas-based surgical planning tool for oncological  pelvic surgery. Patient-specific pre-operative MRI scans are registered  to an atlas model that includes nerve information. Through several  interactive linked views, the spatial relationships and distances  between the organs, tumor and risk zones are visualized to improve  understanding, while avoiding occlusion. In this way, the surgeon  can examine surgically relevant structures and plan the procedure  before going into the operating theater, thus raising awareness of  the autonomic nerve zone regions and potentially reducing post-operative  complications. Furthermore, we present the results of a domain expert  evaluation with surgical oncologists that demonstrates the advantages  of our approach.",
    pdf = "pdfs/Smit-2017-PAS.pdf",
    images = "images/Smit-2017-PAS.jpg",
    thumbnails = "images/Smit-2017-PAS.png",
    youtube = "https://www.youtube.com/watch?v=vHp05I5-hp8",
    doi = "10.1109/TVCG.2016.2598826",
    event = "IEEE SciVis 2016",
    keywords = "atlas, surgical planning, medical visualization",
    location = "Baltimore, USA"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Lind-2017-CCR,
    author = "Lind, Andreas J and Bruckner, Stefan",
    title = "Comparing Cross-Sections and 3D Renderings for Surface Matching Tasks using Physical Ground Truths",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "781--790",
    month = "jan",
    abstract = "Within the visualization community there are some well-known techniques  for visualizing 3D spatial data and some general assumptions about  how perception affects the performance of these techniques in practice.  However, there is a lack of empirical research backing up the possible  performance differences among the basic techniques for general tasks.  One such assumption is that 3D renderings are better for obtaining  an overview, whereas cross sectional visualizations such as the commonly  used Multi- Planar Reformation (MPR) are better for supporting detailed  analysis tasks. In the present study we investigated this common  assumption by examining the difference in performance between MPR  and 3D rendering for correctly identifying a known surface. We also  examined whether prior experience working with image data affects  the participant’s performance, and whether there was any difference  between interactive or static versions of the visualizations. Answering  this question is important because it can be used as part of a scientific  and empirical basis for determining when to use which of the two  techniques. An advantage of the present study compared to other studies  is that several factors were taken into account to compare the two  techniques. The problem was examined through an experiment with 45  participants, where physical objects were used as the known surface  (ground truth). Our findings showed that: 1. The 3D renderings largely  outperformed the cross sections; 2. Interactive visualizations were  partially more effective than static visualizations; and 3. The high  experience group did not generally outperform the low experience  group.",
    pdf = "pdfs/Lind-2017-CCR.pdf",
    images = "images/Lind-2017-CCR.jpg",
    thumbnails = "images/Lind-2017-CCR.png",
    doi = "10.1109/TVCG.2016.2598602",
    event = "IEEE SciVis 2016",
    keywords = "human-computer interaction, quantitative evaluation, volume visualization",
    location = "Baltimore, USA"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Solteszova-2017-OFS,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and {\AA}smund Birkeland and Sergej Stoppel and Ivan Viola and Stefan Bruckner",
    title = "Output-Sensitive Filtering of Streaming Volume Data",
    journal = "Computer Graphics Forum",
    year = "2017",
    volume = "36",
    number = "1",
    pages = "249--262",
    month = "jan",
    abstract = "Real-time volume data acquisition poses substantial challenges for  the traditional visualization pipeline where data enhancement is  typically seen as a pre-processing step. In the case of 4D ultrasound  data, for instance, costly processing operations to reduce noise  and to remove artifacts need to be executed for every frame. To enable  the use of high quality filtering operations in such scenarios, we  propose an output-sensitive approach to the visualization of streaming  volume data. Our method evaluates the potential contribution of all  voxels to the final image, allowing us to skip expensive processing  operations that have little or no effect on the visualization. As  filtering operations modify the data values which may affect the  visibility, our main contribution is a fast scheme to predict their  maximum effect on the final image. Our approach prioritizes filtering  of voxels with high contribution to the final visualization based  on a maximal permissible error per pixel. With zero permissible error,  the optimized filtering will yield a result identical to filtering  of the entire volume. We provide a thorough technical evaluation  of the approach and demonstrate it on several typical scenarios that  require on-the-fly processing.",
    pdf = "pdfs/Solteszova-2017-OFS.pdf",
    images = "images/Solteszova-2017-OFS.jpg",
    thumbnails = "images/Solteszova-2017-OFS.png",
    youtube = "https://www.youtube.com/watch?v=xGPs560ttp0",
    doi = "10.1111/cgf.12799",
    keywords = "output-sensitive processing, volume data, filtering"
    }
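    The core idea can be sketched as follows: bound each voxel's possible effect on the image by its contribution, and run the expensive filter only where that bound exceeds the permissible per-pixel error. This is a schematic reading of the abstract, with the contribution estimate and the per-voxel filter passed in as assumptions.

        import numpy as np

        def output_sensitive_filter(volume, contribution, filt, max_delta, eps):
            """Filter only voxels whose maximum possible effect on the image
            (contribution * max_delta) exceeds the permissible error eps; with
            eps = 0, every voxel with nonzero contribution is filtered."""
            out = volume.copy()
            mask = contribution * max_delta > eps
            for v in map(tuple, np.argwhere(mask)):
                out[v] = filt(volume, v)        # expensive per-voxel operation
            return out
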
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Mindek-2017-DVN,
    author = "Mindek, Peter and Mistelbauer, Gabriel and Gr{\"o}ller, Eduard and Bruckner, Stefan",
    title = "Data-Sensitive Visual Navigation",
    journal = "Computers \& Graphics",
    year = "2017",
    volume = "67",
    pages = "77--85",
    month = "oct",
    abstract = "In visualization systems it is often the case that thechanges of the input parameters are not proportional to the visualchange of the generated output. In this paper, we propose a model forenabling data-sensitive navigation for user-interface elements. Thismodel is applied to normalize the user input according to the visualchange, and also to visually communicate this normalization. In thisway, the exploration of heterogeneous data using common interactionelements can be performed in an efficient way. We apply our model to thefield of medical visualization and present guided navigation tools fortraversing vascular structures and for camera rotation around 3Dvolumes. The presented examples demonstrate that the model scales touser-interface elements where multiple parameters are setsimultaneously.",
    pdf = "pdfs/Mindek-2017-DVN.pdf",
    images = "images/Mindek-2017-DVN.jpg",
    thumbnails = "images/Mindek-2017-DVN.png",
    youtube = "https://www.youtube.com/watch?v=FnhbjX7BRXI",
    note = "SCCG 2017 Best Paper Award",
    doi = "10.1016/j.cag.2017.05.012",
    event = "SCCG 2017",
    keywords = "navigation, exploration, medical visualization",
    location = "Mikulov, Czech Republic"
    }
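    The normalization can be illustrated with a short sketch: integrate a measured visual-change profile over the parameter range and invert it, so that equal slider steps yield roughly equal visual change. The sampling scheme below is an assumption for illustration, not the paper's implementation.

        import numpy as np

        def data_sensitive_mapping(visual_change, n=101):
            """Given positive per-step visual-change magnitudes sampled along a
            parameter, return parameter values for n slider positions such that
            each slider step produces roughly equal visual change."""
            cum = np.concatenate(([0.0], np.cumsum(visual_change)))
            cum /= cum[-1]                      # normalized cumulative visual change
            slider = np.linspace(0.0, 1.0, n)
            params = np.linspace(0.0, 1.0, len(cum))
            return np.interp(slider, cum, params)
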
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Diehl-2017-AVA,
    author = "Diehl, Alexandra and Pelorosso, Leandro and Delrieux, Claudio and Matkovi{\'c}, K and Ruiz, Juan and Gr{\"o}ller, M Eduard and Bruckner, Stefan",
    title = "Albero: A Visual Analytics Approach for Probabilistic Weather Forecasting",
    journal = "Computer Graphics Forum",
    year = "2017",
    volume = "36",
    number = "7",
    pages = "135--144",
    month = "oct",
    abstract = "Probabilistic weather forecasts are amongst the most popularways to quantify numerical forecast uncertainties. The analogregression method can quantify uncertainties and express them asprobabilities. The method comprises the analysis of errorsfrom a large database of past forecasts generated with a specificnumerical model and observational data. Current visualizationtools based on this method are essentially automated and provide limitedanalysis capabilities. In this paper, we propose a novelapproach that breaks down the automatic process using the experience andknowledge of the users and creates a new interactivevisual workflow. Our approach allows forecasters to study probabilisticforecasts, their inner analogs and observations, theirassociated spatial errors, and additional statistical information bymeans of coordinated and linked views. We designed thepresented solution following a participatory methodology together withdomain experts. Several meteorologists with differentbackgrounds validated the approach. Two case studies illustrate thecapabilities of our solution. It successfully facilitates theanalysis of uncertainty and systematic model biases for improveddecision-making and process-quality measurements.",
    pdf = "pdfs/Diehl-2017-AVA.pdf",
    images = "images/Diehl-2017-AVA.jpg",
    thumbnails = "images/Diehl-2017-AVA.png",
    youtube = "https://www.youtube.com/watch?v=-yqoeEgkz28",
    doi = "10.1111/cgf.13279",
    keywords = "visual analytics, weather forecasting, uncertainty"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Swoboda-2017-VQI,
    author = "Swoboda, Nicolas and Moosburner, Judith and Bruckner, Stefan and Yu, JY and Dickson, Barry J and B{\"u}hler, Katja",
    title = "Visualization and Quantification for Interactive Analysis of Neural Connectivity in Drosophila",
    journal = "Computer Graphics Forum",
    year = "2017",
    volume = "36",
    number = "1",
    pages = "160--171",
    month = "jan",
    abstract = "Neurobiologists investigate the brain of the common fruit fly Drosophila  melanogaster to discover neural circuits and link them to complex  behavior. Formulating new hypotheses about connectivity requires  potential connectivity information between individual neurons, indicated  by overlaps of arborizations of two or more neurons. As the number  of higher order overlaps (i.e., overlaps of three or more arborizations)  increases exponentially with the number of neurons under investigation,  visualization is impeded by clutter and quantification becomes a  burden. Existing solutions are restricted to visual or quantitative  analysis of pairwise overlaps, as they rely on precomputed overlap  data. We present a novel tool that complements existing methods for  potential connectivity exploration by providing for the first time  the possibility to compute and visualize higher order arborization  overlaps on the fly and to interactively explore this information  in both its spatial anatomical context and on a quantitative level.  Qualitative evaluation by neuroscientists and non-experts demonstrated  the utility and usability of the tool",
    pdf = "pdfs/Swoboda-2017-VQI.pdf",
    images = "images/Swoboda-2017-VQI.jpg",
    thumbnails = "images/Swoboda-2017-VQI.png",
    youtube = "https://www.youtube.com/watch?v=bycWGQQpqks",
    doi = "10.1111/cgf.12792",
    keywords = "visual analysis, neurobiology"
    }
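    Conceptually, an order-N overlap is the voxel-wise conjunction of N binary arborization masks, which is cheap enough to compute on demand; the sketch below (assuming numpy boolean volumes) yields the overlap region and its size, the basis for quantification.

        import numpy as np

        def arborization_overlap(masks):
            """Voxel-wise overlap of N binary arborization masks, computed on
            the fly; returns the overlap mask and its volume in voxels."""
            overlap = np.logical_and.reduce(masks)
            return overlap, int(overlap.sum())
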
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Kolesar-2017-FCC,
    author = "Koles{\'a}r, Ivan and Bruckner, Stefan and Viola, Ivan and Hauser, Helwig",
    title = "A Fractional Cartesian Composition Model for Semi-spatial Comparative Visualization Design",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "851--860",
    month = "jan",
    abstract = "The study of spatial data ensembles leads to substantial visualization  challenges in a variety of applications. In this paper, we present  a model for comparative visualization that supports the design of  according ensemble visualization solutions by partial automation.  We focus on applications, where the user is interested in preserving  selected spatial data characteristics of the data as much as possible—even  when many ensemble members should be jointly studied using comparative  visualization. In our model, we separate the design challenge into  a minimal set of user-specified parameters and an optimization component  for the automatic configuration of the remaining design variables.  We provide an illustrated formal description of our model and exemplify  our approach in the context of several application examples from  different domains in order to demonstrate its generality within the  class of comparative visualization problems for spatial data ensembles.",
    pdf = "pdfs/Kolesar-2017-FCC.pdf",
    images = "images/Kolesar-2017-FCC.jpg",
    thumbnails = "images/Kolesar-2017-FCC.png",
    youtube = "https://www.youtube.com/watch?v=_zk67fmryok",
    doi = "10.1109/TVCG.2016.2598870",
    event = "IEEE SciVis 2016",
    keywords = "visualization models, integrating spatial and non-spatial data visualization, design methodologies",
    location = "Baltimore, USA",
    project = "physioillustration"
    }

2016

    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Stoppel-2016-GIR,
    author = "Sergej Stoppel and Erlend Hodneland and Helwig Hauser and Stefan Bruckner",
    title = "Graxels: Information Rich Primitives for the Visualization of Time-Dependent Spatial Data",
    booktitle = "Proceedings of VCBM 2016",
    year = "2016",
    pages = "183--192",
    month = "sep",
    abstract = "Time-dependent volumetric data has important applications in areas  as diverse as medicine, climatology, and engineering. However, the  simultaneous quantitative assessment of spatial and temporal features  is very challenging. Common visualization techniques show either  the whole volume in one time step (for example using direct volume  rendering) or let the user select a region of interest (ROI) for  which a collection of time-intensity curves is shown. In this paper,  we propose a novel approach that dynamically embeds quantitative  detail views in a spatial layout. Inspired by the concept of small  multiples, we introduce a new primitive graxel (graph pixel). Graxels  are view dependent primitives of time-intensity graphs, generated  on-the-fly by aggregating per-ray information over time and image  regions. Our method enables the detailed feature-aligned visual analysis  of time-dependent volume data and allows interactive refinement and  filtering. Temporal behaviors like frequency relations, aperiodic  or periodic oscillations and their spatial context are easily perceived  with our method. We demonstrate the power of our approach using examples  from medicine and the natural sciences.",
    pdf = "pdfs/Stoppel-2016-GIR.pdf",
    images = "images/Stoppel-2016-GIR.jpg",
    thumbnails = "images/Stoppel-2016-GIR.png",
    youtube = "https://www.youtube.com/watch?v=UsClj3ytd0Y",
    doi = "10.2312/vcbm.20161286",
    event = "VCBM 2016",
    keywords = "time-dependent data, volume data, small multiples",
    location = "Bergen, Norway"
    }
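    The aggregation step can be approximated in a few lines: assign voxels to image-space tiles and average each tile's intensities per timestep to obtain its time-intensity graph. The label volume and the tiling are assumptions standing in for the paper's view-dependent per-ray aggregation.

        import numpy as np

        def graxel_curves(volume_tzyx, labels, n_tiles):
            """One time-intensity curve per tile: volume_tzyx has shape
            (t, z, y, x) and labels assigns each voxel a tile id (or -1)."""
            t = volume_tzyx.shape[0]
            flat = volume_tzyx.reshape(t, -1)
            lab = labels.ravel()
            curves = np.zeros((n_tiles, t))
            for k in range(n_tiles):
                sel = lab == k
                if sel.any():
                    curves[k] = flat[:, sel].mean(axis=1)
            return curves
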
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Labschuetz-2016-JJC,
    author = "Matthias Labsch{\"u}tz and Stefan Bruckner and Meister Eduard Gr{\"o}ller and Markus Hadwiger and Peter Rautek",
    title = "JiTTree: A Just-in-Time Compiled Sparse GPU Volume Data Structure",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2016",
    volume = "22",
    number = "1",
    pages = "1025--1034",
    month = "jan",
    abstract = "Abstract—Sparse volume data structures enable the efficient representation  of large but sparse volumes in GPU memory for com putation and visualization.  However, the choice of a specific data structure for a given data  set depends on several factors, such as the memory budget, the sparsity  of the data, and data access patterns. In general, there is no single  optimal sparse data structure, but a set of several candidates with  individual strengths and drawbacks. One solution to this problem  are hybrid data structures which locally adapt themselves to the  sparsity. However, they typically suffer from increased traversal  overhead which limits their utility in many applications. This paper  presents JiTTree, a novel sparse hybrid volume data structure that  uses just-in-time compilation to overcome these problems. By combining  multiple sparse data structures and reducing traversal overhead we  leverage their individual advantages. We demonstrate that hybrid  data structures adapt well to a large range of data sets. They are  especially superior to other sparse data structures for data sets  that locally vary in sparsity. Possible optimization criteria are  memory, performance and a combination thereof. Through just-in-time  (JIT) compilation, JiTTree reduces the traversal overhead of the  resulting optimal data structure. As a result, our hybrid volume  data structure enables efficient computations on the GPU, while being  superior in terms of memory usage when compared to non-hybrid data  structures.",
    pdf = "pdfs/Labschuetz-2016-JJC.pdf",
    images = "images/Labschuetz-2016-JJC.jpg",
    thumbnails = "images/Labschuetz-2016-JJC.png",
    doi = "10.1109/TVCG.2015.2467331",
    event = "IEEE SciVis 2015",
    keywords = "data transformation and representation, GPUs and multi-core architectures, volume rendering",
    location = "Chicago, USA"
    }
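    The underlying hybrid idea (without the just-in-time compilation that is the paper's actual contribution) can be sketched by choosing a per-block representation based on occupancy; the threshold and the two representations here are illustrative assumptions.

        import numpy as np

        def encode_block(block, dense_threshold=0.25):
            """Store a block densely when it is well occupied, otherwise as a
            coordinate list; both variants support random voxel access."""
            if np.count_nonzero(block) / block.size > dense_threshold:
                return ("dense", block.copy())
            coords = np.argwhere(block)
            values = block[block != 0]          # same C order as argwhere
            return ("coords", (coords, values))

        def sample(encoded, ijk):
            """Fetch one voxel from either representation (0 where empty)."""
            kind, data = encoded
            if kind == "dense":
                return data[ijk]
            coords, values = data
            hit = np.all(coords == np.asarray(ijk), axis=1)
            return values[hit][0] if hit.any() else 0.0
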
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Klein-2016-TIV,
    author = "Tobias Klein and Stefan Bruckner and Meister Eduard Gr{\"o}ller and Markus Hadwiger and Peter Rautek",
    title = "Towards Interactive Visual Exploration of Parallel Programs using a Domain-Specific Language",
    booktitle = "Proceedings of the International Workshop on OpenCL 2016",
    year = "2016",
    month = "apr",
    abstract = "The use of GPUs and the massively parallel computing paradigm have  become wide-spread. We describe a framework for the interactive visualization  and visual analysis of the run-time behavior of massively parallel  programs, especially OpenCL kernels. This facilitates understanding  a program's function and structure, finding the causes of possible  slowdowns, locating program bugs, and interactively exploring and  visually comparing different code variants in order to improve performance  and correctness. Our approach enables very specific, user-centered  analysis, both in terms of the recording of the run-time behavior  and the visualization itself. Instead of having to manually write  instrumented code to record data, simple code annotations tell the  source-to-source compiler which code instrumentation to generate  automatically. The visualization part of our framework then enables  the interactive analysis of kernel run-time behavior in a way that  can be very specific to a particular problem or optimization goal,  such as analyzing the causes of memory bank conflicts or understanding  an entire parallel algorithm.",
    pdf = "pdfs/Klein-2016-TIV.pdf",
    images = "images/Klein-2016-TIV.jpg",
    thumbnails = "images/Klein-2016-TIV.png",
    doi = "10.1145/2909437.2909459",
    event = "IWOCL 2016",
    extra = "pdfs/Klein-2016-TIV-Poster.pdf",
    keywords = "domain specific languages, GPU programming, visual exploration",
    location = "Vienna, Austria",
    owner = "bruckner"
    }

2015

    [PDF] [Bibtex]
    @ARTICLE {Angelelli-2015-PQA,
    author = "Paolo Angelelli and Stefan Bruckner",
    title = "Performance and Quality Analysis of Convolution-Based Volume Illumination",
    journal = "Journal of WSCG",
    year = "2015",
    volume = "23",
    number = "2",
    pages = "131--138",
    month = "jun",
    abstract = "Convolution-based techniques for volume rendering are among the fastest  in the on-the-fly volumetric illumination category. Such methods,  however, are still considerably slower than conventional local illumination  techniques. In this paper we describe how to adapt two commonly used  strategies for reducing aliasing artifacts, namely pre-integration  and supersampling, to such techniques. These strategies can help  reduce the sampling rate of the lighting information (thus the number  of convolutions), bringing considerable performance benefits. We  present a comparative analysis of their effectiveness in offering  performance improvements. We also analyze the (negligible) differences  they introduce when comparing their output to the reference method.  These strategies can be highly beneficial in setups where direct  volume rendering of continuously streaming data is desired and continuous  recomputation of full lighting information is too expensive, or where  memory constraints make it preferable not to keep additional precomputed  volumetric data in memory. In such situations these strategies make  single pass, convolution-based volumetric illumination models viable  for a broader range of applications, and this paper provides practical  guidelines for using and tuning such strategies to specific use cases.",
    pdf = "pdfs/Angelelli-2015-PQA.pdf",
    images = "images/Angelelli-2015-PQA.jpg",
    thumbnails = "images/Angelelli-2015-PQA.png",
    keywords = "volume rendering, global illumination, scientific visualization, medical visualization"
    }
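    Pre-integration in general (not this paper's specific adaptation) replaces dense sampling of the transfer function between two ray samples with a 2D table lookup; below is a minimal sketch for a scalar emission table, assuming a 1D transfer function array.

        import numpy as np

        def build_preintegration_table(tf):
            """Entry [a, b] holds the mean of the transfer function over the
            scalar interval between sample values a and b, so one table lookup
            replaces supersampling the ray segment."""
            n = len(tf)
            csum = np.concatenate(([0.0], np.cumsum(tf)))   # prefix sums of tf
            a = np.arange(n)[:, None]
            b = np.arange(n)[None, :]
            lo, hi = np.minimum(a, b), np.maximum(a, b)
            return (csum[hi + 1] - csum[lo]) / (hi - lo + 1)
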
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Mindek-2015-ASM,
    author = "Peter Mindek and Ladislav \v{C}mol{\'i}k and Ivan Viola and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Automatized Summarization of Multiplayer Games",
    booktitle = "Proceedings of SCCG 2015",
    year = "2015",
    pages = "93--100",
    month = "apr",
    abstract = "We present a novel method for creating automatized gameplay dramatization  of multiplayer video games. The dramatization serves as a visual  form of guidance through dynamic 3D scenes with multiple foci, typical  for such games. Our goal is to convey interesting aspects of the  gameplay by animated sequences creating a summary of events which  occurred during the game. Our technique is based on processing many  cameras, which we refer to as a flock of cameras, and events captured  during the gameplay, which we organize into a so-called event graph.  Each camera has a lifespan with a certain time interval and its parameters  such as position or look-up vector are changing over time. Additionally,  during its lifespan each camera is assigned an importance function,  which is dependent on the significance of the structures that are  being captured by the camera. The images captured by the cameras  are composed into a single continuous video using a set of operators  based on cinematographic effects. The sequence of operators is selected  by traversing the event graph and looking for specific patterns corresponding  to the respective operators. In this way, a large number of cameras  can be processed to generate an informative visual story presenting  the gameplay. Our compositing approach supports insets of camera  views to account for several important cameras simultaneously. Additionally,  we create seamless transitions between individual selected camera  views in order to preserve temporal continuity, which helps the user  to follow the virtual story of the gameplay.",
    pdf = "pdfs/Mindek-2015-ASM.pdf",
    images = "images/Mindek-2015-ASM.jpg",
    thumbnails = "images/Mindek-2015-ASM.png",
    note = "SCCG 2015 Best Paper Award",
    doi = "10.1145/2788539.2788549",
    keywords = "animation, storytelling, game visualization",
    location = "Smolenice, Slovakia",
    owner = "bruckner",
    timestamp = "2015.06.08",
    url = "http://www.cg.tuwien.ac.at/research/publications/2015/mindek-2015-mc/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Diehl-2015-VAS,
    author = "Alexandra Diehl and Leandro Pelorosso and Claudio Delrieux and Celeste Saulo and Juan Ruiz and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Visual Analysis of Spatio-Temporal Data: Applications in Weather Forecasting",
    journal = "Computer Graphics Forum",
    year = "2015",
    volume = "34",
    number = "3",
    pages = "381--390",
    month = "may",
    abstract = "Weather conditions affect multiple aspects of human life such as economy,  safety, security, and social activities. For this reason, weather  forecast plays a major role in society. Currently weather forecasts  are based on Numerical Weather Prediction (NWP) models that generate  a representation of the atmospheric flow. Interactive visualization  of geo-spatial data has been widely used in order to facilitate the  analysis of NWP models. This paper presents a visualization system  for the analysis of spatio-temporal patterns in short-term weather  forecasts. For this purpose, we provide an interactive visualization  interface that guides users from simple visual overviews to more  advanced visualization techniques. Our solution presents multiple  views that include a timeline with geo-referenced maps, an integrated  webmap view, a forecast operation tool, a curve-pattern selector,  spatial filters, and a linked meteogram. Two key contributions of  this work are the timeline with geo-referenced maps and the curve-pattern  selector. The latter provides novel functionality that allows users  to specify and search for meaningful patterns in the data. The visual  interface of our solution allows users to detect both possible weather  trends and errors in the weather forecast model.We illustrate the  usage of our solution with a series of case studies that were designed  and validated in collaboration with domain experts.",
    pdf = "pdfs/Diehl-2015-VAS.pdf",
    images = "images/Diehl-2015-VAS.jpg",
    thumbnails = "images/Diehl-2015-VAS.png",
    youtube = "https://www.youtube.com/watch?v=hhQwsuXpHo8",
    doi = "10.1111/cgf.12650",
    event = "EuroVis 2015",
    keywords = "weather forecasting, visual analysis, spatiotemporal data",
    location = "Cagliari, Italy",
    owner = "bruckner",
    timestamp = "2015.06.08"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Karimov-2015-GVE,
    author = "Alexey Karimov and Gabriel Mistelbauer and Thomas Auzinger and Stefan Bruckner",
    title = "Guided Volume Editing based on Histogram Dissimilarity",
    journal = "Computer Graphics Forum",
    year = "2015",
    volume = "34",
    number = "3",
    pages = "91--100",
    month = "may",
    abstract = "Segmentation of volumetric data is an important part of many analysis  pipelines, but frequently requires manual inspection and correction.  While plenty of volume editing techniques exist, it remains cumbersome  and error-prone for the user to find and select appropriate regions  for editing. We propose an approach to improve volume editing by  detecting potential segmentation defects while considering the underlying  structure of the object of interest. Our method is based on a novel  histogram dissimilarity measure between individual regions, derived  from structural information extracted from the initial segmentation.  Based on this information, our interactive system guides the user  towards potential defects, provides integrated tools for their inspection,  and automatically generates suggestions for their resolution. We  demonstrate that our approach can reduce interaction effort and supports  the user in a comprehensive investigation for high-quality segmentations.",
    pdf = "pdfs/Karimov-2015-GVE.pdf",
    images = "images/Karimov-2015-GVE.jpg",
    thumbnails = "images/Karimov-2015-GVE.png",
    youtube = "https://www.youtube.com/watch?v=zjTYkXTm_dM",
    doi = "10.1111/cgf.12621",
    event = "EuroVis 2015",
    keywords = "medical visualization, segmentation, volume editing, interaction",
    location = "Cagliari, Italy",
    owner = "bruckner",
    timestamp = "2015.06.08",
    url = "http://www.cg.tuwien.ac.at/research/publications/2015/karimov-2015-HD/"
    }
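    As a stand-in for the paper's novel measure, a standard histogram dissimilarity such as the chi-squared distance conveys the mechanics of comparing per-region histograms; this is an illustrative choice, not the measure proposed in the paper.

        import numpy as np

        def chi2_dissimilarity(h1, h2, eps=1e-12):
            """Chi-squared dissimilarity between two histograms, normalized to
            unit mass first; 0 means identical distributions."""
            p = h1 / (h1.sum() + eps)
            q = h2 / (h2.sum() + eps)
            return 0.5 * float(np.sum((p - q) ** 2 / (p + q + eps)))
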

2014

    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Schmidt-2014-YMC,
    author = "Johanna Schmidt and Reinhold Preiner and Thomas Auzinger and Michael Wimmer and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "YMCA - Your Mesh Comparison Application",
    booktitle = "Proceedings of IEEE VAST 2014",
    year = "2014",
    pages = "153--62",
    month = "nov",
    abstract = "Polygonal meshes can be created in several different ways. In this  paper we focus on the reconstruction of meshes from point clouds,  which are sets of points in 3D. Several algorithms that tackle this  task already exist, but they have different benefits and drawbacks,  which leads to a large number of possible reconstruction results  (i.e., meshes). The evaluation of those techniques requires extensive  comparisons between different meshes which is up to now done by either  placing images of rendered meshes side-by-side, or by encoding differences  by heat maps. A major drawback of both approaches is that they do  not scale well with the number of meshes. This paper introduces a  new comparative visual analysis technique for 3D meshes which enables  the simultaneous comparison of several meshes and allows for the  interactive exploration of their differences. Our approach gives  an overview of the differences of the input meshes in a 2D view.  By selecting certain areas of interest, the user can switch to a  3D representation and explore the spatial differences in detail.  To inspect local variations, we provide a magic lens tool in 3D.  The location and size of the lens provide further information on  the variations of the reconstructions in the selected area. With  our comparative visualization approach, differences between several  mesh reconstruction algorithms can be easily localized and inspected.",
    pdf = "pdfs/Schmidt-2014-YMC.pdf",
    images = "images/Schmidt-2014-YMC.jpg",
    thumbnails = "images/Schmidt-2014-YMC.png",
    youtube = "https://www.youtube.com/watch?v=1s-AmFCQRzM",
    doi = "10.1109/VAST.2014.7042491",
    event = "IEEE VIS 2014",
    keywords = "visual analysis, comparative visualization, 3D data exploration, focus+context, mesh comparison",
    location = "Paris, France",
    proceedings = "Proceedings of IEEE VAST 2014",
    url = "http://www.cg.tuwien.ac.at/research/publications/2014/ymca/"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Waldner-2014-GHI,
    author = "Manuela Waldner and Stefan Bruckner and Ivan Viola",
    title = "Graphical Histories of Information Foraging",
    booktitle = "Proceedings of NordiCHI 2014",
    year = "2014",
    pages = "295--304",
    month = "oct",
    abstract = "During information foraging, knowledge workers iteratively seek, filter,  read, and extract information. When using multiple information sources  and different applications for information processing, re-examination  of activities for validation of previous decisions or re-discovery  of previously used information sources is challenging. In this paper,  we present a novel representation of cross-application histories  to support recall of past operations and re-discovery of information  resources. Our graphical history consists of a cross-scale visualization  combining an overview node-link diagram of used desktop resources  with nested (animated) snapshot sequences, based on a recording of  the visual screen output during the users’ desktop work. This representation  makes key elements of the users’ tasks visually stand out, while  exploiting the power of visual memory to recover subtle details of  their activities. In a preliminary study, users found our graphical  history helpful to recall details of an information foraging task  and commented positively on the ability to expand overview nodes  into snapshot and video sequences.",
    pdf = "pdfs/Waldner-2014-GHI.pdf",
    images = "images/Waldner-2014-GHI.jpg",
    thumbnails = "images/Waldner-2014-GHI.png",
    doi = "10.1145/2639189.2641202",
    keywords = "interaction history, graph visualization, provenance",
    owner = "bruckner",
    timestamp = "2014.12.30",
    url = "http://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-ghi/"
    }
    [PDF] [VID] [Bibtex]
    @INPROCEEDINGS {Kolesar-2014-IPT,
    author = "Ivan Kolesar and Julius Parulek and Ivan Viola and Stefan Bruckner and Anne-Kristin Stavrum and Helwig Hauser",
    title = "Illustrating Polymerization using Three-level Model Fusion",
    booktitle = "Proceedings of IEEE BioVis 2014",
    year = "2014",
    month = "aug",
    abstract = "Research in cell biology is steadily contributing new knowledge about  many different aspects of physiological processes like polymerization,  both with respect to the involved molecular structures as well as  their related function. Illustrations of the spatio-temporal development  of such processes are not only used in biomedical education, but  also can serve scientists as an additional platform for in-silico  experiments. In this paper, we contribute a new, three-level modeling  approach to illustrate physiological processes from the class of  polymerization at different time scales. We integrate physical and  empirical modeling, according to which approach suits the different  involved levels of detail best, and we additionally enable a simple  form of interactive steering while the process is illustrated. We  demonstrate the suitability of our approach in the context of several  polymerization processes and report from a first evaluation with  domain experts.",
    pdf = "pdfs/Kolesar-2014-IPT.pdf",
    vid = "vids/Kolesar14Polymers.mp4",
    images = "images/Kolesar-2014-IPT.jpg",
    thumbnails = "images/Kolesar-2014-IPT.png",
    keywords = "biochemical visualization, L-system modeling, multi-agent modeling, visualization of physiology, polymerization",
    owner = "bruckner",
    project = "physioillustration",
    timestamp = "2014.12.29"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Parulek-2014-CLV,
    author = "Julius Parulek and Daniel J{\"o}nsson and Timo Ropinski and Stefan Bruckner and Anders Ynnerman and Ivan Viola",
    title = "Continuous Levels-of-Detail and Visual Abstraction for Seamless Molecular Visualization",
    journal = "Computer Graphics Forum",
    year = "2014",
    volume = "33",
    number = "6",
    pages = "276--287",
    month = "sep",
    abstract = "Molecular visualization is often challenged with rendering of large  molecular structures in real time. We introduce a novel approach  that enables us to show even large protein complexes. Our method  is based on the level-of-detail concept, where we exploit three different  abstractions combined in one visualization. Firstly, molecular surface  abstraction exploits three different surfaces, solvent-excluded surface  (SES), Gaussian kernels and van der Waals spheres, combined as one  surface by linear interpolation. Secondly, we introduce three shading  abstraction levels and a method for creating seamless transitions  between these representations. The SES representation with full shading  and added contours stands in focus while on the other side a sphere  representation of a cluster of atoms with constant shading and without  contours provide the context. Thirdly, we propose a hierarchical  abstraction based on a set of clusters formed on molecular atoms.  All three abstraction models are driven by one importance function  classifying the scene into the near-, mid- and far-field. Moreover,  we introduce a methodology to render the entire molecule directly  using the A-buffer technique, which further improves the performance.  The rendering performance is evaluated on series of molecules of  varying atom counts.",
    pdf = "pdfs/Parulek-2014-CLV.pdf",
    images = "images/Parulek-2014-CLV.jpg",
    thumbnails = "images/Parulek-2014-CLV.png",
    issn = "1467-8659",
    doi = "10.1111/cgf.12349",
    keywords = "level of detail algorithms, implicit surfaces, clustering, scientific visualization",
    project = "physioillustration"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Swoboda-2014-VQA,
    author = "Nicolas Swoboda and Judith Moosburner and Stefan Bruckner and Jai Y. Yu and Barry J. Dickson and Katja B{\"u}hler",
    title = "Visual and Quantitative Analysis of Higher Order Arborization Overlaps for Neural Circuit Research",
    booktitle = "Proceedings of VCBM 2014",
    year = "2014",
    pages = "107--116",
    month = "sep",
    abstract = "Neuroscientists investigate neural circuits in the brain of the common  fruit fly Drosophila melanogaster to discover how complex behavior  is generated. Hypothesis building on potential connections between  individual neurons is an essential step in the discovery of circuits  that govern a specific behavior. Overlaps of arborizations of two  or more neurons indicate a potential anatomical connection, i.e.  the presence of joint synapses responsible for signal transmission  between neurons. Obviously, the number of higher order overlaps (i.e.  overlaps of three and more arborizations) increases exponentially  with the number of neurons under investigation making it almost impossible  to precompute quantitative information for all possible combinations.  Thus, existing solutions are restricted to pairwise comparison of  overlaps as they are relying on precomputed overlap quantification.  Analyzing overlaps by visual inspection of more than two arborizations  in 2D sections or in 3D is impeded by visual clutter or occlusion.  This work contributes a novel tool that complements existing methods  for potential connectivity exploration by providing for the first  time the possibility to compute and visualize higher order arborization  overlaps on the fly and to interactively explore this information  in its spatial anatomical context and on a quantitative level. Qualitative  evaluation with neuroscientists and non-expert users demonstrated  the utility and usability of the tool.",
    pdf = "pdfs/Swoboda-2014-VQA.pdf",
    images = "images/Swoboda-2014-VQA.jpg",
    thumbnails = "images/Swoboda-2014-VQA.png",
    youtube = "https://www.youtube.com/watch?v=iW2iVppPnsE",
    note = "VCBM 2014 Best Paper Honorable Mention",
    doi = "10.2312/vcbm.20141189",
    event = "VCBM 2014",
    keywords = "visual analysis, neurobiology",
    location = "Vienna, Austria"
    }
    [PDF] [DOI] [Bibtex]
    @INCOLLECTION {Amirkhanov-2014-HSH,
    author = "Artem Amirkhanov and Stefan Bruckner and Christoph Heinzl and Meister Eduard Gr{\"o}ller",
    title = "The Haunted Swamps of Heuristics: Uncertainty in Problem Solving",
    booktitle = "Scientific Visualization: Uncertainty, Multifield, Biomedical, and Scalable Visualization",
    publisher = "Springer",
    year = "2014",
    editor = "Min Chen and Hans Hagen and Charles D. Hansen and Christopher R. Johnson and Arie E. Kaufman",
    series = "Mathematics and Visualization",
    chapter = "5",
    pages = "51--60",
    month = "sep",
    abstract = "In scientific visualization the key task of research is the provision  of insight into a problem. Finding the solution to a problem may  be seen as finding a path through some rugged terrain which contains  mountains, chasms, swamps, and few flatlands. This path - an algorithm  discovered by the researcher - helps users to easily move around  this unknown area. If this way is a wide road paved with stones it  will be used for a long time by many travelers. However, a narrow  footpath leading through deep forests and deadly swamps will attract  only a few adventure seekers. There are many different paths with  different levels of comfort, length, and stability, which are uncertain  during the research process. Finding a systematic way to deal with  this uncertainty can greatly assist the search for a safe path which  is in our case the development of a suitable visualization algorithm  for a specific problem. In this work we will analyze the sources  of uncertainty in heuristically solving visualization problems and  will propose directions to handle these uncertainties.",
    pdf = "pdfs/Amirkhanov-2014-HSH.pdf",
    images = "images/Amirkhanov-2014-HSH.jpg",
    thumbnails = "images/Amirkhanov-2014-HSH.png",
    doi = "10.1007/978-1-4471-6497-5_5",
    keywords = "uncertainty, heuristics, problem solving",
    owner = "bruckner",
    timestamp = "2014.12.30",
    url = "http://www.springer.com/mathematics/computational+science+%26+engineering/book/978-1-4471-6496-8"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Sedlmair-2014-VPS,
    author = "Michael Sedlmair and Christoph Heinzl and Stefan Bruckner and Harald Piringer and Torsten M{\"o}ller",
    title = "Visual Parameter Space Analysis: A Conceptual Framework",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2014",
    volume = "20",
    number = "12",
    pages = "2161--2170",
    month = "dec",
    abstract = "Various case studies in different application domains have shown the  great potential of visual parameter space analysis to support validating  and using simulation models. In order to guide and systematize research  endeavors in this area, we provide a conceptual framework for visual  parameter space analysis problems. The framework is based on our  own experience and a structured analysis of the visualization literature.  It contains three major components: (1) a data flow model that helps  to abstractly describe visual parameter space analysis problems independent  of their application domain; (2) a set of four navigation strategies  of how parameter space analysis can be supported by visualization  tools; and (3) a characterization of six analysis tasks. Based on  our framework, we analyze and classify the current body of literature,  and identify three open research gaps in visual parameter space analysis.  The framework and its discussion are meant to support visualization  designers and researchers in characterizing parameter space analysis  problems and to guide their design and evaluation processes.",
    pdf = "pdfs/Sedlmair-2014-VPS.pdf",
    images = "images/Sedlmair-2014-VPS.jpg",
    thumbnails = "images/Sedlmair-2014-VPS.png",
    doi = "10.1109/TVCG.2014.2346321",
    event = "IEEE VIS 2014",
    keywords = "parameter space analysis, input-output model, simulation, task characterization, literature analysis",
    location = "Paris, France"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Rautek-2014-VSI,
    author = "Peter Rautek and Stefan Bruckner and Meister Eduard Gr{\"o}ller and Markus Hadwiger",
    title = "ViSlang: A System for Interpreted Domain-Specific Languages for Scientific Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2014",
    volume = "20",
    number = "12",
    pages = "2388--2396",
    month = "dec",
    abstract = "Researchers from many domains use scientific visualization in their  daily practice. Existing implementations of algorithms usually come  with a graphical user interface (high-level interface), or as software  library or source code (low-level interface). In this paper we present  a system that integrates domain-specific languages (DSLs) and facilitates  the creation of new DSLs. DSLs provide an effective interface for  domain scientists avoiding the difficulties involved with low-level  interfaces and at the same time offering more flexibility than high-level  interfaces. We describe the design and implementation of ViSlang,  an interpreted language specifically tailored for scientific visualization.  A major contribution of our design is the extensibility of the ViSlang  language. Novel DSLs that are tailored to the problems of the domain  can be created and integrated into ViSlang. We show that our approach  can be added to existing user interfaces to increase the flexibility  for expert users on demand, but at the same time does not interfere  with the user experience of novice users. To demonstrate the flexibility  of our approach we present new DSLs for volume processing, querying  and visualization. We report the implementation effort for new DSLs  and compare our approach with Matlab and Python implementations in  terms of run-time performance.",
    pdf = "pdfs/Rautek-2014-VSI.pdf",
    images = "images/Rautek-2014-VSI.jpg",
    thumbnails = "images/Rautek-2014-VSI.png",
    youtube = "https://www.youtube.com/watch?v=DbWazwyMRNw",
    doi = "10.1109/TVCG.2014.2346318",
    event = "IEEE VIS 2014",
    keywords = "domain-specific languages, volume visualization, volume visualization framework",
    location = "Paris, France",
    url = "http://vcc.kaust.edu.sa/Pages/Pub-ViSlang-Sys-Int-Dom-Spe-Lang-SC.aspx"
    }
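
    The extensibility idea can be illustrated with a tiny interpreter in Python. This is emphatically not ViSlang syntax or its API; all names below (register_statement, smooth, threshold) are hypothetical, and the sketch only shows the general pattern of dispatching DSL statements to registered handlers that thread a dataset through a pipeline.

        handlers = {}

        def register_statement(keyword):
            """Register a handler for statements beginning with `keyword`."""
            def wrap(fn):
                handlers[keyword] = fn
                return fn
            return wrap

        @register_statement("smooth")
        def smooth(args, volume):
            print("smoothing with sigma =", args[0])
            return volume  # a real handler would return a filtered volume

        @register_statement("threshold")
        def threshold(args, volume):
            print("keeping values above", args[0])
            return volume

        def run(script, volume):
            """Interpret a script line by line, threading the volume through."""
            for line in script.strip().splitlines():
                keyword, *args = line.split()
                volume = handlers[keyword](args, volume)
            return volume

        run("smooth 2.0\nthreshold 0.5", volume=object())
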
    [PDF] [DOI] [Bibtex]
    @INCOLLECTION {Pfister-2014-VIC,
    author = "Hanspeter Pfister and Verena Kaynig and Charl P. Botha and Stefan Bruckner and Vincent J. Dercksen and Hans-Christian Hege and Jos B.T.M. Roerdink",
    title = "Visualization in Connectomics",
    booktitle = "Scientific Visualization: Uncertainty, Multifield, Biomedical, and Scalable Visualization",
    publisher = "Springer",
    year = "2014",
    editor = "Min Chen and Hans Hagen and Charles D. Hansen and Christopher R. Johnson and Arie E. Kaufman",
    series = "Mathematics and Visualization",
    chapter = "21",
    pages = "221--245",
    month = "sep",
    abstract = "Connectomics is a branch of neuroscience that attempts to create a  connectome, i.e., a completemap of the neuronal system and all connections  between neuronal structures. This representation can be used to understand  how functional brain states emerge from their underlying anatomical  structures and how dysfunction and neuronal diseases arise. We review  the current state-of-the-art of visualization and image processing  techniques in the field of connectomics and describe a number of  challenges. After a brief summary of the biological background and  an overview of relevant imaging modalities, we review current techniques  to extract connectivit",
    pdf = "pdfs/Pfister-2014-VIC.pdf",
    images = "images/Pfister-2014-VIC.jpg",
    thumbnails = "images/Pfister-2014-VIC.png",
    doi = "10.1007/978-1-4471-6497-5_21",
    keywords = "connectomics, neuroscience, visualization, imaging",
    owner = "bruckner",
    timestamp = "2014.12.30",
    url = "http://www.springer.com/mathematics/computational+science+%26+engineering/book/978-1-4471-6496-8"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Angelelli-2014-LUP,
    author = "Paolo Angelelli and Sten Roar Snare and Siri Ann Nyrnes and Stefan Bruckner and Helwig Hauser and Lasse L{\o}vstakken",
    title = "Live Ultrasound-based Particle Visualization of Blood Flow in the Heart",
    booktitle = "Proceedings of SCCG 2014",
    year = "2014",
    pages = "42--49",
    month = "may",
    abstract = "We introduce an integrated method for the acquisition, processing  and visualization of live, in-vivo blood flow in the heart. The method  is based on ultrasound imaging, using a plane wave acquisition acquisition  protocol, which produces high frame rate ensemble data that are efficiently  processed to extract directional flow information not previously  available based on conventional Doppler imaging. These data are then  visualized using a tailored pathlet-based visualization approach,  to convey the slice-contained dynamic movement of the blood in the  heart. This is especially important when imaging patients with possible  congenital heart diseases, who typically exhibit complex flow patterns  that are challenging to interpret. With this approach, it now is  possible for the first time to achieve a real-time integration-based  visualization of 2D blood flow aspects based on ultrasonic imaging.  We demonstrate our solution in the context of selected cases of congenital  heart diseases in neonates, showing how our technique allows for  a more accurate and intuitive visualization of shunt flow and vortices.",
    pdf = "pdfs/Angelelli-2014-LUP.pdf",
    images = "images/Angelelli-2014-LUP.jpg",
    thumbnails = "images/Angelelli-2014-LUP.png",
    doi = "10.1145/2643188.2643200",
    keywords = "ultrasound medical visualization, real-time visualization, blood flow visualization",
    url = "http://dx.doi.org/10.1145/2643188.2643200"
    }
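
    A pathlet, as used above, is a short-lived particle trace that is advected through the measured velocity field and redrawn every frame. The Python fragment below is a minimal sketch of this integration loop under simplifying assumptions: a toy analytic 2D field stands in for the ultrasound-derived flow vectors, and forward Euler replaces a higher-order integrator.

        import numpy as np

        def velocity(p, t):
            """Toy time-varying 2D field: a slowly pulsating vortex."""
            x, y = p
            return np.array([-y, x]) * (1.0 + 0.5 * np.sin(t))

        rng = np.random.default_rng(0)
        pathlets = [rng.uniform(-1.0, 1.0, size=2) for _ in range(50)]
        ages = rng.integers(0, 30, size=50)

        dt, max_age = 0.02, 30
        for frame in range(100):
            t = frame * dt
            for i, p in enumerate(pathlets):
                pathlets[i] = p + dt * velocity(p, t)  # forward Euler step
                ages[i] += 1
                if ages[i] > max_age:                  # respawn expired pathlets
                    pathlets[i] = rng.uniform(-1.0, 1.0, size=2)
                    ages[i] = 0
        # Each frame, the recent positions of every pathlet would be drawn
        # with age-based fading to convey the in-slice flow direction.
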
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Kolesar-2014-IIP,
    author = "Ivan Kolesar and Julius Parulek and Ivan Viola and Stefan Bruckner and Anne-Kristin Stavrum and Helwig Hauser",
    title = "Interactively Illustrating Polymerization using Three-level Model Fusion",
    journal = "BMC Bioinformatics",
    year = "2014",
    volume = "15",
    pages = "345",
    month = "oct",
    abstract = "Research in cell biology is steadily contributing new knowledge about  many aspects of physiological processes, both with respect to the  involved molecular structures as well as their related function.  Illustrations of the spatio-temporal development of such processes  are not only used in biomedical education, but also can serve scientists  as an additional platform for in-silico experiments. Results In this  paper, we contribute a new, three-level modeling approach to illustrate  physiological processes from the class of polymerization at different  time scales. We integrate physical and empirical modeling, according  to which approach best suits the different involved levels of detail,  and we additionally enable a form of interactive steering, while  the process is illustrated. We demonstrate the suitability of our  approach in the context of several polymerization processes and report  from a first evaluation with domain experts. Conclusion We conclude  that our approach provides a new, hybrid modeling approach for illustrating  the process of emergence in physiology, embedded in a densely filled  environment. Our approach of a complementary fusion of three systems  combines the strong points from the different modeling approaches  and is capable to bridge different spatial and temporal scales.",
    pdf = "pdfs/Kolesar-2014-IIP.pdf",
    images = "images/Kolesar-2014-IIP.jpg",
    thumbnails = "images/Kolesar-2014-IIP.png",
    youtube = "https://www.youtube.com/watch?v=iMl5nDicmhg",
    doi = "10.1186/1471-2105-15-345",
    keywords = "biochemical visualization, L-system modeling, multi-agent modeling, visualization of physiology, polymerization",
    owner = "bruckner",
    project = "physioillustration",
    timestamp = "2014.12.29",
    url = "http://www.ii.uib.no/vis/projects/physioillustration/research/interactive-molecular-illustration.html"
    }
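
    One of the three fused levels is an L-system, i.e., a parallel string-rewriting grammar whose generations can drive the growth of a polymer chain. The sketch below shows only that rewriting mechanism with a made-up two-symbol rule set; the paper's actual models couple such rules with agent-based and physical simulation, which this fragment does not attempt to reproduce.

        # Made-up rules: a mature monomer (A) recruits a new one (B),
        # and a new monomer matures in the next generation.
        rules = {"A": "AB", "B": "A"}

        def rewrite(state, rules):
            """Apply all rules in parallel to produce the next generation."""
            return "".join(rules.get(symbol, symbol) for symbol in state)

        state = "A"
        for generation in range(6):
            print(generation, state)
            state = rewrite(state, rules)
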
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Mindek-2014-MSS,
    author = "Peter Mindek and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Managing Spatial Selections with Contextual Snapshots",
    journal = "Computer Graphics Forum",
    year = "2014",
    volume = "33",
    number = "8",
    pages = "132--144",
    month = "dec",
    abstract = "Spatial selections are a ubiquitous concept in visualization. By localizing  particular features, they can be analysed and compared in different  views. However, the semantics of such selections often depend on  specific parameter settings and it can be difficult to reconstruct  them without additional information. In this paper, we present the  concept of contextual snapshots as an effective means for managing  spatial selections in visualized data. The selections are automatically  associated with the context in which they have been created. Contextual  snapshots can also be used as the basis for interactive integrated  and linked views, which enable in-place investigation and comparison  of multiple visual representations of data. Our approach is implemented  as a flexible toolkit with well-defined interfaces for integration  into existing systems. We demonstrate the power and generality of  our techniques by applying them to several distinct scenarios such  as the visualization of simulation data, the analysis of historical  documents and the display of anatomical data.",
    pdf = "pdfs/Mindek-2014-MSS.pdf",
    images = "images/Mindek-2014-MSS.jpg",
    thumbnails = "images/Mindek-2014-MSS.png",
    youtube = "https://www.youtube.com/watch?v=rxEf-Okp8Xo",
    doi = "10.1111/cgf.12406",
    keywords = "interaction, visual analytics, spatial selections, annotations",
    url = "http://www.cg.tuwien.ac.at/downloads/csl/"
    }
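
    The core idea is that a selection is only meaningful together with the parameter context in which it was made. A minimal sketch of such a record in Python follows; the field and method names are illustrative, not the toolkit's actual interface, and `viewer` is a hypothetical host application.

        from dataclasses import dataclass, field
        from typing import Any, Dict, List

        @dataclass
        class ContextualSnapshot:
            selection: Any                  # e.g., a mask or lasso polygon
            context: Dict[str, Any]         # parameter settings at creation time
            annotations: List[str] = field(default_factory=list)

            def restore(self, viewer):
                """Re-apply the stored context before showing the selection."""
                for name, value in self.context.items():
                    viewer.set_parameter(name, value)   # hypothetical API
                viewer.show_selection(self.selection)   # hypothetical API

        snap = ContextualSnapshot(
            selection=[(10, 12), (48, 80)],
            context={"isovalue": 0.37, "timestep": 12, "camera": "front"},
            annotations=["high-pressure pocket"],
        )
        print(snap.context["isovalue"], snap.annotations)
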
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Solteszova-2014-VPS,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and {\AA}smund Birkeland and Ivan Viola and Stefan Bruckner",
    title = "Visibility-Driven Processing of Streaming Volume Data",
    booktitle = "Proceedings of VCBM 2014",
    year = "2014",
    pages = "127--136",
    month = "sep",
    abstract = "In real-time volume data acquisition, such as 4D ultrasound, the raw  data is challenging to visualize directly without additional processing.  Noise removal and feature detection are common operations, but many  methods are too costly to compute over the whole volume when dealing  with live streamed data. In this paper, we propose a visibility-driven  processing scheme for handling costly on-the-fly processing of volumetric  data in real-time. In contrast to the traditional visualization pipeline,  our scheme utilizes a fast computation of the potentially visible  subset of voxels which significantly reduces the amount of data required  to process. As filtering operations modify the data values which  may affect their visibility, our method for visibility-mask generation  ensures that the set of elements deemed visible does not change after  processing. Our approach also exploits the visibility information  for the storage of intermediate values when multiple operations are  performed in sequence, and can therefore significantly reduce the  memory overhead of longer filter pipelines. We provide a thorough  technical evaluation of the approach and demonstrate it on several  typical scenarios where on-the-fly processing is required.",
    pdf = "pdfs/Solteszova-2014-VPS.pdf",
    images = "images/Solteszova-2014-VPS.jpg",
    thumbnails = "images/Solteszova-2014-VPS.png",
    youtube = "https://www.youtube.com/watch?v=WJgc6BX1qig",
    note = "VCBM 2014 Best Paper Award",
    doi = "10.2312/vcbm.20141198",
    event = "VCBM 2014",
    keywords = "ultrasound, visibility-driven processing, filtering",
    location = "Vienna, Austria"
    }
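
    A much simplified sketch of the visibility-driven idea, assuming axis-aligned front-to-back traversal: voxels behind the point where accumulated opacity saturates cannot contribute to the image and can be skipped by costly filters. The real method additionally guarantees that the mask remains conservative after the data values change, which this fragment omits.

        import numpy as np
        from scipy.ndimage import median_filter

        rng = np.random.default_rng(1)
        volume = rng.random((64, 64, 64)).astype(np.float32)
        opacity = 0.1 * volume                    # toy transfer function

        # Transmittance remaining after each slice along the viewing axis.
        transmittance = np.cumprod(1.0 - opacity, axis=0)
        visible = np.ones(volume.shape, dtype=bool)
        visible[1:] = transmittance[:-1] > 0.05   # some light still arrives

        # Filter the volume, keeping results only for potentially visible
        # voxels; a real implementation restricts the computation itself.
        volume[visible] = median_filter(volume, size=3)[visible]
        print(f"{visible.mean():.1%} of voxels potentially visible")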

2013

    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Mindek-2013-CSE,
    author = "Peter Mindek and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Contextual Snapshots: Enriched Visualization with Interactive Spatial Annotations",
    booktitle = "Proceedings of SCCG 2013",
    year = "2013",
    pages = "59--66",
    month = "may",
    abstract = "Spatial selections are a ubiquitous concept in visualization. By localizing  particular features, they can be analyzed and compared in different  views. However, the semantics of such selections are often dependent  on other parameter settings and it can be difficult to reconstruct  them without additional information. In this paper, we present the  concept of contextual snapshots as an effective means for managing  spatial selections in visualized data. The selections are automatically  associated with the context in which they have been created. Contextual  snapshots can be also used as the basis for interactive integrated  and linked views, which enable in-place investigation and comparison  of multiple visual representations of data. Our approach is implemented  as a flexible toolkit with welldefined interfaces for integration  into existing systems. We demonstrate the power and generality of  our techniques by applying them to several distinct scenarios such  as the visualization of simulation data and the analysis of historical  documents.",
    pdf = "pdfs/Mindek-2013-CSE.pdf",
    images = "images/Mindek-2013-CSE.jpg",
    thumbnails = "images/Mindek-2013-CSE.png",
    youtube = "https://www.youtube.com/watch?v=djuqJgixUCs",
    note = "SCCG 2013 Best Paper Award",
    doi = "10.1145/2508244.2508251",
    keywords = "spatial selections, annotations, linked views, provenance",
    location = "Smolenice, Slovakia",
    url = "http://www.cg.tuwien.ac.at/research/publications/2013/mindek-2013-csl/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Auzinger-2013-VVC,
    author = "Thomas Auzinger and Gabriel Mistelbauer and Ivan Baclija and R{\"u}diger Schernthaner and Arnold K{\"o}chl and Michael Wimmer and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "Vessel Visualization using Curved Surface Reformation",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2013",
    volume = "19",
    number = "12",
    pages = "2858--2867",
    month = "dec",
    abstract = "Visualizations of vascular structures are frequently used in radiological  investigations to detect and analyze vascular diseases. Obstructions  of the blood flow through a vessel are one of the main interests  of physicians, and several methods have been proposed to aid the  visual assessment of calcifications on vessel walls. Curved Planar  Reformation (CPR) is a wide-spread method that is designed for peripheral  arteries which exhibit one dominant direction. To analyze the lumen  of arbitrarily oriented vessels, Centerline Reformation (CR) has  been proposed. Both methods project the vascular structures into  2D image space in order to reconstruct the vessel lumen. In this  paper, we propose Curved Surface Reformation (CSR), a technique that  computes the vessel lumen fully in 3D. This offers high-quality interactive  visualizations of vessel lumina and does not suffer from problems  of earlier methods such as ambiguous visibility cues or premature  discretization of centerline data. Our method maintains exact visibility  information until the final query of the 3D lumina data. We also  present feedback from several domain experts.",
    pdf = "pdfs/Auzinger-2013-VVC.pdf",
    images = "images/Auzinger-2013-VVC.jpg",
    thumbnails = "images/Auzinger-2013-VVC.png",
    youtube = "https://www.youtube.com/watch?v=rESIFaO_-Gs",
    doi = "10.1109/TVCG.2013.215",
    event = "IEEE VIS 2013",
    keywords = "volume Rendering, reformation, vessel, surface approximation",
    url = "http://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_Mistelbauer_2013_CSR/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Patel-2013-ICS,
    author = "Daniel Patel and Veronika \v{S}olt{\'e}szov{\'a} and Jan Martin Nordbotten and Stefan Bruckner",
    title = "Instant Convolution Shadows for Volumetric Detail Mapping",
    journal = "ACM Transactions on Graphics",
    year = "2013",
    volume = "32",
    number = "5",
    pages = "154:1--154:18",
    month = "sep",
    abstract = "In this article, we present a method for rendering dynamic scenes  featuring translucent procedural volumetric detail with all-frequency  soft shadows being cast from objects residing inside the view frustum.  Our approach is based on an approximation of physically correct shadows  from distant Gaussian area light sources positioned behind the view  plane, using iterative convolution. We present a theoretical and  empirical analysis of this model and propose an efficient class of  convolution kernels which provide high quality at interactive frame  rates. Our GPU-based implementation supports arbitrary volumetric  detail maps, requires no precomputation, and therefore allows for  real-time modi?cation of all rendering parameters.",
    pdf = "pdfs/Patel-2013-ICS.pdf",
    images = "images/Patel-2013-ICS.jpg",
    thumbnails = "images/Patel-2013-ICS.png",
    youtube = "https://www.youtube.com/watch?v=lhGWgew3HXY,https://www.youtube.com/watch?v=XrhYjgQxfb0",
    doi = "10.1145/2492684",
    keywords = "shadows, volumetric effects, procedural texturing, filtering",
    project = "geoillustrator",
    url = "http://dl.acm.org/citation.cfm?id=2492684"
    }
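
    A minimal sketch of the iterative-convolution idea, assuming slice-based front-to-back compositing and a single Gaussian area light behind the view plane: a 2D light buffer is attenuated by each slice's opacity and re-blurred at every step, so occluders cast shadows whose penumbra widens with distance, approximating an area light without precomputation.

        import numpy as np
        from scipy.ndimage import gaussian_filter

        rng = np.random.default_rng(2)
        # Sparse opaque blockers in a toy volume (slices, rows, columns).
        alpha = (rng.random((32, 128, 128)) > 0.98).astype(np.float32)

        light = np.ones((128, 128), dtype=np.float32)  # unoccluded light
        for z in range(alpha.shape[0]):                # front to back
            shading = light.copy()        # light arriving at slice z
            light *= 1.0 - alpha[z]       # attenuate by this slice
            light = gaussian_filter(light, sigma=1.0)  # widen the penumbra
        # During compositing, `shading` would modulate slice z's color.
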
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Mistelbauer-2013-VVC,
    author = "Gabriel Mistelbauer and Anca Morar and Andrej Varchola and R{\"u}diger Schernthaner and Ivan Baclija and Arnold K{\"o}chl and Armin Kanitsar and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Vessel Visualization using Curvicircular Feature Aggregation",
    journal = "Computer Graphics Forum",
    year = "2013",
    volume = "32",
    number = "3",
    pages = "231--240",
    month = "jun",
    abstract = "Radiological investigations are common medical practice for the diagnosis  of peripheral vascular diseases. Existing visualization methods such  as Curved Planar Reformation (CPR) depict calcifications on vessel  walls to determine if blood is still able to flow. While it is possible  with conventional CPR methods to examine the whole vessel lumen by  rotating around the centerline of a vessel, we propose Curvicircular  Feature Aggregation (CFA), which aggregates these rotated images  into a single view. By eliminating the need for rotation, vessels  can be investigated by inspecting only one image. This method can  be used as a guidance and visual analysis tool for treatment planning.  We present applications of this technique in the medical domain and  give feedback from radiologists.",
    pdf = "pdfs/Mistelbauer-2013-VVC.pdf",
    images = "images/Mistelbauer-2013-VVC.jpg",
    thumbnails = "images/Mistelbauer-2013-VVC.png",
    youtube = "https://www.youtube.com/watch?v=WwF5GPOs1pA",
    doi = "10.1111/cgf.12110",
    event = "EuroVis 2013",
    keywords = "medical visualization, vessel visualization, vessel reformation",
    location = "Leipzig, Germany",
    url = "http://www.cg.tuwien.ac.at/research/publications/2013/mistelbauer-2013-cfa/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Karimov-2013-VSV,
    author = "Alexey Karimov and Gabriel Mistelbauer and Johanna Schmidt and Peter Mindek and Elisabeth Schmidt and Timur Sharipov and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "ViviSection: Skeleton-based Volume Editing",
    journal = "Computer Graphics Forum",
    year = "2013",
    volume = "32",
    number = "3",
    pages = "461--470",
    month = "jun",
    abstract = "Volume segmentation is important in many applications, particularly  in the medical domain. Most segmentation techniques, however, work  fully automatically only in very restricted scenarios and cumbersome  manual editing of the results is a common task. In this paper, we  introduce a novel approach for the editing of segmentation results.  Our method exploits structural features of the segmented object to  enable intuitive and robust correction and verification. We demonstrate  that our new approach can significantly increase the segmentation  quality even in difficult cases such as in the presence of severe  pathologies.",
    pdf = "pdfs/Karimov-2013-VSV.pdf",
    images = "images/Karimov-2013-VSV.jpg",
    thumbnails = "images/Karimov-2013-VSV.png",
    youtube = "https://www.youtube.com/watch?v=4s12ZbUyHiY",
    doi = "10.1111/cgf.12133",
    event = "EuroVis 2013",
    keywords = "volume visualization, volume editing, segmentation, interaction",
    location = "Leipzig, Germany",
    url = "http://www.cg.tuwien.ac.at/research/publications/2013/karimov-2013-vivisection/"
    }
    [PDF] [YT] [Bibtex]
    @ARTICLE {Mindek-2013-VPE,
    author = "Peter Mindek and Stefan Bruckner and Peter Rautek and Meister Eduard Gr{\"o}ller",
    title = "Visual Parameter Exploration in {GPU} Shader Space",
    journal = "Journal of WSCG",
    year = "2013",
    volume = "21",
    number = "3",
    pages = "225--234",
    month = "jun",
    abstract = "The wide availability of high-performance GPUs has made the use of  shader programs in visualization ubiquitous.Understanding shaders  is a challenging task. Frequently it is difficult to mentally reconstruct  the nature and types of transformations applied to the underlying  data during the visualization process. We propose a method for the  visual analysis of GPU shaders, which allows the flexible exploration  and investigation of algorithms, parameters, and their effects. We  introduce a method for extracting feature vectors composed of several  attributes of the shader, as well as a direct manipulation interface  for assigning semantics to them. The user interactively classifies  pixels of images which are rendered with the investigated shader.  The two resulting classes, a positive class and a negative one, are  employed to steer the visualization. Based on this information, we  can extract a wide variety of additional attributes and visualize  their relation to this classification. Our system allows an interactive  exploration of shader space and we demonstrate its utility for several  different applications.",
    pdf = "pdfs/Mindek-2013-VPE.pdf",
    images = "images/Mindek-2013-VPE.jpg",
    thumbnails = "images/Mindek-2013-VPE.png",
    youtube = "https://www.youtube.com/watch?v=Sk7EXvqCoxs",
    keywords = "parameter space exploration, shader augmentation",
    url = "http://www.cg.tuwien.ac.at/research/publications/2013/mindek-2013-pel/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Schmidt-2013-VVA,
    author = "Johanna Schmidt and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "VAICo: Visual Analysis for Image Comparison",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2013",
    volume = "19",
    number = "12",
    pages = "2090--2099",
    month = "dec",
    abstract = "Scientists, engineers, and analysts are confronted with ever larger  and more complex sets of data, whose analysis poses special challenges.  In many situations it is necessary to compare two or more datasets.  Hence there is a need for comparative visualization tools to help  analyze differences or similarities among datasets. In this paper  an approach for comparative visualization for sets of images is presented.  Well-established techniques for comparing images frequently place  them side-by-side. A major drawback of such approaches is that they  do not scale well. Other image comparison methods encode differences  in images by abstract parameters like color. In this case information  about the underlying image data gets lost. This paper introduces  a new method for visualizing differences and similarities in large  sets of images which preserves contextual information, but also allows  the detailed analysis of subtle variations. Our approach identifies  local changes and applies cluster analysis techniques to embed them  in a hierarchy. The results of this process are then presented in  an interactive web application which allows users to rapidly explore  the space of differences and drill-down on particular features. We  demonstrate the flexibility of our approach by applying it to multiple  distinct domains.",
    pdf = "pdfs/Schmidt-2013-VVA.pdf",
    images = "images/Schmidt-2013-VVA.jpg",
    thumbnails = "images/Schmidt-2013-VVA.png",
    youtube = "https://www.youtube.com/watch?v=wfBqKZLVszk",
    doi = "10.1109/TVCG.2013.213",
    event = "IEEE VIS 2013",
    keywords = "focus+context visualization, image set comparison, comparative visualization",
    url = "http://www.cg.tuwien.ac.at/research/publications/2013/schmidt-2013-vaico/"
    }
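
    A minimal sketch of that pipeline, assuming equally sized grayscale images: pixels that disagree across the set are grouped into connected regions, which are then organized by hierarchical clustering. The region descriptors (centroid and size) are a simplification of the system's richer features.

        import numpy as np
        from scipy import ndimage
        from scipy.cluster.hierarchy import fcluster, linkage

        rng = np.random.default_rng(3)
        images = 0.2 * rng.random((8, 64, 64))    # 8 low-noise images
        images[2:5, 10:20, 10:20] += 0.5          # variation in 3 images
        images[5:, 40:52, 40:52] += 0.5           # a second variation

        variability = images.std(axis=0)          # per-pixel disagreement
        changed = variability > 0.15              # binary change mask
        labels, n = ndimage.label(changed)        # connected change regions

        # Describe regions by centroid and size, then build a hierarchy.
        centroids = ndimage.center_of_mass(changed, labels, range(1, n + 1))
        sizes = ndimage.sum(changed, labels, range(1, n + 1))
        features = np.column_stack([centroids, sizes])
        tree = linkage(features, method="ward")
        print(n, "regions ->", fcluster(tree, t=2, criterion="maxclust"))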

2012

    [PDF] [DOI] [VID] [YT] [Bibtex]
    @ARTICLE {Birkeland-2012-IMC,
    author = "{\AA}smund Birkeland and Stefan Bruckner and Andrea Brambilla and Ivan Viola",
    title = "Illustrative Membrane Clipping",
    journal = "Computer Graphics Forum",
    year = "2012",
    volume = "31",
    number = "3",
    pages = "905--914",
    month = "jun",
    abstract = "Clipping is a fast, common technique for resolving occlusions. It  only requires simple interaction, is easily understandable, and thus  has been very popular for volume exploration. However, a drawback  of clipping is that the technique indiscriminately cuts through features.  Illustrators, for example, consider the structures in the vicinity  of the cut when visualizing complex spatial data and make sure that  smaller structures near the clipping plane are kept in the image  and not cut into fragments. In this paper we present a new technique,  which combines the simple clipping interaction with automated selective  feature preservation using an elastic membrane. In order to prevent  cutting objects near the clipping plane, the deformable membrane  uses underlying data properties to adjust itself to salient structures.  To achieve this behaviour, we translate data attributes into a potential  field which acts on the membrane, thus moving the problem of deformation  into the soft-body dynamics domain. This allows us to exploit existing  GPU-based physics libraries which achieve interactive frame rates.  For manual adjustment, the user can insert additional potential fields,  as well as pinning the membrane to interesting areas. We demonstrate  that our method can act as a flexible and non-invasive replacement  of traditional clipping planes.",
    pdf = "pdfs/Birkeland-2012-IMC.pdf",
    vid = "vids/Birkeland12Illustrative.avi",
    images = "images/Birkeland12Illustrative01.png, images/Birkeland12Illustrative02.png, images/Birkeland12Illustrative03.png",
    thumbnails = "images/Birkeland-2012-IMC.png",
    youtube = "https://www.youtube.com/watch?v=I89_--zul6c",
    note = "presented at EuroVis 2012",
    doi = "10.1111/j.1467-8659.2012.03083.x",
    event = "EuroVis 2012",
    keywords = "clipping, volume rendering, illustrative visualization",
    location = "Vienna, Austria",
    project = "illustrasound,medviz,illvis",
    url = "http://www.cg.tuwien.ac.at/research/publications/2012/Birkeland-2012-IMC/"
    }
    [PDF] [Bibtex]
    @MISC {Bruckner-2012-VEA-Thesis,
    author = "Stefan Bruckner",
    title = "Visual Exploration and Analysis of Volumetric Data",
    howpublished = "Habilitation Thesis",
    month = "mar",
    year = "2012",
    abstract = "Information technology has led to a rapid increase in the amount of  data that arise in areas such as biology, medicine, climate science,  and engineering. In many cases, these data are volumetric in nature,  i.e., they describe the distribution of one or several quantities  over a region in space. Volume visualization is the field of research  which investigates the transformation of such data sets into images  for purposes such as understanding structure or identifying features.  This thesis presents work to aid this process by improving the interactive  depiction, analysis, and exploration of volumetric data.",
    pdf = "pdfs/Bruckner-2012-VEA-Thesis.pdf",
    images = "images/Bruckner-2012-VEA-Thesis.jpg",
    thumbnails = "images/Bruckner-2012-VEA-Thesis.png",
    affiliation = "tuwien",
    keywords = "volume visualization, visual analysis, visual exploration",
    school = "Vienna University of Technology, Austria",
    url = "http://www.cg.tuwien.ac.at/research/publications/2012/Bruckner-2012-VEA/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Herghelegiu-2012-BPV,
    author = "Paul Herghelegiu and Vasile Manta and Radu Perin and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Biopsy Planner - Visual Analysis for Needle Pathway Planning in Deep Seated Brain Tumor Biopsy",
    journal = "Computer Graphics Forum",
    year = "2012",
    volume = "31",
    number = "3",
    pages = "1085--1094",
    month = "jun",
    abstract = "Biopsies involve taking samples from living tissue using a biopsy  needle. In current clinical practice they are a first mandatory step  before any further medical actions are planned. Performing a biopsy  on a deep seated brain tumor requires considerable time for establishing  and validating the desired biopsy needle pathway to avoid damage.  In this paper, we present a system for the visualization, analysis,  and validation of biopsy needle pathways. Our system uses a multi-level  approach for identifying stable needle placements which minimize  the risk of hitting blood vessels. This is one of the major dangers  in this type of intervention. Our approach helps in identifying and  visualizing the point on the pathway that is closest to a surrounding  blood vessel, requiring a closer inspection by the neurosurgeon.  An evaluation by medical experts is performed to demonstrate the  utility of our system.",
    pdf = "pdfs/Herghelegiu-2012-BPV.pdf",
    images = "images/Herghelegiu-2012-BPV.jpg",
    thumbnails = "images/Herghelegiu-2012-BPV.png",
    youtube = "https://www.youtube.com/watch?v=PBEv-D_0Zm8",
    affiliation = "tuwien",
    doi = "10.1111/j.1467-8659.2012.03101.x",
    event = "EuroVis 2012",
    keywords = "biopsy planning, medical visualization, visual analysis",
    location = "Vienna, Austria",
    url = "http://www.cg.tuwien.ac.at/research/publications/2012/Herghelegiu-2012-BPV/"
    }
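
    One ingredient of such planning lends itself to a compact sketch: given a binary vessel mask, a Euclidean distance transform yields each voxel's distance to the nearest vessel, and sampling it along a candidate needle path exposes the closest, most critical point. The straight vessel and the entry/target points below are made up for illustration.

        import numpy as np
        from scipy.ndimage import distance_transform_edt

        vessels = np.zeros((64, 64, 64), dtype=bool)
        vessels[:, 32, 40] = True                  # toy straight vessel

        # Distance (in voxels) from every voxel to the nearest vessel.
        clearance = distance_transform_edt(~vessels)

        entry, target = np.array([0, 10, 10]), np.array([60, 40, 40])
        path = np.linspace(entry, target, 200)     # candidate needle path
        idx = np.round(path).astype(int)
        values = clearance[idx[:, 0], idx[:, 1], idx[:, 2]]
        worst = path[values.argmin()]
        print(f"minimal clearance {values.min():.1f} voxels near {worst.round(1)}")
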
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Ropinski-2012-UBT,
    author = "Timo Ropinski and Stefan Diepenbrock and Stefan Bruckner and Klaus Hinrichs and Meister Eduard Gr{\"o}ller",
    title = "Unified Boundary-Aware Texturing for Interactive Volume Rendering",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2012",
    volume = "18",
    number = "11",
    pages = "1942--1955",
    month = "nov",
    abstract = "In this paper, we describe a novel approach for applying texture mapping  to volumetric data sets. In contrast to previous approaches, the  presented technique enables a unified integration of 2D and 3D textures  and thus allows to emphasize material boundaries as well as volumetric  regions within a volumetric data set at the same time. One key contribution  of this paper is a parametrization technique for volumetric data  sets, which takes into account material boundaries and volumetric  regions. Using this technique, the resulting parametrizations of  volumetric data sets enable texturing effects which create a higher  degree of realism in volume rendered images. We evaluate the quality  of the parametrization and demonstrate the usefulness of the proposed  concepts by combining volumetric texturing with volumetric lighting  models to generate photorealistic volume renderings. Furthermore,  we show the applicability in the area of illustrative visualization.",
    pdf = "pdfs/Ropinski-2012-UBT.pdf",
    images = "images/Ropinski-2012-UBT.jpg",
    thumbnails = "images/Ropinski-2012-UBT.png",
    youtube = "https://www.youtube.com/watch?v=kieFLOz22Dg",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2011.285",
    keywords = "interactive volume rendering, volumetric texturing",
    url = "http://www.cg.tuwien.ac.at/research/publications/2012/Ropinski-2012-UBT/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Csebfalvi-2012-IOM,
    author = "Bal{\'a}zs Csebfalvi and Bal{\'a}zs T{\'o}th and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Illumination-Driven Opacity Modulation for Expressive Volume Rendering",
    booktitle = "Proceedings of VMV 2012",
    year = "2012",
    pages = "103--109",
    month = "nov",
    abstract = "Using classical volume visualization, typically a couple of isosurface  layers are rendered semi-transparently to show the internal structures  contained in the data. However, the opacity transfer function is  often difficult to specify such that all the isosurfaces are of high  contrast and sufficiently perceivable. In this paper, we propose  a volumerendering technique which ensures that the different layers  contribute to fairly different regions of the image space. Since  the overlapping between the effected regions is reduced, an outer  translucent isosurface does not decrease significantly the contrast  of a partially hidden inner isosurface. Therefore, the layers of  the data become visually well separated. Traditional transfer functions  assign color and opacity values to the voxels depending on the density  and the gradient. In contrast, we assign also different illumination  directions to different materials, and modulate the opacities view-dependently  based on the surface normals and the directions of the light sources,  which are fixed to the viewing angle. We will demonstrate that this  model allows an expressive visualization of volumetric data.",
    pdf = "pdfs/Csebfalvi-2012-IOM.pdf",
    images = "images/Csebfalvi-2012-IOM.jpg",
    thumbnails = "images/Csebfalvi-2012-IOM.png",
    youtube = "https://www.youtube.com/watch?v=ZvB-Vb7aa4o",
    affiliation = "tuwien",
    doi = "10.2312/PE/VMV/VMV12/103-109",
    event = "VMV 2012",
    keywords = "illustrative visualization, illumination, volume rendering",
    location = "Magdeburg, Germany",
    url = "http://www.cg.tuwien.ac.at/research/publications/2012/Csebfalvi-2012-IOM/"
    }
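
    The essence of the modulation can be stated in a few lines: each material is assigned its own view-fixed light direction, and a sample's opacity is scaled by how well its surface normal faces that light, so different layers become prominent in different image regions. The sketch below is a simplified reading of that rule; the material names and light directions are made up.

        import numpy as np

        def modulated_opacity(base_alpha, normal, material, lights):
            """Scale opacity by the clamped normal-light alignment."""
            alignment = max(0.0, float(np.dot(normal, lights[material])))
            return base_alpha * alignment

        lights = {                          # per-material, view-fixed
            "outer": np.array([0.0, 0.7071, 0.7071]),
            "inner": np.array([0.0, -0.7071, 0.7071]),
        }
        n = np.array([0.0, 1.0, 0.0])       # an upward-facing normal
        print(modulated_opacity(0.4, n, "outer", lights),
              modulated_opacity(0.4, n, "inner", lights))
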
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Mistelbauer-2012-SSV,
    author = "Gabriel Mistelbauer and Hamed Bouzari and R{\"u}diger Schernthaner and Ivan Baclija and Arnold K{\"o}chl and Stefan Bruckner and Milos Sr{\'a}mek and Meister Eduard Gr{\"o}ller",
    title = "Smart Super Views - A Knowledge-Assisted Interface for Medical Visualization",
    booktitle = "Proceedings of IEEE VAST 2012",
    year = "2012",
    pages = "163--172",
    month = "oct",
    publisher = "IEEE Computer Society",
    abstract = "Due to the ever growing volume of acquired data and information, users  have to be constantly aware of the methods for their exploration  and for interaction. Of these, not each might be applicable to the  data at hand or might reveal the desired result. Owing to this, innovations  may be used inappropriately and users may become skeptical. In this  paper we propose a knowledge-assisted interface for medical visualization,  which reduces the necessary effort to use new visualization methods,  by providing only the most relevant ones in a smart way. Consequently,  we are able to expand such a system with innovations without the  users to worry about when, where, and especially how they may or  should use them. We present an application of our system in the medical  domain and give qualitative feedback from domain experts.",
    pdf = "pdfs/Mistelbauer-2012-SSV.pdf",
    images = "images/Mistelbauer-2012-SSV.jpg",
    thumbnails = "images/Mistelbauer-2012-SSV.png",
    youtube = "https://www.youtube.com/watch?v=cZREOedW7c4",
    affiliation = "tuwien",
    doi = "10.1109/VAST.2012.6400555",
    keywords = "knowledge-based visualization, medical visualization, integrated views",
    location = "Seattle, WA, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2012/mistelbauer-2012-ssv/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Ford-2012-HRV,
    author = "Steven Ford and Ivan Viola and Stefan Bruckner and Hans Torp and Gabriel Kiss",
    title = "HeartPad: Real-Time Visual Guidance for Cardiac Ultrasound",
    booktitle = "Proceedings of WASA 2012",
    year = "2012",
    pages = "169--176",
    month = "nov",
    abstract = "Medical ultrasound is a challenging modality when it comes to image  interpretation. The goal we address in this work is to assist the  ultrasound examiner and partially alleviate the burden of interpretation.  We propose to address this goal with visualization that provides  clear cues on the orientation and the correspondence between anatomy  and the data being imaged. Our system analyzes the stream of 3D ultrasound  data and in real-time identifies distinct features that are basis  for a dynamically deformed mesh model of the heart. The heart mesh  is composited with the original ultrasound data to create the data-to-anatomy  correspondence. The visualization is broadcasted over the internet  allowing, among other opportunities, a direct visualization on the  patient on a tablet computer. The examiner interacts with the transducer  and with the visualization parameters on the tablet. Our system has  been characterized by domain specialist as useful in medical training  and for navigating occasional ultrasound users.",
    pdf = "pdfs/Ford-2012-HRV.pdf",
    images = "images/Ford-2012-HRV.jpg",
    thumbnails = "images/Ford-2012-HRV.png",
    youtube = "https://www.youtube.com/watch?v=2d3G7ig-yiQ",
    affiliation = "tuwien",
    doi = "10.1145/2425296.2425326",
    keywords = "medical visualization, ultrasound",
    url = "http://www.cg.tuwien.ac.at/research/publications/2012/Ford-2012-HRV/"
    }

2011

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Haidacher-2011-VAM,
    author = "Martin Haidacher and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Volume Analysis Using Multimodal Surface Similarity",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2011",
    volume = "17",
    number = "12",
    pages = "1969--1978",
    month = "oct",
    abstract = "The combination of volume data acquired by multiple modalities has  been recognized as an important but challenging task. Modalities  often differ in the structures they can delineate and their joint  information can be used to extend the classification space. However,  they frequently exhibit differing types of artifacts which makes  the process of exploiting the additional information non-trivial.  In this paper, we present a framework based on an information-theoretic  measure of isosurface similarity between different modalities to  overcome these problems. The resulting similarity space provides  a concise overview of the differences between the two modalities,  and also serves as the basis for an improved selection of features.  Multimodal classification is expressed in terms of similarities and  dissimilarities between the isosurfaces of individual modalities,  instead of data value combinations. We demonstrate that our approach  can be used to robustly extract features in applications such as  dual energy computed tomography of parts in industrial manufacturing.",
    pdf = "pdfs/Haidacher-2011-VAM.pdf",
    images = "images/Haidacher-2011-VAM.jpg",
    thumbnails = "images/Haidacher-2011-VAM.png",
    youtube = "https://www.youtube.com/watch?v=x9ZTUssg8Fk",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2011.258",
    event = "IEEE Visualization 2011",
    keywords = "surface similarity, volume visualization, multimodal data",
    location = "Providence, Rhode Island, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2011/haidacher-2011-VAM/"
    }
    [PDF] [Bibtex]
    @INPROCEEDINGS {Patel-2011-PEA,
    author = "Daniel Patel and Meister Eduard Gr{\"o}ller and Stefan Bruckner",
    title = "PhD Education Through Apprenticeship",
    booktitle = "Proceedings of Eurographics 2011 (Education Papers)",
    year = "2011",
    editor = "S. Maddock, J. Jorge",
    pages = "23--28",
    month = "apr",
    abstract = "We describe and analyze the PhD education in the visualization group  at the Vienna University of Technology and set the education in a  larger perspective. Four central mechanisms drive the PhD education  in Vienna. They are: to require an article-based PhD; to give the  student freedom to choose research direction; to let students work  in shared offices towards joint deadlines; and to involve students  in reviewing articles. This paper describes these mechanisms in detail  and illustrates their effect.",
    pdf = "pdfs/Patel-2011-PEA.pdf",
    images = "images/Patel-2011-PEA.jpg",
    thumbnails = "images/Patel-2011-PEA.png",
    keywords = "M., education, visualization, apprenticeship",
    location = "Llandudno, United Kingdom",
    url = "http://www.cg.tuwien.ac.at/research/publications/2011/patel-2011-PEA/"
    }

2010

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2010-HVC,
    author = "Stefan Bruckner and Peter Rautek and Ivan Viola and Mike Roberts and Mario Costa Sousa and Meister Eduard Gr{\"o}ller",
    title = "Hybrid Visibility Compositing and Masking for Illustrative Rendering",
    journal = "Computers \& Graphics",
    year = "2010",
    volume = "34",
    number = "4",
    pages = "361--369",
    month = "aug",
    abstract = "In this paper, we introduce a novel framework for the compositing  of interactively rendered 3D layers tailored to the needs of scientific  illustration. Currently, traditional scientific illustrations are  produced in a series of composition stages, combining different pictorial  elements using 2D digital layering. Our approach extends the layer  metaphor into 3D without giving up the advantages of 2D methods.  The new compositing approach allows for effects such as selective  transparency, occlusion overrides, and soft depth buffering. Furthermore,  we show how common manipulation techniques such as masking can be  integrated into this concept. These tools behave just like in 2D,  but their influence extends beyond a single viewpoint. Since the  presented approach makes no assumptions about the underlying rendering  algorithms, layers can be generated based on polygonal geometry,  volumetric data, pointbased representations, or others. Our implementation  exploits current graphics hardware and permits real-time interaction  and rendering.",
    pdf = "pdfs/Bruckner-2010-HVC.pdf",
    images = "images/Bruckner-2010-HVC.jpg",
    thumbnails = "images/Bruckner-2010-HVC.png",
    youtube = "https://www.youtube.com/watch?v=V-Jbgpd9OjU,https://www.youtube.com/watch?v=Tsc30U4x3ic,https://www.youtube.com/watch?v=I4x5QtG25Tc",
    doi = "10.1016/j.cag.2010.04.003",
    keywords = "compositing, masking, illustration",
    project = "illustrasound,medviz,illvis",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-HVC/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Sikachev-2010-DFC,
    author = "Peter Sikachev and Peter Rautek and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Dynamic Focus+Context for Volume Rendering",
    booktitle = "Proceedings of VMV 2010",
    year = "2010",
    pages = "331--338",
    address = "University of Siegen, Siegen, Germany",
    month = "nov",
    abstract = "Interactive visualization is widely used in many applications for  efficient representation of complex data. Many techniques make use  of the focus+context approach in a static manner. These techniques  do not fully make use of the interaction semantics. In this paper  we present a dynamic focus+context approach that highlights salient  features during user interaction. We explore rotation, panning, and  zooming interaction semantics and propose several methods of changing  visual representations, based on a suggested engagement-estimation  method. We use DVR-MIP interpolation and a radial opacity-change  approach, exploring rotation, panning, and zooming semantics. Our  approach adds short animations during user interaction that help  to explore the data efficiently and aid the user in the detection  of unknown features.",
    pdf = "pdfs/Sikachev-2010-DFC.pdf",
    images = "images/Sikachev-2010-DFC.jpg",
    thumbnails = "images/Sikachev-2010-DFC.png",
    youtube = "https://www.youtube.com/watch?v=6x-gVBHYAcA,https://www.youtube.com/watch?v=TgotxmoepB8,https://www.youtube.com/watch?v=8K67zA8pbAo",
    affiliation = "tuwien",
    doi = "10.2312/PE/VMV/VMV10/331-338",
    keywords = "focus+contex, volume rendering, view-dependent visualization, level-of-detail techniques, nonphotorealistic techniques, user interaction",
    location = "Siegen, Germany",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/sikachev-2010-DFC/"
    }
    [PDF] [DOI] [Bibtex]
    @INCOLLECTION {Bruckner-2010-IFC,
    author = "Stefan Bruckner and Meister Eduard Gr{\"o}ller and Klaus Mueller and Bernhard Preim and Deborah Silver",
    title = "Illustrative Focus+Context Approaches in Interactive Volume Visualization",
    booktitle = "Scientific Visualization: Advanced Concepts",
    publisher = "Schloss Dagstuhl -- Leibniz-Zentrum fuer Informatik",
    year = "2010",
    editor = "Hans Hagen",
    series = "Dagstuhl Follow-Ups",
    chapter = "10",
    pages = "136--162",
    month = "aug",
    abstract = "Illustrative techniques are a new and exciting direction in visualization  research. Traditional techniques which have been used by scientific  illustrators for centuries are re-examined under the light of modern  computer technology. In this paper, we discuss the use of the focus+context  concept for the illustrative visualization of volumetric data. We  give an overview of the state-of-the-art and discuss recent approaches  which employ this concept in novel ways.",
    pdf = "pdfs/Bruckner-2010-IFC.pdf",
    images = "images/Bruckner-2010-IFC.jpg",
    thumbnails = "images/Bruckner-2010-IFC.png",
    affiliation = "tuwien",
    doi = "10.4230/DFU.SciViz.2010.136",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-IFC/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Patel-2010-SVV,
    author = "Daniel Patel and Stefan Bruckner and Ivan Viola and Meister Eduard Gr{\"o}ller",
    title = "Seismic Volume Visualization for Horizon Extraction",
    booktitle = "Proceedings of IEEE Pacific Visualization 2010",
    year = "2010",
    pages = "73--80",
    month = "mar",
    abstract = "Seismic horizons indicate change in rock properties and are central  in geoscience interpretation. Traditional interpretation systems  involve time consuming and repetitive manual volumetric seeding for  horizon growing. We present a novel system for rapidly interpreting  and visualizing seismic volumetric data. First we extract horizon  surface-parts by preprocessing the seismic data. Then during interaction  the user can assemble in realtime the horizon parts into horizons.  Traditional interpretation systems use gradient-based illumination  models in the rendering of the seismic volume and polygon rendering  of horizon surfaces. We employ realtime gradientfree forward-scattering  in the rendering of seismic volumes yielding results similar to high-quality  global illumination. We use an implicit surface representation of  horizons allowing for a seamless integration of horizon rendering  and volume rendering. We present a collection of novel techniques  constituting an interpretation and visualization system highly tailored  to seismic data interpretation.",
    pdf = "pdfs/Patel-2010-SVV.pdf",
    images = "images/Patel-2010-SVV.jpg",
    thumbnails = "images/Patel-2010-SVV.png",
    youtube = "https://www.youtube.com/watch?v=YXg4LZsTQdc",
    doi = "10.1109/PACIFICVIS.2010.5429605",
    keywords = "volume visualization, horizon extraction, seismic data",
    location = "Taipei, Taiwan",
    project = "geoillustrator,illvis",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/patel-2010-SVV/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2010-ISM,
    author = "Stefan Bruckner and Torsten M{\"o}ller",
    title = "Isosurface Similarity Maps",
    journal = "Computer Graphics Forum",
    year = "2010",
    volume = "29",
    number = "3",
    pages = "773--782",
    month = "jun",
    abstract = "In this paper, we introduce the concept of isosurface similarity maps  for the visualization of volume data. Isosurface similarity maps  present structural information of a volume data set by depicting  similarities between individual isosurfaces quantified by a robust  information-theoretic measure. Unlike conventional histograms, they  are not based on the frequency of isovalues and/or derivatives and  therefore provide complementary information. We demonstrate that  this new representation can be used to guide transfer function design  and visualization parameter specification. Furthermore, we use isosurface  similarity to develop an automatic parameter-free method for identifying  representative isovalues. Using real-world data sets, we show that  isosurface similarity maps can be a useful addition to conventional  classification techniques.",
    pdf = "pdfs/Bruckner-2010-ISM.pdf",
    images = "images/Bruckner-2010-ISM.jpg",
    thumbnails = "images/Bruckner-2010-ISM.png",
    youtube = "https://www.youtube.com/watch?v=NZFqx4QceCA,https://www.youtube.com/watch?v=kQO8fTJJxVg,https://www.youtube.com/watch?v=KDIbmfOAW00",
    note = "EuroVis 2010 Best Paper Award",
    affiliation = "tuwien",
    doi = "10.1111/j.1467-8659.2009.01689.x",
    event = "EuroVis 2010",
    keywords = "isosurfaces, volume visualization, mutual information, histograms",
    location = "Bordeaux, France",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-ISM/"
    }
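
    The similarity measure compares isosurfaces through their distance transforms: two isovalues are similar if knowing the distance field of one tells you much about the other. The sketch below estimates mutual information from a joint histogram on a random toy volume; the paper additionally normalizes the measure, which is omitted here for brevity.

        import numpy as np
        from scipy.ndimage import distance_transform_edt

        def distance_field(volume, isovalue):
            """Unsigned voxel distance to the isosurface boundary."""
            inside = volume >= isovalue
            return np.minimum(distance_transform_edt(inside),
                              distance_transform_edt(~inside))

        def mutual_information(a, b, bins=64):
            joint, _, _ = np.histogram2d(a.ravel(), b.ravel(), bins=bins)
            pxy = joint / joint.sum()
            px, py = pxy.sum(axis=1), pxy.sum(axis=0)
            nz = pxy > 0
            return float((pxy[nz] * np.log(pxy[nz] / np.outer(px, py)[nz])).sum())

        volume = np.random.default_rng(4).random((32, 32, 32))
        fields = [distance_field(volume, v) for v in np.linspace(0.2, 0.8, 8)]
        similarity = [[mutual_information(a, b) for b in fields] for a in fields]
        print(np.array(similarity).round(2))   # the 8x8 similarity map
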
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Haidacher-2010-VVS,
    author = "Martin Haidacher and Daniel Patel and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Volume Visualization based on Statistical Transfer-Function Spaces",
    booktitle = "Proceedings of IEEE Pacific Visualization 2010",
    year = "2010",
    pages = "17--24",
    month = "mar",
    abstract = "It is a difficult task to design transfer functions for noisy data.  In traditional transfer-function spaces, data values of different  materials overlap. In this paper we introduce a novel statistical  transfer-function space which in the presence of noise, separates  different materials in volume data sets. Our method adaptively estimates  statistical properties, i.e. the mean value and the standard deviation,  of the data values in the neighborhood of each sample point. These  properties are used to define a transfer-function space which enables  the distinction of different materials. Additionally, we present  a novel approach for interacting with our new transfer-function space  which enables the design of transfer functions based on statistical  properties. Furthermore, we demonstrate that statistical information  can be applied to enhance visual appearance in the rendering process.  We compare the new method with 1D, 2D, and LH transfer functions  to demonstrate its usefulness.",
    pdf = "pdfs/Haidacher-2010-VVS.pdf",
    images = "images/Haidacher-2010-VVS.jpg",
    thumbnails = "images/Haidacher-2010-VVS.png",
    youtube = "https://www.youtube.com/watch?v=firkkbHdZ5o",
    doi = "10.1109/PACIFICVIS.2010.5429615",
    keywords = "transfer function, statistics, shading, noisy data, classification",
    location = "Taipei, Taiwan",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/haidacher_2010_statTF/"
    }
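
    The statistical transfer-function domain is easy to reproduce in a simplified form: replace each voxel by the mean and standard deviation of its neighborhood, and classify in that two-dimensional space. The sketch below uses a fixed cubic neighborhood, whereas the paper estimates the neighborhood adaptively.

        import numpy as np
        from scipy.ndimage import uniform_filter

        rng = np.random.default_rng(5)
        volume = rng.normal(0.3, 0.05, size=(64, 64, 64))
        volume[20:44, 20:44, 20:44] = rng.normal(0.7, 0.05, size=(24, 24, 24))

        # Local statistics over a 5^3 neighborhood via box filters.
        mean = uniform_filter(volume, size=5)
        mean_sq = uniform_filter(volume * volume, size=5)
        std = np.sqrt(np.maximum(mean_sq - mean * mean, 0.0))

        # A 2D transfer function would assign color/opacity over (mean, std);
        # here we merely classify the bright material despite the noise.
        material = (mean > 0.5) & (std < 0.1)
        print(f"{material.mean():.1%} of voxels classified as bright material")
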
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2010-RES,
    author = "Stefan Bruckner and Torsten M{\"o}ller",
    title = "Result-Driven Exploration of Simulation Parameter Spaces for Visual Effects Design",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2010",
    volume = "16",
    number = "6",
    pages = "1467--1475",
    month = "oct",
    abstract = "Graphics artists commonly employ physically-based simulation for the  generation of effects such as smoke, explosions, and similar phenomena.  The task of finding the correct parameters for a desired result,  however, is difficult and time-consuming as current tools provide  little to no guidance. In this paper, we present a new approach for  the visual exploration of such parameter spaces. Given a three-dimensional  scene description, we utilize sampling and spatio-temporal clustering  techniques to generate a concise overview of the achievable variations  and their temporal evolution. Our visualization system then allows  the user to explore the simulation space in a goal-oriented manner.  Animation sequences with a set of desired characteristics can be  composed using a novel search-by-example approach and interactive  direct volume rendering is employed to provide instant visual feedback.  A user study was performed to evaluate the applicability of our system  in production use.",
    pdf = "pdfs/Bruckner-2010-RES.pdf",
    images = "images/Bruckner-2010-RES.jpg",
    thumbnails = "images/Bruckner-2010-RES.png",
    youtube = "https://www.youtube.com/watch?v=JunXyxULCpo",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2010.190",
    event = "IEEE Visualization 2010",
    keywords = "visual exploration, visual effects, clustering, time-dependent volume data",
    location = "Salt Lake City, Utah, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/brucker-2010-RES/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Solteszova-2010-MOS,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and Daniel Patel and Stefan Bruckner and Ivan Viola",
    title = "A Multidirectional Occlusion Shading Model for Direct Volume Rendering",
    journal = "Computer Graphics Forum",
    year = "2010",
    volume = "29",
    number = "3",
    pages = "883--891",
    month = "jun",
    abstract = "In this paper, we present a novel technique which simulates directional  light scattering for more realistic interactive visualization of  volume data. Our method extends the recent directional occlusion  shading model by enabling light source positioning with practically  no performance penalty. Light transport is approximated using a tilted  cone-shaped function which leaves elliptic footprints in the opacity  buffer during slice-based volume rendering. We perform an incremental  blurring operation on the opacity buffer for each slice in front-to-back  order. This buffer is then used to define the degree of occlusion  for the subsequent slice. Our method is capable of generating high-quality  soft shadowing effects, allows interactive modification of all illumination  and rendering parameters, and requires no pre-computation.",
    pdf = "pdfs/Solteszova-2010-MOS.pdf",
    images = "images/Solteszova-2010-MOS.jpg",
    thumbnails = "images/Solteszova-2010-MOS.png",
    youtube = "https://www.youtube.com/watch?v=V4y0BVKV_bw",
    doi = "10.1111/j.1467-8659.2009.01695.x",
    event = "EuroVis 2010",
    keywords = "global illumination, volume rendering, shadows, optical model",
    location = "Bordeaux, France",
    project = "illustrasound,medviz,illvis",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/solteszova-2010-MOS/"
    }
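
    A minimal sketch of the occlusion-buffer update, assuming slice-based front-to-back rendering: the opacity buffer is blurred incrementally, and shifting the blur footprint each slice tilts the effective light cone, which is what enables light-source positioning at negligible cost. A circular Gaussian stands in for the paper's elliptical footprint.

        import numpy as np
        from scipy.ndimage import gaussian_filter, shift

        rng = np.random.default_rng(6)
        alpha = (rng.random((32, 96, 96)) > 0.985).astype(np.float32)

        occlusion = np.zeros((96, 96), dtype=np.float32)
        tilt = (0.6, 0.0)                       # per-slice offset in pixels
        for z in range(alpha.shape[0]):         # front to back
            shading = 1.0 - occlusion           # light reaching slice z
            occlusion += (1.0 - occlusion) * alpha[z]
            occlusion = gaussian_filter(occlusion, sigma=1.2)
            occlusion = shift(occlusion, tilt, order=1, mode="nearest")
        # `shading` would modulate each slice's color during compositing.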

2009

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2009-BVQ,
    author = "Stefan Bruckner and Veronika \v{S}olt{\'e}szov{\'a} and Meister Eduard Gr{\"o}ller and Ji\v{r}{\'i} Hlad\r{u}vka and Katja B{\"u}hler and Jai Yu and Barry Dickson",
    title = "BrainGazer - Visual Queries for Neurobiology Research",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2009",
    volume = "15",
    number = "6",
    pages = "1497--1504",
    month = "nov",
    abstract = "Neurobiology investigates how anatomical and physiological relationships  in the nervous system mediate behavior. Molecular genetic techniques,  applied to species such as the common fruit fly Drosophila melanogaster,  have proven to be an important tool in this research. Large databases  of transgenic specimens are being built and need to be analyzed to  establish models of neural information processing. In this paper  we present an approach for the exploration and analysis of neural  circuits based on such a database. We have designed and implemented  BrainGazer, a system which integrates visualization techniques for  volume data acquired through confocal microscopy as well as annotated  anatomical structures with an intuitive approach for accessing the  available information. We focus on the ability to visually query  the data based on semantic as well as spatial relationships. Additionally,  we present visualization techniques for the concurrent depiction  of neurobiological volume data and geometric objects which aim to  reduce visual clutter. The described system is the result of an ongoing  interdisciplinary collaboration between neurobiologists and visualization  researchers.",
    pdf = "pdfs/Bruckner-2009-BVQ.pdf",
    images = "images/Bruckner-2009-BVQ.jpg",
    thumbnails = "images/Bruckner-2009-BVQ.png",
    youtube = "https://www.youtube.com/watch?v=LB5t3RtLifk",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2009.121",
    event = "IEEE Visualization 2009",
    keywords = "biomedical visualization, neurobiology, visual queries, volume visualization",
    location = "Atlantic City, New Jersey, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2009/bruckner-2009-BVQ/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2009-IVV,
    author = "Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Instant Volume Visualization using Maximum Intensity Difference Accumulation",
    journal = "Computer Graphics Forum",
    year = "2009",
    volume = "28",
    number = "3",
    pages = "775--782",
    month = "jun",
    abstract = "It has long been recognized that transfer function setup for Direct  Volume Rendering (DVR) is crucial to its usability. However, the  task of finding an appropriate transfer function is complex and time-consuming  even for experts. Thus, in many practical applications simpler techniques  which do not rely on complex transfer functions are employed. One  common example is Maximum Intensity Projection (MIP) which depicts  the maximum value along each viewing ray. In this paper, we introduce  Maximum Intensity Difference Accumulation (MIDA), a new approach  which combines the advantages of DVR and MIP. Like MIP, MIDA exploits  common data characteristics and hence does not require complex transfer  functions to generate good visualization results. It does, however,  feature occlusion and shape cues similar to DVR. Furthermore, we  show that MIDA - in addition to being a useful technique in its own  right- can be used to smoothly transition between DVR and MIP in  an intuitive manner. MIDA can be easily implemented using volume  raycasting and achieves real-time performance on current graphics  hardware.",
    pdf = "pdfs/Bruckner-2009-IVV.pdf",
    images = "images/Bruckner-2009-IVV.jpg",
    thumbnails = "images/Bruckner-2009-IVV.png",
    youtube = "https://www.youtube.com/watch?v=lNwZJXxoLTg,https://www.youtube.com/watch?v=AR-Zp3S35hs,https://www.youtube.com/watch?v=xk4J8bkI2-Y,https://www.youtube.com/watch?v=XApq2rGKMR8",
    issn = "0167-7055",
    affiliation = "tuwien",
    doi = "10.1111/j.1467-8659.2009.01474.x",
    event = "EuroVis 2009",
    keywords = "illustrative visualization, maximum intensity projection, direct volume rendering",
    location = "Berlin, Germany",
    url = "http://www.cg.tuwien.ac.at/research/publications/2009/bruckner-2009-IVV/"
    }
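
    The accumulation rule behind MIDA is compact enough to sketch for a single ray. In the sketch below, a new maximum along the ray down-weights previously accumulated color and opacity by beta = 1 - (growth of the maximum); this is our reading of the abstract, and the function and variable names are assumptions rather than the paper's exact formulation.

        import numpy as np

        def mida_ray(values, colors, alphas):
            """values: (n,) samples in [0, 1]; colors: (n, 3); alphas: (n,)."""
            acc_c = np.zeros(3)
            acc_a = 0.0
            m = 0.0                          # running maximum along the ray
            for v, c, a in zip(values, colors, alphas):
                delta = max(v - m, 0.0)      # how much the maximum grows here
                beta = 1.0 - delta           # new maxima suppress prior samples
                acc_c = beta * acc_c + (1.0 - beta * acc_a) * a * c
                acc_a = beta * acc_a + (1.0 - beta * acc_a) * a
                m = max(m, v)
            return acc_c, acc_a

    With beta fixed at 1 this reduces to standard front-to-back DVR compositing, which is what enables the smooth DVR-to-MIP transition mentioned in the abstract.
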
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Kohlmann-2009-CPV,
    author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Contextual Picking of Volumetric Structures",
    booktitle = "Proceedings of the IEEE Pacific Visualization 2009",
    year = "2009",
    editor = "Peter Eades, Thomas Ertl, Han-Wei Shen",
    pages = "185--192",
    month = "may",
    abstract = "This paper presents a novel method for the interactive identification  of contextual interest points within volumetric data by picking on  a direct volume rendered image. In clinical diagnostics the points  of interest are often located in the center of anatomical structures.  In order to derive the volumetric position which allows a convenient  examination of the intended structure, the system automatically extracts  contextual meta information from the DICOM (Digital Imaging and Communications  in Medicine) images and the setup of the medical workstation. Along  a viewing ray for a volumetric picking, the ray profile is analyzed  for structures which are similar to predefined templates from a knowledge  base. We demonstrate with our results that the obtained position  in 3D can be utilized to highlight a structure in 2D slice views,  to interactively calculate centerlines of tubular objects, or to  place labels at contextually-defined volumetric positions.",
    pdf = "pdfs/Kohlmann-2009-CPV.pdf",
    images = "images/Kohlmann-2009-CPV.jpg",
    thumbnails = "images/Kohlmann-2009-CPV.png",
    youtube = "https://www.youtube.com/watch?v=SgyGwePAE7o",
    affiliation = "tuwien",
    doi = "10.1109/PACIFICVIS.2009.4906855",
    isbn = "978-1-4244-4404-5",
    keywords = "picking, interaction, selection, volume visualization",
    location = "Peking, China",
    url = "http://www.cg.tuwien.ac.at/research/publications/2009/kohlmann-2009-cp/"
    }
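
    The core of the contextual picking step, analyzing the ray profile for structures similar to stored templates, can be approximated with normalized cross-correlation. The sketch below is a simplification under that assumption; the knowledge-base format and all names are hypothetical.

        import numpy as np

        def best_template_position(profile, templates):
            """Return (template index, sample index) of the strongest match
            of any 1D template along the picking ray's scalar profile."""
            best_ti, best_pos, best_score = None, None, -np.inf
            p = (profile - profile.mean()) / (profile.std() + 1e-9)
            for ti, t in enumerate(templates):
                t = (t - t.mean()) / (t.std() + 1e-9)
                scores = np.correlate(p, t, mode="valid") / len(t)
                j = int(np.argmax(scores))
                if scores[j] > best_score:
                    best_ti, best_pos, best_score = ti, j + len(t) // 2, scores[j]
            return best_ti, best_pos
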

2008

    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Ruiz-2008-SEV,
    author = "Marc Ruiz and Ivan Viola and Imma Boada and Stefan Bruckner and Miquel Feixas and Mateu Sbert",
    title = "Similarity-based Exploded Views",
    booktitle = "Proceedings of Smart Graphics 2008",
    year = "2008",
    pages = "154--165",
    month = "aug",
    abstract = "Exploded views are often used in illustration to overcome the problem  of occlusion when depicting complex structures. In this paper, we  propose a volume visualization technique inspired by exploded views  that partitions the volume into a number of parallel slabs and shows  them apart from each other. The thickness of slabs is driven by the  similarity between partitions. We use an information-theoretic technique  for the generation of exploded views. First, the algorithm identifies  the viewpoint from which the structure is the highest. Then, the  partition of the volume into the most informative slabs for exploding  is obtained using two complementary similarity-based strategies.  The number of slabs and the similarity parameter are freely adjustable  by the user.",
    pdf = "pdfs/Ruiz-2008-SEV.pdf",
    images = "images/Ruiz-2008-SEV.jpg",
    thumbnails = "images/Ruiz-2008-SEV.png",
    doi = "10.1007/978-3-540-85412-8_14",
    keywords = "volume visualization, illustrative visualization, exploded views",
    location = "Rennes, France",
    project = "illvis,medviz",
    url = "http://www.cg.tuwien.ac.at/research/publications/2008/ruiz-2008-SEV/"
    }
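
    One way to read the partitioning step is as a search for cut planes between the least similar neighboring slices. The sketch below uses histogram intersection as the similarity measure purely for brevity; the paper itself works with information-theoretic measures, and the greedy selection is an assumption.

        import numpy as np

        def slab_boundaries(volume, axis=2, n_slabs=4, bins=32):
            """Pick n_slabs-1 cut indices at the least similar slice pairs."""
            vol = np.moveaxis(volume, axis, 0)
            lo, hi = float(vol.min()), float(vol.max())
            hists = np.stack([np.histogram(s, bins=bins, range=(lo, hi),
                                           density=True)[0] for s in vol])
            sim = np.minimum(hists[:-1], hists[1:]).sum(axis=1)  # intersection
            cuts = np.sort(np.argsort(sim)[: n_slabs - 1] + 1)
            return cuts      # slice indices where slabs are displaced apart
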
    [PDF] [YT] [Bibtex]
    @INPROCEEDINGS {Kohlmann-2008-LEI,
    author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "LiveSync++: Enhancements of an Interaction Metaphor",
    booktitle = "Proceedings of Graphics Interface 2008",
    year = "2008",
    pages = "81--88",
    month = "may",
    abstract = "The LiveSync interaction metaphor allows an efficient and non-intrusive  integration of 2D and 3D visualizations in medical workstations.  This is achieved by synchronizing the 2D slice view with the volumetric  view. The synchronization is initiated by a simple picking on a structure  of interest in the slice view. In this paper we present substantial  enhancements of the existing concept to improve its usability. First,  an efficient parametrization for the derived parameters is presented,  which allows hierarchical refinement of the search space for good  views. Second, the extraction of the feature of interest is performed  in a way, which is adapting to the volumetric extent of the feature.  The properties of the extracted features are utilized to adjust a  predefined transfer function in a feature-enhancing manner. Third,  a new interaction mode is presented, which allows the integration  of more knowledge about the user-intended visualization, without  increasing the interaction effort. Finally, a new clipping technique  is integrated, which guarantees an unoccluded view on the structure  of interest while keeping important contextual information.",
    pdf = "pdfs/Kohlmann-2008-LEI.pdf",
    images = "images/Kohlmann-2008-LEI.jpg",
    thumbnails = "images/Kohlmann-2008-LEI.png",
    youtube = "https://www.youtube.com/watch?v=_Jt8ezi7yjs",
    affiliation = "tuwien",
    keywords = "viewpoint selection, linked views, medical visualization, smart interaction",
    location = "Windsor, Ontario, Canada",
    url = "http://www.cg.tuwien.ac.at/research/publications/2008/kohlmann-2008-lse/"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Rautek-2008-IVN,
    author = "Peter Rautek and Stefan Bruckner and Ivan Viola and Meister Eduard Gr{\"o}ller",
    title = "Illustrative visualization: new technology or useless tautology?",
    journal = "ACM SIGGRAPH Computer Graphics",
    year = "2008",
    volume = "42",
    number = "3",
    month = "aug",
    abstract = "The computer graphics group at TU Vienna has created some of most  beautiful and effective illustrative visualizations. In this article,  they share with us their unique perspective on illustrative visualization.",
    pdf = "pdfs/Rautek-2008-IVN.pdf",
    images = "images/Rautek-2008-IVN.jpg",
    thumbnails = "images/Rautek-2008-IVN.png",
    doi = "10.1145/1408626.1408633",
    url = "http://www.cg.tuwien.ac.at/research/publications/2008/Rautek-2008-VF/"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Bruckner-2008-IVV,
    author = "Stefan Bruckner and Peter Kohlmann and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Integrating Volume Visualization Techniques Into Medical Applications",
    booktitle = "Proceedings of ISBI 2008",
    year = "2008",
    pages = "820--823",
    month = "may",
    abstract = "One of the main obstacles in integrating 3D volume visualization in  the clinical workflow is the time-consuming process of adjusting  parameters such as viewpoint, transfer functions, and clipping planes  required to generate a diagnostically relevant image. Current applications  therefore make scarce use of volume rendering and instead primarily  employ 2D views generated through standard techniques such as multi-planar  reconstruction (MPR). However, in many cases 3D renditions can supply  additional useful information. This paper discusses ongoing work  which aims to improve the integration of 3D visualization into the  diagnostic workflow by automatically generating meaningful renditions  based on minimal user interaction. A method for automatically generating  3D views for structures in 2D slices based on a single picking interaction  is presented.",
    pdf = "pdfs/Bruckner-2008-IVV.pdf",
    images = "images/Bruckner-2008-IVV.jpg",
    thumbnails = "images/Bruckner-2008-IVV.png",
    affiliation = "tuwien",
    doi = "10.1109/ISBI.2008.4541122",
    isbn = "978-1-4244-2002-5",
    keywords = "viewpoint selection, medical visualization, volume rendering",
    location = "Paris, France",
    url = "http://www.cg.tuwien.ac.at/research/publications/2008/bruckner-2008-IVV/"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Haidacher-2008-ITF,
    author = "Martin Haidacher and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Information-based Transfer Functions for Multimodal Visualization",
    booktitle = "Proceedings of VCBM 2008",
    year = "2008",
    editor = "C.P Botha, G. Kindlmann, W.J. Niessen, and B. Preim",
    pages = "101--108",
    month = "oct",
    publisher = "Eurographics Association",
    abstract = "Transfer functions are an essential part of volume visualization.  In multimodal visualization at least two values exist at every sample  point. Additionally, other parameters, such as gradient magnitude,  are often retrieved for each sample point. To find a good transfer  function for this high number of parameters is challenging because  of the complexity of this task. In this paper we present a general  information-based approach for transfer function design in multimodal  visualization which is independent of the used modality types. Based  on information theory, the complex multi-dimensional transfer function  space is fused to allow utilization of a well-known 2D transfer function  with a single value and gradient magnitude as parameters. Additionally,  a quantity is introduced which enables better separation of regions  with complementary information. The benefit of the new method in  contrast to other techniques is a transfer function space which is  easy to understand and which provides a better separation of different  tissues. The usability of the new approach is shown on examples of  different modalities.",
    pdf = "pdfs/Haidacher-2008-ITF.pdf",
    images = "images/Haidacher-2008-ITF.jpg",
    thumbnails = "images/Haidacher-2008-ITF.png",
    affiliation = "tuwien",
    doi = "10.2312/VCBM/VCBM08/101-108",
    isbn = "978-3-905674-13-2",
    issn = "2070-5778",
    keywords = "multimodal visualization, transfer functions, information theory",
    location = "Delft",
    url = "http://www.cg.tuwien.ac.at/research/publications/2008/haidacher-2008-vcbm/"
    }
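
    In the spirit of the fusion described above, the sketch below collapses two co-registered modality values into a single scalar by weighting each value with its pointwise information content estimated from histograms. This particular fusion rule is an illustrative assumption, not the paper's exact formulation.

        import numpy as np

        def fuse_modalities(v1, v2, bins=64):
            """v1, v2: co-registered volumes scaled to [0, 1]."""
            h12, _, _ = np.histogram2d(v1.ravel(), v2.ravel(), bins=bins,
                                       range=[[0, 1], [0, 1]])
            p12 = h12 / h12.sum()
            p1, p2 = p12.sum(axis=1), p12.sum(axis=0)   # marginal distributions
            edges = np.linspace(0, 1, bins + 1)[1:-1]
            i1, i2 = np.digitize(v1, edges), np.digitize(v2, edges)
            inf1 = -np.log(p1[i1] + 1e-12)              # surprisal of each sample
            inf2 = -np.log(p2[i2] + 1e-12)
            w = inf1 / (inf1 + inf2 + 1e-12)
            return w * v1 + (1.0 - w) * v2              # fused axis for a 2D TF
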
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Rautek-2008-ISI,
    author = "Peter Rautek and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Interaction-Dependent Semantics for Illustrative Volume Rendering",
    journal = "Computer Graphics Forum",
    year = "2008",
    volume = "27",
    number = "3",
    pages = "847--854",
    month = "may",
    abstract = "In traditional illustration the choice of appropriate styles and rendering  techniques is guided by the intention of the artist. For illustrative  volume visualizations it is difficult to specify the mapping between  the 3D data and the visual representation that preserves the intention  of the user. The semantic layers concept establishes this mapping  with a linguistic formulation of rules that directly map data features  to rendering styles. With semantic layers fuzzy logic is used to  evaluate the user defined illustration rules in a preprocessing step.  In this paper we introduce interaction-dependent rules that are evaluated  for each frame and are therefore computationally more expensive.  Enabling interaction-dependent rules, however, allows the use of  a new class of semantics, resulting in more expressive interactive  illustrations. We show that the evaluation of the fuzzy logic can  be done on the graphics hardware enabling the efficient use of interaction-dependent  semantics. Further we introduce the flat rendering mode and discuss  how different rendering parameters are influenced by the rule base.  Our approach provides high quality illustrative volume renderings  at interactive frame rates, guided by the specification of illustration  rules.",
    pdf = "pdfs/Rautek-2008-ISI.pdf",
    images = "images/Rautek-2008-ISI.jpg",
    thumbnails = "images/Rautek-2008-ISI.png",
    youtube = "https://www.youtube.com/watch?v=fHIl2A50Ico",
    affiliation = "tuwien",
    doi = "10.1111/j.1467-8659.2008.01216.x",
    event = "Eurographics/ IEEE-VGTC Symposium on Visualization",
    keywords = "volume visualization, illustrative visualization, semantics, interaction",
    location = "Eindhoven, The Netherlands",
    url = "http://www.cg.tuwien.ac.at/research/publications/2008/Rautek-2008-IDS/"
    }
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Ruiz-2008-OVR,
    author = "Marc Ruiz and Imma Boada and Ivan Viola and Stefan Bruckner and Miquel Feixas and Mateu Sbert",
    title = "Obscurance-based Volume Rendering Framework",
    booktitle = "Proceedings of Volume Graphics 2008",
    year = "2008",
    pages = "113--120",
    month = "aug",
    abstract = "Obscurances, from which ambient occlusion is a particular case, is  a technology that produces natural-looking lighting effects in a  faster way than global illumination. Its application in volume visualization  is of special interest since it permits us to generate a high quality  rendering at a low cost. In this paper, we propose an obscurance-based  framework that allows us to obtain realistic and illustrative volume  visualizations in an interactive manner. Obscurances can include  color bleeding effects without additional cost. Moreover, we obtain  a saliency map from the gradient of obscurances and we show its application  to enhance volume visualization and to select the most salient views.",
    pdf = "pdfs/Ruiz-2008-OVR.pdf",
    images = "images/Ruiz-2008-OVR.jpg",
    thumbnails = "images/Ruiz-2008-OVR.png",
    doi = "10.2312/VG/VG-PBG08/113-120",
    keywords = "volume rendering, illustrative visualization, ambient occlusion",
    location = "Los Angeles, CA, USA",
    project = "illvis,medviz",
    url = "http://www.cg.tuwien.ac.at/research/publications/2008/ruiz-2008-OVR/"
    }
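
    Obscurances generalize ambient occlusion by weighting the distance to the first occluder with a falloff function. A minimal single-voxel sketch is given below; the direction sampling, step size, density threshold tau, and square-root falloff are all assumptions.

        import numpy as np

        def obscurance(density, p, dirs, max_dist=16.0, tau=0.3):
            """density: 3D array; p: (3,) voxel position; dirs: (k, 3) unit vectors."""
            total = 0.0
            for d in dirs:
                hit = max_dist
                t = 1.0
                while t < max_dist:
                    q = np.round(p + t * d).astype(int)
                    if (q < 0).any() or (q >= density.shape).any():
                        break                      # left the volume: unoccluded
                    if density[tuple(q)] > tau:
                        hit = t                    # first occluder on this ray
                        break
                    t += 1.0
                total += np.sqrt(hit / max_dist)   # falloff rho(distance)
            return total / len(dirs)               # 1 = open, 0 = fully obscured
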
    [PDF] [Bibtex]
    @PHDTHESIS {Bruckner-2008-IIV-Thesis,
    author = "Stefan Bruckner",
    title = "Interactive Illustrative Volume Visualization",
    school = "Vienna University of Technology, Austria",
    year = "2008",
    month = "apr",
    abstract = "Illustrations are essential for the effective communication of complex  subjects. Their production, however, is a difficult and expensive  task. In recent years, three-dimensional imaging has become a vital  tool not only in medical diagnosis and treatment planning, but also  in many technical disciplines (e.g., material inspection), biology,  and archeology. Modalities such as X-Ray Computed Tomography (CT)  and Magnetic Resonance Imaging (MRI) produce high-resolution volumetric  scans on a daily basis. It seems counter-intuitive that even though  such a wealth of data is available, the production of an illustration  should still require a mainly manual and time-consuming process.  This thesis is devoted to the computer-assisted generation of illustrations  directly from volumetric data using advanced visualization techniques.  The concept of a direct volume illustration system is introduced  for this purpose. Instead of requiring an additional modeling step,  this system allows the designer of an illustration to work directly  on the measured data. Abstraction, a key component of traditional  illustrations, is used in order to reduce visual clutter, emphasize  important structures, and reveal hidden detail. Low-level abstraction  techniques are concerned with the appearance of objects and allow  flexible artistic shading of structures in volumetric data sets.  High-level abstraction techniques control which objects are visible.  For this purpose, novel methods for the generation of ghosted and  exploded views are introduced. The visualization techniques presented  in this thesis employ the features of current graphics hardware to  achieve interactive performance. The resulting system allows the  generation of expressive illustrations directly from volumetric data  with applications in medical training, patient education, and scientific  communication.",
    pdf = "pdfs/Bruckner-2008-IIV-Thesis.pdf",
    images = "images/Bruckner-2008-IIV-Thesis.jpg",
    thumbnails = "images/Bruckner-2008-IIV-Thesis.png",
    affiliation = "tuwien",
    keywords = "visual analysis, visual exploration, volume data",
    url = "http://www.cg.tuwien.ac.at/research/publications/2008/bruckner-2008-IIV/"
    }

2007

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2007-EDF,
    author = "Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Enhancing Depth-Perception with Flexible Volumetric Halos",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2007",
    volume = "13",
    number = "6",
    pages = "1344--1351",
    month = "oct",
    abstract = "Volumetric data commonly has high depth complexity which makes it  difficult to judge spatial relationships accurately. There are many  different ways to enhance depth perception, such as shading, contours,  and shadows. Artists and illustrators frequently employ halos for  this purpose. In this technique, regions surrounding the edges of  certain structures are darkened or brightened which makes it easier  to judge occlusion. Based on this concept, we present a flexible  method for enhancing and highlighting structures of interest using  GPU-based direct volume rendering. Our approach uses an interactively  defined halo transfer function to classify structures of interest  based on data value, direction, and position. A feature-preserving  spreading algorithm is applied to distribute seed values to neighboring  locations, generating a controllably smooth field of halo intensities.  These halo intensities are then mapped to colors and opacities using  a halo profile function. Our method can be used to annotate features  at interactive frame rates.",
    pdf = "pdfs/Bruckner-2007-EDF.pdf",
    images = "images/Bruckner-2007-EDF.jpg",
    thumbnails = "images/Bruckner-2007-EDF.png",
    youtube = "https://www.youtube.com/watch?v=NvHfxX8wjE8",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2007.70555",
    event = "IEEE Visualization 2007",
    keywords = "volume rendering, illustrative visualization, halos",
    location = "Sacramento, California, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2007/bruckner-2007-EDF/"
    }
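
    The spreading step described in the abstract can be mimicked with an iterated, decaying maximum filter: halo seeds classified by the halo transfer function are distributed to neighboring voxels, and the resulting field is mapped to opacity through a profile function. The filter choice, decay, and the rim-shaped profile below are assumptions.

        import numpy as np
        from scipy.ndimage import maximum_filter

        def halo_field(seeds, iterations=8, decay=0.8):
            """seeds: 3D array of halo seed intensities in [0, 1]."""
            field = seeds.copy()
            for _ in range(iterations):
                spread = decay * maximum_filter(field, size=3)
                field = np.maximum(field, spread)   # strong seeds stay intact
            return field

        def halo_profile(field, width=0.25):
            # Opacity peaks at mid intensities, i.e. at the rim around a seed.
            return np.clip(1.0 - np.abs(field - 0.5) / width, 0.0, 1.0)
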
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Kohlmann-2007-LDV,
    author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "LiveSync: Deformed Viewing Spheres for Knowledge-Based Navigation",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2007",
    volume = "13",
    number = "6",
    pages = "1544--1551",
    month = "oct",
    abstract = "Although real-time interactive volume rendering is available even  for very large data sets, this visualization method is used quite  rarely in the clinical practice. We suspect this is because it is  very complicated and time consuming to adjust the parameters to achieve  meaningful results. The clinician has to take care of the appropriate  viewpoint, zooming, transfer function setup, clipping planes and  other parameters. Because of this, most often only 2D slices of the  data set are examined. Our work introduces LiveSync, a new concept  to synchronize 2D slice views and volumetric views of medical data  sets. Through intuitive picking actions on the slice, the users define  the anatomical structures they are interested in. The 3D volumetric  view is updated automatically with the goal that the users are provided  with expressive result images. To achieve this live synchronization  we use a minimal set of derived information without the need for  segmented data sets or data-specific pre-computations. The components  we consider are the picked point, slice view zoom, patient orientation,  viewpoint history, local object shape and visibility. We introduce  deformed viewing spheres which encode the viewpoint quality for the  components. A combination of these deformed viewing spheres is used  to estimate a good viewpoint. Our system provides the physician with  synchronized views which help to gain deeper insight into the medical  data with minimal user interaction.",
    pdf = "pdfs/Kohlmann-2007-LDV.pdf",
    images = "images/Kohlmann-2007-LDV.jpg",
    thumbnails = "images/Kohlmann-2007-LDV.png",
    youtube = "https://www.youtube.com/watch?v=vzoS6plGxzQ",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2007.70576",
    event = "IEEE Visualization 2007",
    keywords = "linked views, interaction, medical visualization, navigation, viewpoint selection",
    location = "Sacramento, California, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2007/kohlmann-2007-livesync/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2007-STF,
    author = "Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Style Transfer Functions for Illustrative Volume Rendering",
    journal = "Computer Graphics Forum",
    year = "2007",
    volume = "26",
    number = "3",
    pages = "715--724",
    month = "sep",
    abstract = "Illustrative volume visualization frequently employs non-photorealistic  rendering techniques to enhance important features or to suppress  unwanted details. However, it is difficult to integrate multiple  non-photorealistic rendering approaches into a single framework due  to great differences in the individual methods and their parameters.  In this paper, we present the concept of style transfer functions.  Our approach enables flexible data-driven illumination which goes  beyond using the transfer function to just assign colors and opacities.  An image-based lighting model uses sphere maps to represent non-photorealistic  rendering styles. Style transfer functions allow us to combine a  multitude of different shading styles in a single rendering. We extend  this concept with a technique for curvature-controlled style contours  and an illustrative transparency model. Our implementation of the  presented methods allows interactive generation of high-quality volumetric  illustrations.",
    pdf = "pdfs/Bruckner-2007-STF.pdf",
    images = "images/Bruckner-2007-STF.jpg",
    thumbnails = "images/Bruckner-2007-STF.png",
    youtube = "https://www.youtube.com/watch?v=40SdXa7aAjI",
    note = "Eurographics 2007 3rd Best Paper Award",
    affiliation = "tuwien",
    doi = "10.1111/j.1467-8659.2007.01095.x",
    event = "Eurographics 2007",
    keywords = "illustrative visualization, transfer functions, volume rendering",
    location = "Prague, Czech Republic",
    url = "http://www.cg.tuwien.ac.at/research/publications/2007/bruckner-2007-STF/"
    }
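
    The image-based lighting behind style transfer functions amounts to a sphere-map lookup indexed by the eye-space normal, with the data value selecting (or blending between) styles. The two-style linear blend below is a deliberately reduced sketch; the names and index mapping are assumptions.

        import numpy as np

        def shade(normal_eye, value, style_a, style_b):
            """normal_eye: (3,) eye-space normal; style_*: (res, res, 3) sphere
            maps; value: data value in [0, 1] blending between the two styles."""
            n = normal_eye / np.linalg.norm(normal_eye)
            res = style_a.shape[0]
            u = int((n[0] * 0.5 + 0.5) * (res - 1))   # sphere-map coordinates
            v = int((n[1] * 0.5 + 0.5) * (res - 1))
            return (1.0 - value) * style_a[v, u] + value * style_b[v, u]
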
    [PDF] [Bibtex]
    @ARTICLE {Kohlmann-2007-EBV,
    author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Evaluation of a Bricked Volume Layout for a Medical Workstation based on Java",
    journal = "Journal of WSCG",
    year = "2007",
    volume = "15",
    number = "1-3",
    pages = "83--90",
    month = "jan",
    abstract = "Volumes acquired for medical examination purposes are constantly increasing  in size. For this reason, the computer’s memory is the limiting  factor for visualizing the data. Bricking is a well-known concept  used for rendering large data sets. The volume data is subdivided  into smaller blocks to achieve better memory utilization. Until now,  the vast majority of medical workstations use a linear volume layout.  We implemented a bricked volume layout for such a workstation based  on Java as required by our collaborative company partner to evaluate  different common access patterns to the volume data. For rendering,  we were mainly interested to see how the performance will differ  from the traditional linear volume layout if we generate images of  arbitrarily oriented slices via Multi-Planar Reformatting (MPR).  Furthermore, we tested access patterns which are crucial for segmentation  issues like a random access to data values and a simulated region  growing. Our goal was to find out if it makes sense to change the  volume layout of a medical workstation to benefit from bricking.  We were also interested to identify the tasks where problems might  occur if bricking is applied. Overall, our results show that it is  feasible to use a bricked volume layout in the stringent context  of a medical workstation implemented in Java.",
    pdf = "pdfs/Kohlmann-2007-EBV.pdf",
    images = "images/Kohlmann-2007-EBV.jpg",
    thumbnails = "images/Kohlmann-2007-EBV.png",
    issn = "1213-6972",
    affiliation = "tuwien",
    event = "WSCG 2007",
    keywords = "MPR, bricked volume layout, medical visualization, medical workstation",
    location = "Plzen, Czech Republic",
    url = "http://www.cg.tuwien.ac.at/research/publications/2007/Kohlmann-2007-EBV/"
    }
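
    The address translation at the heart of a bricked layout is cheap when the brick edge is a power of two: a global voxel coordinate splits into a brick index (shifts) and an in-brick offset (masks). The sketch below assumes 32^3 bricks stored contiguously; the constants are illustrative, not taken from the paper.

        B = 32                        # brick edge length (power of two)
        SHIFT, MASK = 5, B - 1        # log2(B) and the in-brick bit mask

        def voxel_address(x, y, z, bricks_x, bricks_y):
            """Linear address of voxel (x, y, z) in a bricked volume layout."""
            bx, by, bz = x >> SHIFT, y >> SHIFT, z >> SHIFT
            ox, oy, oz = x & MASK, y & MASK, z & MASK
            brick = (bz * bricks_y + by) * bricks_x + bx   # which brick
            inner = (oz * B + oy) * B + ox                 # offset inside it
            return brick * B**3 + inner

    Keeping a brick's voxels contiguous is the cache-coherence argument behind comparing access patterns such as MPR and region growing against a linear layout.
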
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Rautek-2007-SLI,
    author = "Peter Rautek and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Semantic Layers for Illustrative Volume Rendering",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2007",
    volume = "13",
    number = "6",
    pages = "1336--1343",
    month = "oct",
    abstract = "Direct volume rendering techniques map volumetric attributes (e.g.,  density, gradient magnitude, etc.) to visual styles. Commonly this  mapping is specified by a transfer function. The specification of  transfer functions is a complex task and requires expert knowledge  about the underlying rendering technique. In the case of multiple  volumetric attributes and multiple visual styles the specification  of the multi-dimensional transfer function becomes more challenging  and non-intuitive. We present a novel methodology for the specification  of a mapping from several volumetric attributes to multiple illustrative  visual styles. We introduce semantic layers that allow a domain expert  to specify the mapping in the natural language of the domain. A semantic  layer defines the mapping of volumetric attributes to one visual  style. Volumetric attributes and visual styles are represented as  fuzzy sets. The mapping is specified by rules that are evaluated  with fuzzy logic arithmetics. The user specifies the fuzzy sets and  the rules without special knowledge about the underlying rendering  technique. Semantic layers allow for a linguistic specification of  the mapping from attributes to visual styles replacing the traditional  transfer function specification.",
    pdf = "pdfs/Rautek-2007-SLI.pdf",
    images = "images/Rautek-2007-SLI.jpg",
    thumbnails = "images/Rautek-2007-SLI.png",
    youtube = "https://www.youtube.com/watch?v=c91m6ru5m0g",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2007.70591",
    event = "IEEE Visualization 2007",
    keywords = "focus+context techniques, volume visualization, illustrative visualization",
    location = "Sacramento, California, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2007/Rautek-2007-SLI/"
    }
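
    A single semantic-layer rule can be sketched with standard fuzzy machinery: trapezoidal membership functions for the volumetric attributes, minimum as the fuzzy AND, and the rule activation blending in a visual style. The membership shapes and the one-rule setup below are assumptions for illustration.

        import numpy as np

        def trapezoid(x, a, b, c, d):
            """Fuzzy membership: 0 below a, ramp up to 1 on [b, c], 0 above d."""
            rise = (x - a) / (b - a + 1e-9)
            fall = (d - x) / (d - c + 1e-9)
            return np.clip(np.minimum(rise, fall), 0.0, 1.0)

        def rule_activation(density, grad_mag):
            # "IF density IS high AND gradient IS large THEN apply the style"
            mu_dense = trapezoid(density, 0.5, 0.7, 1.0, 1.1)
            mu_edge = trapezoid(grad_mag, 0.2, 0.4, 1.0, 1.1)
            return np.minimum(mu_dense, mu_edge)      # fuzzy AND

        def apply_style(base_rgb, style_rgb, act):
            return (1.0 - act[..., None]) * base_rgb + act[..., None] * style_rgb
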

2006

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2006-ICE,
    author = "Stefan Bruckner and S{\"o}ren Grimm and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Illustrative Context-Preserving Exploration of Volume Data",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2006",
    volume = "12",
    number = "6",
    pages = "1559--1569",
    month = "nov",
    abstract = "In volume rendering it is very difficult to simultaneously visualize  interior and exterior structures while preserving clear shape cues.  Highly transparent transfer functions produce cluttered images with  many overlapping structures, while clipping techniques completely  remove possibly important context information. In this paper we present  a new model for volume rendering, inspired by techniques from illustration.  It provides a means of interactively inspecting the interior of a  volumetric data set in a feature-driven way which retains context  information. The context-preserving volume rendering model uses a  function of shading intensity, gradient magnitude, distance to the  eye point, and previously accumulated opacity to selectively reduce  the opacity in less important data regions. It is controlled by two  user-specified parameters. This new method represents an alternative  to conventional clipping techniques, shares their easy and intuitive  user control, but does not suffer from the drawback of missing context  information.",
    pdf = "pdfs/Bruckner-2006-ICE.pdf",
    images = "images/Bruckner-2006-ICE.jpg",
    thumbnails = "images/Bruckner-2006-ICE.png",
    youtube = "https://www.youtube.com/watch?v=a92NXYtJeT0,https://www.youtube.com/watch?v=OLEr5-O1jmY,https://www.youtube.com/watch?v=RSet7-n6Mc4,https://www.youtube.com/watch?v=w0U8lteEMOM,https://www.youtube.com/watch?v=csYsfKrQxN8,https://www.youtube.com/watch?v=3xduvvU6IAw",
    issn = "1077-2626",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2006.96",
    keywords = "focus+context techniques, volume rendering, illustrative visualization",
    url = "http://www.cg.tuwien.ac.at/research/publications/2006/bruckner-2006-ICE/"
    }
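
    The model's key quantity is a per-sample opacity modulation. The sketch below approximates it: opacity is scaled by gradient magnitude raised to an exponent that grows with shading intensity, proximity to the eye, and the transparency of what has been accumulated so far, with kt and ks standing in for the two user parameters. This is an approximation in the spirit of the published model, not a transcription of it.

        import numpy as np

        def modulated_alpha(alpha_tf, grad_mag, shading, depth, acc_alpha,
                            kt=3.0, ks=0.5):
            """All inputs in [0, 1]; depth is normalized distance to the eye.
            Flat, brightly lit regions near the eye become transparent first."""
            exponent = (kt * shading * (1.0 - depth) * (1.0 - acc_alpha)) ** ks
            return alpha_tf * np.clip(grad_mag, 1e-6, 1.0) ** exponent
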
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2006-EVV,
    author = "Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "Exploded Views for Volume Data",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2006",
    volume = "12",
    number = "5",
    pages = "1077--1084",
    month = "sep",
    abstract = "Exploded views are an illustration technique where an object is partitioned  into several segments. These segments are displaced to reveal otherwise  hidden detail. In this paper we apply the concept of exploded views  to volumetric data in order to solve the general problem of occlusion.  In many cases an object of interest is occluded by other structures.  While transparency or cutaways can be used to reveal a focus object,  these techniques remove parts of the context information. Exploded  views, on the other hand, do not suffer from this drawback. Our approach  employs a force-based model: the volume is divided into a part configuration  controlled by a number of forces and constraints. The focus object  exerts an explosion force causing the parts to arrange according  to the given constraints. We show that this novel and flexible approach  allows for a wide variety of explosion-based visualizations including  view-dependent explosions. Furthermore, we present a high-quality  GPU-based volume ray casting algorithm for exploded views which allows  rendering and interaction at several frames per second.",
    pdf = "pdfs/Bruckner-2006-EVV.pdf",
    images = "images/Bruckner-2006-EVV.jpg",
    thumbnails = "images/Bruckner-2006-EVV.png",
    youtube = "https://www.youtube.com/watch?v=6jEqVrjaM3M",
    issn = "1077-2626",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2006.140",
    event = "IEEE Visualization 2006",
    keywords = "exploded views, illustrative visualization, volume rendering",
    url = "http://www.cg.tuwien.ac.at/research/publications/2006/bruckner-2006-EVV/"
    }
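
    The force-based arrangement lends itself to a compact sketch: each part feels an explosion force pushing it away from the focus object, a spring pulling it back toward its rest position, and damping. The force constants and explicit Euler integration are assumptions.

        import numpy as np

        def explode(rest_pos, focus, steps=200, dt=0.02,
                    k_explode=4.0, k_return=1.0, damping=0.9):
            """rest_pos: (n, 3) part centers; focus: (3,) focus-object center."""
            pos = rest_pos.copy()
            vel = np.zeros_like(pos)
            for _ in range(steps):
                away = pos - focus
                dist = np.linalg.norm(away, axis=1, keepdims=True) + 1e-9
                f = k_explode * away / dist**2        # explosion force, falls off
                f += k_return * (rest_pos - pos)      # constraint: return spring
                vel = damping * (vel + dt * f)
                pos = pos + dt * vel
            return pos                                # displaced part positions
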
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Rautek-2006-DHQ,
    author = "Peter Rautek and Bal{\'a}zs Csebfalvi and S{\"o}ren Grimm and Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "D2VR: High Quality Volume Rendering of Projection-based Volumetric Data",
    booktitle = "Proceedings of EuroVis 2006",
    year = "2006",
    pages = "211--218",
    month = "may",
    publisher = "IEEE CS",
    abstract = "Volume rendering techniques are conventionally classified as either  direct or indirect methods. Indirect methods require to transform  the initial volumetric model into an intermediate geometrical model  in order to efficiently visualize it. In contrast, direct volume  rendering (DVR) methods can directly process the volumetric data.  Modern CT scanners usually provide data as a set of samples on a  rectilinear grid, which is computed from the measured projections  by discrete tomographic reconstruction. Therefore the rectilinear  grid can already be considered as an intermediate volume representation.  In this paper we introduce direct direct volume rendering (D²VR).  D2VR does not require a rectilinear grid, since it is based on an  immediate processing of the measured projections. Arbitrary samples  for ray casting are reconstructed from the projections by using the  Filtered Back-Projection algorithm. Our method removes a lossy resampling  step from the classical volume rendering pipeline. It provides much  higher accuracy than traditional grid-based resampling techniques  do. Furthermore we also present a novel high-quality gradient estimation  scheme, which is also based on the Filtered Back-Projection algorithm.",
    pdf = "pdfs/Rautek-2006-DHQ.pdf",
    images = "images/Rautek-2006-DHQ.jpg",
    thumbnails = "images/Rautek-2006-DHQ.png",
    number = "In Proceedings of EuroVis",
    affiliation = "tuwien",
    doi = "10.2312/VisSym/EuroVis06/211-218",
    keywords = "volume rendering, filtered back-projection, reconstruction",
    url = "http://www.cg.tuwien.ac.at/research/publications/2006/RAUTEK06/"
    }
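
    The essential move in D2VR is evaluating the reconstruction at arbitrary ray-casting sample positions straight from the (filtered) projections, skipping the lossy rectilinear resampling. A 2D parallel-beam sketch of that evaluation is below; the FFT ramp filter and linear detector interpolation are standard textbook choices, not the paper's exact pipeline.

        import numpy as np

        def filter_projections(sinogram):
            """sinogram: (n_angles, n_det). Ramp-filter each projection row."""
            n = sinogram.shape[1]
            ramp = np.abs(np.fft.fftfreq(n))
            return np.real(np.fft.ifft(np.fft.fft(sinogram, axis=1) * ramp,
                                       axis=1))

        def sample(filtered, angles, x, y):
            """Back-project to evaluate the reconstruction at point (x, y)."""
            n_det = filtered.shape[1]
            center = (n_det - 1) / 2.0
            s = x * np.cos(angles) + y * np.sin(angles) + center
            i0 = np.clip(np.floor(s).astype(int), 0, n_det - 2)
            w = s - i0
            rows = np.arange(len(angles))
            vals = (1 - w) * filtered[rows, i0] + w * filtered[rows, i0 + 1]
            return vals.sum() * np.pi / len(angles)
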

2005

    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Bruckner-2005-ICV,
    author = "Stefan Bruckner and S{\"o}ren Grimm and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Illustrative Context-Preserving Volume Rendering",
    booktitle = "Proceedings of EuroVis 2005",
    year = "2005",
    pages = "69--76",
    month = "may",
    abstract = "In volume rendering it is very difficult to simultaneously visualize  interior and exterior structures while preserving clear shape cues.  Very transparent transfer functions produce cluttered images with  many overlapping structures, while clipping techniques completely  remove possibly important context information. In this paper we present  a new model for volume rendering, inspired by techniques from illustration  that provides a means of interactively inspecting the interior of  a volumetric data set in a feature-driven way which retains context  information. The context-preserving volume rendering model uses a  function of shading intensity, gradient magnitude, distance to the  eye point, and previously accumulated opacity to selectively reduce  the opacity in less important data regions. It is controlled by two  user-specified parameters. This new method represents an alternative  to conventional clipping techniques, shares their easy and intuitive  user control, but does not suffer from the drawback of missing context  information. ",
    pdf = "pdfs/Bruckner-2005-ICV.pdf",
    images = "images/Bruckner-2005-ICV.jpg",
    thumbnails = "images/Bruckner-2005-ICV.png",
    youtube = "https://www.youtube.com/watch?v=Tc4E2oOD8Zg,https://www.youtube.com/watch?v=_8P_hVBoFeU,https://www.youtube.com/watch?v=0yxNoPjT6Ig,https://www.youtube.com/watch?v=EjG6E2WEO30",
    affiliation = "tuwien",
    doi = "10.2312/VisSym/EuroVis05/069-076",
    keywords = "non-photorealistic techniques, focus+context techniques, volume rendering",
    url = "http://www.cg.tuwien.ac.at/research/publications/2005/bruckner-2005-ICV/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Bruckner-2005-VIS,
    author = "Stefan Bruckner and Meister Eduard Gr{\"o}ller",
    title = "VolumeShop: An Interactive System for Direct Volume Illustration",
    booktitle = "Proceedings of IEEE Visualization 2005",
    year = "2005",
    editor = "C. T. Silva, E. Gr{\"o}ller, H. Rushmeier",
    pages = "671--678",
    month = "oct",
    abstract = "Illustrations play a major role in the education process. Whether  used to teach a surgical or radiologic procedure, to illustrate normal  or aberrant anatomy, or to explain the functioning of a technical  device, illustration significantly impacts learning. Although many  specimens are readily available as volumetric data sets, particularly  in medicine, illustrations are commonly produced manually as static  images in a time-consuming process. Our goal is to create a fully  dynamic three-dimensional illustration environment which directly  operates on volume data. Single images have the aesthetic appeal  of traditional illustrations, but can be interactively altered and  explored. In this paper we present methods to realize such a system  which combines artistic visual styles and expressive visualization  techniques. We introduce a novel concept for direct multi-object  volume visualization which allows control of the appearance of inter-penetrating  objects via two-dimensional transfer functions. Furthermore, a unifying  approach to efficiently integrate many non-photorealistic rendering  models is presented. We discuss several illustrative concepts which  can be realized by combining cutaways, ghosting, and selective deformation.  Finally, we also propose a simple interface to specify objects of  interest through three-dimensional volumetric painting. All presented  methods are integrated into VolumeShop, an interactive hardware-accelerated  application for direct volume illustration.",
    pdf = "pdfs/Bruckner-2005-VIS.pdf",
    images = "images/Bruckner-2005-VIS.jpg",
    thumbnails = "images/Bruckner-2005-VIS.png",
    youtube = "https://www.youtube.com/watch?v=1FZausY8dFw,https://www.youtube.com/watch?v=WB-4NHKSM4k,https://www.youtube.com/watch?v=Rzi6q6n5lRs,https://www.youtube.com/watch?v=0B_fVsBibZk",
    affiliation = "tuwien",
    doi = "10.1109/VISUAL.2005.1532856",
    isbn = "0780394623",
    keywords = "focus+context techniques, illustrative visualization, volume rendering",
    location = "Minneapolis, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2005/bruckner-2005-VIS/"
    }
    [PDF] [YT] [Bibtex]
    @INPROCEEDINGS {Coto-2005-MAC,
    author = "Ernesto Coto and S{\"o}ren Grimm and Stefan Bruckner and Meister Eduard Gr{\"o}ller and Armin Kanitsar and Omaira Rodriguez",
    title = "MammoExplorer: An Advanced {CAD} Application for Breast {DC}E-{MRI}",
    booktitle = "Proceedings of VMV 2005",
    year = "2005",
    editor = "G. Greiner, J. Hornegger, H. Niemann, M. Stamminger",
    pages = "91--98",
    month = "nov",
    abstract = "Currently X-ray mammography is the most widely used method for early  detection of breast cancer. However, the use of Dynamic Contrast  Enhanced MRI (DCE-MRI) has gained wider attention, since it considerably  improves tumor detection and classification by analyzing the flow  of contrast agent within the breast tissue. In this paper we present  MammoExplorer, a CAD application that combines advanced interaction,  segmentation and visualization techniques to explore Breast DCE-MRI  data. Our application uses Brushing and Linking, Two-level Volume  Rendering, Importance-driven Volume Rendering, and False Color Maps.  In addition, we present Enhancement Scatterplots, a novel graphical  representation of DCE-MRI data, novel segmentation approaches, and  a new way to explore time-varying CE-MRI data.",
    pdf = "pdfs/Coto-2005-MAC.pdf",
    images = "images/Coto-2005-MAC.jpg",
    thumbnails = "images/Coto-2005-MAC.png",
    youtube = "https://www.youtube.com/watch?v=6XBD1f1y2xs",
    affiliation = "tuwien",
    isbn = "3898380688",
    keywords = "CAD, breast cancer, contrast enhanced MRI",
    location = "Erlangen, Germany",
    url = "http://www.cg.tuwien.ac.at/research/publications/2005/coto-2005-MAC/"
    }

2004

    [PDF] [DOI] [Bibtex]
    @ARTICLE {Grimm-2004-VVD,
    author = "S{\"o}ren Grimm and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "VOTS: VOlume doTS as a Point-Based Representation of Volumetric Data",
    journal = "Computer Graphics Forum",
    year = "2004",
    volume = "23",
    number = "3",
    pages = "668--661",
    month = "sep",
    abstract = "We present Volume dots (Vots), a new primitive for volumetric data  modelling, processing, and rendering. Vots are a point-based representation  of volumetric data. An individual Vot is specified by the coefficients  of a Taylor series expansion, i.e. the function value and higher  order derivatives at a specific point. A Vot does not only represent  a single sample point, it represents the underlying function within  a region. With the Vots representation we have a more intuitive and  high-level description of the volume data. This allows direct analytical  examination and manipulation of volumetric datasets. Vots enable  the representation of the underlying scalar function with specified  precision. User-centric importance sampling is also possible, i.e.,  unimportant volume parts are still present but represented with just  very few Vots. As proof of concept, we show Maximum Intensity Projection  based on Vots.",
    pdf = "pdfs/Grimm-2004-VVD.pdf",
    images = "images/Grimm-2004-VVD.jpg",
    thumbnails = "images/Grimm-2004-VVD.png",
    issn = "0167-7055",
    affiliation = "tuwien",
    doi = "10.1111/j.1467-8659..00798.x",
    keywords = "point-based data, volume data",
    url = "http://www.cg.tuwien.ac.at/research/publications/2004/grimm-2004-volume/"
    }
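
    Since a Vot stores the value and derivatives of a Taylor series expansion, evaluating the represented function near the Vot's center is a direct computation. The second-order 3D evaluation below is an illustrative choice of expansion order.

        import numpy as np

        def eval_vot(x, x0, f0, grad, hess):
            """x, x0: (3,) points; f0: value, grad: (3,), hess: (3, 3) at x0."""
            d = x - x0
            return f0 + grad @ d + 0.5 * d @ hess @ d   # truncated Taylor series
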
    [PDF] [Bibtex]
    @MASTERSTHESIS {Bruckner-2004-EVV-Thesis,
    author = "Stefan Bruckner",
    title = "Efficient Volume Visualization of Large Medical Datasets",
    school = "Vienna University of Technology, Austria",
    year = "2004",
    month = "may",
    abstract = "The size of volumetric datasets used in medical environments is increasing  at a rapid pace. Due to excessive pre-computation and memory demanding  data structures, most current approaches for volume visualization  do not meet the requirements of daily clinical routine. In this diploma  thesis, an approach for interactive high-quality rendering of large  medical data is presented. It is based on image-order raycasting  with object-order data traversal, using an optimized cache coherent  memory layout. New techniques and parallelization strategies for  direct volume rendering of large data on commodity hardware are presented.  By using new memory efficient acceleration data structures, high-quality  direct volume rendering of several hundred megabyte sized datasets  at sub-second frame rates on a commodity notebook is achieved.",
    pdf = "pdfs/Bruckner-2004-EVV-Thesis.pdf",
    images = "images/Bruckner-2004-EVV-Thesis.jpg",
    thumbnails = "images/Bruckner-2004-EVV-Thesis.png",
    affiliation = "tuwien",
    keywords = "volume rendering, large data",
    url = "http://www.cg.tuwien.ac.at/research/publications/2004/bruckner-2004-EVV/"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Grimm-2004-MEA,
    author = "S{\"o}ren Grimm and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Memory Efficient Acceleration Structures and Techniques for {CPU}-based Volume Raycasting of Large Data",
    booktitle = "Proceedings of IEEE VolVis 2004",
    year = "2004",
    editor = "D. Silver, T. Ertl, C. Silva",
    pages = "1--8",
    month = "oct",
    abstract = "Most CPU-based volume raycasting approaches achieve high performance  by advanced memory layouts, space subdivision, and excessive pre-computing.  Such approaches typically need an enormous amount of memory. They  are limited to sizes which do not satisfy the medical data used in  daily clinical routine. We present a new volume raycasting approach  based on image-ordered raycasting with object-ordered processing,  which is able to perform high-quality rendering of very large medical  data in real-time on commodity computers. For large medical data  such as computed tomographic (CT) angiography run-offs (512x512x1202)  we achieve rendering times up to 2.5 fps on a commodity notebook.  We achieve this by introducing a memory efficient acceleration technique  for on-the-fly gradient estimation and a memory efficient hybrid  removal and skipping technique of transparent regions. We employ  quantized binary histograms, granular resolution octrees, and a cell  invisibility cache. These acceleration structures require just a  small extra storage of approximately 10%.",
    pdf = "pdfs/Grimm-2004-MEA.pdf",
    images = "images/Grimm-2004-MEA.jpg",
    thumbnails = "images/Grimm-2004-MEA.png",
    youtube = "https://www.youtube.com/watch?v=WK9DJ6Dyrx4,https://www.youtube.com/watch?v=iYz5VYHMd9U,https://www.youtube.com/watch?v=UdtaaENWs7M",
    affiliation = "tuwien",
    doi = "10.1109/SVVG.2004.8",
    isbn = "0-7803-8781-3",
    keywords = "volume rendering, acceleration, large data",
    url = "http://www.cg.tuwien.ac.at/research/publications/2004/grimm-2004-memory/"
    }
    [PDF] [Bibtex]
    @INPROCEEDINGS {Bruckner-2004-EVV,
    author = "Stefan Bruckner",
    title = "Efficient Volume Visualization of Large Medical Datasets",
    booktitle = "Proceedings of CESCG 2004",
    year = "2004",
    month = "apr",
    abstract = "In volume visualization, huge amounts of data have to be processed.  While modern hardware is quite capable of this task in terms of processing  power, the gap between CPU performance and memory bandwidth further  increases with every new generation of CPUs. It is therefore essential  to efficiently use the limited memory bandwidth. In this paper, we  present novel approaches to optimize CPU-based volume raycasting  of large datasets on commodity hardware. A new addressing scheme  is introduced, which permits the use of a bricked volume layout with  minimal overhead. We further present an extended parallelization  strategy for Simultaneous Multithreading. Finally, we introduce memory  efficient acceleration data structures which enable us to render  large medical datasets, such as the Visible Male (587x341x1878),  at up to 2.5 frames/second on a commodity notebook.",
    pdf = "pdfs/Bruckner-2004-EVV.pdf",
    images = "images/Bruckner-2004-EVV.jpg",
    thumbnails = "images/Bruckner-2004-EVV.png",
    note = "CESCG 2004 Best Paper Award and Best Presentation Award",
    affiliation = "tuwien",
    url = "http://www.cescg.org/CESCG-2004/web/Bruckner-Stefan/html/"
    }
    [PDF] [YT] [Bibtex]
    @INPROCEEDINGS {Grimm-2004-FDM,
    author = "S{\"o}ren Grimm and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "Flexible Direct Multi-Volume Rendering in Interactive Scenes",
    booktitle = "Proceedings of VMV 2004",
    year = "2004",
    pages = "386--379",
    month = "oct",
    abstract = "In this paper we describe methods to efficiently visualize multiple  ntersecting volumetric objects. We introduce the concept of V-Objects.  V-Objects represent abstract properties of an object connected to  a volumetric data source. We present a method to perform direct volume  rendering of a scene comprised of an arbitrary number of possibly  intersecting V-Objects. The idea of our approach is to distinguish  between regions of intersection, which need costly multi-volume processing,  and regions containing only one V-Object, which can be processed  using a highly efficient brick-wise volume traversal scheme. Using  this method, we achieve significant performance gains for multi-volume  rendering. We show possible medical applications, such as surgical  planning, diagnosis, and education.",
    pdf = "pdfs/Grimm-2004-FDM.pdf",
    images = "images/Grimm-2004-FDM.jpg",
    thumbnails = "images/Grimm-2004-FDM.png",
    youtube = "https://www.youtube.com/watch?v=pDskLE6cnFw,https://www.youtube.com/watch?v=VYKaSpsZd2s,https://www.youtube.com/watch?v=BGE640_Tw2U,https://www.youtube.com/watch?v=p-I0HWBv4Jc,https://www.youtube.com/watch?v=6zlprE38GGo",
    affiliation = "tuwien",
    keywords = "multi volume rendering, medical visualization, volume raycasting",
    location = "Stanford, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2004/GRIMM-2004-FDMX-P/"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Grimm-2004-RDA,
    author = "S{\"o}ren Grimm and Stefan Bruckner and Armin Kanitsar and Meister Eduard Gr{\"o}ller",
    title = "A Refined Data Addressing and Processing Scheme to Accelerate Volume Raycasting",
    journal = "Computers \& Graphics",
    year = "2004",
    volume = "28",
    number = "5",
    pages = "719--729",
    month = "oct",
    abstract = "Most volume rendering systems based on CPU volume raycasting still  suffer from inefficient CPU utilization and high memory usage. To  target these issues we present a new technique for efficient data  addressing. Furthermore, we introduce a new processing scheme for  volume raycasting which exploits thread-level parallelism - a technology  now supported by commodity computer architectures.",
    pdf = "pdfs/Grimm-2004-RDA.pdf",
    images = "images/Grimm-2004-RDA.jpg",
    thumbnails = "images/Grimm-2004-RDA.png",
    issn = "0097-8493",
    affiliation = "tuwien",
    doi = "10.1016/j.cag.2004.06.010",
    isbn = "0097-8493",
    keywords = "volume raycasting, bricking, parallel computing",
    url = "http://www.cg.tuwien.ac.at/research/publications/2004/grimm-2004-arefined/"
    }

2003

    [PDF] [YT] [Bibtex]
    @INPROCEEDINGS {Bruckner-2003-IWN,
    author = "Stefan Bruckner and Dieter Schmalstieg and Helwig Hauser and Meister Eduard Gr{\"o}ller",
    title = "The Inverse Warp: Non-Invasive Integration of Shear-Warp Volume Rendering into Polygon Rendering Pipelines",
    booktitle = "Proceedings of VMV 2003",
    year = "2003",
    editor = "T. Ertl, B. Girod, G. Greiner, H. Niemann, H.-P. Seidel, E. Steinbach, R. Westermann",
    pages = "529--536",
    month = "nov",
    publisher = "infix",
    abstract = "In this paper, a simple and efficient solution for combining shear-warp  volume rendering and the hardware graphics pipeline is presented.  The approach applies an inverse warp transformation to the Z-Buffer,  containing the rendered geometry. This information is used for combining  geometry and volume data during compositing. We present applications  of this concept which include hybrid volume rendering, i.e., concurrent  rendering of polygonal objects and volume data, and volume clipping  on convex clipping regions. Furthermore, it can be used to efficiently  define regions with different rendering modes and transfer functions  for focus+context volume rendering. Empirical results show that the  approach has very low impact on performance.",
    pdf = "pdfs/Bruckner-2003-IWN.pdf",
    images = "images/Bruckner-2003-IWN.jpg",
    thumbnails = "images/Bruckner-2003-IWN.png",
    youtube = "https://www.youtube.com/watch?v=l_49gLBUO3E,https://www.youtube.com/watch?v=zmWQfUs3Bmc,https://www.youtube.com/watch?v=qFwv-Ru8Ftc",
    affiliation = "tuwien",
    isbn = "3898380483",
    keywords = "focus+context techniques, clipping, hybrid volume rendering",
    url = "http://www.cg.tuwien.ac.at/research/publications/2003/Bruckner-2003-The/"
    }
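
    The inverse warp itself reduces to resampling the geometry Z-buffer, rendered in final image space, back into the shear-warp intermediate image: for each intermediate pixel, apply the 2D warp to find its final-image position and fetch the depth there. The homogeneous 3x3 warp matrix and bilinear resampling below are assumptions.

        import numpy as np
        from scipy.ndimage import map_coordinates

        def inverse_warp_zbuffer(zbuf, warp_2d, inter_shape):
            """zbuf: (H, W) final-image depths; warp_2d: 3x3 matrix mapping
            intermediate-image pixels to final-image pixels."""
            h, w = inter_shape
            ys, xs = np.mgrid[0:h, 0:w]
            pts = np.stack([xs.ravel(), ys.ravel(), np.ones(h * w)])
            mapped = warp_2d @ pts                     # intermediate -> final
            mapped = mapped[:2] / mapped[2]
            coords = np.stack([mapped[1], mapped[0]])  # (row, col) order
            return map_coordinates(zbuf, coords, order=1,
                                   mode="nearest").reshape(h, w)

    The resampled depths can then be compared against volume sample depths during compositing, which is how geometry and volume are merged without modifying the polygon rendering pipeline.
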