Sergej Stoppel

Operative PostDoc

Human-computer interaction; computational aesthetics

Sergej Stoppel finished his master's degree in Mathematics in 2014. He worked as a researcher at the Fraunhofer Institute for Production Systems and Design Technology from 2010 until 2014.
Sergej defended his PhD thesis "User-Centric Parameter Specification for Interactive Virtual and Physical Visual Representations" in October 2018.
Currently Sergej is a postdoc at the visualization group at the University of Bergen. His main research interests lie in visual data science.



    [PDF] [DOI] [YT] [Bibtex]
    @inproceedings {Trautner-2022-HCP,
    author = {Trautner, Thomas and Sbardellati, Maximilian and Stoppel, Sergej and Bruckner, Stefan},
    title = {{Honeycomb Plots: Visual Enhancements for Hexagonal Maps}},
    booktitle = {Proc. of VMV 2022: Vision, Modeling, and Visualization},
    editor = {Bender, Jan and Botsch, Mario and Keim, Daniel A.},
    pages = {65--73},
    year = {2022},
    publisher = {The Eurographics Association},
    ISBN = {978-3-03868-189-2},
    DOI = {10.2312/vmv.20221205},
    abstract = {Aggregation through binning is a commonly used technique for visualizing large, dense, and overplotted two-dimensional data sets. However, aggregation can hide nuanced data-distribution features and complicates the display of multiple data-dependent variables, since color mapping is the primary means of encoding. In this paper, we present novel techniques for enhancing hexplots with spatialization cues while avoiding common disadvantages of three-dimensional visualizations. In particular, we focus on techniques relying on preattentive features that exploit shading and shape cues to emphasize relative value differences. Furthermore, we introduce a novel visual encoding that conveys information about the data distributions or trends within individual tiles. Based on multiple usage examples from different domains and real-world scenarios, we generate expressive visualizations that increase the information content of classic hexplots and validate their effectiveness in a user study.},
    pdf = "pdfs/Trautner-2022-HCP.pdf",
    thumbnails = "images/Trautner-2022-HCP-thumb.png",
    images = "images/Trautner-2022-HCP-thumb.png",
    youtube = "",
    git = ""
    }


    [PDF] [DOI] [VID] [YT] [Bibtex]
    @ARTICLE {Trautner-2020-SPM,
    author = {Trautner, T. and Bolte, F. and Stoppel, S. and Bruckner, S.},
    title = {Sunspot Plots: Model-based Structure Enhancement for Dense Scatter Plots},
    journal = {Computer Graphics Forum},
    volume = {39},
    number = {3},
    pages = {551--563},
    keywords = {information visualization, scatterplots, kernel density estimation},
    doi = {10.1111/cgf.14001},
    abstract = {Scatter plots are a powerful and well-established technique for visualizing the relationships between two variables as a collection of discrete points. However, especially when dealing with large and dense data, scatter plots often exhibit problems such as overplotting, making the data interpretation arduous. Density plots are able to overcome these limitations in highly populated regions, but fail to provide accurate information of individual data points. This is particularly problematic in sparse regions where the density estimate may not provide a good representation of the underlying data. In this paper, we present sunspot plots, a visualization technique that communicates dense data as a continuous data distribution, while preserving the discrete nature of data samples in sparsely populated areas. We furthermore demonstrate the advantages of our approach on typical failure cases of scatter plots within synthetic and real-world data sets and validate its effectiveness in a user study.},
    year = {2020},
    pdf = "pdfs/Trautner_2020_SunspotPlots_PDF.pdf",
    thumbnails = "images/Trautner_2020_SunspotPlots_thumb.png",
    images = "images/Trautner_2020_SunspotPlots_thumb.png",
    vid = "vids/Trautner_2020_SunspotPlots_video.mp4",
    youtube = "",
    project = "MetaVis"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Solteszova-2019-MLT,
    author = {Solteszova, V. and Smit, N. N. and Stoppel, S. and Gr\"{u}ner, R. and Bruckner, S.},
    title = {Memento: Localized Time-Warping for Spatio-Temporal Selection},
    journal = {Computer Graphics Forum},
    volume = {39},
    number = {1},
    pages = {231--243},
    year = {2020},
    keywords = {interaction, temporal data, visualization, spatio-temporal projection},
    images = "images/Solteszova-2019-MLT.jpg",
    thumbnails = "images/Solteszova-2019-MLT-1.jpg",
    pdf = "pdfs/Solteszova-2019-MLT.pdf",
    doi = {10.1111/cgf.13763},
    abstract = {Interaction techniques for temporal data are often focused on affecting the spatial aspects of the data, for instance through the use of transfer functions, camera navigation or clipping planes. However, the temporal aspect of the data interaction is often neglected. The temporal component is either visualized as individual time steps, an animation or a static summary over the temporal domain. When dealing with streaming data, these techniques are unable to cope with the task of re-viewing an interesting local spatio-temporal event, while continuing to observe the rest of the feed. We propose a novel technique that allows users to interactively specify areas of interest in the spatio-temporal domain. By employing a time-warp function, we are able to slow down time, freeze time or even travel back in time, around spatio-temporal events of interest. The combination of such a (pre-defined) time-warp function and brushing directly in the data to select regions of interest allows for a detailed review of temporally and spatially localized events, while maintaining an overview of the global spatio-temporal data. We demonstrate the utility of our technique with several usage scenarios.},
    project = "MetaVis,ttmedvis,VIDI"
    }


    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Stoppel-2019-LFL,
    author = "Stoppel, Sergej and Bruckner, Stefan",
    title = "LinesLab: A Flexible Low-Cost Approach for the Generation of Physical Monochrome Art",
    journal = "Computer Graphics Forum",
    year = "2019",
    abstract = "The desire for the physical generation of computer art has seen a significant body of research that has resulted in sophisticated robots and painting machines, together with specialized algorithms mimicking particular artistic techniques. The resulting setups are often expensive and complex, making them unavailable for recreational and hobbyist use. In recent years, however, a new class of affordable low-cost plotters and cutting machines has reached the market. In this paper, we present a novel system for the physical generation of line and cut-out art based on digital images, targeted at such off-the-shelf devices. Our approach uses a meta-optimization process to generate results that represent the tonal content of a digital image while conforming to the physical and mechanical constraints of home-use devices. By flexibly combining basic sets of positional and shape encodings, we are able to recreate a wide range of artistic styles. Furthermore, our system optimizes the output in terms of visual perception based on the desired viewing distance, while remaining scalable with respect to the medium size.",
    pdf = "pdfs/Stoppel-2019-LFL.pdf",
    images = "images/Stoppel-2019-LFL.jpg",
    thumbnails = "images/Stoppel-2019-LFL.png",
    publisher = "The Eurographics Association and John Wiley and Sons Ltd.",
    doi = "10.1111/cgf.13609",
    youtube = "",
    project = "MetaVis"
    }
    [PDF] [DOI] [VID] [Bibtex]
    @ARTICLE {Stoppel-2019-FVI,
    author = "Sergej Stoppel and Magnus Paulson Erga and Stefan Bruckner",
    title = "Firefly: Virtual Illumination Drones for Interactive Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2019",
    volume = "25",
    pages = "1204--1213",
    abstract = "Light specification in three dimensional scenes is a complex problem and several approaches have been presented that aim to automate this process. However, there are many scenarios where a static light setup is insufficient, as the scene content and camera position may change. Simultaneous manual control over the camera and light position imposes a high cognitive load on the user. To address this challenge, we introduce a novel approach for automatic scene illumination with Fireflies. Fireflies are intelligent virtual light drones that illuminate the scene by traveling on a closed path. The Firefly path automatically adapts to changes in the scene based on an outcome-oriented energy function. To achieve interactive performance, we employ a parallel rendering pipeline for the light path evaluations. We provide a catalog of energy functions for various application scenarios and discuss the applicability of our method on several examples.",
    pdf = "pdfs/VIS2018-Firefly.pdf",
    vid = "vids/FinalVideo.mp4",
    images = "images/Teaser.png",
    thumbnails = "images/HeadRightCroppedThumbnail.png",
    doi = "10.1109/TVCG.2018.2864656",
    project = "MetaVis"
    }


    @PHDTHESIS {PhDThesis2018Stoppel,
    author = "Stoppel, Sergej",
    title = "User-Centric Parameter Specification for Interactive Virtual and Physical Visual Representations",
    school = "Universitetet i Bergen",
    year = "2018"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Stoppel-2018-SSW,
    author = "Sergej Stoppel and Stefan Bruckner",
    title = "Smart Surrogate Widgets for Direct Volume Manipulation",
    booktitle = "Proceedings of IEEE PacificVis 2018",
    year = "2018",
    pages = "36--45",
    month = apr,
    abstract = "Interaction is an essential aspect in volume visualization, yet common manipulation tools such as bounding boxes or clipping plane widgets provide rather crude tools as they neglect the complex structure of the underlying data. In this paper, we introduce a novel volume interaction approach based on smart widgets that are automatically placed directly into the data in a visibility-driven manner. By adapting to what the user actually sees, they act as proxies that allow for goal-oriented modifications while still providing an intuitive set of simple operations that is easy to control. In particular, our method is well-suited for direct manipulation scenarios such as touchscreens, where traditional user interface elements commonly exhibit limited utility. To evaluate our approach we conducted a qualitative user study with nine participants with various backgrounds.",
    pdf = "pdfs/Stoppel-2018-SSW.pdf",
    images = "images/Stoppel-2018-SSW.jpg",
    thumbnails = "images/Stoppel-2018-SSW.png",
    youtube = "",
    event = "IEEE PacificVis 2018",
    keywords = "smart interfaces, volume manipulation, volume visualization",
    doi = "10.1109/PacificVis.2018.00014",
    project = "MetaVis"
    }


    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Stoppel-2017-VPI,
    author = "Sergej Stoppel and Stefan Bruckner",
    title = "Vol²velle: Printable Interactive Volume Visualization",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2017",
    volume = "23",
    number = "1",
    pages = "861--870",
    month = jan,
    abstract = "Interaction is an indispensable aspect of data visualization. The presentation of volumetric data, in particular, often significantly benefits from interactive manipulation of parameters such as transfer functions, rendering styles, or clipping planes. However, when we want to create hardcopies of such visualizations, this essential aspect is lost. In this paper, we present a novel approach for creating hardcopies of volume visualizations which preserves a certain degree of interactivity. We present a method for automatically generating Volvelles, printable tangible wheel charts that can be manipulated to explore different parameter settings. Our interactive system allows the flexible mapping of arbitrary visualization parameters and supports advanced features such as linked views. The resulting designs can be easily reproduced using a standard printer and assembled within a few minutes.",
    pdf = "pdfs/Stoppel-2017-VPI.pdf",
    images = "images/Stoppel-2017-VPI.jpg",
    thumbnails = "images/Stoppel-2017-VPI.png",
    youtube = "",
    doi = "10.1109/TVCG.2016.2599211",
    event = "IEEE SciVis 2016",
    keywords = "physical visualization, interaction, volume visualization, illustrative visualization",
    location = "Baltimore, USA"
    }
    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Solteszova-2017-OFS,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and {\AA}smund Birkeland and Sergej Stoppel and Ivan Viola and Stefan Bruckner",
    title = "Output-Sensitive Filtering of Streaming Volume Data",
    journal = "Computer Graphics Forum",
    year = "2017",
    volume = "36",
    number = "1",
    pages = "249--262",
    month = "jan",
    abstract = "Real-time volume data acquisition poses substantial challenges for  the traditional visualization pipeline where data enhancement is  typically seen as a pre-processing step. In the case of 4D ultrasound  data, for instance, costly processing operations to reduce noise  and to remove artifacts need to be executed for every frame. To enable  the use of high quality filtering operations in such scenarios, we  propose an output-sensitive approach to the visualization of streaming  volume data. Our method evaluates the potential contribution of all  voxels to the final image, allowing us to skip expensive processing  operations that have little or no effect on the visualization. As  filtering operations modify the data values which may affect the  visibility, our main contribution is a fast scheme to predict their  maximum effect on the final image. Our approach prioritizes filtering  of voxels with high contribution to the final visualization based  on a maximal permissible error per pixel. With zero permissible error,  the optimized filtering will yield a result identical to filtering  of the entire volume. We provide a thorough technical evaluation  of the approach and demonstrate it on several typical scenarios that  require on-the-fly processing.",
    pdf = "pdfs/Solteszova-2017-OFS.pdf",
    images = "images/Solteszova-2017-OFS.jpg",
    thumbnails = "images/Solteszova-2017-OFS.png",
    youtube = "",
    doi = "10.1111/cgf.12799",
    keywords = "output-sensitive processing, volume data, filtering"


    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Stoppel-2016-GIR,
    author = "Sergej Stoppel and Erlend Hodneland and Helwig Hauser and Stefan Bruckner",
    title = "Graxels: Information Rich Primitives for the Visualization of Time-Dependent Spatial Data",
    booktitle = "Proceedings of VCBM 2016",
    year = "2016",
    pages = "183--192",
    month = sep,
    abstract = "Time-dependent volumetric data has important applications in areas as diverse as medicine, climatology, and engineering. However, the simultaneous quantitative assessment of spatial and temporal features is very challenging. Common visualization techniques show either the whole volume in one time step (for example using direct volume rendering) or let the user select a region of interest (ROI) for which a collection of time-intensity curves is shown. In this paper, we propose a novel approach that dynamically embeds quantitative detail views in a spatial layout. Inspired by the concept of small multiples, we introduce a new primitive graxel (graph pixel). Graxels are view dependent primitives of time-intensity graphs, generated on-the-fly by aggregating per-ray information over time and image regions. Our method enables the detailed feature-aligned visual analysis of time-dependent volume data and allows interactive refinement and filtering. Temporal behaviors like frequency relations, aperiodic or periodic oscillations and their spatial context are easily perceived with our method. We demonstrate the power of our approach using examples from medicine and the natural sciences.",
    pdf = "pdfs/Stoppel-2016-GIR.pdf",
    images = "images/Stoppel-2016-GIR.jpg",
    thumbnails = "images/Stoppel-2016-GIR.png",
    youtube = "",
    doi = "10.2312/vcbm.20161286",
    event = "VCBM 2016",
    keywords = "time-dependent data, volume data, small multiples",
    location = "Bergen, Norway"
    }
    @MISC {Stoppel2015ConfReport,
    author = "Sergej Stoppel",
    title = "Conference Report IEEE VIS 2014",
    month = "January",
    year = "2016",
    abstract = "Conference report about the IEEE VIS 2014 in Paris.",
    images = "images/Shneiderman_Gerson_Pushups.PNG",
    thumbnails = "images/Shneiderman_Gerson_Pushups.PNG",
    url = ""