Veronika Šoltészová

Adjunct associate professor

MedViz, SciVis

Publications

2017

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Solteszova-2017-OFS,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and {\AA}smund Birkeland and Sergej Stoppel and Ivan Viola and Stefan Bruckner",
    title = "Output-Sensitive Filtering of Streaming Volume Data",
    journal = "Computer Graphics Forum",
    year = "2017",
    volume = "36",
    number = "1",
    pages = "249--262",
    month = "jan",
    abstract = "Real-time volume data acquisition poses substantial challenges for  the traditional visualization pipeline where data enhancement is  typically seen as a pre-processing step. In the case of 4D ultrasound  data, for instance, costly processing operations to reduce noise  and to remove artifacts need to be executed for every frame. To enable  the use of high quality filtering operations in such scenarios, we  propose an output-sensitive approach to the visualization of streaming  volume data. Our method evaluates the potential contribution of all  voxels to the final image, allowing us to skip expensive processing  operations that have little or no effect on the visualization. As  filtering operations modify the data values which may affect the  visibility, our main contribution is a fast scheme to predict their  maximum effect on the final image. Our approach prioritizes filtering  of voxels with high contribution to the final visualization based  on a maximal permissible error per pixel. With zero permissible error,  the optimized filtering will yield a result identical to filtering  of the entire volume. We provide a thorough technical evaluation  of the approach and demonstrate it on several typical scenarios that  require on-the-fly processing.",
    pdf = "pdfs/Solteszova-2017-OFS.pdf",
    images = "images/Solteszova-2017-OFS.jpg",
    thumbnails = "images/Solteszova-2017-OFS.png",
    youtube = "https://www.youtube.com/watch?v=xGPs560ttp0",
    doi = "10.1111/cgf.12799",
    keywords = "output-sensitive processing, volume data, filtering"
    }
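
In essence, the method above predicts the maximum effect each voxel can have on the final image and runs the expensive filter only where that effect would exceed the permissible per-pixel error. A minimal CPU sketch in Python/NumPy: the visibility weights, the max_delta bound on how much the filter can change a value, and the single scalar epsilon are assumed stand-ins for the paper's per-pixel error prediction, and a 3×3×3 median stands in for the costly filtering operation.

    import numpy as np

    def output_sensitive_filter(volume, visibility, max_delta, epsilon):
        """Filter only voxels whose predicted maximum effect on the image
        (visibility * max_delta) exceeds the permissible error epsilon;
        all other voxels are passed through unchanged. With epsilon == 0,
        every voxel that can contribute to the image at all is filtered."""
        out = volume.copy()
        impact = visibility * max_delta              # predicted max image effect
        zs, ys, xs = np.nonzero(impact > epsilon)    # voxels worth processing
        for z, y, x in zip(zs, ys, xs):
            z0, z1 = max(z - 1, 0), min(z + 2, volume.shape[0])
            y0, y1 = max(y - 1, 0), min(y + 2, volume.shape[1])
            x0, x1 = max(x - 1, 0), min(x + 2, volume.shape[2])
            out[z, y, x] = np.median(volume[z0:z1, y0:y1, x0:x1])  # "expensive" filter
        return out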

2014

    [PDF] [DOI] [YT] [Bibtex]
    @INPROCEEDINGS {Solteszova-2014-VPS,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and {\AA}smund Birkeland and Ivan Viola and Stefan Bruckner",
    title = "Visibility-Driven Processing of Streaming Volume Data",
    booktitle = "Proceedings of VCBM 2014",
    year = "2014",
    pages = "127--136",
    month = "sep",
    abstract = "In real-time volume data acquisition, such as 4D ultrasound, the raw  data is challenging to visualize directly without additional processing.  Noise removal and feature detection are common operations, but many  methods are too costly to compute over the whole volume when dealing  with live streamed data. In this paper, we propose a visibility-driven  processing scheme for handling costly on-the-fly processing of volumetric  data in real-time. In contrast to the traditional visualization pipeline,  our scheme utilizes a fast computation of the potentially visible  subset of voxels which significantly reduces the amount of data required  to process. As filtering operations modify the data values which  may affect their visibility, our method for visibility-mask generation  ensures that the set of elements deemed visible does not change after  processing. Our approach also exploits the visibility information  for the storage of intermediate values when multiple operations are  performed in sequence, and can therefore significantly reduce the  memory overhead of longer filter pipelines. We provide a thorough  technical evaluation of the approach and demonstrate it on several  typical scenarios where on-the-fly processing is required.",
    pdf = "pdfs/Solteszova-2014-VPS.pdf",
    images = "images/Solteszova-2014-VPS.jpg",
    thumbnails = "images/Solteszova-2014-VPS.png",
    youtube = "https://www.youtube.com/watch?v=WJgc6BX1qig",
    note = "VCBM 2014 Best Paper Award",
    doi = "10.2312/vcbm.20141198",
    event = "VCBM 2014",
    keywords = "ultrasound, visibility-driven processing, filtering",
    location = "Vienna, Austria"
    }
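
The key step is a fast, conservative estimate of which voxels can be visible at all, so that the costly filters only touch that subset. A minimal sketch, assuming a viewer looking along the z axis and a per-voxel opacity volume; the dilation at the end is a simple stand-in for the paper's guarantee that the visible set does not change after processing.

    import numpy as np
    from scipy.ndimage import binary_dilation

    def visibility_mask(opacity, threshold=0.99):
        """Mark voxels as potentially visible along the z (view) axis:
        a voxel is visible while the opacity accumulated in front of it
        stays below the early-ray-termination threshold."""
        transmittance = np.cumprod(1.0 - opacity, axis=0)       # light left after each voxel
        in_front = np.concatenate([np.ones_like(opacity[:1]),
                                   transmittance[:-1]], axis=0)
        visible = in_front > 1.0 - threshold                    # accumulated opacity < threshold
        # Conservative padding so a small-neighbourhood filter never needs
        # data outside the mask.
        return binary_dilation(visible, iterations=1)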

2013

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Patel-2013-ICS,
    author = "Daniel Patel and Veronika \v{S}olt{\'e}szov{\'a} and Jan Martin Nordbotten and Stefan Bruckner",
    title = "Instant Convolution Shadows for Volumetric Detail Mapping",
    journal = "ACM Transactions on Graphics",
    year = "2013",
    volume = "32",
    number = "5",
    pages = "154:1--154:18",
    month = "sep",
    abstract = "In this article, we present a method for rendering dynamic scenes  featuring translucent procedural volumetric detail with all-frequency  soft shadows being cast from objects residing inside the view frustum.  Our approach is based on an approximation of physically correct shadows  from distant Gaussian area light sources positioned behind the view  plane, using iterative convolution. We present a theoretical and  empirical analysis of this model and propose an efficient class of  convolution kernels which provide high quality at interactive frame  rates. Our GPU-based implementation supports arbitrary volumetric  detail maps, requires no precomputation, and therefore allows for  real-time modi?cation of all rendering parameters.",
    pdf = "pdfs/Patel-2013-ICS.pdf",
    images = "images/Patel-2013-ICS.jpg",
    thumbnails = "images/Patel-2013-ICS.png",
    youtube = "https://www.youtube.com/watch?v=lhGWgew3HXY,https://www.youtube.com/watch?v=XrhYjgQxfb0",
    doi = "10.1145/2492684",
    keywords = "shadows, volumetric effects, procedural texturing, filtering",
    project = "geoillustrator",
    url = "http://dl.acm.org/citation.cfm?id=2492684"
    }
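
The shadow model itself reduces to repeatedly convolving an occlusion buffer while compositing slices front to back. The sketch below illustrates that loop on a stack of opacity slices; the isotropic Gaussian and its sigma stand in for the paper's kernel class and are not its exact parametrization.

    import numpy as np
    from scipy.ndimage import gaussian_filter

    def convolution_shadow_buffers(alpha_slices, sigma=1.5):
        """For a front-to-back stack of opacity slices, return one light
        buffer per slice. Between slices the occlusion buffer is blurred
        (the iterative convolution) and the current slice's opacity is
        composited in, so opaque structures cast increasingly soft shadows
        onto everything behind them."""
        occlusion = np.zeros_like(alpha_slices[0])
        light = []
        for alpha in alpha_slices:
            occlusion = gaussian_filter(occlusion, sigma)        # one convolution step
            light.append(1.0 - occlusion)                        # light reaching this slice
            occlusion = occlusion + (1.0 - occlusion) * alpha    # add this slice's blockers
        return light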

2012

    [Bibtex]
    @ARTICLE {Birkeland12TheUltrasound,
    author = "{\AA}smund Birkeland and Veronika \v{S}olt{\'e}szov{\'a} and Dieter H{\"o}nigmann and Odd Helge Gilja and Svein Brekke and Timo Ropinski and Ivan Viola",
    title = "The Ultrasound Visualization Pipeline - A Survey",
    journal = "CoRR",
    year = "2012",
    volume = "abs/1206.3975",
    abstract = "Ultrasound is one of the most frequently used imaging modality in medicine. The high spatial resolution, its interactive nature and non-invasiveness makes it the first choice in many examinations. Image interpretation is one of ultrasounds main challenges. Much training is required to obtain a confident skill level in ultrasound-based diagnostics. State-of-the-art graphics techniques is needed to provide meaningful visualizations of ultrasound in real-time. In this paper we present the process-pipeline for ultrasound visualization, including an overview of the tasks performed in the specific steps. To provide an insight into the trends of ultrasound visualization research, we have selected a set of significant publications and divided them into a technique-based taxonomy covering the topics pre-processing, segmentation, registration, rendering and augmented reality. For the different technique types we discuss the difference between ultrasound-based techniques and techniques for other modalities.",
    images = "images/Birkeland2012TheUltrasound.png",
    thumbnails = "images/Birkeland2012TheUltrasound_thumb.png",
    url = "http://arxiv.org/abs/1206.3975",
    project = "illustrasound,medviz,illvis"
    }
    [PDF] [VID] [Bibtex]
    @INPROCEEDINGS {Solteszova12Stylized,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and Ruben Patel and Helwig Hauser and Ivanko Viola",
    title = "Stylized Volume Visualization of Streamed Sonar Data",
    booktitle = "Proceedings of Spring Conference on Computer Graphics (SCCG 2012)",
    year = "2012",
    pages = "13--20",
    month = "May",
    abstract = "Current visualization technology implemented in the software for 2D sonars used in marine research is limited to slicing whilst volume visualization is only possible as post processing. We designed and implemented a system which allows for instantaneous volume visualization of streamed scans from 2D sonars without prior resampling to a voxel grid. The volume is formed by a set of most recent scans which are being stored. We transform each scan using its associated transformations to the view-space and slice their bounding box by view-aligned planes. Each slicing plane is reconstructed from the underlying scans and directly used for slice-based volume rendering. We integrated a low frequency illumination model which enhances the depth perception of noisy acoustic measurements. While we visualize the 2D data and time as 3D volumes, the temporal dimension is not intuitively communicated. Therefore, we introduce a concept of temporal outlines. Our system is a result of an interdisciplinary collaboration between visualization and marine scientists. The application of our system was evaluated by independent domain experts who were not involved in the design process in order to determine real life applicability.",
    pdf = "pdfs/Solteszova12Stylized.pdf",
    vid = "vids/Solteszova12Stylized.mp4",
    images = "images/Solteszova12Stylized01.png, images/Solteszova12Stylized02.png, images/Solteszova12Stylized03.png",
    thumbnails = "images/Solteszova12Stylized01_thumb.png, images/Solteszova12Stylized02_thumb.png, images/Solteszova12Stylized03_thumb.png",
    note = "Second best paper and second best presentation awards",
    location = "Smolenice castle, Slovakia",
    project = "illustrasound,medviz,illvis"
    }
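
The reconstruction idea — keep the most recent scans with their transforms and resample them on demand in view space — can be sketched as below. The ring-buffer layout, the nearest-scan selection, and the bilinear sampling are illustrative simplifications, not the slicing-plane reconstruction used in the paper.

    import numpy as np
    from collections import deque

    class ScanBuffer:
        """Ring buffer of the N most recent 2D sonar scans together with
        their scan-to-view transforms. sample() reconstructs a value at a
        view-space point by projecting it into the closest scan plane and
        sampling that scan bilinearly."""

        def __init__(self, max_scans=64):
            self.scans = deque(maxlen=max_scans)   # (image, 4x4 view_from_scan)

        def push(self, image, view_from_scan):
            self.scans.append((np.asarray(image, float), np.asarray(view_from_scan, float)))

        def sample(self, p_view):
            value, best_dist = 0.0, np.inf
            for image, M in self.scans:
                x, y, z, _ = np.linalg.inv(M) @ np.append(p_view, 1.0)   # into scan coordinates
                if abs(z) < best_dist and 0 <= x < image.shape[1] - 1 and 0 <= y < image.shape[0] - 1:
                    x0, y0 = int(x), int(y)
                    fx, fy = x - x0, y - y0
                    value = (image[y0, x0] * (1 - fx) * (1 - fy) +
                             image[y0, x0 + 1] * fx * (1 - fy) +
                             image[y0 + 1, x0] * (1 - fx) * fy +
                             image[y0 + 1, x0 + 1] * fx * fy)
                    best_dist = abs(z)
            return value
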
    [PDF] [Bibtex]
    @PHDTHESIS {solteszova12thesis,
    author = "Veronika \v{S}olt{\'e}szov{\'a}",
    title = "Perception-Augmenting Illumination",
    school = "Department of Informatics, University of Bergen, Norway",
    year = "2012",
    month = "Aug",
    abstract = "At each stage of the visualization pipeline, the information is impeded by loss or by noise because of imprecise acquisition, storage limitations, and processing. Furthermore, it passes through the complex and not yet well understood pathways in the human visual system and finally to result into a mental image. Due to the noise that impedes the information in the visualization pipeline and the processes in the human visual system, the mental image and the real-world phenomenon do not match. From the aspect of physics, the input of the visual system is confined only to patterns of light. Illumination is therefore essential in 3D visualization for perception of visualized objects. In this thesis, several advancements for advanced volumetric lighting are presented. First, a novel lighting model that supports interactive light source placement and yields a high-quality soft shadowing effect, is proposed. The light transport is represented by conical functions and approximated with an incremental blurring operation of the opacity buffer during front-to-back slicing of the volume. Furthermore, a new perceptuallyfounded model for expressing shadows that gives a full control over the appearance of shadows in terms of color and opacity, is presented. Third, a systematic error in perception of surface slant is modeled. This knowledge is then applied to adjust an existing shading model in a manner that compensates for the error in perception. These new visualization methodologies are linked to the knowledge of perceptual psychology and the craft of illustrators, who experimented with visual-presentation techniques for centuries. The new methodologies are showcased on challenging acoustic modalities such as 3D medical ultrasound and sonar imaging.",
    pdf = "pdfs/solteszova12thesis.pdf",
    images = "images/solteszova12thesis.png",
    thumbnails = "images/solteszova12thesis_thumb.png",
    isbn = "978-82-308-2118-3"
    }
    [PDF] [DOI] [Bibtex]
    @ARTICLE {Solteszova12APerceptual,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and Cagatay Turkay and Mark Price and Ivan Viola",
    title = "A Perceptual-Statistics Shading Model",
    journal = "Visualization and Computer Graphics, IEEE Transaction on",
    year = "2012",
    volume = "18",
    number = "12",
    pages = "2265 -2274",
    month = "Dec",
    abstract = "The process of surface perception is complex and based on several influencing factors, e.g., shading, silhouettes, occluding contours, and top down cognition. The accuracy of surface perception can be measured and the influencing factors can be modified in order to decrease the error in perception. This paper presents a novel concept of how a perceptual evaluation of a visualization technique can contribute to its redesign with the aim of improving the match between the distal and the proximal stimulus. During analysis of data from previous perceptual studies, we observed that the slant of 3D surfaces visualized on 2D screens is systematically underestimated. The visible trends in the error allowed us to create a statistical model of the perceived surface slant. Based on this statistical model we obtained from user experiments, we derived a new shading model that uses adjusted surface normals and aims to reduce the error in slant perception. The result is a shape-enhancement of visualization which is driven by an experimentally-founded statistical model. To assess the efficiency of the statistical shading model, we repeated the evaluation experiment and confirmed that the error in perception was decreased. Results of both user experiments are publicly-available datasets.",
    pdf = "pdfs/Solteszova12APerceptual.pdf",
    images = "images/Solteszova12APerceptual01.png, images/Solteszova12APerceptual02.png, images/Solteszova12APerceptual03.png",
    thumbnails = "images/Solteszova12APerceptual01_thumb.png, images/Solteszova12APerceptual02_thumb.png, images/Solteszova12APerceptual03_thumb.png",
    event = "IEEE Scientific Visualization Conference 2012",
    location = "Seattle, WA, USA",
    doi = "10.1109/TVCG.2012.188",
    issn = "1077--2626",
    extra = "extra/Solteszova12APerceptual.zip",
    project = "illustrasound,medviz,illvis"
    }
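
The shading adjustment amounts to tilting each normal away from the view direction so that the slant a viewer perceives matches the true slant. A sketch follows, with the slant-correction mapping left as a free parameter; the actual mapping is fitted to the paper's user-study data and is not reproduced here.

    import numpy as np

    def adjusted_normal(n, view, correct):
        """Tilt the surface normal so that the *perceived* slant matches
        the true slant. `correct` maps the true slant (radians) to the
        slant that should be displayed, e.g. a purely hypothetical
        correct = lambda s: min(1.2 * s, np.pi / 2) would over-tilt by 20%."""
        n = np.asarray(n, float) / np.linalg.norm(n)
        view = np.asarray(view, float) / np.linalg.norm(view)
        slant = np.arccos(np.clip(np.dot(n, view), -1.0, 1.0))
        axis = np.cross(view, n)
        if np.linalg.norm(axis) < 1e-8:            # facing the viewer: nothing to tilt
            return n
        axis /= np.linalg.norm(axis)
        delta = correct(slant) - slant             # extra tilt to apply
        # Rodrigues rotation of n around axis by delta
        return (n * np.cos(delta) + np.cross(axis, n) * np.sin(delta)
                + axis * np.dot(axis, n) * (1.0 - np.cos(delta)))
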
    [PDF] [DOI] [Bibtex]
    @INPROCEEDINGS {Solteszova12Lowest,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and Linn Emilie S{\ae}vil Helljesen and Wolfgang Wein and Odd Helge Gilja and Ivan Viola",
    title = "Lowest-Variance Streamlines for Filtering of 3D Ultrasound",
    booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine (VCBM 2012)",
    year = "2012",
    pages = "41--48",
    month = "Sep",
    abstract = "Ultrasound as an acoustic imaging modality suffers from various kinds of noise. The presence of noise especially hinders the 3D visualization of ultrasound data, both in terms of resolving the spatial occlusion of the signal by surrounding noise, and mental decoupling of the signal from noise. This paper presents a novel type of structurepreserving filter that has been specifically designed to eliminate the presence of speckle and random noise in 3D ultrasound datasets. This filter is based on a local distribution of variance for a given voxel. The lowest variance direction is assumed to be aligned with the direction of the structure. A streamline integration over the lowest-variance vector field defines the filtered output value. The new filter is compared to other popular filtering approaches and its superiority is documented on several use cases. A case study where a clinician was delineating vascular structures of the liver from 3D visualizations further demonstrates the benefits of our approach compared to the state of the art.",
    pdf = "pdfs/Solteszova12Lowest.pdf",
    images = "images/Solteszova12Lowest01.png, images/Solteszova12Lowest02.png",
    thumbnails = "images/Solteszova12Lowest01_thumb.png, images/Solteszova12Lowest02_thumb.png",
    location = "Norrk{\"o}ping, Sweden",
    url = "http://diglib.eg.org/EG/DL/WS/VCBM/VCBM12",
    doi = "10.2312/VCBM/VCBM12/041-048",
    project = "illustrasound,medviz,illvis"
    }
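
Conceptually, the filter finds the locally "smoothest" direction per voxel and averages along it. The sketch below uses a small fixed direction set, a short sampling segment, and one-sided stepping, all of which are simplifications of the paper's local variance analysis and streamline integration.

    import numpy as np

    DIRS = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1],
                     [1, 1, 0], [1, 0, 1], [0, 1, 1]], float)
    DIRS /= np.linalg.norm(DIRS, axis=1, keepdims=True)

    def lowest_variance_direction(vol, p, radius=2):
        """Return the direction (from a small fixed set) along which the
        local sample variance at position p is lowest, i.e. the direction
        assumed to follow the structure."""
        best_dir, best_var = DIRS[0], np.inf
        for d in DIRS:
            samples = []
            for t in range(-radius, radius + 1):
                q = np.round(p + t * d).astype(int)
                if all(0 <= q[i] < vol.shape[i] for i in range(3)):
                    samples.append(vol[q[0], q[1], q[2]])
            var = np.var(samples)
            if var < best_var:
                best_dir, best_var = d, var
        return best_dir

    def streamline_filtered_value(vol, p, steps=4):
        """Average the samples collected by stepping along the local
        lowest-variance direction: a crude stand-in for the paper's
        streamline integration over the lowest-variance vector field."""
        p = np.asarray(p, float)
        acc = [vol[tuple(np.round(p).astype(int))]]
        for _ in range(steps):
            p = p + lowest_variance_direction(vol, p)
            q = np.round(p).astype(int)
            if not all(0 <= q[i] < vol.shape[i] for i in range(3)):
                break
            acc.append(vol[q[0], q[1], q[2]])
        return float(np.mean(acc))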

2011

    [Bibtex]
    @INPROCEEDINGS {solteszova11chromatic,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and Daniel Patel and Ivan Viola",
    title = "Chromatic Shadows for Improved Perception",
    booktitle = "Proc. Non-photorealistic Animation and Rendering (NPAR 2011)",
    year = "2011",
    pages = "105--115",
    abstract = "Soft shadows are effective depth and shape cues. However, traditional shadowing algorithms decrease the luminance in shadowareas. The features in shadow become dark and thus shadowing causesinformation hiding. For this reason, in shadowed areas, medical illustrators decrease the luminance less and compensate the lower luminance range by adding color, i.e., by introducing a chromatic component. This paper presents a novel technique which enables an interactive setup of an illustrative shadow representation for preventing overdarkening of important structures. We introduce a scalar attribute for every voxel denoted as shadowiness and propose a shadow transfer function that maps the shadowiness to a color and a blend factor. Typically, the blend factor increases linearly with the shadowiness. We then let the original object color blend with the shadow color according to the blend factor. We suggest a specific shadow transfer function, designed together with a medical illustrator which shifts the shadow color towards blue. This shadow transfer function is quantitatively evaluated with respect to relative depth and surface perception.",
    images = "images/solteszova11chromatic3.jpg, images/solteszova11chromatic2.jpg, images/solteszova11chromatic.jpg, images/solteszova11chromatic4.jpg",
    thumbnails = "images/solteszova11chromatic3_thumb.jpg, images/solteszova11chromatic2_thumb.jpg, images/solteszova11chromatic_thumb.jpg, images/solteszova11chromatic4_thumb.jpg",
    location = "Vancouver, Canada",
    url = "http://dx.doi.org/10.1145/2024676.2024694",
    project = "illustrasound,medviz,illvis"
    }
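
The shadow transfer function described above can be written in a couple of lines: map shadowiness to a blend factor and mix the object colour with a shadow colour instead of darkening it. The linear blend matches the typical case mentioned in the abstract; the bluish default colour is only illustrative, not the ramp designed with the medical illustrator.

    import numpy as np

    def chromatic_shadow(base_rgb, shadowiness, shadow_rgb=(0.15, 0.20, 0.55)):
        """Blend a voxel's colour towards a shadow colour instead of
        darkening it; the blend factor grows linearly with the
        shadowiness attribute (a simple shadow transfer function)."""
        blend = np.clip(shadowiness, 0.0, 1.0)
        return (1.0 - blend) * np.asarray(base_rgb, float) + blend * np.asarray(shadow_rgb, float)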

2010

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Solteszova-2010-MOS,
    author = "Veronika \v{S}olt{\'e}szov{\'a} and Daniel Patel and Stefan Bruckner and Ivan Viola",
    title = "A Multidirectional Occlusion Shading Model for Direct Volume Rendering",
    journal = "Computer Graphics Forum",
    year = "2010",
    volume = "29",
    number = "3",
    pages = "883--891",
    month = "jun",
    abstract = "In this paper, we present a novel technique which simulates directional  light scattering for more realistic interactive visualization of  volume data. Our method extends the recent directional occlusion  shading model by enabling light source positioning with practically  no performance penalty. Light transport is approximated using a tilted  cone-shaped function which leaves elliptic footprints in the opacity  buffer during slice-based volume rendering. We perform an incremental  blurring operation on the opacity buffer for each slice in front-to-back  order. This buffer is then used to define the degree of occlusion  for the subsequent slice. Our method is capable of generating high-quality  soft shadowing effects, allows interactive modification of all illumination  and rendering parameters, and requires no pre-computation.",
    pdf = "pdfs/Solteszova-2010-MOS.pdf",
    images = "images/Solteszova-2010-MOS.jpg",
    thumbnails = "images/Solteszova-2010-MOS.png",
    youtube = "https://www.youtube.com/watch?v=V4y0BVKV_bw",
    doi = "10.1111/j.1467-8659.2009.01695.x",
    event = "EuroVis 2010",
    keywords = "global illumination, volume rendering, shadows, optical model",
    location = "Bordeaux, France",
    project = "illustrasound,medviz,illvis",
    url = "http://www.cg.tuwien.ac.at/research/publications/2010/solteszova-2010-MOS/"
    }
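
As with the 2013 convolution-shadows entry above, the model is an incremental blur of the opacity buffer during front-to-back slicing; here the light direction is expressed by shifting the buffer and using an anisotropic (elliptic) kernel. Both the per-slice offset and the kernel radii below are illustrative parameters, not the paper's tilted-cone parametrization.

    import numpy as np
    from scipy.ndimage import gaussian_filter, shift

    def occlusion_buffers(alpha_slices, light_offset=(1.0, 0.0), sigma=(2.0, 1.0)):
        """Front-to-back slice compositing where the opacity buffer is
        shifted and anisotropically blurred before shading each slice,
        so the accumulated occlusion spreads as a tilted, elliptic
        footprint. Returns one shading buffer per slice."""
        occ = np.zeros_like(alpha_slices[0])
        shading = []
        for alpha in alpha_slices:
            occ = gaussian_filter(shift(occ, light_offset, order=1, mode="nearest"), sigma)
            shading.append(1.0 - occ)              # fraction of light reaching this slice
            occ = occ + (1.0 - occ) * alpha        # composite this slice's opacity
        return shading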

2009

    [PDF] [DOI] [YT] [Bibtex]
    @ARTICLE {Bruckner-2009-BVQ,
    author = "Stefan Bruckner and Veronika \v{S}olt{\'e}szov{\'a} and Meister Eduard Gr{\"o}ller and Ji\v{r}{\'i} Hlad\r{u}vka and Katja B{\"u}hler and Jai Yu and Barry Dickson",
    title = "BrainGazer - Visual Queries for Neurobiology Research",
    journal = "IEEE Transactions on Visualization and Computer Graphics",
    year = "2009",
    volume = "15",
    number = "6",
    pages = "1497--1504",
    month = "nov",
    abstract = "Neurobiology investigates how anatomical and physiological relationships  in the nervous system mediate behavior. Molecular genetic techniques,  applied to species such as the common fruit fly Drosophila melanogaster,  have proven to be an important tool in this research. Large databases  of transgenic specimens are being built and need to be analyzed to  establish models of neural information processing. In this paper  we present an approach for the exploration and analysis of neural  circuits based on such a database. We have designed and implemented  BrainGazer, a system which integrates visualization techniques for  volume data acquired through confocal microscopy as well as annotated  anatomical structures with an intuitive approach for accessing the  available information. We focus on the ability to visually query  the data based on semantic as well as spatial relationships. Additionally,  we present visualization techniques for the concurrent depiction  of neurobiological volume data and geometric objects which aim to  reduce visual clutter. The described system is the result of an ongoing  interdisciplinary collaboration between neurobiologists and visualization  researchers.",
    pdf = "pdfs/Bruckner-2009-BVQ.pdf",
    images = "images/Bruckner-2009-BVQ.jpg",
    thumbnails = "images/Bruckner-2009-BVQ.png",
    youtube = "https://www.youtube.com/watch?v=LB5t3RtLifk",
    affiliation = "tuwien",
    doi = "10.1109/TVCG.2009.121",
    event = "IEEE Visualization 2009",
    keywords = "biomedical visualization, neurobiology, visual queries, volume visualization",
    location = "Atlantic City, New Jersey, USA",
    url = "http://www.cg.tuwien.ac.at/research/publications/2009/bruckner-2009-BVQ/"
    }
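
A spatial visual query of the kind described above can be illustrated with a brute-force sketch: given annotated structures as sets of voxel coordinates, return those that come close to a user-painted region. The data layout and distance test are purely illustrative and not BrainGazer's query engine.

    import numpy as np

    def spatial_query(annotations, brush_points, radius):
        """Return the names of annotated structures that come within
        `radius` voxels of any painted point. `annotations` maps a
        structure name to an (N, 3) array of voxel coordinates."""
        brush = np.asarray(brush_points, float)
        hits = []
        for name, coords in annotations.items():
            coords = np.asarray(coords, float)
            dists = np.linalg.norm(coords[:, None, :] - brush[None, :, :], axis=-1)
            if dists.min() <= radius:
                hits.append(name)
        return hits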