2025
@article{ziman2025genaixbiomedvis,
title={"It looks sexy but it's wrong." Tensions in creativity and accuracy using genAI for biomedical visualization},
author = {Ziman, Roxanne and Saharan, Shehryar and McGill, Ga\"{e}l and Garrison, Laura},
journal = {arXiv, IEEE Transactions on Visualization and Computer Graphics--in press},
year = {2025},
numpages = {11},
publisher = {arXiv},
doi = {10.48550/arXiv.2507.14494},
abstract = {We contribute an in-depth analysis of the workflows and tensions arising from generative AI (genAI) use in biomedical visualization (BioMedVis). Although genAI affords facile production of aesthetic visuals for biological and medical content, the architecture of these tools fundamentally limits the accuracy and trustworthiness of the depicted information, from imaginary (or fanciful) molecules to alien anatomy. Through 17 interviews with a diverse group of practitioners and researchers, we qualitatively analyze the concerns and values driving genAI (dis)use for the visual representation of spatially-oriented biomedical data. We find that BioMedVis experts, both in roles as developers and designers, use genAI tools at different stages of their daily workflows and hold attitudes ranging from enthusiastic adopters to skeptical avoiders of genAI. In contrasting the current use and perspectives on genAI observed in our study with predictions towards genAI in the visualization pipeline from prior work, we refocus the discussion of genAI's effects on visualization projects in the here and now with its respective opportunities and pitfalls for future visualization research. At a time when public trust in science is in jeopardy, we are reminded to first do no harm, not just in biomedical visualization but in science communication more broadly. Our observations reaffirm the necessity of human intervention for empathetic design and assessment of accurate scientific visuals.},
pdf = {pdfs/ziman2025genaixbiomedvis.pdf},
images = {images/ziman2025itlookssexy.png},
thumbnails = {images/ziman2025itlookssexy_thumb.png},
project = {VIDI},
git = {https://osf.io/mbw86/}
}
@article{zhang2025deconstruct,
title={Deconstructing Implicit Beliefs in Visual Data Journalism: Unstable Meanings Behind Data as Truth & Design for Insight},
author={Zhang, Ke Er Amy and Jenkinson, Jodie and Garrison, Laura},
journal={arXiv, IEEE Transactions on Visualization and Computer Graphics--in press},
year={2025},
numpages={11},
publisher={arXiv},
doi = {10.48550/arXiv.2507.12377},
abstract={We conduct a deconstructive reading of a qualitative interview study with 17 visual data journalists from newsrooms across the globe. We borrow a deconstruction approach from literary critique to explore the instability of meaning in language and reveal implicit beliefs in words and ideas. Through our analysis we surface two sets of opposing implicit beliefs in visual data journalism: objectivity/subjectivity and humanism/mechanism. We contextualize these beliefs through a genealogical analysis, which brings deconstruction theory into practice by providing a historic backdrop for these opposing perspectives. Our analysis shows that these beliefs held within visual data journalism are not self-enclosed but rather a product of external societal forces and paradigm shifts over time. Through this work, we demonstrate how thinking with critical theories such as deconstruction and genealogy can reframe "success" in visual data storytelling and diversify visualization research outcomes. These efforts push the ways in which we as researchers produce domain knowledge to examine the sociotechnical issues of today's values towards datafication and data visualization. All supplemental materials for this work are available at osf.io/5fr48.},
pdf = {pdfs/zhang2025deconstruct.pdf},
images = {images/zhang2025deconstruct.png},
thumbnails = {images/zhang2025deconstruct_thumb.png},
project = {VIDI},
git={https://osf.io/5fr48/}
}
@inproceedings{alhazwani2025datahum,
author = {Al-Hazwani, Ibrahim and Zhang, Ke Er Amy and Garrison, Laura and Bernard, J{\"u}rgen},
title = {Data Humanism decoded: A characterization of its principles to bridge data visualization researchers and practitioners},
booktitle = {Proceedings of IEEE VIS 2025 (Short Papers)--in press},
year = {2025},
numpages = {5},
publisher = {IEEE Computer Society},
address = {Los Alamitos},
abstract = {Data Humanism is a human-centered design approach that emphasizes the personal, contextual, and imperfect nature of data. Despite its growing influence among practitioners, the 13 principles outlined in Giorgia Lupi’s visual manifesto remain loosely defined in research contexts, creating a gap between design practice and systematic application. Through a mixed-methods approach, including a systematic literature review, multimedia analysis, and expert interviews, we present a characterization of Data Humanism principles for visualization researchers. Our characterization provides concrete definitions that maintain interpretive flexibility in operationalizing design choices. We validate our work through direct consultation with Lupi. Moreover, we leverage the characterization to decode a visualization work, mapping Data Humanism principles to specific visual design choices. Our work creates a common language for human-centered visualization, bridging the gap between practice and research for future applications and evaluations.},
pdf = {pdfs/alhazwani2025datahum.pdf},
images = {images/alhazwani2025datahum.png},
thumbnails = {images/alhazwani2025datahum_thumb.png},
}
@article{mittenentzwei2025icom,
title = {AI-based character generation for disease stories: A case study using epidemiological data to highlight preventable risk factors},
author = {Mittenentzwei, Sarah and Garrison, Laura A. and Budich, Beatrice and Lawonn, Kai and Dockhorn, Alexander and Preim, Bernhard and Meuschke, Monique},
year = {2025},
journal = {i-com},
publisher = {De Gruyter},
doi = {10.1515/icom-2024-0041},
abstract = {Data-driven storytelling has grown significantly, becoming prevalent in various fields, including healthcare. In medical narratives, characters are crucial for engaging audiences, making complex medical information accessible, and potentially influencing positive behavioral and lifestyle changes. However, designing characters that are both educational and relatable to effectively engage audiences is challenging. We propose a GenAI-assisted pipeline for character design in data-driven medical stories, utilizing Stable Diffusion, a deep learning text-to-image model, to transform data into visual character representations. This approach reduces the time and artistic skills required to create characters that reflect the underlying data. As a proof-of-concept, we generated and evaluated two characters in a crowd-sourced case study, assessing their authenticity to the underlying data and consistency over time. In a qualitative evaluation with four experts with knowledge in design and health communication, the characters were discussed regarding their quality and refinement opportunities. The characters effectively conveyed various aspects of the data, such as emotions, age, and body weight. However, generating multiple consistent images of the same character proved to be a significant challenge. This underscores a key issue in using generative AI for character creation: the limited control designers have over the output.},
images = {images/mittenentzwei2025icom.png},
thumbnails = {images/mittenentzwei2025icom_thumb.png},
pdf = {pdfs/mittenentzwei2025icom.pdf}
}
@inproceedings{zhang2025stories,
author = {Zhang, Ke Er Amy and Garrison, Laura},
title = {Modern snapshots in the crafting of a medical illustration},
booktitle = {Proceedings of CHI '25 Workshop "How do design stories work? Exploring narrative forms of knowledge in HCI"},
year = {2025},
numpages = {3},
abstract = {The time-honored practice of medical illustration and visualization has, like nearly all other disciplines, seen changes in its tooling and development pipeline in step with technological and societal developments. At its core, however, medical visualization remains a discipline focused on telling stories about biology and medicine. The story we tell in this work assumes a more distant vantage point to tell a story about the biomedical storytellers themselves. Our story peers over the shoulders of two medical illustrators in the middle of a project to illustrate a procedure in one of the small blood vessels around the heart, and through the medium of an online chat explores the dialogue, tensions, and goals of such projects in the digital age. We adopt the two-column format of the CHI template, as it is more reminiscent of the width of our usual messaging windows while working. The second part of our submission reflects on these tensions and modes of storytelling from an HCI and Visualization-situated perspective.},
pdf = {pdfs/zhang2025stories.pdf},
images = {images/zhang2025stories.png},
thumbnails = {images/zhang2025stories.png},
project = {VIDI}
}
@article{vandenbossche2025open,
title = {The Open Anatomy Explorer--a journey towards accessible open-source 3D learning environments},
author = {Vandenbossche, Vicky and Van Kenhove, Michiel and Smit, Noeska and Willaert, Wouter and De Turck, Filip and Volckaert, Bruno and Valcke, Martin and Audenaert, Emmanuel},
year = {2025},
journal = {Journal of Visual Communication in Medicine},
publisher = {Taylor \& Francis},
pages = {1--12},
doi = {10.1080/17453054.2024.2446764},
url = {https://www.tandfonline.com/doi/full/10.1080/17453054.2024.2446764},
abstract = {Anatomy learning has traditionally relied on drawings, plastic models, and cadaver dissections/prosections to help students understand the three-dimensional (3D) relationships within the human body. However, the landscape of anatomy education has been transformed with the introduction of digital media. In this light, the Open Anatomy Explorer (OPANEX) was developed. It includes two user interfaces (UI): one for students and one for administrators. The administrator UI offers features such as uploading and labelling of 3D models, and customizing 3D settings. Additionally, the OPANEX facilitates content sharing between institutes through its import-export functionality. To evaluate the integration of OPANEX within the existing array of learning resources, a survey was conducted as part of the osteology course at Ghent University, Belgium. The survey aimed to investigate the frequency of use of five learning resources, attitudes towards 3D environments, and the OPANEX user experience. Analysis revealed that the OPANEX was the most frequently used resource. Students’ attitudes towards 3D learning environments further supported this preference. Feedback on the OPANEX user experience indicated various reasons for its popularity, including the quality of the models, regional annotations, and customized learning content. In conclusion, the outcomes underscore the educational value of the OPANEX, reflecting students’ positive attitudes towards 3D environments in anatomy education.},
images = {images/vandenbossche2025open.png},
thumbnails = {images/vandenbossche2025open.png},
pdf = {pdfs/vandenbossche2025open.pdf}
}