% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@ARTICLE{Simard:362215,
author = {Simard, Mikaël and Fullarton, Ryan and Volz, Lennart and
Schuy, Christoph and Chung, Savanna and Baker, Colin and
Graeff, Christian and Fekete, Charles-Antoine Collins},
title = {{A} generative adversarial network to improve integrated
mode proton imaging resolution using paired proton–carbon
data},
journal = {Medical Physics},
volume = {52},
number = {9},
issn = {0094-2405},
address = {Hoboken, NJ},
publisher = {Wiley},
reportid = {GSI-2025-01088},
pages = {e18081},
year = {2025},
note = {Beamtime funding of Philipps University Marburg
        (MIT-2022-12). This is an open access article under the
        terms of the Creative Commons Attribution License 4.0.},
abstract = {Background: Integrated mode proton imaging is a clinically
            accessible method for proton radiographs (pRads), but its
            spatial resolution is limited by multiple Coulomb scattering
            (MCS). As the amplitude of MCS decreases with increasing
            particle charge, heavier ions such as carbon ions produce
            radiographs with better resolution (cRads). Improving the
            image resolution of pRads may thus be achieved by
            transferring individual proton pencil beam images to the
            equivalent carbon ion data using a trained image translation
            network. The approach can be interpreted as applying a
            data-driven deconvolution operation with a spatially variant
            point spread function. Purpose: Propose a deep learning
            framework based on paired proton-carbon data to increase the
            resolution of integrated mode pRads. Methods: A conditional
            generative adversarial network, Proton2Carbon, was developed
            to translate proton pencil beam images into synthetic carbon
            ion beam images. The model was trained on 547 224 paired
            proton-carbon images acquired with a scintillation detector
            at the Marburg Ion Therapy Centre. Image reconstruction was
            performed using a 2D lateral method, and the model was
            evaluated on internal and external datasets for spatial
            resolution, using custom 3D-printed line pair modules.
            Results: The Proton2Carbon model improved the spatial
            resolution of pRads from 1.7 to 2.7 lp/cm on internal data
            and to 2.3 lp/cm on external data, demonstrating
            generalizability. Water equivalent thickness accuracy
            remained consistent with pRads and cRads. Evaluation on an
            anthropomorphic head phantom showed enhanced structural
            clarity, though some increased noise was observed.
            Conclusions: This study demonstrates that deep learning can
            enhance pRad image quality by leveraging paired
            proton-carbon data. Proton2Carbon can be integrated into
            existing imaging workflows to improve clinical and research
            applications of proton radiography. To facilitate further
            research, the full dataset used to train Proton2Carbon is
            publicly released and available at
            https://zenodo.org/records/14945165.},
keywords = {Protons / Carbon / Image Processing, Computer-Assisted:
methods / Deep Learning / Humans / Phantoms, Imaging /
Generative Adversarial Networks / generative adversarial
network (Other) / image‐to‐image translation (Other) /
ion beam therapy (Other) / ion imaging (Other) / ion
radiography (Other) / super‐resolution imaging (Other) /
Protons (NLM Chemicals) / Carbon (NLM Chemicals)},
cin = {BIO},
ddc = {610},
cid = {I:(DE-Ds200)BIO-20160831OR354},
pnm = {633 - Life Sciences – Building Blocks of Life: Structure
and Function (POF4-633) / HITRIplus - Heavy Ion Therapy
Research Integration plus (101008548)},
pid = {G:(DE-HGF)POF4-633 / G:(EU-Grant)101008548},
experiment = {$EXP:(DE-Ds200)External_experiment-20200803$},
typ = {PUB:(DE-HGF)16},
pubmed = {pmid:40926569},
UT = {WOS:001570644000001},
doi = {10.1002/mp.18081},
url = {https://repository.gsi.de/record/362215},
}