Year | Type | Publication |
---|---|---|
To Appear | In Conf. Proceedings | E Pastorelli, H Herrmann (To Appear). Virtual Reality visualization for short fibre orientation analysis. In Proceedings of the Biennial Baltic Electronics Conference, BEC, pp. 201–204. (link) (bib) x @inproceedings{Pastorelli, volume = { 2015-Novem }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84971310302{\&}doi=10.1109{\%}2FBEC.2014.7320591{\&}partnerID=40{\&}md5=3476efbc0430ba44724fcc8006637b70 }, type = { Conference Proceedings }, title = { Virtual Reality visualization for short fibre orientation analysis }, pages = { 201--204 }, doi = { 10.1109/BEC.2014.7320591 }, booktitle = { Proceedings of the Biennial Baltic Electronics Conference, BEC }, author = { Pastorelli and Herrmann }, year = { To Appear }, } |
To Appear | In Conf. Proceedings | S Meesters, P Ossenblok, A Colon, O Schijns, L Florack, P Boon, L Wagner, A Fuster (To Appear). Automated identification of intracranial depth electrodes in computed tomography data. In Proceedings - International Symposium on Biomedical Imaging, pp. 976–979. (link) (bib) x @inproceedings{Meesters, volume = { 2015-July }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84944328676{\&}doi=10.1109{\%}2FISBI.2015.7164034{\&}partnerID=40{\&}md5=3989bdde301290c2ae9d67f2142f6235 }, type = { Conference Proceedings }, title = { Automated identification of intracranial depth electrodes in computed tomography data }, pages = { 976--979 }, doi = { 10.1109/ISBI.2015.7164034 }, booktitle = { Proceedings - International Symposium on Biomedical Imaging }, author = { Meesters and Ossenblok and Colon and Schijns and Florack and Boon and Wagner and Fuster }, year = { To Appear }, } |
To Appear | In Conf. Proceedings | M Schiwarth, J Weissenböck, B Plank, B Fröhler, C Heinzl, J Kastner (To Appear). Visual analysis of void and reinforcement characteristics in X-ray computed tomography dataset series of fiber-reinforced polymers. In IOP Conference Series: Materials Science and Engineering, pp. NA (link) (bib) x @inproceedings{Schiwarth, volume = { 406 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85054221856{\&}doi=10.1088{\%}2F1757-899X{\%}2F406{\%}2F1{\%}2F012014{\&}partnerID=40{\&}md5=e1757458a87eb396d4743ad19f658d5a }, type = { Conference Proceedings }, title = { Visual analysis of void and reinforcement characteristics in X-ray computed tomography dataset series of fiber-reinforced polymers }, doi = { 10.1088/1757-899X/406/1/012014 }, booktitle = { IOP Conference Series: Materials Science and Engineering }, author = { Schiwarth and Weissenb{\"{o}}ck and Plank and Fr{\"{o}}hler and Heinzl and Kastner }, year = { To Appear }, } |
To Appear | In Conf. Proceedings | G N Stevenson, S L Collins, L Impey, J A Noble (To Appear). Surface parameterisation of the utero/placental interface using 3D power doppler ultrasound. In Proceedings - International Symposium on Biomedical Imaging, pp. 891–894. (link) (bib) x @inproceedings{Stevenson, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-80055033565{\&}doi=10.1109{\%}2FISBI.2011.5872547{\&}partnerID=40{\&}md5=906b8369dc14ae8e1bb4597aa31b1ec7 }, type = { Conference Proceedings }, title = { Surface parameterisation of the utero/placental interface using 3D power doppler ultrasound }, pages = { 891--894 }, doi = { 10.1109/ISBI.2011.5872547 }, booktitle = { Proceedings - International Symposium on Biomedical Imaging }, author = { Stevenson and Collins and Impey and Noble }, year = { To Appear }, } |
To Appear | In Conf. Proceedings | F Ponzio, E Macii, E Ficarra, S Di Cataldo (To Appear). A multi-modal brain image registration framework for US-guided neuronavigation systems integrating MR and US for minimally invasive neuroimaging. In BIOIMAGING 2017 - 4th International Conference on Bioimaging, Proceedings; Part of 10th International Joint Conference on Biomedical Engineering Systems and Technologies, BIOSTEC 2017, pp. 114–121. (link) (bib) x @inproceedings{Ponzio, volume = { 2017-Janua }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85049240676{\&}partnerID=40{\&}md5=0189c956d91dec04b477ed5846ecb930 }, type = { Conference Proceedings }, title = { A multi-modal brain image registration framework for US-guided neuronavigation systems integrating MR and US for minimally invasive neuroimaging }, pages = { 114--121 }, booktitle = { BIOIMAGING 2017 - 4th International Conference on Bioimaging, Proceedings; Part of 10th International Joint Conference on Biomedical Engineering Systems and Technologies, BIOSTEC 2017 }, author = { Ponzio and Macii and Ficarra and {Di Cataldo} }, year = { To Appear }, } |
To Appear | In Conf. Proceedings | E Kugu (To Appear). Satellite image denoising using Bilateral Filter with SPEA2 optimized parameters. In RAST 2013 - Proceedings of 6th International Conference on Recent Advances in Space Technologies, pp. 217–223. (link) (bib) x @inproceedings{Kugu, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84883888988{\&}doi=10.1109{\%}2FRAST.2013.6581204{\&}partnerID=40{\&}md5=eba34fda9e6d5ba7ef3f98fd189e8ed5 }, type = { Conference Proceedings }, title = { Satellite image denoising using Bilateral Filter with SPEA2 optimized parameters }, pages = { 217--223 }, doi = { 10.1109/RAST.2013.6581204 }, booktitle = { RAST 2013 - Proceedings of 6th International Conference on Recent Advances in Space Technologies }, author = { Kugu }, year = { To Appear }, } |
To Appear | In Conf. Proceedings | Y Suter, C Rummel, R Wiest, M Reyes (To Appear). Fast and uncertainty-aware cerebral cortex morphometry estimation using random forest regression. In Proceedings - International Symposium on Biomedical Imaging, pp. 1052–1055. (link) (bib) x @inproceedings{Suter, volume = { 2018-April }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85048099086{\&}doi=10.1109{\%}2FISBI.2018.8363752{\&}partnerID=40{\&}md5=e22ed68e255aa6c6af3b32100df7276a }, type = { Conference Proceedings }, title = { Fast and uncertainty-aware cerebral cortex morphometry estimation using random forest regression }, pages = { 1052--1055 }, doi = { 10.1109/ISBI.2018.8363752 }, booktitle = { Proceedings - International Symposium on Biomedical Imaging }, author = { Suter and Rummel and Wiest and Reyes }, year = { To Appear }, } |
To Appear | In Conf. Proceedings | R Medina, S Bautista, V Morocho (To Appear). Accuracy of connected confidence left ventricle segmentation in 3-D multi-slice computerized tomography images. In 2017 IEEE 2nd Ecuador Technical Chapters Meeting, ETCM 2017, pp. 1–6. (link) (bib) x @inproceedings{Medina, volume = { 2017-Janua }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045738426{\&}doi=10.1109{\%}2FETCM.2017.8247499{\&}partnerID=40{\&}md5=31a12ff7f15e6264e843b9c8cc503a7c }, type = { Conference Proceedings }, title = { Accuracy of connected confidence left ventricle segmentation in 3-D multi-slice computerized tomography images }, pages = { 1--6 }, doi = { 10.1109/ETCM.2017.8247499 }, booktitle = { 2017 IEEE 2nd Ecuador Technical Chapters Meeting, ETCM 2017 }, author = { Medina and Bautista and Morocho }, year = { To Appear }, } |
To Appear | In Conf. Proceedings | T Mandl, J Martinek, W Mayr, F Rattay, M Reichel, E Moser (To Appear). Towards a numerical 3D model of functional electrical stimulation of denervated, degenerated human skeletal muscle. In 21st European Modeling and Simulation Symposium, EMSS 2009, pp. NA (link) (bib) x @inproceedings{Mandl, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874185461{\&}partnerID=40{\&}md5=a9627e50e42db43cbfa2f5ca618bdad4 }, type = { Conference Proceedings }, title = { Towards a numerical 3D model of functional electrical stimulation of denervated, degenerated human skeletal muscle }, booktitle = { 21st European Modeling and Simulation Symposium, EMSS 2009 }, author = { Mandl and Martinek and Mayr and Rattay and Reichel and Moser }, year = { To Appear }, } |
To Appear | In Conf. Proceedings | A Rezaei, J Nuyts (To Appear). Joint registration of attenuation and activity images in gated TOF-PET. In IEEE Nuclear Science Symposium Conference Record, pp. NA (link) (bib) x @inproceedings{Rezaei, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84904204340{\&}doi=10.1109{\%}2FNSSMIC.2013.6829031{\&}partnerID=40{\&}md5=9a3c352e39a5cde2bb765fec1156cfb1 }, type = { Conference Proceedings }, title = { Joint registration of attenuation and activity images in gated TOF-PET }, doi = { 10.1109/NSSMIC.2013.6829031 }, booktitle = { IEEE Nuclear Science Symposium Conference Record }, author = { Rezaei and Nuyts }, year = { To Appear }, } |
2020 | Journal | Jakub Ceranka, Sabrina Verga, Maryna Kvasnytsia, Frédéric Lecouvet, Nicolas Michoux, Johan de Mey, Hubert Raeymaekers, Thierry Metens, Julie Absil, Jef Vandemeulebroucke (2020). Multi-atlas segmentation of the skeleton from whole-body MRI—Impact of iterative background masking. Magnetic Resonance in Medicine, 83(5), pp. 1851–1862. (link) (bib) x @article{Ceranka2020, year = { 2020 }, volume = { 83 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Multi-atlas segmentation of the skeleton from whole-body MRI—Impact of iterative background masking }, pages = { 1851--1862 }, number = { 5 }, keywords = { atlas-based segmentation,bone segmentation,image registration,whole-body MRI }, journal = { Magnetic Resonance in Medicine }, issn = { 15222594 }, doi = { 10.1002/mrm.28042 }, author = { Ceranka and Verga and Kvasnytsia and Lecouvet and Michoux and Mey and Raeymaekers and Metens and Absil and Vandemeulebroucke }, abstract = { Purpose: To improve multi-atlas segmentation of the skeleton from whole-body MRI. In particular, we study the effect of employing the atlas segmentations to iteratively mask tissues outside of the region of interest to improve the atlas alignment and subsequent segmentation. Methods: An improved atlas registration scheme is proposed. Starting from a suitable initial alignment, the alignment is refined by introducing additional stages of deformable registration during which the image sampling is limited to the dilated atlas segmentation label mask. The performance of the method was demonstrated using leave-one-out cross-validation using atlases of 10 whole-body 3D-T1 images of prostate cancer patients with bone metastases and healthy male volunteers, and compared to existing state of the art. Both registration accuracy and resulting segmentation quality, using four commonly used label fusion strategies, were evaluated. 
Results: The proposed method showed significant improvement in registration and segmentation accuracy with respect to the state of the art for all validation criteria and label fusion strategies, resulting in a Dice coefficient of 0.887 (STEPS label fusion). The average Dice coefficient for the multi-atlas segmentation showed over 11{\%} improvement with a decrease of false positive rate from 28.3{\%} to 13.2{\%}. For this application, repeated application of the background masking did not lead to significant improvement of the segmentation result. Conclusions: A registration strategy, relying on the use of atlas segmentations as mask during image registration was proposed and evaluated for multi-atlas segmentation of whole-body MRI. The approach significantly improved registration and final segmentation accuracy and may be applicable to other structures of interest. }, } |
2020 | Journal | Jordy Tasserie, Antoine Grigis, Lynn Uhrig, Morgan Dupont, Alexis Amadon, Béchir Jarraya (2020). Pypreclin: An automatic pipeline for macaque functional MRI preprocessing. NeuroImage, 207, pp. 15. (link) (bib) x @article{Tasserie2020, year = { 2020 }, volume = { 207 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Pypreclin: An automatic pipeline for macaque functional MRI preprocessing }, pages = { 15 }, keywords = { Automatic,Macaque,Motion artifact,Movement artifact,Non-human primate,Preprocessing,fMRI }, journal = { NeuroImage }, issn = { 10959572 }, doi = { 10.1016/j.neuroimage.2019.116353 }, author = { Tasserie and Grigis and Uhrig and Dupont and Amadon and Jarraya }, abstract = { Non-human primate functional MRI (fMRI) is a growing field in neuroscience. However, there is no standardized method for monkey fMRI data analysis, specifically for data preprocessing. The preprocessing of monkey fMRI data is challenged by several technical and experimental specificities of the monkey research such as artifacts related to body movements or to intracranial leads. Here we propose to address these challenges by developing a new versatile pipeline for macaque fMRI preprocessing. We developed a Python module, Pypreclin, to process raw images using state of the art algorithms embedded in a fully automatic pipeline. To evaluate its robustness, we applied Pypreclin to fMRI data acquired at 3T in both awake and anesthetized macaques, with or without iron oxide contrast agent, using single loop or multichannel phased-array coils, combined or not with intracranial implanted electrodes. We performed both resting-state and auditory evoked fMRI and compared the results of Pypreclin to a previously employed preprocessing pipeline. Pypreclin successfully achieved the registration of the fMRI data to the macaque brain template in all the experimental conditions. 
Moreover, Pypreclin enables more accurate locations of auditory evoked activations in relation to the gray matter at corrected level in the awake fMRI condition. Finally, using the Primate neuroimaging Data-Exchange open access platform, we could further validate Pypreclin for monkey fMRI images that were acquired at ultra-high fields, from other institutions and using different protocols. Pypreclin is a validated preprocessing tool that adapts to diverse experimental and technical situations of monkey fMRI. Pypreclin code is available on open source data sharing platform. }, } |
2020 | Journal | Jie Liang Song, Xin Ye Fu, Ali Raza, Nai An Shen, Ya Qi Xue, Hua Jie Wang, Jin Ye Wang (2020). Enhancement of mechanical strength of TCP-alginate based bioprinted constructs. Journal of the Mechanical Behavior of Biomedical Materials, 103, pp. NA (link) (bib) x @article{Song2020, year = { 2020 }, volume = { 103 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85074942779{\&}doi=10.1016{\%}2Fj.jmbbm.2019.103533{\&}partnerID=40{\&}md5=5a9d1f1c5ade6a354c2e6bf7adb9613f }, type = { Journal Article }, title = { Enhancement of mechanical strength of TCP-alginate based bioprinted constructs }, keywords = { Alginate-TCP bioink,Bioprinting,Bone defect,PCL supporter,Unit-assembly model }, journal = { Journal of the Mechanical Behavior of Biomedical Materials }, issn = { 18780180 }, doi = { 10.1016/j.jmbbm.2019.103533 }, author = { Song and Fu and Raza and Shen and Xue and Wang and Wang }, abstract = { To overcome the mechanical drawback of bioink, we proposed a supporter model to enhance the mechanical strength of bioprinted 3D constructs, in which a unit-assembly idea was involved. Based on Computed Tomography images of critical-sized rabbit bone defect, the 3D re-construction was accomplished by a sequenced process using Mimics 17.0, BioCAM and BioCAD software. 3D constructs were bioprinted using polycaprolactone (PCL) ink for the outer supporter under extrusion mode, and cell-laden tricalcium phosphate (TCP)/alginate bioink for the inner filler under air pressure dispensing mode. The relationship of viscosity of bioinks, 3D bioprinting pressure, TCP/alginate ratio and cell survival were investigated by the shear viscosities analysis, live/dead cell test and cell-counting kit 8 measurement. The viscosity of bioinks at 1.0 s−1-shear rate could be adjusted within the range of 1.75 ± 0.29 Pa{\textperiodcentered}s to 155.65 ± 10.86 Pa{\textperiodcentered}s by changing alginate concentration, corresponding to 10 kPa–130 kPa of printing pressure. 
This design with PCL supporter could significantly enhance the compressive strength and compressive modulus of standardized 3D mechanical testing specimens up to 2.15 ± 0.14 MPa to 2.58 ± 0.09 MPa, and 42.83 ± 4.75 MPa to 53.12 ± 1.19 MPa, respectively. Cells could maintain the high viability (over 80{\%}) under the given printing pressure but cell viability declined with the increase of TCP content. Cell survival after experiencing 7 days of cell culture could be achieved when the ratio of TCP/alginate was 1 : 4. All data supported the feasibility of the supporter and unit-assembly model to enhance mechanical properties of bioprinted 3D constructs. }, } |
2020 | Journal | Chonnipa Nantavithya, Daniel R. Gomez, Joe Y. Chang, Abdallah S.R. Mohamed, C. David Fuller, Heng Li, Eric D. Brooks, Saumil J. Gandhi (2020). An improved method for analyzing and reporting patterns of in-field recurrence after stereotactic ablative radiotherapy in early-stage non-small cell lung cancer. Radiotherapy and Oncology, 145, pp. 209–214. (link) (bib) x @article{Nantavithya2020, year = { 2020 }, volume = { 145 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85079230198{\&}doi=10.1016{\%}2Fj.radonc.2020.01.002{\&}partnerID=40{\&}md5=fe2e0bf370b46396d4913f6e5289a860 }, type = { Journal Article }, title = { An improved method for analyzing and reporting patterns of in-field recurrence after stereotactic ablative radiotherapy in early-stage non-small cell lung cancer }, pages = { 209--214 }, keywords = { Early-stage non-small cell lung cancer,In-field recurrence,Stereotactic ablative radiotherapy }, journal = { Radiotherapy and Oncology }, issn = { 18790887 }, doi = { 10.1016/j.radonc.2020.01.002 }, author = { Nantavithya and Gomez and Chang and Mohamed and Fuller and Li and Brooks and Gandhi }, abstract = { Introduction: Patterns of local, regional, and distant failure after stereotactic ablative radiotherapy (SABR) for early-stage non-small cell lung cancer (NSCLC) have been widely reported. However, reliable methods for analyzing causes of local failure are lacking. We describe a method for analyzing and reporting patterns of in-field recurrence after SABR, incorporating dosimetric parameters from initial treatment plan as well as geometric information from diagnostic images at recurrence. Material and methods: Diagnostic CT images at recurrence were registered with initial treatment planning images and radiation dose by deformable image registration. Recurrent gross tumor volume (rGTV) and centroid (geometric center of rGTV) were delineated. 
In-field failure was classified as centroids originating within the original planning target volume. Dose-volume histograms for each rGTV were used to further classify in-field recurrences as central high-dose (dose to 95{\%} of rGTV [rGTVD95{\%}] ≥95{\%} of dose prescribed to PTV) or peripheral high-dose (rGTVD95{\%} {\textless}95{\%} of dose prescribed to PTV). Results: 634 patients received SABR from 2004 to 2014 with 48 local recurrences. 35 of these had evaluable images with 16 in-field recurrences: 9 central high-dose, 6 peripheral high-dose, and 1 had both. Time to and volume of recurrence were not statistically different between central versus peripheral high-dose recurrences. However mean rGTV dose, mean centroid dose, and rGTVD95{\%} were higher for central versus peripheral high-dose recurrences. Conclusion: We report a standardized method for analysis and classification of in-field recurrence after SABR. There were more central as opposed to peripheral high-dose recurrences, suggesting biological rather than technical issues underlying majority of in-field failures. }, } |
2020 | Journal | Eros Montin, Antonella Belfatto, Marco Bologna, Silvia Meroni, Claudia Cavatorta, Emilia Pecori, Barbara Diletto, Maura Massimino, Maria Chiara Oprandi, Geraldina Poggi, Filippo Arrigoni, Denis Peruzzo, Emanuele Pignoli, Lorenza Gandola, Pietro Cerveri, Luca Mainardi (2020). A multi-metric registration strategy for the alignment of longitudinal brain images in pediatric oncology. Medical and Biological Engineering and Computing, NA pp. NA (link) (bib) x @article{Montin2020, year = { 2020 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85079384742{\&}doi=10.1007{\%}2Fs11517-019-02109-4{\&}partnerID=40{\&}md5=211d8231bb9224f67092a15ccbb09ce3 }, type = { Journal Article }, title = { A multi-metric registration strategy for the alignment of longitudinal brain images in pediatric oncology }, keywords = { Brain MRI,Deformable registration,Image registration,Mutual information,Normalized gradient field,Pediatric brain tumors }, journal = { Medical and Biological Engineering and Computing }, issn = { 17410444 }, doi = { 10.1007/s11517-019-02109-4 }, author = { Montin and Belfatto and Bologna and Meroni and Cavatorta and Pecori and Diletto and Massimino and Oprandi and Poggi and Arrigoni and Peruzzo and Pignoli and Gandola and Cerveri and Mainardi }, abstract = { Survival of pediatric patients with brain tumor has increased over the past 20 years, and increasing evidence of iatrogenic toxicities has been reported. In follow-ups, images are acquired at different time points where substantial changes of brain morphology occur, due to childhood physiological development and treatment effects. To address the image registration complexity, we propose two multi-metric approaches (Mplus, Mdot), combining mutual information (MI) and normalized gradient field filter (NGF). 
The registration performance of the proposed metrics was assessed on a simulated dataset (Brainweb) and compared with those obtained by MI and NGF separately, using mean magnitude and mean angular errors. The most promising metric (Mplus) was then selected and tested on a retrospective dataset comprising 45 pediatric patients who underwent focal radiotherapy for brain cancer. The quality of the realignment was scored by a radiation oncologist using a perceived misalignment metric (PM). All patients but one were assessed as PM ≤ 2 (good alignment), but the remaining one, severely affected by hydrocephalus and pneumocephalus at the first MRI acquisition, scored PM = 5 (unacceptable). These preliminary findings suggest that Mplus might improve the registration accuracy in complex applications such as pediatric oncology, when data are acquired throughout the years of follow-up, and is worth investigating. [Figure not available: see fulltext.]. }, } |
2020 | Journal | Markus Huellebrand, Daniel Messroghli, Lennart Tautz, Titus Kuehne, Anja Hennemuth (2020). An extensible software platform for interdisciplinary cardiovascular imaging research. Computer Methods and Programs in Biomedicine, 184, pp. 13. (link) (bib) x @article{Huellebrand2020, year = { 2020 }, volume = { 184 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { An extensible software platform for interdisciplinary cardiovascular imaging research }, pages = { 13 }, keywords = { Cardiology,Image processing,MRI,Medical image analysis,Segmentation }, journal = { Computer Methods and Programs in Biomedicine }, issn = { 18727565 }, doi = { 10.1016/j.cmpb.2019.105277 }, author = { Huellebrand and Messroghli and Tautz and Kuehne and Hennemuth }, abstract = { Background and objective: Cardiovascular imaging is an exponentially growing field with aspects ranging from image acquisition and analysis to disease characterization, and evaluation of therapy approaches.The transfer of innovative new technological and algorithmic solutions into clinical practice is still slow. In addition to the verification of solutions, their integration in the clinical processing workflow must be enabled for the assessment of clinical impact and risks. The goal of our software platform for cardiac image processing – CAIPI – is to support researchers from different specialties such as imaging physics, computer science, and medicine by a common extensible platform to address typical challenges and hurdles in interdisciplinary cardiovascular imaging research. It provides an integrated solution for method comparison, integrated analysis, and validation in the clinical context. The interface concept enables a combination with existing frameworks that address specific aspects of the pipeline, such as modeling (e.g., OpenCMISS, CARP) or image reconstruction (Gadgetron). 
Methods: In our platform, we developed a concept for import, integration, and management of cardiac image data. The integration approach considers the spatiotemporal properties of the beating heart through a specific data model. The solution is based on MeVisLab and provides functionalities for data retrieval and storage. Two types of plugins can be added. While ToolPlugins usually provide processing algorithms such as image correction and segmentation, AnalysisPlugins enable interactive data exploration and reporting. GUI integration concepts are presented for both plugin types. We developed domain-specific reporting and visualization tools (e.g., AHA segment model) to enable validation studies by clinical experts. The platform offers plugins for calculating and reporting quantitative parameters such as cardiac function, which can be used to, e.g., evaluate the effect of processing algorithms on clinical parameters. Export functionalities include quantitative measurements to Excel, image data to PACS, and STL models to modeling and simulation tools. Results: To demonstrate the applicability of this concept both for method development and clinical application, we present use cases representing different problems along the innovation chain in cardiac MR imaging. Validation of an image reconstruction method (MRI T1 mapping) Validation of an image correction method for real-time 2D-PC MRI Comparison of quantification methods for blood flow analysis Training and integration of machine learning solutions with expert annotations Clinical studies with new imaging techniques (flow measurements in the carotid arteries and peripheral veins as well as cerebral spinal fluid). Conclusion: The presented platform can be used in interdisciplinary teams, in which engineers or data scientists perform the method validation, followed by clinical research studies in patient collectives. 
The demonstrated use cases show how it enables the transfer of innovations through validation in the cardiovascular application context. }, } |
2020 | Journal | Ruman Gerst, Anna Medyukhina, Marc Thilo Figge (2020). MISA++: A standardized interface for automated bioimage analysis. SoftwareX, 11, pp. NA (link) (bib) x @article{Gerst2020, year = { 2020 }, volume = { 11 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85078663806{\&}doi=10.1016{\%}2Fj.softx.2020.100405{\&}partnerID=40{\&}md5=4063263c3ae932dded430bc8192b1704 }, type = { Journal Article }, title = { MISA++: A standardized interface for automated bioimage analysis }, keywords = { Application integration,Big volume image data,Image processing,Light-sheet fluorescence microscopy,Parallelization }, journal = { SoftwareX }, issn = { 23527110 }, doi = { 10.1016/j.softx.2020.100405 }, author = { Gerst and Medyukhina and Figge }, abstract = { Modern imaging techniques, such as lightsheet fluorescence microscopy (LSFM), allow the capture of whole organs in three spatial dimensions. The analysis of these big volume image data requires a combination of user-friendly and highly efficient tools. We here present MISA++, an image analysis framework that allows easy integration of custom high-performance C++ tools into third-party applications via standardized components for parallelization, data and parameter handling, command line interface, and communication with third-party applications. We demonstrate its capabilities by implementing a plugin for ImageJ that provides a graphical user interface for any application built with our framework, and a high-performance re-implementation of our Python-based algorithm to segment glomeruli in LSFM images of whole murine kidneys. }, } |
2020 | Journal | Zoltán Bárdosi, Christian Plattner, Yusuf Özbek, Thomas Hofmann, Srdjan Milosavljevic, Volker Schartinger, Wolfgang Freysinger (2020). CIGuide: in situ augmented reality laser guidance. International Journal of Computer Assisted Radiology and Surgery, 15(1), pp. 49–57. (link) (bib) x @article{Bardosi2020, year = { 2020 }, volume = { 15 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { CIGuide: in situ augmented reality laser guidance }, pages = { 49--57 }, number = { 1 }, keywords = { Augmented reality,Laser guidance,Magnetic tracking,Microscope,Navigated surgery,Optical tracking,Robotic control }, journal = { International Journal of Computer Assisted Radiology and Surgery }, issn = { 18616429 }, doi = { 10.1007/s11548-019-02066-1 }, author = { B{\'{a}}rdosi and Plattner and {\"{O}}zbek and Hofmann and Milosavljevic and Schartinger and Freysinger }, abstract = { Purpose : A robotic intraoperative laser guidance system with hybrid optic-magnetic tracking for skull base surgery is presented. It provides in situ augmented reality guidance for microscopic interventions at the lateral skull base with minimal mental and workload overhead on surgeons working without a monitor and dedicated pointing tools. Methods : Three components were developed: a registration tool (Rhinospider), a hybrid magneto-optic-tracked robotic feedback control scheme and a modified robotic end-effector. Rhinospider optimizes registration of patient and preoperative CT data by excluding user errors in fiducial localization with magnetic tracking. The hybrid controller uses an integrated microscope HD camera for robotic control with a guidance beam shining on a dual plate setup avoiding magnetic field distortions. A robotic needle insertion platform (iSYS Medizintechnik GmbH, Austria) was modified to position a laser beam with high precision in a surgical scene compatible to microscopic surgery. 
Results : System accuracy was evaluated quantitatively at various target positions on a phantom. The accuracy found is 1.2 mm ± 0.5 mm. Errors are primarily due to magnetic tracking. This application accuracy seems suitable for most surgical procedures in the lateral skull base. The system was evaluated quantitatively during a mastoidectomy of an anatomic head specimen and was judged useful by the surgeon. Conclusion : A hybrid robotic laser guidance system with direct visual feedback is proposed for navigated drilling and intraoperative structure localization. The system provides visual cues directly on/in the patient anatomy, reducing the standard limitations of AR visualizations like depth perception. The custom- built end-effector for the iSYS robot is transparent to using surgical microscopes and compatible with magnetic tracking. The cadaver experiment showed that guidance was accurate and that the end-effector is unobtrusive. This laser guidance has potential to aid the surgeon in finding the optimal mastoidectomy trajectory in more difficult interventions. }, } |
2020 | Journal | Lennart Tautz, Lars Walczak, Joachim Georgii, Amer Jazaerli, Katharina Vellguth, Isaac Wamala, Simon Sündermann, Volkmar Falk, Anja Hennemuth (2020). Combining position-based dynamics and gradient vector flow for 4D mitral valve segmentation in TEE sequences. International Journal of Computer Assisted Radiology and Surgery, 15(1), pp. 119–128. (link) (bib) x @article{RN839, year = { 2020 }, volume = { 15 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85074526687{\&}doi=10.1007{\%}2Fs11548-019-02071-4{\&}partnerID=40{\&}md5=d65f50a05a3b5b16c3d481c565ea1497 }, type = { Journal Article }, title = { Combining position-based dynamics and gradient vector flow for 4D mitral valve segmentation in TEE sequences }, pages = { 119--128 }, number = { 1 }, keywords = { Echocardiography,Mitral valve,Position-based dynamics,Segmentation,Tracking }, journal = { International Journal of Computer Assisted Radiology and Surgery }, issn = { 18616429 }, doi = { 10.1007/s11548-019-02071-4 }, author = { Tautz and Walczak and Georgii and Jazaerli and Vellguth and Wamala and S{\"{u}}ndermann and Falk and Hennemuth }, abstract = { Purpose: For planning and guidance of minimally invasive mitral valve repair procedures, 3D+t transesophageal echocardiography (TEE) sequences are acquired before and after the intervention. The valve is then visually and quantitatively assessed in selected phases. To enable a quantitative assessment of valve geometry and pathological properties in all heart phases, as well as the changes achieved through surgery, we aim to provide a new 4D segmentation method. Methods: We propose a tracking-based approach combining gradient vector flow (GVF) and position-based dynamics (PBD). An open-state surface model of the valve is propagated through time to the closed state, attracted by the GVF field of the leaflet area. The PBD method ensures topological consistency during deformation. 
For evaluation, one expert in cardiac surgery annotated the closed-state leaflets in 10 TEE sequences of patients with normal and abnormal mitral valves, and defined the corresponding open-state models. Results: The average point-to-surface distance between the manual annotations and the final tracked model was 1.00mm±1.08mm. Qualitatively, four cases were satisfactory, five passable and one unsatisfactory. Each sequence could be segmented in 2–6 min. Conclusion: Our approach enables to segment the mitral valve in 4D TEE image data with normal and pathological valve closing behavior. With this method, in addition to the quantification of the remaining orifice area, shape and dimensions of the coaptation zone can be analyzed and considered for planning and surgical result assessment. }, } |
2020 | Journal | Steffie M.B. Peters, Sebastiaan L. Meyer Viol, Niels R. van der Werf, Nick de Jong, Floris H.P. van Velden, Antoi Meeuwis, Mark W. Konijnenberg, Martin Gotthardt, Hugo W.A.M. de Jong, Marcel Segbers (2020). Variability in lutetium-177 SPECT quantification between different state-of-the-art SPECT/CT systems. EJNMMI Physics, 7(1), pp. 13. (link) (bib) x @article{RN804, year = { 2020 }, volume = { 7 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Variability in lutetium-177 SPECT quantification between different state-of-the-art SPECT/CT systems }, pages = { 13 }, number = { 1 }, journal = { EJNMMI Physics }, issn = { 21977364 }, doi = { 10.1186/s40658-020-0278-3 }, author = { Peters and {Meyer Viol} and Werf and Jong and Velden and Meeuwis and Konijnenberg and Gotthardt and Jong and Segbers }, abstract = { Background: Quantitative SPECT imaging in targeted radionuclide therapy with lutetium-177 holds great potential for individualized treatment based on dose assessment. The establishment of dose-effect relations requires a standardized method for SPECT quantification. The purpose of this multi-center study is to evaluate quantitative accuracy and inter-system variations of different SPECT/CT systems with corresponding commercially available quantitative reconstruction algorithms. This is an important step towards a vendor-independent standard for quantitative lutetium-177 SPECT. Methods: Four state-of-the-art SPECT/CT systems were included: Discovery™ NM/CT 670Pro (GE Healthcare), Symbia Intevo™, and two Symbia™ T16 (Siemens Healthineers). Quantitative accuracy and inter-system variations were evaluated by repeatedly scanning a cylindrical phantom with 6 spherical inserts (0.5 – 113 ml). A sphere-to-background activity concentration ratio of 10:1 was used. 
Acquisition settings were standardized: medium energy collimator, body contour trajectory, photon energy window of 208 keV (± 10{\%}), adjacent 20{\%} lower scatter window, 2 × 64 projections, 128 × 128 matrix size, and 40 s projection time. Reconstructions were performed using GE Evolution with Q.Metrix™, Siemens xSPECT Quant™, Siemens Broad Quantification™ or Siemens Flash3D™ algorithms using vendor recommended settings. In addition, projection data were reconstructed using Hermes SUV SPECT™ with standardized reconstruction settings to obtain a vendor-neutral quantitative reconstruction for all systems. Volumes of interest (VOI) for the spheres were obtained by applying a 50{\%} threshold of the sphere maximum voxel value corrected for background activity. For each sphere, the mean and maximum recovery coefficient (RCmean and RCmax) of three repeated measurements was calculated, defined as the imaged activity concentration divided by the actual activity concentration. Inter-system variations were defined as the range of RC over all systems. Results: RC decreased with decreasing sphere volume. Inter-system variations with vendor-specific reconstructions were between 0.06 and 0.41 for RCmean depending on sphere size (maximum 118{\%} quantification difference), and improved to 0.02–0.19 with vendor-neutral reconstructions (maximum 38{\%} quantification difference). Conclusion: This study shows that eliminating sources of possible variation drastically reduces inter-system variation in quantification. This means that absolute SPECT quantification for 177Lu is feasible in a multi-center and multi-vendor setting; however, close agreement between vendors and sites is key for multi-center dosimetry and quantitative biomarker studies. }, } |
2020 | Journal | Christopher Noble, Kent D. Carlson, Erica Neumann, Dan Dragomir-Daescu, Ahmet Erdemir, Amir Lerman, Melissa Young (2020). Patient specific characterization of artery and plaque material properties in peripheral artery disease. Journal of the Mechanical Behavior of Biomedical Materials, 101, pp. 14. (link) (bib) x @article{RN806, year = { 2020 }, volume = { 101 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Patient specific characterization of artery and plaque material properties in peripheral artery disease }, pages = { 14 }, keywords = { Intravascular ultrasound,Inverse finite element analysis,Peripheral artery disease,Pressure inflation testing,Virtual histology }, journal = { Journal of the Mechanical Behavior of Biomedical Materials }, issn = { 18780180 }, doi = { 10.1016/j.jmbbm.2019.103453 }, author = { Noble and Carlson and Neumann and Dragomir-Daescu and Erdemir and Lerman and Young }, abstract = { Patient-specific finite element (FE) modeling of atherosclerotic plaque is challenging, as there is limited information available clinically to characterize plaque components. This study proposes that for the limited data available in vivo, material properties of plaque and artery can be identified using inverse FE analysis and either a simple neo-Hookean constitutive model or assuming linear elasticity provides sufficient accuracy to capture the changes in vessel deformation, which is the available clinical metric. To test this, 10 human cadaveric femoral arteries were each pressurized ex vivo at 6 pressure levels, while intravascular ultrasound (IVUS) and virtual histology (VH) imaging were performed during controlled pull-back to determine vessel geometry and plaque structure. The VH images were then utilized to construct FE models with heterogeneous material properties corresponding to the vessel plaque components. 
The constitutive models were then fit to each plaque component by minimizing the difference between the experimental and the simulated geometry using the inverse FE method. Additionally, we further simplified the analysis by assuming the vessel wall had a homogeneous structure, i.e. lumping artery and plaque as one tissue. We found that for the heterogeneous wall structure, the simulated and experimental vessel geometries compared well when the fitted neo-Hookean parameters or elastic modulus, in the case of linear elasticity, were utilized. Furthermore, taking the median of these fitted parameters then inputting these as plaque component mechanical properties in the finite element simulation yielded differences between simulated and experimental geometries that were on average around 2{\%} greater (1.30–5.55{\%} error range to 2.33–11.71{\%} error range). For the homogeneous wall structure the simulated and experimental wall geometries had an average difference of around 4{\%} although when the difference was calculated using the median fitted value this difference was larger than for the heterogeneous fits. Finally, comparison to uniaxial tension data and to literature constitutive models also gave confidence to the suitability of this simplified approach for patient-specific arterial simulation based on data that may be acquired in the clinic. }, } |
2020 | Journal | Shu Kondo, Takahiro Takahashi, Nobuhiro Yamagata, Yasuhito Imanishi, Hidetaka Katow, Shun Hiramatsu, Katrina Lynn, Ayako Abe, Ajayrama Kumaraswamy, Hiromu Tanimoto (2020). Neurochemical Organization of the Drosophila Brain Visualized by Endogenously Tagged Neurotransmitter Receptors. Cell Reports, 30(1), pp. 284–297.e5. (link) (bib) x @article{RN805, year = { 2020 }, volume = { 30 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Neurochemical Organization of the Drosophila Brain Visualized by Endogenously Tagged Neurotransmitter Receptors }, pages = { 284--297.e5 }, number = { 1 }, journal = { Cell Reports }, issn = { 22111247 }, doi = { 10.1016/j.celrep.2019.12.018 }, author = { Kondo and Takahashi and Yamagata and Imanishi and Katow and Hiramatsu and Lynn and Abe and Kumaraswamy and Tanimoto }, abstract = { Kondo et al. have generated a comprehensive library of convertible GAL4 knockin Drosophila strains for neurotransmitter receptor genes. The GAL4 lines can be converted into other reporters through the use of RMCE, providing a versatile toolkit. Expression profiling of receptor genes reveals neurochemical segmentation of the brain. }, } |
2020 | Journal | Yuliang Huang, Chenguang Li, Haiyang Wang, Qiaoqiao Hu, Ruoxi Wang, Cheng Chang, Wenjun Ma, Weibo Li, Hao Wu, Yibao Zhang (2020). A quantitative evaluation of deformable image registration based on MV cone beam CT images: Impact of deformation magnitudes and image modalities. Physica Medica, 71, pp. 82–87. (link) (bib) x @article{RN829, year = { 2020 }, volume = { 71 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85079849262{\&}doi=10.1016{\%}2Fj.ejmp.2020.02.016{\&}partnerID=40{\&}md5=ffe26ce9da5145f82cc70d5a9518fab4 }, type = { Journal Article }, title = { A quantitative evaluation of deformable image registration based on MV cone beam CT images: Impact of deformation magnitudes and image modalities }, pages = { 82--87 }, keywords = { Adaptive radiotherapy,Deformable image registration,MVCBCT }, journal = { Physica Medica }, issn = { 1724191X }, doi = { 10.1016/j.ejmp.2020.02.016 }, author = { Huang and Li and Wang and Hu and Wang and Chang and Ma and Li and Wu and Zhang }, abstract = { Background and purpose: To evaluate the impact of deformation magnitude and image modality on deformable-image-registration (DIR) accuracy using Halcyon megavoltage cone beam CT images (MVCBCT). Materials and methods: Planning CT images of an anthropomorphic Head phantom were aligned rigidly with MVCBCT and re-sampled to achieve the same resolution, denoted as pCT. MVCBCT was warped with twenty simulated pre-known virtual deformation fields (Ti, i = 1–20) with increasing deformation magnitudes, yielding warped CBCT (wCBCT). The pCT and MVCBCT were registered to wCBCT respectively (Multi-modality and Uni-modality DIR), generating deformation vector fields Vi and Vi′ (i = 1–20). Vi and Vi′ were compared with Ti respectively to assess the DIR accuracy geometrically. In addition, Vi, Ti, and Vi′ were applied to pCT, generating deformed CT (dCTi), ground-truth CT (Gi) and deformed CT′ (dCTi′) respectively. 
The Hounsfield Unit (HU) on these virtual CT images were also compared. Results: The mean errors of vector displacement increased with the deformation magnitude. For deformation magnitudes between 2.82 mm and 7.71 mm, the errors of uni-modality DIR were 1.16 mm {\~{}} 1.73 mm smaller than that of multi-modality (p = 0.0001, Wilcoxon signed rank test). DIR could reduce the maximum signed and absolute HU deviations from 70.8 HU to 11.4 HU and 208 HU to 46.2 HU respectively. Conclusions: As deformation magnitude increases, DIR accuracy continues to deteriorate and uni-modality DIR consistently outperformed multi-modality DIR. DIR-based adaptive radiotherapy utilizing the noisy MVCBCT images is only conditionally applicable with caution. }, } |
2020 | Journal | Serena Bonaretti, Garry E. Gold, Gary S. Beaupre (2020). PyKNEEr: An image analysis workflow for open and reproducible research on femoral knee cartilage. PLoS ONE, 15(1), pp. NA (link) (bib) x @article{RN832, year = { 2020 }, volume = { 15 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85078319647{\&}doi=10.1371{\%}2Fjournal.pone.0226501{\&}partnerID=40{\&}md5=72a88df7ae12079c45940349b7f58af9 }, type = { Journal Article }, title = { PyKNEEr: An image analysis workflow for open and reproducible research on femoral knee cartilage }, number = { 1 }, journal = { PLoS ONE }, issn = { 19326203 }, doi = { 10.1371/journal.pone.0226501 }, author = { Bonaretti and Gold and Beaupre }, abstract = { Transparent research in musculoskeletal imaging is fundamental to reliably investigate diseases such as knee osteoarthritis (OA), a chronic disease impairing femoral knee cartilage. To study cartilage degeneration, researchers have developed algorithms to segment femoral knee cartilage from magnetic resonance (MR) images and to measure cartilage morphology and relaxometry. The majority of these algorithms are not publicly available or require advanced programming skills to be compiled and run. However, to accelerate discoveries and findings, it is crucial to have open and reproducible workflows. We present pyKNEEr, a framework for open and reproducible research on femoral knee cartilage from MR images. pyKNEEr is written in python, uses Jupyter notebook as a user interface, and is available on GitHub with a GNU GPLv3 license. It is composed of three modules: 1) image preprocessing to standardize spatial and intensity characteristics; 2) femoral knee cartilage segmentation for intersubject, multimodal, and longitudinal acquisitions; and 3) analysis of cartilage morphology and relaxometry. Each module contains one or more Jupyter notebooks with narrative, code, visualizations, and dependencies to reproduce computational environments. 
pyKNEEr facilitates transparent image-based research of femoral knee cartilage because of its ease of installation and use, and its versatility for publication and sharing among researchers. Finally, due to its modular structure, pyKNEEr favors code extension and algorithm comparison. We tested our reproducible workflows with experiments that also constitute an example of transparent research with pyKNEEr, and we compared pyKNEEr performances to existing algorithms in literature review visualizations. We provide links to executed notebooks and executable environments for immediate reproducibility of our findings. }, } |
2020 | Journal | Sílvia D. Almeida, João Santinha, Francisco P.M. Oliveira, Joana Ip, Maria Lisitskaya, João Lourenço, Aycan Uysal, Celso Matos, Cristina João, Nikolaos Papanikolaou (2020). Quantification of tumor burden in multiple myeloma by atlas-based semi-automatic segmentation of WB-DWI. Cancer Imaging, 20(1), pp. NA (link) (bib) x @article{RN830, year = { 2020 }, volume = { 20 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85077786257{\&}doi=10.1186{\%}2Fs40644-020-0286-5{\&}partnerID=40{\&}md5=c2b44a405ae7ba7bfa48478311ee70a6 }, type = { Journal Article }, title = { Quantification of tumor burden in multiple myeloma by atlas-based semi-automatic segmentation of WB-DWI }, number = { 1 }, keywords = { Atlas-based segmentation,Diffusion weighted imaging,Multiple myeloma,Semi-automatic segmentation,Total lesion burden }, journal = { Cancer Imaging }, issn = { 14707330 }, doi = { 10.1186/s40644-020-0286-5 }, author = { Almeida and Santinha and Oliveira and Ip and Lisitskaya and Louren{\c{c}}o and Uysal and Matos and Joa{\~{o}} and Papanikolaou }, abstract = { Background: Whole-body diffusion weighted imaging (WB-DWI) has proven value to detect multiple myeloma (MM) lesions. However, the large volume of imaging data and the presence of numerous lesions makes the reading process challenging. The aim of the current study was to develop a semi-automatic lesion segmentation algorithm for WB-DWI images in MM patients and to evaluate this smart-algorithm (SA) performance by comparing it to the manual segmentations performed by radiologists. Methods: An atlas-based segmentation was developed to remove the high-signal intensity normal tissues on WB-DWI and to restrict the lesion area to the skeleton. Then, an outlier threshold-based segmentation was applied to WB-DWI images, and the segmented area's signal intensity was compared to the average signal intensity of a low-fat muscle on T1-weighted images. 
This method was validated in 22 whole-body DWI images of patients diagnosed with MM. Dice similarity coefficient (DSC), sensitivity and positive predictive value (PPV) were computed to evaluate the SA performance against the gold standard (GS) and to compare with the radiologists. A non-parametric Wilcoxon test was also performed. Apparent diffusion coefficient (ADC) histogram metrics and lesion volume were extracted for the GS segmentation and for the correctly identified lesions by SA and their correlation was assessed. Results: The mean inter-radiologists DSC was 0.323 ± 0.268. The SA vs GS achieved a DSC of 0.274 ± 0.227, sensitivity of 0.764 ± 0.276 and PPV 0.217 ± 0.207. Its distribution was not significantly different from the mean DSC of inter-radiologist segmentation (p = 0.108, Wilcoxon test). ADC and lesion volume intraclass correlation coefficient (ICC) of the GS and of the correctly identified lesions by the SA was 0.996 for the median and 0.894 for the lesion volume (p {\textless} 0.001). The duration of the lesion volume segmentation by the SA was, on average, 10.22 ± 0.86 min, per patient. Conclusions: The SA provides equally reproducible segmentation results when compared to the manual segmentation of radiologists. Thus, the proposed method offers robust and efficient segmentation of MM lesions on WB-DWI. This method may aid accurate assessment of tumor burden and therefore provide insights to treatment response assessment. }, } |
2019 | Journal | Amandine Crombé, Cynthia Périer, Michèle Kind, Baudouin Denis De Senneville, François Le Loarer, Antoine Italiano, Xavier Buy, Olivier Saut (2019). T2-based MRI Delta-radiomics improve response prediction in soft-tissue sarcomas treated by neoadjuvant chemotherapy. Journal of Magnetic Resonance Imaging, 50(2), pp. 497–510. (link) (bib) x @article{Crombe2019, year = { 2019 }, volume = { 50 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { T2-based MRI Delta-radiomics improve response prediction in soft-tissue sarcomas treated by neoadjuvant chemotherapy. }, pages = { 497--510 }, number = { 2 }, journal = { Journal of Magnetic Resonance Imaging }, issn = { 15222586 }, doi = { 10.1002/jmri.26589 }, author = { Cromb{\'{e}} and P{\'{e}}rier and Kind and {De Senneville} and {Le Loarer} and Italiano and Buy and Saut }, abstract = { Background: Standard of care for patients with high-grade soft-tissue sarcoma (STS) are being redefined since neoadjuvant chemotherapy (NAC) has demonstrated a positive effect on patients' outcome. Yet response evaluation in clinical trials still relies on RECIST criteria. Purpose: To investigate the added value of a Delta-radiomics approach for early response prediction in patients with STS undergoing NAC. Study Type: Retrospective. Population: Sixty-five adult patients with newly-diagnosed, locally-advanced, histologically proven high-grade STS of trunk and extremities. All were treated by anthracycline-based NAC followed by surgery and had available MRI at baseline and after two chemotherapy cycles. Field Strength/Sequence: Pre- and postcontrast enhanced T1-weighted imaging (T1-WI), turbo spin echo T2-WI at 1.5 T. Assessment: A threshold of {\textless}10{\%} viable cells on surgical specimens defined good response (Good-HR). Two senior radiologists performed a semantic analysis of the MRI. 
After 3D manual segmentation of tumors at baseline and early evaluation, and standardization of voxel-sizes and intensities, absolute changes in 33 texture and shape features were calculated. Statistical Tests: Classification models based on logistic regression, support vector machine, k-nearest neighbors, and random forests were elaborated using cross-validation (training and validation) on 50 patients ("training cohort") and was validated on 15 other patients ("test cohort"). Results: Sixteen patients were good-HR. Neither RECIST status (P = 0.112) nor semantic radiological variables were associated with response (range of P-values: 0.134–0.490) except an edema decrease (P = 0.003), although 14 shape and texture features were (range of P-values: 0.002–0.037). On the training cohort, the highest diagnostic performances were obtained with random forests built on three features: $\Delta${\_}Histogram{\_}Entropy, $\Delta${\_}Elongation, $\Delta${\_}Surrounding{\_}Edema, which provided: area under the curve of the receiver operating characteristic = 0.86, accuracy = 88.1{\%}, sensitivity = 94.1{\%}, and specificity = 66.3{\%}. On the test cohort, this model provided an accuracy of 74.6{\%} but 3/5 good-HR were systematically ill-classified. Data Conclusion: A T2-based Delta-radiomics approach might improve early response assessment in STS patients with a limited number of features. Level of Evidence: 3. Technical Efficacy: Stage 2. J. Magn. Reson. Imaging 2019;50:497–510. }, } |
2019 | Journal | Marc Ziegler, Sebastian Lettmaier, Rainer Fietkau, Christoph Bert (2019). Choosing a reference phase for a dynamic tumor tracking treatment: A new degree of freedom?. Medical Physics, 46(8), pp. 3371–3377. (link) (bib) x @article{Ziegler2019a, year = { 2019 }, volume = { 46 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Choosing a reference phase for a dynamic tumor tracking treatment: A new degree of freedom? }, pages = { 3371--3377 }, number = { 8 }, keywords = { 4D dose calculation,dynamic tumor tracking,liver cancer,lung cancer }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1002/mp.13654 }, author = { Ziegler and Lettmaier and Fietkau and Bert }, abstract = { Purpose: With the introduction of dynamic tumor tracking in radiotherapy, it is possible to irradiate moving targets with minimal safety margins. However, most dynamic tumor tracking techniques rely on changing the beam geometry by, for example, adapting the multileaf collimator (MLC) positions or rotating the LINAC head. These changes are relative to a reference position which is determined by a specific breathing phase. Since these changes in the beam path also influence the delivered dose, choosing a different reference position based on a different breathing phase impacts the applied dose to the patient. This work investigates the influence of choosing different reference breathing phases on the dose distribution. Methods: The Vero system tracks the moving target by performing a pan and tilt rotation of the LINAC head. For 13 patients, the target position was extracted from every phase of a four-dimensional computed tomography (4DCT) and the pan and tilt values were determined with respect to three different reference phases. These reference phases were inspiration, expiration, and the midventilation. For all reference phases, a 4D dose calculation was performed on the 4DCT regarding the respective pan and tilt values. 
Furthermore, the applied dose to the target and surrounding organs at risk was calculated. To accumulate the dose distribution, weights from the actual patient breathing motion were determined. The weights were calculated from the breathing motions from different days to investigate the impact of daily variations in the breathing motion onto the accumulated dose distribution. All obtained values were then compared to the static treatment plan. Results: The mean and maximum doses applied to the target or surrounding organs at risk show no general behavior depending on the different reference phases. Nevertheless, for some patients, large differences (approx. 30{\%}) in the applied dose to certain organs at risk could be observed, whereas the applied dose to the target shows no dependency on the different reference phases. However, the mean target dose is in all cases approx. 1.5{\%} below the reference value from the static treatment plan. Conclusion: Although no general dependency of the applied dose on the selected reference phase could be found, the choice of the reference phase can have great impact on the organ at risk dose for some patients. Thus, the choice of the reference phase used for patient positioning should be considered during treatment planning since it can be seen as a new degree of freedom of a treatment based on tracking. }, } |
2019 | Journal | Marc Ziegler, Tobias Brandt, Sebastian Lettmaier, Rainer Fietkau, Christoph Bert (2019). Method for a motion model based automated 4D dose calculation. Physics in Medicine and Biology, 64(22), pp. 12. (link) (bib) x @article{Ziegler2019, year = { 2019 }, volume = { 64 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Method for a motion model based automated 4D dose calculation }, pages = { 12 }, number = { 22 }, keywords = { 4D dose calculation,Tumor tracking,motion modelling,real-time monitoring,stereotactic body radiation therapy }, journal = { Physics in Medicine and Biology }, issn = { 13616560 }, doi = { 10.1088/1361-6560/ab4e51 }, author = { Ziegler and Brandt and Lettmaier and Fietkau and Bert }, abstract = { The Vero system can treat intra-fractionally moving tumors with gimbaled dynamic tumor tracking (DTT) by rotating the treatment beam so that it follows the motion of the tumor. However, the changes in the beam geometry and the constant breathing motion of the patient influence the dose applied to the patient. This study aims to perform a full 4D dose reconstruction for thirteen patients treated with DTT at the Vero system at the Universit{\"{a}}tsklinikum Erlangen and investigates the temporal resolution required to perform an accurate 4D dose reconstruction. For all patients, a 4DCT was used to train a 4D motion model, which is able to calculate pseudo-CT images for arbitrary breathing phases. A new CT image was calculated for every 100 ms of treatment and a dose calculation was performed according to the current beam geometry (i.e. the rotation of the treatment beam at this moment in time) by rotating according to the momentary beam rotation, which is extracted from log-files. The resulting dose distributions were accumulated on the planning CT and characteristic parameters were extracted and compared. 
$\gamma$-evaluations of dose accumulations with different spatiotemporal resolutions were performed to determine the minimal required resolution. In total 173 700 dose calculations were performed. The accumulated 4D dose distributions show a reduced mean GTV dose of 0.77{\%} compared to the static treatment plan. For some patients larger deviations were observed, especially in the presence of a poor 4DCT quality. The $\gamma$-evaluation showed that a temporal resolution of 500 ms is sufficient for an accurate dose reconstruction. If the tumor motion is regarded as well, a spatiotemporal sampling of 1400 ms and 2 mm yields accurate results, which reduces the workload by 84{\%}. }, } |
2019 | Journal | Duo Zhang, Michael Ghaly, Greta S.P. Mok (2019). Interpolated CT for attenuation correction on respiratory gating cardiac SPECT/CT — A simulation study. Medical Physics, 46(6), pp. 2621–2628. (link) (bib) x @article{Zhang2019, year = { 2019 }, volume = { 46 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Interpolated CT for attenuation correction on respiratory gating cardiac SPECT/CT — A simulation study }, pages = { 2621--2628 }, number = { 6 }, keywords = { attenuation correction,cardiac SPECT/CT,respiratory gating }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1002/mp.13513 }, author = { Zhang and Ghaly and Mok }, abstract = { Purpose: Respiratory gated four-dimensional (4D) single photon emission computed tomography (SPECT) with phase-matched CT reduces respiratory blurring and attenuation correction (AC) artifacts in cardiac SPECT. This study aims to develop and investigate the effectiveness of an interpolated CT (ICT) method for improved cardiac SPECT AC using simulations. Methods: We used the 4D XCAT phantom to simulate a population of ten patients varied in gender, anatomy, 99mTc-sestamibi distribution, respiratory patterns, and disease states. Simulated 120 SPECT projection data were rebinned into six equal count gates. Activity and attenuation maps in each gate were averaged as gated SPECT and CT (GCT). Three helical CTs were simulated at end-inspiration (HCT-IN), end-expiration (HCT-EX), and mid-respiration (HCT-MID). The ICTs were obtained from HCT-EX and HCT-IN using the motion vector field generated between them from affine plus b-spline registration. Projections were reconstructed by OS-EM method, using GCT, ICT, and three HCTs for AC. Reconstructed images of each gate were registered to end-expiration and averaged to generate the polar plots. Relative difference for each segment and relative defect size were computed using images of GCT AC as reference. 
Results: The average of maximum relative difference through ten phantoms was 7.93 ± 4.71{\%}, 2.50 ± 0.98{\%}, 3.58 ± 0.74{\%}, and 2.14 ± 0.56{\%} for noisy HCT-IN, HCT-MID, HCT-EX, and ICT AC data, respectively. The ICT showed closest defect size to GCT while the differences from HCTs can be over 40{\%}. Conclusion: We conclude that the performance of ICT is similar to GCT. It improves the image quality and quantitative accuracy for respiratory-gated cardiac SPECT as compared to conventional HCT, while it can potentially further reduce the radiation dose of GCT. }, } |
2019 | Journal | L. Vanquin, C. Boydev, J. Korhonen, E. Rault, F. Crop, T. Lacornerie, A. Wagner, J. Laffarguette, D. Pasquier, N. Reynaert (2019). Radiotherapy treatment planning of prostate cancer using magnetic resonance imaging. Cancer/Radiotherapie, 23(4), pp. 281–289. (link) (bib) x @article{Vanquin2019, year = { 2019 }, volume = { 23 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Radiotherapy treatment planning of prostate cancer using magnetic resonance imaging }, pages = { 281--289 }, number = { 4 }, keywords = { Elastic registration,MRI,Pseudo-CT (Pseudo-Computerized Tomography),Radiotherapy }, journal = { Cancer/Radiotherapie }, issn = { 17696658 }, doi = { 10.1016/j.canrad.2018.09.005 }, author = { Vanquin and Boydev and Korhonen and Rault and Crop and Lacornerie and Wagner and Laffarguette and Pasquier and Reynaert }, abstract = { Purpose: Magnetic resonance imaging (MRI) plays an increasing role in radiotherapy dose planning. Indeed, MRI offers superior soft tissue contrast compared to computerized tomography (CT) and therefore could provide a better delineation of target volumes and organs at risk than CT for radiotherapy. Furthermore, an MRI-only radiotherapy workflow would suppress registration errors inherent to the registration of MRI with CT. However, the estimation of the electronic density of tissues using MRI images is still a challenging issue. The purpose of this work was to design and evaluate a pseudo-CT generation method for prostate cancer treatments. Materials and methods: A pseudo-CT was generated for ten prostate cancer patients using an elastic deformation based method. For each patient, dose delivered to the patient was calculated using both the planning CT and the pseudo-CT. Dose differences between CT and pseudo-CT were investigated. Results: Mean dose relative difference in the planning target volume is 0.9{\%} on average and ranges from 0.1{\%} to 1.7{\%}. 
In organs at risks, this value is 1.8{\%}, 0.8{\%}, 0.8{\%} and 1{\%} on average in the rectum, the right and left femoral heads, and the bladder respectively. Conclusion: The dose calculated using the pseudo-CT is very close to the dose calculated using the CT for both organs at risk and PTV. These results confirm that pseudo-CT images generated using the proposed method could be used to calculate radiotherapy treatment doses on MRI images. }, } |
2019 | Journal | J. Uthoff, F. A. De Stefano, K. Panzer, B. W. Darbro, T. S. Sato, R. Khanna, D. E. Quelle, D. K. Meyerholz, J. Weimer, J. C. Sieren (2019). Radiomic biomarkers informative of cancerous transformation in neurofibromatosis-1 plexiform tumors. Journal of Neuroradiology, 46(3), pp. 179–185. (link) (bib) x @article{Uthoff2019, year = { 2019 }, volume = { 46 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Radiomic biomarkers informative of cancerous transformation in neurofibromatosis-1 plexiform tumors }, pages = { 179--185 }, number = { 3 }, keywords = { Magnetic resonance imaging,Malignant peripheral nerve sheath tumor,Plexiform neurofibroma,Positron emission tomography,Quantitative feature extraction }, journal = { Journal of Neuroradiology }, issn = { 17730406 }, doi = { 10.1016/j.neurad.2018.05.006 }, author = { Uthoff and {De Stefano} and Panzer and Darbro and Sato and Khanna and Quelle and Meyerholz and Weimer and Sieren }, abstract = { Background: This study explores whether objective, quantitative radiomic biomarkers derived from magnetic resonance (MR), positron emission tomography (PET), and computed tomography (CT) may be useful in reliably distinguishing malignant peripheral nerve sheath tumors (MPNST) from benign plexiform neurofibromas (PN). Methods: A registration and segmentation pipeline was established using a cohort of NF1 patients with histopathological diagnosis of PN or MPNST, and medical imaging of the PN including MR and PET-CT. The corrected MR datasets were registered to the corresponding PET-CT via landmark-based registration. PET standard-uptake value (SUV) thresholds were used to guide segmentation of volumes of interest: MPNST-associated PET-hot regions (SUV ≥ 3.5) and PN-associated PET-elevated regions (2.0 {\textless} SUV {\textless} 3.5). Quantitative imaging features were extracted from the MR, PET, and CT data and compared for statistical differences. 
Intensity histogram features included (mean, median, maximum, variance, full width at half maximum, entropy, kurtosis, and skewness), while image texture was quantified using Laws' texture energy measures, grey-level co-occurrence matrices, and neighborhood grey-tone difference matrices. Results: For each of the 20 NF1 subjects, a total of 320 features were extracted from the image data. Feature reduction and statistical testing identified 9 independent radiomic biomarkers from the MR data (4 intensity and 5 texture) and 4 PET (2 intensity and 2 texture) were different between the PET-hot versus PET-elevated volumes of interest. Conclusions: Our data suggests imaging features can be used to distinguish malignancy in NF1-related tumors, which could improve MPNST risk assessment and positively impact clinical management of NF1 patients. }, } |
2019 | Journal | Hyemin Um, Florent Tixier, Dalton Bermudez, Joseph O. Deasy, Robert J. Young, Harini Veeraraghavan (2019). Impact of image preprocessing on the scanner dependence of multi-parametric MRI radiomic features and covariate shift in multi-institutional glioblastoma datasets. Physics in Medicine and Biology, 64(16), pp. 12. (link) (bib) x @article{Um2019, year = { 2019 }, volume = { 64 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Impact of image preprocessing on the scanner dependence of multi-parametric MRI radiomic features and covariate shift in multi-institutional glioblastoma datasets }, pages = { 12 }, number = { 16 }, keywords = { covariate shift,glioblastoma multiforme (GBM),image preprocessing,magnetic resonance imaging (MRI),multi-institution,radiomics,scanner variations }, journal = { Physics in Medicine and Biology }, issn = { 13616560 }, doi = { 10.1088/1361-6560/ab2f44 }, author = { Um and Tixier and Bermudez and Deasy and Young and Veeraraghavan }, abstract = { Recent advances in radiomics have enhanced the value of medical imaging in various aspects of clinical practice, but a crucial component that remains to be investigated further is the robustness of quantitative features to imaging variations and across multiple institutions. In the case of MRI, signal intensity values vary according to the acquisition parameters used, yet no consensus exists on which preprocessing techniques are favorable in reducing scanner-dependent variability of image-based features. Hence, the purpose of this study was to assess the impact of common image preprocessing methods on the scanner dependence of MRI radiomic features in multi-institutional glioblastoma multiforme (GBM) datasets. Two independent GBM cohorts were analyzed: 50 cases from the TCGA-GBM dataset and 111 cases acquired in our institution, and each case consisted of 3 MRI sequences viz. FLAIR, T1-weighted, and T1-weighted post-contrast. 
Five image preprocessing techniques were examined: 8-bit global rescaling, 8-bit local rescaling, bias field correction, histogram standardization, and isotropic resampling. A total of 420 features divided into eight categories representing texture, shape, edge, and intensity histogram were extracted. Two distinct imaging parameters were considered: scanner manufacturer and scanner magnetic field strength. Wilcoxon tests identified features robust to the considered acquisition parameters under the selected image preprocessing techniques. A machine learning-based strategy was implemented to measure the covariate shift between the analyzed datasets using features computed using the aforementioned preprocessing methods. Finally, radiomic scores (rad-scores) were constructed by identifying features relevant to patients' overall survival after eliminating those impacted by scanner variability. These were then evaluated for their prognostic significance through Kaplan-Meier and Cox hazards regression analyses. Our results demonstrate that overall, histogram standardization contributes the most in reducing radiomic feature variability as it is the technique to reduce the covariate shift for three feature categories and successfully discriminate patients into groups of different survival risks. }, } |
2019 | Journal | J. Donald Tournier, Robert Smith, David Raffelt, Rami Tabbara, Thijs Dhollander, Maximilian Pietsch, Daan Christiaens, Ben Jeurissen, Chun Hung Yeh, Alan Connelly (2019). MRtrix3: A fast, flexible and open software framework for medical image processing and visualisation. NeuroImage, 202, pp. 17. (link) (bib) x @article{Tournier2019, year = { 2019 }, volume = { 202 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { MRtrix3: A fast, flexible and open software framework for medical image processing and visualisation }, pages = { 17 }, keywords = { Image,MRI,Processing,Software,Visualisation }, journal = { NeuroImage }, issn = { 10959572 }, doi = { 10.1016/j.neuroimage.2019.116137 }, author = { Tournier and Smith and Raffelt and Tabbara and Dhollander and Pietsch and Christiaens and Jeurissen and Yeh and Connelly }, abstract = { MRtrix3 is an open-source, cross-platform software package for medical image processing, analysis and visualisation, with a particular emphasis on the investigation of the brain using diffusion MRI. It is implemented using a fast, modular and flexible general-purpose code framework for image data access and manipulation, enabling efficient development of new applications, whilst retaining high computational performance and a consistent command-line interface between applications. In this article, we provide a high-level overview of the features of the MRtrix3 framework and general-purpose image processing applications provided with the software. }, } |
2019 | Journal | Florent Tixier, Hyemin Um, Dalton Bermudez, Aditi Iyer, Aditya Apte, Maya S. Graham, Kathryn S. Nevel, Joseph O. Deasy, Robert J. Young, Harini Veeraraghavan (2019). Preoperative MRI-radiomics features improve prediction of survival in glioblastoma patients over MGMT methylation status alone. Oncotarget, 10(6), pp. 660–672. (link) (bib) x @article{Tixier2019, year = { 2019 }, volume = { 10 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85060153455{\&}doi=10.18632{\%}2Foncotarget.26578{\&}partnerID=40{\&}md5=fc3514c9fba21e53a37d3020503c236d }, type = { Journal Article }, title = { Preoperative MRI-radiomics features improve prediction of survival in glioblastoma patients over MGMT methylation status alone }, pages = { 660--672 }, number = { 6 }, keywords = { Glioblastoma,MGMT,Magnetic resonance imaging,Radiomics,Survival analysis }, journal = { Oncotarget }, issn = { 19492553 }, doi = { 10.18632/oncotarget.26578 }, author = { Tixier and Um and Bermudez and Iyer and Apte and Graham and Nevel and Deasy and Young and Veeraraghavan }, abstract = { Background: Glioblastoma (GBM) is the most common malignant central nervous system tumor, and MGMT promoter hypermethylation in this tumor has been shown to be associated with better prognosis. We evaluated the capacity of radiomics features to add complementary information to MGMT status, to improve the ability to predict prognosis. Methods: 159 patients with untreated GBM were included in this study and divided into training and independent test sets. 286 radiomics features were extracted from the magnetic resonance images acquired prior to any treatments. A least absolute shrinkage selection operator (LASSO) selection followed by Kaplan-Meier analysis was used to determine the prognostic value of radiomics features to predict overall survival (OS). The combination of MGMT status with radiomics was also investigated and all results were validated on the independent test set. 
Results: LASSO analysis identified 8 out of the 286 radiomic features to be relevant which were then used for determining association to OS. One feature (edge descriptor) remained significant on the external validation cohort after multiple testing (p=0.04) and the combination with MGMT identified a group of patients with the best prognosis with a survival probability of 0.61 after 43 months (p=0.0005). Conclusion: Our results suggest that combining radiomics with MGMT is more accurate in stratifying patients into groups of different survival risks when compared with using these predictors in isolation. We identified two subgroups within patients who have methylated MGMT: one with a similar survival to unmethylated MGMT patients and the other with a significantly longer OS. }, } |
2019 | Journal | H. G. Boyd Taylor, A. M. Puckett, Z. J. Isherwood, M. M. Schira (2019). Vascular effects on the BOLD response and the retinotopic mapping of hV4. PLoS ONE, 14(6), pp. 32. (link) (bib) x @article{Taylor2019, year = { 2019 }, volume = { 14 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Vascular effects on the BOLD response and the retinotopic mapping of hV4 }, pages = { 32 }, number = { 6 }, journal = { PLoS ONE }, issn = { 19326203 }, doi = { 10.1371/journal.pone.0204388 }, author = { {Boyd Taylor} and Puckett and Isherwood and Schira }, abstract = { Despite general acceptance that the retinotopic organisation of human V4 (hV4) takes the form of a single, uninterrupted ventral hemifield, measured retinotopic maps of this visual area are often incomplete. Here, we test hypotheses that artefact from draining veins close to hV4 cause inverted BOLD responses that may serve to obscure a portion of the lower visual quarterfield-including the lower vertical meridian-in some hemispheres. We further test whether correcting such responses can restore the 'missing' retinotopic coverage in hV4. Subjects (N = 10) viewed bowtie, ring, drifting bar and full field flash stimuli. Functional EPIs were acquired over approximately 1.5h and analysed to reveal retinotopic maps of early visual cortex, including hV4. Normalised mean maps (which show the average EPI signal amplitude) were constructed by voxel-wise averaging of the EPI time course and used to locate venous eclipses, which can be identified by a decrease in the EPI signal caused by deoxygenated blood. Inverted responses are shown to cluster in these regions and correcting these responses improves maps of hV4 in some hemispheres, including restoring a complete hemifield map in one. A leftwards bias was found whereby 6/10 left hemisphere hV4 maps were incomplete, while this was the case in only 1/10 right hemisphere maps. 
Incomplete hV4 maps did not correspond with venous artefact in every instance, with incomplete maps being present in the absence of a venous eclipse and complete maps coexisting with a proximate venous eclipse. We also show that mean maps of upper surfaces (near the boundary between cortical grey matter and CSF) provide highly detailed maps of veins on the cortical surface. Results suggest that venous eclipses and inverted voxels can explain some incomplete hV4 maps, but cannot explain the remainder nor the leftwards bias in hV4 coverage reported here. }, } |
2019 | Journal | Sharmin Sultana, Praful Agrawal, Shireen Elhabian, Ross Whitaker, Jason E. Blatt, Benjamin Gilles, Justin Cetas, Tanweer Rashid, Michel A. Audette (2019). Medial axis segmentation of cranial nerves using shape statistics-aware discrete deformable models. International Journal of Computer Assisted Radiology and Surgery, 14(11), pp. 1955–1967. (link) (bib) x @article{Sultana2019, year = { 2019 }, volume = { 14 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Medial axis segmentation of cranial nerves using shape statistics-aware discrete deformable models }, pages = { 1955--1967 }, number = { 11 }, keywords = { 3D contour models,Brainstem,Centerline,Cranial nerves,Deformable models,MRI,Segmentation,Statistical shape models,Surgical guidance }, journal = { International Journal of Computer Assisted Radiology and Surgery }, issn = { 18616429 }, doi = { 10.1007/s11548-019-02014-z }, author = { Sultana and Agrawal and Elhabian and Whitaker and Blatt and Gilles and Cetas and Rashid and Audette }, abstract = { Purpose: We propose a segmentation methodology for brainstem cranial nerves using statistical shape model (SSM)-based deformable 3D contours from T2 MR images. Methods: We create shape models for ten pairs of cranial nerves. High-resolution T2 MR images are segmented for nerve centerline using a 1-Simplex discrete deformable 3D contour model. These segmented centerlines comprise training datasets for the shape model. Point correspondence for the training dataset is performed using an entropy-based energy minimization framework applied to particles located on the centerline curve. The shape information is incorporated into the 1-Simplex model by introducing a shape-based internal force, making the deformation stable against low resolution and image artifacts. Results: The proposed method is validated through extensive experiments using both synthetic and patient MRI data. 
The robustness and stability of the proposed method are experimented using synthetic datasets. SSMs are constructed independently for ten pairs (CNIII–CNXII) of brainstem cranial nerves using ten non-pathological image datasets of the brainstem. The constructed ten SSMs are assessed in terms of compactness, specificity and generality. In order to quantify the error distances between segmented results and ground truths, two metrics are used: mean absolute shape distance (MASD) and Hausdorff distance (HD). MASD error using the proposed shape model is 0.19 ± 0.13 (mean ± std. deviation) mm and HD is 0.21 mm which are sub-voxel accuracy given the input image resolution. Conclusion: This paper described a probabilistic digital atlas of the ten brainstem-attached cranial nerve pairs by incorporating a statistical shape model with the 1-Simplex deformable contour. The integration of shape information as a priori knowledge results in robust and accurate centerline segmentations from even low-resolution MRI data, which is essential in neurosurgical planning and simulations for accurate and robust 3D patient-specific models of critical tissues including cranial nerves. }, } |
2019 | Journal | Constantinos Spanakis, Emmanouil Mathioudakis, Nikos Kampanis, Manolis Tsiknakis, Kostas Marias (2019). Machine-learning regression in evolutionary algorithms and image registration. IET Image Processing, 13(5), pp. 843–849. (link) (bib) x @article{Spanakis2019, year = { 2019 }, volume = { 13 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Machine-learning regression in evolutionary algorithms and image registration }, pages = { 843--849 }, number = { 5 }, journal = { IET Image Processing }, issn = { 17519659 }, doi = { 10.1049/iet-ipr.2018.5389 }, author = { Spanakis and Mathioudakis and Kampanis and Tsiknakis and Marias }, abstract = { Evolutionary algorithms have been used recently as an alternative in image registration, especially in cases where the similarity function is non-convex with many local optima. However, their drawback is that they tend to be computationally expensive. Trying to avoid local minima can increase the computational cost. The purpose of authors' research is to minimise the duration of the image registration process. This paper presents a method to minimise the computational cost by introducing a machine learning-based variant of Harmony Search. To this end, a series of machine-learning regression methods are tested in order to find the most appropriate that minimises the cost without degrading the quality of the results. The best regression method is then incorporated in the optimisation process and is compared with two well-known ITK image registration methods. The comparison of authors' image registration method with ITK concerns both the quality of the results and the duration of the registration experiments. The comparison is done on a set of random image pairs of various sources (e.g. medical or satellite images), and the encouraging results strongly indicate that authors' method can be used in a variety of image registration applications producing quality results in significantly less time. }, } |
2019 | Journal | Mumtaz Hussain Soomro, Matteo Coppotelli, Silvia Conforto, Maurizio Schmid, Gaetano Giunta, Lorenzo Del Secco, Emanuele Neri, Damiano Caruso, Marco Rengo, Andrea Laghi (2019). Automated segmentation of colorectal tumor in 3D MRI Using 3D multiscale densely connected convolutional neural network. Journal of Healthcare Engineering, 2019, pp. 11. (link) (bib) x @article{Soomro2019, year = { 2019 }, volume = { 2019 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Automated segmentation of colorectal tumor in 3D MRI Using 3D multiscale densely connected convolutional neural network }, pages = { 11 }, journal = { Journal of Healthcare Engineering }, issn = { 20402309 }, doi = { 10.1155/2019/1075434 }, author = { Soomro and Coppotelli and Conforto and Schmid and Giunta and {Del Secco} and Neri and Caruso and Rengo and Laghi }, abstract = { The main goal of this work is to automatically segment colorectal tumors in 3D T2-weighted (T2w) MRI with reasonable accuracy. For such a purpose, a novel deep learning-based algorithm suited for volumetric colorectal tumor segmentation is proposed. The proposed CNN architecture, based on densely connected neural network, contains multiscale dense interconnectivity between layers of fine and coarse scales, thus leveraging multiscale contextual information in the network to get better flow of information throughout the network. Additionally, the 3D level-set algorithm was incorporated as a postprocessing task to refine contours of the network predicted segmentation. The method was assessed on T2-weighted 3D MRI of 43 patients diagnosed with locally advanced colorectal tumor (cT3/T4). Cross validation was performed in 100 rounds by partitioning the dataset into 30 volumes for training and 13 for testing. 
Three performance metrics were computed to assess the similarity between predicted segmentation and the ground truth (i.e., manual segmentation by an expert radiologist/oncologist), including Dice similarity coefficient (DSC), recall rate (RR), and average surface distance (ASD). The above performance metrics were computed in terms of mean and standard deviation (mean ± standard deviation). The DSC, RR, and ASD were 0.8406 ± 0.0191, 0.8513 ± 0.0201, and 2.6407 ± 2.7975 before postprocessing, and these performance metrics became 0.8585 ± 0.0184, 0.8719 ± 0.0195, and 2.5401 ± 2.402 after postprocessing, respectively. We compared our proposed method to other existing volumetric medical image segmentation baseline methods (particularly 3D U-net and DenseVoxNet) in our segmentation tasks. The experimental results reveal that the proposed method has achieved better performance in colorectal tumor segmentation in volumetric MRI than the other baseline techniques. }, } |
2019 | Journal | S. Primpke, P. A. Dias, G. Gerdts (2019). Automated identification and quantification of microfibres and microplastics. Analytical Methods, 11(16), pp. 2138–2147. (link) (bib) x @article{Primpke2019, year = { 2019 }, volume = { 11 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Automated identification and quantification of microfibres and microplastics }, pages = { 2138--2147 }, number = { 16 }, journal = { Analytical Methods }, issn = { 17599679 }, doi = { 10.1039/c9ay00126c }, author = { Primpke and Dias and Gerdts }, abstract = { The ubiquitous presence of microlitter (ML), precisely microplastics (MP) and microfibres (MF) in the global environment is of growing concern for science, and society in general. Reliable methods are urgently needed for the identification and quantification of these emerging environmental pollutants. Recently a rapid Fourier transform infrared (FTIR) imaging pipeline was developed for automated identification and quantification of MP. However, although the usefulness for the quantification of MP could already be shown in several studies, microfibres could not be targeted so far by the developed analysis pipeline. In this study we present a novel approach for the simultaneous identification and quantification of MP and MF. By concentrating the sample on membrane filters and applying a BaF2 window on top of the filter, all objects-including MF-are fixed in the focal plane of the FTIR microscope. Furthermore, the analysis pipeline was augmented with algorithms which take into consideration the filamentous structure of MF. The novel analysis pipeline now allows to separate MP and MF via a preselection of fibres from the dataset by object size and shape. MP and MF are subsequently further investigated for specific polymer types and lengths/sizes. After parameter optimization the newly developed analysis approach was applied to archived samples from previous studies on treated waste water. 
The results were compared with respect to the originally detected polymer types and numbers, but also considered MF detection. }, } |
2019 | Journal | Sam Poppe, Eoghan P. Holohan, Olivier Galland, Nico Buls, Gert Van Gompel, Benyameen Keelson, Pierre Yves Tournigand, Joost Brancart, Dave Hollis, Alex Nila, Matthieu Kervyn (2019). An inside perspective on magma intrusion: Quantifying 3d displacement and strain in laboratory experiments by dynamic X-ray computed tomography. Frontiers in Earth Science, 7, pp. 20. (link) (bib) x @article{Poppe2019, year = { 2019 }, volume = { 7 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { An inside perspective on magma intrusion: Quantifying 3d displacement and strain in laboratory experiments by dynamic X-ray computed tomography }, pages = { 20 }, keywords = { Analog,Digital volume correlation,Laboratory modeling,Magma intrusion,Surface deformation,X-ray computed tomography }, journal = { Frontiers in Earth Science }, issn = { 22966463 }, doi = { 10.3389/feart.2019.00062 }, author = { Poppe and Holohan and Galland and Buls and {Van Gompel} and Keelson and Tournigand and Brancart and Hollis and Nila and Kervyn }, abstract = { Magma intrusions grow to their final geometries by deforming the Earth's crust internally and by displacing the Earth's surface. Interpreting the related displacements in terms of intrusion geometry is key to forecasting a volcanic eruption. While scaled laboratory models enable us to study the relationships between surface displacement and intrusion geometry, past approaches entailed limitations regarding imaging of the laboratory model interior or simplicity of the simulated crustal rheology. Here we apply cutting-edge medical wide beam X-ray Computed Tomography (CT) to quantify in 4D the deformation induced in laboratory models by an intrusion of a magma analog (golden syrup) into a rheologically-complex granular host rock analog (sand and plaster). We extract the surface deformation and we quantify the strain field of the entire experimental volume in 3D over time by using Digital Volume Correlation (DVC). 
By varying the strength and height of the host material, and intrusion velocity, we observe how intrusions of contrasting geometries grow, and induce contrasting strain field characteristics and surface deformation in 4D. The novel application of CT and DVC reveals that distributed strain accommodation and mixed-mode (opening and shear) fracturing dominates in low-cohesion material overburden, and leads to the growth of thick cryptodomes or cup-shaped intrusions. More localized strain accommodation and opening-mode fracturing dominates in high-cohesion material overburden, and leads to the growth of cone sheets or thin dikes. The results demonstrate how the combination of CT and DVC can greatly enhance the utility of optically non-transparent crustal rock analogs in obtaining insights into shallow crustal deformation processes. This unprecedented perspective on the spatio-temporal interaction of intrusion growth coupled with host material deformation provides a conceptual framework that can be tested by field observations at eroded volcanic systems and by the ever increasing spatial and temporal resolution of geodetic data at active volcanoes. }, } |
2019 | Journal | Csaba Pinter, Andras Lasso, Gabor Fichtinger (2019). Polymorph segmentation representation for medical image computing. Computer Methods and Programs in Biomedicine, 171, pp. 19–26. (link) (bib) x @article{Pinter2019, year = { 2019 }, volume = { 171 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Polymorph segmentation representation for medical image computing }, pages = { 19--26 }, keywords = { 3D Slicer,DICOM,Open-source,Segmentation,Software library,Voxelization }, journal = { Computer Methods and Programs in Biomedicine }, issn = { 18727565 }, doi = { 10.1016/j.cmpb.2019.02.011 }, author = { Pinter and Lasso and Fichtinger }, abstract = { Background and objective: Segmentation is a ubiquitous operation in medical image computing. Various data representations can describe segmentation results, such as labelmap volumes or surface models. Conversions between them are often required, which typically include complex data processing steps. We identified four challenges related to managing multiple representations: conversion method selection, data provenance, data consistency, and coherence of in-memory objects. Methods: A complex data container preserves identity and provenance of the contained representations and ensures data coherence. Conversions are executed automatically on-demand. A graph containing the implemented conversion algorithms determines each execution, ensuring consistency between various representations. The design and implementation of a software library are proposed, in order to provide a readily usable software tool to manage segmentation data in multiple data representations. A low-level core library called PolySeg implemented in the Visualization Toolkit (VTK) manages the data objects and conversions. It is used by a high-level application layer, which has been implemented in the medical image visualization and analysis platform 3D Slicer. 
The application layer provides advanced visualization, transformation, interoperability, and other functions. Results: The core conversion algorithms comprising the graph were validated. Several applications were implemented based on the library, demonstrating advantages in terms of usability and ease of software development in each case. The Segment Editor application provides fast, comprehensive, and easy-to-use manual and semi-automatic segmentation workflows. Clinical applications for gel dosimetry, external beam planning, and MRI-ultrasound image fusion in brachytherapy were rapidly prototyped, resulting in robust applications that are already in use in clinical research. The conversion algorithms were found to be accurate and reliable using these applications. Conclusions: A generic software library has been designed and developed for automatic management of multiple data formats in segmentation tasks. It enhances both user and developer experience, enabling fast and convenient manual workflows and quicker and more robust software prototyping. The software's BSD-style open-source license allows complete freedom of use of the library. }, } |
2019 | Journal | Neree Payan, Benoit Presles, François Brunotte, Charles Coutant, Isabelle Desmoulins, Jean Marc Vrigneaud, Alexandre Cochet (2019). Biological correlates of tumor perfusion and its heterogeneity in newly diagnosed breast cancer using dynamic first-pass 18F-FDG PET/CT. European Journal of Nuclear Medicine and Molecular Imaging, NA pp. NA (link) (bib) x @article{Payan2019, year = { 2019 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85070290987{\&}doi=10.1007{\%}2Fs00259-019-04422-4{\&}partnerID=40{\&}md5=a6426041023e645a74b0ccede063cd2f }, type = { Journal Article }, title = { Biological correlates of tumor perfusion and its heterogeneity in newly diagnosed breast cancer using dynamic first-pass 18F-FDG PET/CT }, keywords = { 18F-FDG PET/CT,Blood flow,Breast cancer,Heterogeneity,Textural features }, journal = { European Journal of Nuclear Medicine and Molecular Imaging }, issn = { 16197089 }, doi = { 10.1007/s00259-019-04422-4 }, author = { Payan and Presles and Brunotte and Coutant and Desmoulins and Vrigneaud and Cochet }, abstract = { Purpose: The aim of this prospective study is to analyze the global tumor blood flow (BF) and its heterogeneity in newly diagnosed breast cancer (BC) according to tumor biological characteristics and molecular subtypes. These perfusion parameters were compared to those classically derived from metabolic studies to investigate links between perfusion and metabolism. Methods: Two hundred seventeen newly diagnosed BC patients underwent a 18F-FDG PET/CT exam before any treatment. A 2-min dynamic acquisition, centered on the chest, was performed immediately after intravenous injection of 3 MBq/kg of 18F-FDG, followed by a two-step static acquisition 90 min later. Tumor BF was calculated (in ml/min/g) using a single compartment kinetic model. In addition to standard PET parameters, texture features (TF) describing the heterogeneity of tumor perfusion and metabolism were extracted. 
Patients were divided into three groups: Luminal (HR+/HER2-), HER2 (HER2+), and TN (HR-/HER2-). Global and TF parameters of BF and metabolism were compared in different groups of patients according to tumor biological characteristics. Results: Tumors with lymph node involvement showed a higher perfusion, whereas no significant differences in SUV{\_}max or SUV{\_}mean were reported. TN tumors had a higher metabolic activity than HER2 and luminal tumors but no significant differences in global BF values were noted. HER2 tumors exhibited a larger tumor heterogeneity of both perfusion and metabolism compared to luminal and TN tumors. Heterogeneity of perfusion appeared well correlated to that of metabolism. Conclusions: The study of breast cancer perfusion shows a higher BF in large tumors and in tumors with lymph node involvement, not paralleled by similar modifications in tumor global metabolism. In addition, the observed correlation between the perfusion heterogeneity and the metabolism heterogeneity suggests that tumor perfusion and consequently the process of tumor angiogenesis might be involved in the metabolism heterogeneity previously shown in BC. }, } |
2019 | Journal | John Muschelli, Adrian Gherman, Jean Philippe Fortin, Brian Avants, Brandon Whitcher, Jonathan D. Clayden, Brian S. Caffo, Ciprian M. Crainiceanu (2019). Neuroconductor: An R platform for medical imaging analysis. Biostatistics, 20(2), pp. 218–239. (link) (bib) x @article{Muschelli2019, year = { 2019 }, volume = { 20 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Neuroconductor: An R platform for medical imaging analysis }, pages = { 218--239 }, number = { 2 }, keywords = { Bioinformatics,Image analysis,Statistical modelling }, journal = { Biostatistics }, issn = { 14684357 }, doi = { 10.1093/biostatistics/kxx068 }, author = { Muschelli and Gherman and Fortin and Avants and Whitcher and Clayden and Caffo and Crainiceanu }, abstract = { Neuroconductor (https://neuroconductor.org) is an open-source platform for rapid testing and dissemination of reproducible computational imaging software. The goals of the project are to: (i) provide a centralized repository of R software dedicated to image analysis, (ii) disseminate software updates quickly, (iii) train a large, diverse community of scientists using detailed tutorials and short courses, (iv) increase software quality via automatic and manual quality controls, and (v) promote reproducibility of image data analysis. Based on the programming language R (https://www.r-project.org/), Neuroconductor starts with 51 inter-operable packages that cover multiple areas of imaging including visualization, data processing and storage, and statistical inference. Neuroconductor accepts new R package submissions, which are subject to a formal review and continuous automated testing. We provide a description of the purpose of Neuroconductor and the user and developer experience. }, } |
2019 | Journal | Sanketh S. Moudgalya, Kevin Wilson, Xiaoxia Zhu, Mikalai M. Budzevich, Joseph P. Walton, Nathan D. Cahill, Robert D. Frisina, David A. Borkholder (2019). Cochlear pharmacokinetics - Micro-computed tomography and learning-prediction modeling for transport parameter determination. Hearing Research, 380, pp. 46–59. (link) (bib) x @article{Moudgalya2019, year = { 2019 }, volume = { 380 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Cochlear pharmacokinetics - Micro-computed tomography and learning-prediction modeling for transport parameter determination }, pages = { 46--59 }, keywords = { 3-D image registration,Cochlea,Contrast agent,Inner ear,Micro-computed tomography,Mouse,Optimization,Pharmacokinetics,Segmentation }, journal = { Hearing Research }, issn = { 18785891 }, doi = { 10.1016/j.heares.2019.05.009 }, author = { Moudgalya and Wilson and Zhu and Budzevich and Walton and Cahill and Frisina and Borkholder }, abstract = { Inner ear disorders such as sensorineural deafness and genetic diseases may one day be treated with local drug delivery to the inner ear. Current pharmacokinetic models have been based on invasive methods to measure drug concentrations, limiting them in spatial resolution, and restricting the research to larger rodents. We developed an intracochlear pharmacokinetic model based on an imaging, learning-prediction (LP) paradigm for learning transport parameters in the murine cochlea. This was achieved using noninvasive micro-computed tomography imaging of the cochlea during in vivo infusion of a contrast agent at the basal end of scala tympani through a cochleostomy. Each scan was registered in 3-D to a cochlear atlas to segment the cochlear regions with high accuracy, enabling concentrations to be extracted along the length of each scala. 
These spatio-temporal concentration profiles were used to learn a concentration dependent diffusion coefficient, and transport parameters between the major scalae and to clearance. The LP model results are comparable to the current state of the art model, and can simulate concentrations for cases involving different infusion molecules and different drug delivery protocols. Forward simulation results with pulsatile delivery suggest the pharmacokinetic model can be used to optimize drug delivery protocols to reduce total drug delivered and the potential for toxic side effects. While developed in the challenging murine cochlea, the processes are scalable to larger animals and different drug infusion paradigms. }, } |
2019 | Journal | Pauline Mouches, Nils D. Forkert (2019). A statistical atlas of cerebral arteries generated using multi-center MRA datasets from healthy subjects. Scientific data, 6(1), pp. 29. (link) (bib) x @article{Mouches2019, year = { 2019 }, volume = { 6 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A statistical atlas of cerebral arteries generated using multi-center MRA datasets from healthy subjects }, pages = { 29 }, number = { 1 }, journal = { Scientific data }, issn = { 20524463 }, doi = { 10.1038/s41597-019-0034-5 }, author = { Mouches and Forkert }, abstract = { Magnetic resonance angiography (MRA) can capture the variation of cerebral arteries with high spatial resolution. These measurements include valuable information about the morphology, geometry, and density of brain arteries, which may be useful to identify risk factors for cerebrovascular and neurological diseases at an early time point. However, this requires knowledge about the distribution and morphology of vessels in healthy subjects. The statistical arterial brain atlas described in this work is a free and public neuroimaging resource that can be used to identify vascular morphological changes. The atlas was generated based on 544 freely available multi-center MRA and T1-weighted MRI datasets. The arteries were automatically segmented in each MRA dataset and used for vessel radius quantification. The binary segmentation and vessel size information were non-linearly registered to the MNI brain atlas using the T1-weighted MRI datasets to construct atlases of artery occurrence probability, mean artery radius, and artery radius standard deviation. This public neuroimaging resource improves the understanding of the distribution and size of arteries in the healthy human brain. }, } |
2019 | Journal | Kishore R. Mosaliganti, Ian A. Swinburne, Chon U. Chan, Nikolaus D. Obholzer, Amelia A. Green, Shreyas Tanksale, L. Mahadevan, Sean G. Megason (2019). Size control of the inner ear via hydraulic feedback. eLife, 8, pp. 30. (link) (bib) x @article{Mosaliganti2019, year = { 2019 }, volume = { 8 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Size control of the inner ear via hydraulic feedback }, pages = { 30 }, journal = { eLife }, issn = { 2050084X }, doi = { 10.7554/eLife.39596 }, author = { Mosaliganti and Swinburne and Chan and Obholzer and Green and Tanksale and Mahadevan and Megason }, abstract = { Animals make organs of precise size, shape, and symmetry but how developing embryos do this is largely unknown. Here, we combine quantitative imaging, physical theory, and physiological measurement of hydrostatic pressure and fluid transport in zebrafish to study size control of the developing inner ear. We find that fluid accumulation creates hydrostatic pressure in the lumen leading to stress in the epithelium and expansion of the otic vesicle. Pressure, in turn, inhibits fluid transport into the lumen. This negative feedback loop between pressure and transport allows the otic vesicle to change growth rate to control natural or experimentally-induced size variation. Spatiotemporal patterning of contractility modulates pressure-driven strain for regional tissue thinning. Our work connects molecular-driven mechanisms, such as osmotic pressure driven strain and actomyosin tension, to the regulation of tissue morphogenesis via hydraulic feedback to ensure robust control of organ size. }, } |
2019 | Journal | Caroline Magnain, Jean C. Augustinack, Lee Tirrell, Morgan Fogarty, Matthew P. Frosch, David Boas, Bruce Fischl, Kathleen S. Rockland (2019). Colocalization of neurons in optical coherence microscopy and Nissl-stained histology in Brodmann's area 32 and area 21. Brain Structure and Function, 224(1), pp. 351–362. (link) (bib) x @article{Magnain2019, year = { 2019 }, volume = { 224 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Colocalization of neurons in optical coherence microscopy and Nissl-stained histology in Brodmann's area 32 and area 21 }, pages = { 351--362 }, number = { 1 }, keywords = { Human brain,Isocortex,Limbic,Neuron,Optical imaging,Tissue,Validation }, journal = { Brain Structure and Function }, issn = { 18632661 }, doi = { 10.1007/s00429-018-1777-z }, author = { Magnain and Augustinack and Tirrell and Fogarty and Frosch and Boas and Fischl and Rockland }, abstract = { Optical coherence tomography is an optical technique that uses backscattered light to highlight intrinsic structure, and when applied to brain tissue, it can resolve cortical layers and fiber bundles. Optical coherence microscopy (OCM) is higher resolution (i.e., 1.25 µm) and is capable of detecting neurons. In a previous report, we compared the correspondence of OCM acquired imaging of neurons with traditional Nissl stained histology in entorhinal cortex layer II. In the current method-oriented study, we aimed to determine the colocalization success rate between OCM and Nissl in other brain cortical areas with different laminar arrangements and cell packing density. We focused on two additional cortical areas: medial prefrontal, pre-genual Brodmann area (BA) 32 and lateral temporal BA 21. We present the data as colocalization matrices and as quantitative percentages. 
The overall average colocalization in OCM compared to Nissl was 67{\%} for BA 32 (47{\%} for Nissl colocalization) and 60{\%} for BA 21 (52{\%} for Nissl colocalization), but with a large variability across cases and layers. One source of variability and confounds could be ascribed to an obscuring effect from large and dense intracortical fiber bundles. Other technical challenges, including obstacles inherent to human brain tissue, are discussed. Despite limitations, OCM is a promising semi-high throughput tool for demonstrating detail at the neuronal level, and, with further development, has distinct potential for the automatic acquisition of large databases as are required for the human brain. }, } |
2019 | Journal | Xiaodong Ma, Zhiyong Yang, Shan Jiang, Guobin Zhang, Bin Huo, Shude Chai (2019). Hybrid optimization based on non-coplanar needles for brachytherapy dose planning. Journal of Contemporary Brachytherapy, 11(3), pp. 267–279. (link) (bib) x @article{Ma2019a, year = { 2019 }, volume = { 11 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Hybrid optimization based on non-coplanar needles for brachytherapy dose planning }, pages = { 267--279 }, number = { 3 }, keywords = { Brachytherapy,Dose volume histogram,Inverse optimization,Non-coplanar needle }, journal = { Journal of Contemporary Brachytherapy }, issn = { 20812841 }, doi = { 10.5114/jcb.2019.86167 }, author = { Ma and Yang and Jiang and Zhang and Huo and Chai }, abstract = { Purpose: An ideal dose distribution in a target is the ultimate goal of preoperative dose planning. Furthermore, avoiding vital organs or tissues such as blood vessels or bones during the puncture procedure is significant in lowdose-rate brachytherapy. The aim of this work is to develop a hybrid inverse optimization method based on non-coplanar needles to assist the physician during conformal dose planning, which cannot be properly achieved with a traditional coplanar template. Material and methods: The hybrid inverse optimization technique include two novel technologies: an inverse optimization algorithm and a dose volume histogram evaluation method. Brachytherapy treatment planning system was designed as an experimental platform. Left lung adenocarcinoma case was used to test the performance of the method in non-coplanar and coplanar needles, and malignant tumor of spine case was involved to test the practical application of this technique. In addition, the optimization time of every test was also recorded. Results: The proposed method can achieve an ideal dose distribution, avoiding vital organs (bones). 
In the first experiment, 13 non-coplanar needles and 24 seeds were used to get an ideal dose distribution to cover the target, whereas 11 coplanar needles and 23 seeds were used to cover the same target. In the second experiment, the new method used 22 non-coplanar needles and 65 seeds to cover the target, while 63 seeds and 22 needles were used in the actual operation. In addition, the computation time of the hybrid inverse optimization method was 20.5 seconds in the tumor of 94.67 cm3 by using 22 needles, which was fast enough for clinical application. Conclusions: The hybrid inverse optimization method achieved high conformity in the clinical practice. The non-coplanar needle can help to achieve a better dose distribution than the coplanar needle. }, } |
2019 | Journal | Xiaodong Ma, Zhiyong Yang, Shan Jiang, Guobin Zhang, Shude Chai (2019). A novel auto-positioning method in Iodine-125 seed brachytherapy driven by preoperative planning. Journal of Applied Clinical Medical Physics, 20(6), pp. 23–30. (link) (bib) x @article{Ma2019, year = { 2019 }, volume = { 20 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A novel auto-positioning method in Iodine-125 seed brachytherapy driven by preoperative planning }, pages = { 23--30 }, number = { 6 }, keywords = { auto-positioning,iodine-125 seed brachytherapy,preoperative planning,treatment planning system }, journal = { Journal of Applied Clinical Medical Physics }, issn = { 15269914 }, doi = { 10.1002/acm2.12591 }, author = { Ma and Yang and Jiang and Zhang and Chai }, abstract = { Iodine-125 seed brachytherapy has great potential in the treatment of malignant tumors. However, the success of this treatment is highly dependent on the ability to accurately position the coplanar template. The aim of this study was to develop an auto-positioning system for the template with a design focus on efficiency and accuracy. In this study, an auto-positioning system was presented, which was composed of a treatment planning system (TPS) and a robot-assisted system. The TPS was developed as a control system for the robot-assisted system. Then, the robot-assisted system was driven by the output of the TPS to position the template. Contrast experiments for error validation were carried out in a computed tomography environment to compare with the traditional positioning method (TPM). Animal experiments on Sprague–Dawley rats were also carried out to evaluate the auto-positioning system. The error validation experiments and animal experiments with this auto-positioning system were successfully carried out with improved efficiency and accuracy. 
The error validation experiments achieved a positioning error of 1.04 ± 0.19 mm and a positioning time of 23.15 ± 2.52 min, demonstrating a great improvement compared with the TPM (2.55 ± 0.21 mm and 40.35 ± 2.99 min, respectively). The animal experiments demonstrated that the mean deviation of the seed position was 0.75 mm. The dose-volume histogram of the preoperative planning showed the same results as the postoperative dosimetry validation. A novel auto-positioning system driven by preoperative planning was established, which exhibited higher efficiency and accuracy compared with the TPM. }, } |
2019 | Journal | Marie Ange Lebre, Antoine Vacavant, Manuel Grand-Brochier, Hugo Rositi, Robin Strand, Hubert Rosier, Armand Abergel, Pascal Chabrot, Beno\^it Magnin (2019). A robust multi-variability model based liver segmentation algorithm for CT-scan and MRI modalities. Computerized Medical Imaging and Graphics, 76, pp. 11. (link) (bib) x @article{Lebre2019a, year = { 2019 }, volume = { 76 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A robust multi-variability model based liver segmentation algorithm for CT-scan and MRI modalities }, pages = { 11 }, keywords = { 3-D,Automatic segmentation,CT,Liver,MRI,Robustness,Shape model,Variability }, journal = { Computerized Medical Imaging and Graphics }, issn = { 18790771 }, doi = { 10.1016/j.compmedimag.2019.05.003 }, author = { Lebre and Vacavant and Grand-Brochier and Rositi and Strand and Rosier and Abergel and Chabrot and Magnin }, abstract = { Developing methods to segment the liver in medical images, study and analyze it remains a significant challenge. The shape of the liver can vary considerably from one patient to another, and adjacent organs are visualized in medical images with similar intensities, making the boundaries of the liver ambiguous. Consequently, automatic or semi-automatic segmentation of liver is a difficult task. Moreover, scanning systems and magnetic resonance imaging have different settings and parameters. Thus the images obtained differ from one machine to another. In this article, we propose an automatic model-based segmentation that allows building a faithful 3-D representation of the liver, with a mean Dice value equal to 90.3{\%} on CT and MRI datasets. We compare our algorithm with a semi-automatic method and with other approaches according to the state of the art. Our method works with different data sources, we use a large quantity of CT and MRI images from machines in various hospitals and multiple DICOM images available from public challenges. 
Finally, for evaluation of liver segmentation approaches in the state of the art, robustness is not adequately addressed with a precise definition. Another originality of this article is the introduction of a novel measure of robustness, which takes into account the liver variability at different scales. }, } |
2019 | Journal | Marie Ange Lebre, Antoine Vacavant, Manuel Grand-Brochier, Hugo Rositi, Armand Abergel, Pascal Chabrot, Beno\^it Magnin (2019). Automatic segmentation methods for liver and hepatic vessels from CT and MRI volumes, applied to the Couinaud scheme. Computers in Biology and Medicine, 110, pp. 42–51. (link) (bib) x @article{Lebre2019, year = { 2019 }, volume = { 110 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Automatic segmentation methods for liver and hepatic vessels from CT and MRI volumes, applied to the Couinaud scheme }, pages = { 42--51 }, keywords = { CT and MRI volumes,Couinaud,Liver segmentation,Medical imaging,Vessel segmentation }, journal = { Computers in Biology and Medicine }, issn = { 18790534 }, doi = { 10.1016/j.compbiomed.2019.04.014 }, author = { Lebre and Vacavant and Grand-Brochier and Rositi and Abergel and Chabrot and Magnin }, abstract = { Background: Proper segmentation of the liver from medical images is critical for computer-assisted diagnosis, therapy and surgical planning. Knowledge of its vascular structure allows division of the liver into eight functionally independent segments, each with its own vascular inflow, known as the Couinaud scheme. Couinaud's description is the most widely used classification, since it is well-suited for surgery and accurate for the localization of lesions. However, automatic segmentation of the liver and its vascular structure to construct the Couinaud scheme remains a challenging task. Methods: We present a complete framework to obtain Couinaud's classification in three main steps; first, we propose a model-based liver segmentation, then a vascular segmentation based on a skeleton process, and finally, the construction of the eight independent liver segments. Our algorithms are automatic and allow 3D visualizations. Results: We validate these algorithms on various databases with different imaging modalities (Magnetic Resonance Imaging (MRI) and Computed Tomography (CT)). 
Experimental results are presented on diseased livers, which pose complex challenges because both the overall organ shape and the vessels can be severely deformed. A mean DICE score of 0.915 is obtained for the liver segmentation, and an average accuracy of 0.98 for the vascular network. Finally, we present an evaluation of our method for performing the Couinaud segmentation thanks to medical reports with promising results. Conclusions: We were able to automatically reconstruct 3-D volumes of the liver and its vessels on MRI and CT scans. Our goal is to develop an improved method to help radiologists with tumor localization. }, } |
2019 | Journal | Angel Kennedy, Jason Dowling, Peter B. Greer, Lois Holloway, Michael G. Jameson, Dale Roach, Soumya Ghose, David Rivest-Hénault, Marco Marcello, Martin A. Ebert (2019). Similarity clustering-based atlas selection for pelvic CT image segmentation. Medical Physics, 46(5), pp. 2243–2250. (link) (bib) x @article{Kennedy2019, year = { 2019 }, volume = { 46 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Similarity clustering-based atlas selection for pelvic CT image segmentation }, pages = { 2243--2250 }, number = { 5 }, keywords = { autosegmentation,clustering,image registration,image-atlas }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1002/mp.13494 }, author = { Kennedy and Dowling and Greer and Holloway and Jameson and Roach and Ghose and Rivest-H{\'{e}}nault and Marcello and Ebert }, abstract = { Purpose: To demonstrate selection of a small representative subset of images from a pool of images comprising a potential atlas (PA) pelvic CT set to be used for autosegmentation of a separate target image set. The aim is to balance the need for the atlas set to represent anatomical diversity with the need to minimize resources required to create a high quality atlas set (such as multiobserver delineation), while retaining access to additional information available for the PA image set. Methods: Preprocessing was performed for image standardization, followed by image registration. Clustering was used to select the subset that provided the best coverage of a target dataset as measured by postregistration image intensity similarities. Tests for clustering robustness were performed including repeated clustering runs using different starting seeds and clustering repeatedly using 90{\%} of the target dataset chosen randomly. 
Comparisons of coverage of a target set (comprising 711 pelvic CT images) were made for atlas sets of five images (chosen from a PA set of 39 pelvic CT and MR images) (a) at random (averaged over 50 random atlas selections), (b) based solely on image similarities within the PA set (representing prospective atlas development), (c) based on similarities within the PA set and between the PA and target dataset (representing retrospective atlas development). Comparisons were also made to coverage provided by the entire PA set of 39 images. Results: Exemplar selection was highly robust with exemplar selection results being unaffected by choice of starting seed with very occasional change to one of the exemplar choices when the target set was reduced. Coverage of the target set, as measured by best normalized cross-correlation similarity of target images to any exemplar image, provided by five well-selected atlas images (mean = 0.6497) was more similar to coverage provided by the entire PA set (mean = 0.6658) than randomly chosen atlas subsets (mean = 0.5977). This was true both of the mean values and the shape of the distributions. Retrospective selection of atlases (mean = 0.6497) provided a very small improvement over prospective atlas selection (mean = 0.6431). All differences were significant (P {\textless} 1.0E-10). Conclusions: Selection of a small representative image set from one dataset can be utilized to develop an atlas set for either retrospective or prospective autosegmentation of a different target dataset. The coverage provided by such a judiciously selected subset has the potential to facilitate propagation of numerous retrospectively defined structures, utilizing additional information available with multimodal imaging in the atlas set, without the need to create large atlas image sets. }, } |
2019 | Journal | Flora Jung, Samaneh Kazemifar, Robert Bartha, Nagalingam Rajakumar (2019). Semiautomated Assessment of the Anterior Cingulate Cortex in Alzheimer's Disease. Journal of Neuroimaging, 29(3), pp. 376–382. (link) (bib) x @article{Jung2019, year = { 2019 }, volume = { 29 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Semiautomated Assessment of the Anterior Cingulate Cortex in Alzheimer's Disease }, pages = { 376--382 }, number = { 3 }, keywords = { MRI,automated segmentation,brain atrophy,structural biomarker }, journal = { Journal of Neuroimaging }, issn = { 15526569 }, doi = { 10.1111/jon.12598 }, author = { Jung and Kazemifar and Bartha and Rajakumar }, abstract = { BACKGROUND AND PURPOSE: The anterior cingulate cortex (ACC) is involved in several cognitive processes including executive function. Degenerative changes of ACC are consistently seen in Alzheimer's disease (AD). However, volumetric changes specific to the ACC in AD are not clear because of the difficulty in segmenting this region. The objectives of the current study were to develop a precise and high-throughput approach for measuring ACC volumes and to correlate the relationship between ACC volume and cognitive function in AD. METHODS: Structural T 1 -weighted magnetic resonance images of AD patients (n = 47) and age-matched controls (n = 47) at baseline and at 24 months were obtained from the Alzheimer's disease neuroimaging initiative (ADNI) database and studied using a custom-designed semiautomated segmentation protocol. RESULTS: ACC volumes obtained using the semiautomated protocol were highly correlated to values obtained from manual segmentation (r =.98) and the semiautomated protocol was considerably faster. When comparing AD and control subjects, no significant differences were observed in baseline ACC volumes or in change in ACC volumes over 24 months using the two segmentation methods. 
However, a change in ACC volume over 24 months did not correlate with a change in mini-mental state examination scores. CONCLUSIONS: Our results indicate that the proposed semiautomated segmentation protocol is reliable for determining ACC volume in neurodegenerative conditions including AD. }, } |
2019 | Journal | Daniel Christopher Hoinkiss, Peter Erhard, Nora Josefin Breutigam, Federico von Samson-Himmelstjerna, Matthias Günther, David Andrew Porter (2019). Prospective motion correction in functional MRI using simultaneous multislice imaging and multislice-to-volume image registration. NeuroImage, 200, pp. 159–173. (link) (bib) x @article{Hoinkiss2019, year = { 2019 }, volume = { 200 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Prospective motion correction in functional MRI using simultaneous multislice imaging and multislice-to-volume image registration }, pages = { 159--173 }, keywords = { BOLD,EPI,Functional MRI,Image registration,Kalman filter,Multislice-to-volume,Prospective motion correction,Real-time MRI sequences,Simultaneous multislice (SMS) }, journal = { NeuroImage }, issn = { 10959572 }, doi = { 10.1016/j.neuroimage.2019.06.042 }, author = { Hoinkiss and Erhard and Breutigam and Samson-Himmelstjerna and G{\"{u}}nther and Porter }, abstract = { The sensitivity to subject motion is one of the major challenges in functional MRI (fMRI) studies in which a precise alignment of images from different time points is required to allow reliable quantification of brain activation throughout the scan. Especially the long measurement times and laborious fMRI tasks add to the amount of subject motion found in typical fMRI measurements, even when head restraints are used. In case of moving subjects, prospective motion correction can maintain the relationship between spatial image information and subject anatomy by constantly adapting the image slice positioning to follow the subject in real time. Image-based prospective motion correction is well-established in fMRI studies and typically computes the motion estimates based on a volume-to-volume image registration, resulting in low temporal resolution. 
This study combines fMRI using simultaneous multislice imaging with multislice-to-volume-based image registration to allow sub-TR motion detection with subsequent real-time adaption of the imaging system. Simultaneous multislice imaging is widely used in fMRI studies and, together with multislice-to-volume-based image registration algorithms, enables computing suitable motion states after only a single readout by registering the simultaneously excited slices to a reference volume acquired at the start of the measurement. The technique is evaluated in three human BOLD fMRI studies (n = 1, 5, and 1) to explore different aspects of the method. It is compared to conventional, volume-to-volume-based prospective motion correction as well as retrospective motion correction methods. Results show a strong reduction in retrospectively computed residual motion parameters of up to 50{\%} when comparing the two prospective motion correction techniques. An analysis of temporal signal-to-noise ratio as well as brain activation results shows high consistency between the results before and after additional retrospective motion correction when using the proposed technique, indicating successful prospective motion correction. The comparison of absolute tSNR values does not show an improvement compared to using retrospective motion correction alone. However, the improved temporal resolution may provide improved tSNR in the presence of more exaggerated intra-volume motion. }, } |
2019 | Journal | Matthias Peter Hilty, Philippe Guerci, Yasin Ince, Fevzi Toraman, Can Ince (2019). MicroTools enables automated quantification of capillary density and red blood cell velocity in handheld vital microscopy. Communications Biology, 2(1), pp. 15. (link) (bib) x @article{Hilty2019, year = { 2019 }, volume = { 2 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { MicroTools enables automated quantification of capillary density and red blood cell velocity in handheld vital microscopy }, pages = { 15 }, number = { 1 }, journal = { Communications Biology }, issn = { 23993642 }, doi = { 10.1038/s42003-019-0473-8 }, author = { Hilty and Guerci and Ince and Toraman and Ince }, abstract = { Direct assessment of capillary perfusion has been prioritized in hemodynamic management of critically ill patients in addition to optimizing blood flow on the global scale. Sublingual handheld vital microscopy has enabled online acquisition of moving image sequences of the microcirculation, including the flow of individual red blood cells in the capillary network. However, due to inherent content complexity, manual image sequence analysis remained gold standard, introducing inter-observer variability and precluding real-time image analysis for clinical therapy guidance. Here we introduce an advanced computer vision algorithm for instantaneous analysis and quantification of morphometric and kinetic information related to capillary blood flow in the sublingual microcirculation. We evaluated this technique in a porcine model of septic shock and resuscitation and cardiac surgery patients. This development is of high clinical relevance because it enables implementation of point-of-care goal-directed resuscitation procedures based on correction of microcirculatory perfusion in critically ill and perioperative patients. }, } |
2019 | Journal | Saumya Gurbani, Brent Weinberg, Lee Cooper, Eric Mellon, Eduard Schreibmann, Sulaiman Sheriff, Andrew Maudsley, Mohammed Goryawala, Hui Kuo Shu, Hyunsuk Shim (2019). The Brain Imaging Collaboration Suite (BrICS): A Cloud Platform for Integrating Whole-Brain Spectroscopic MRI into the Radiation Therapy Planning Workflow. Tomography (Ann Arbor, Mich.), 5(1), pp. 184–191. (link) (bib) x @article{Gurbani2019, year = { 2019 }, volume = { 5 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { The Brain Imaging Collaboration Suite (BrICS): A Cloud Platform for Integrating Whole-Brain Spectroscopic MRI into the Radiation Therapy Planning Workflow }, pages = { 184--191 }, number = { 1 }, keywords = { cloud platform,radiation therapy,spectroscopic MRI }, journal = { Tomography (Ann Arbor, Mich.) }, issn = { 2379139X }, doi = { 10.18383/j.tom.2018.00028 }, author = { Gurbani and Weinberg and Cooper and Mellon and Schreibmann and Sheriff and Maudsley and Goryawala and Shu and Shim }, abstract = { Glioblastoma has poor prognosis with inevitable local recurrence despite aggressive treatment with surgery and chemoradiation. Radiation therapy (RT) is typically guided by contrast-enhanced T1-weighted magnetic resonance imaging (MRI) for defining the high-dose target and T2-weighted fluid-attenuation inversion recovery MRI for defining the moderate-dose target. There is an urgent need for improved imaging methods to better delineate tumors for focal RT. Spectroscopic MRI (sMRI) is a quantitative imaging technique that enables whole-brain analysis of endogenous metabolite levels, such as the ratio of choline-to-N-acetylaspartate. Previous work has shown that choline-to-N-acetylaspartate ratio accurately identifies tissue with high tumor burden beyond what is seen on standard imaging and can predict regions of metabolic abnormality that are at high risk for recurrence. 
To facilitate efficient clinical implementation of sMRI for RT planning, we developed the Brain Imaging Collaboration Suite (BrICS; https://brainimaging.emory.edu/brics-demo), a cloud platform that integrates sMRI with standard imaging and enables team members from multiple departments and institutions to work together in delineating RT targets. BrICS is being used in a multisite pilot study to assess feasibility and safety of dose-escalated RT based on metabolic abnormalities in patients with glioblastoma (Clinicaltrials.gov NCT03137888). The workflow of analyzing sMRI volumes and preparing RT plans is described. The pipeline achieved rapid turnaround time by enabling team members to perform their delegated tasks independently in BrICS when their clinical schedules allowed. To date, 18 patients have been treated using targets created in BrICS and no severe toxicities have been observed. }, } |
2019 | Journal | Adam Gribble, Michael A. Pinkert, Jared Westreich, Yuming Liu, Adib Keikhosravi, Mohammadali Khorasani, Sharon Nofech-Mozes, Kevin W. Eliceiri, Alex Vitkin (2019). A multiscale Mueller polarimetry module for a stereo zoom microscope. Biomedical Engineering Letters, 9(3), pp. 339–349. (link) (bib) x @article{Gribble2019, year = { 2019 }, volume = { 9 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A multiscale Mueller polarimetry module for a stereo zoom microscope }, pages = { 339--349 }, number = { 3 }, keywords = { Label-free imaging,Module,Mueller matrix polarimetry,Multiscale,Pathology,Stereo zoom microscope }, journal = { Biomedical Engineering Letters }, issn = { 2093985X }, doi = { 10.1007/s13534-019-00116-w }, author = { Gribble and Pinkert and Westreich and Liu and Keikhosravi and Khorasani and Nofech-Mozes and Eliceiri and Vitkin }, abstract = { Mueller polarimetry is a quantitative polarized light imaging modality that is capable of label-free visualization of tissue pathology, does not require extensive sample preparation, and is suitable for wide-field tissue analysis. It holds promise for selected applications in biomedicine, but polarimetry systems are often constrained by limited end-user accessibility and/or long-imaging times. In order to address these needs, we designed a multiscale-polarimetry module that easily couples to a commercially available stereo zoom microscope. This paper describes the module design and provides initial polarimetry imaging results from a murine preclinical breast cancer model and human breast cancer samples. The resultant polarimetry module has variable resolution and field of view, is low-cost, and is simple to switch in or out of a commercial microscope. The module can reduce long imaging times by adopting the main imaging approach used in pathology: scanning at low resolution to identify regions of interest, then at high resolution to inspect the regions in detail. 
Preliminary results show how the system can aid in region of interest identification for pathology, but also highlight that more work is needed to understand how tissue structures of pathological interest appear in Mueller polarimetry images across varying spatial zoom scales. }, } |
2019 | Journal | Oliver Grauer, Mohammed Jaber, Katharina Hess, Matthias Weckesser, Wolfram Schwindt, Stephan Maring, Johannes Wölfer, Walter Stummer (2019). Combined intracavitary thermotherapy with iron oxide nanoparticles and radiotherapy as local treatment modality in recurrent glioblastoma patients. Journal of Neuro-Oncology, 141(1), pp. 83–94. (link) (bib) x @article{Grauer2019, year = { 2019 }, volume = { 141 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Combined intracavitary thermotherapy with iron oxide nanoparticles and radiotherapy as local treatment modality in recurrent glioblastoma patients }, pmid = { 30506500 }, pages = { 83--94 }, number = { 1 }, keywords = { Caspase-3,HSP70,PD-L1,Superparamagnetic iron oxide nanoparticles,Thermotherapy }, journal = { Journal of Neuro-Oncology }, issn = { 15737373 }, doi = { 10.1007/s11060-018-03005-x }, author = { Grauer and Jaber and Hess and Weckesser and Schwindt and Maring and W{\"{o}}lfer and Stummer }, abstract = { Background: There is an increasing interest in local tumor ablative treatment modalities that induce immunogenic cell death and the generation of antitumor immune responses. Methods: We report six recurrent glioblastoma patients who were treated with intracavitary thermotherapy after coating the resection cavity wall with superparamagnetic iron oxide nanoparticles (“NanoPaste” technique). Patients underwent six 1-h hyperthermia sessions in an alternating magnetic field and, if possible, received concurrent fractionated radiotherapy at a dose of 39.6 Gy. Results: There were no major side effects during active treatment. However, after 2–5 months, patients developed increasing clinical symptoms. CT scans showed tumor flare reactions with prominent edema around nanoparticle deposits. Patients were treated with dexamethasone and, if necessary, underwent re-surgery to remove nanoparticles. 
Histopathology revealed sustained necrosis directly adjacent to aggregated nanoparticles without evidence for tumor activity. Immunohistochemistry showed upregulation of Caspase-3 and heat shock protein 70, prominent infiltration of macrophages with ingested nanoparticles and CD3 + T-cells. Flow cytometric analysis of freshly prepared tumor cell suspensions revealed increased intracellular ratios of IFN-$\gamma$ to IL-4 in CD4 + and CD8 + memory T cells, and activation of tumor-associated myeloid cells and microglia with upregulation of HLA-DR and PD-L1. Two patients had long-lasting treatment responses {\textgreater} 23 months without receiving any further therapy. Conclusion: Intracavitary thermotherapy combined with radiotherapy can induce a prominent inflammatory reaction around the resection cavity which might trigger potent antitumor immune responses possibly leading to long-term stabilization of recurrent GBM patients. These results warrant further investigations in a prospective phase-I trial. }, } |
2019 | Journal | William Gonçalves, Takuya Mabuchi, Takashi Tokumasu (2019). Nucleation and Growth of Cavities in Hydrated Nafion Membranes under Tensile Strain: A Molecular Dynamics Study. Journal of Physical Chemistry C, 123(47), pp. 28958–28968. (link) (bib) x @article{Goncalves2019, year = { 2019 }, volume = { 123 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Nucleation and Growth of Cavities in Hydrated Nafion Membranes under Tensile Strain: A Molecular Dynamics Study }, pages = { 28958--28968 }, number = { 47 }, journal = { Journal of Physical Chemistry C }, issn = { 19327455 }, doi = { 10.1021/acs.jpcc.9b07101 }, author = { Gon{\c{c}}alves and Mabuchi and Tokumasu }, abstract = { Molecular dynamics simulations are performed to investigate the nucleation and growth of cavities in a hydrated Nafion membrane under mechanical deformation. The simulation model used in this study accurately reproduces the experimental values of the elastic modulus of the membrane as a function of water content. The results obtained from triaxial tensile tests reveal a ductile to brittle transition as the water content increases. The nucleation and growth of the cavities have been quantitatively analyzed in terms of the number and size of cavities, illustrating the ductile to brittle transition uncovered by the stress/strain curves. Further local analyses have been carried out to identify the nucleation sites. The analysis of local plasticity indicates that as the water content increases, the membrane accumulates more plastic deformation in the hydrophilic domain than in the hydrophobic domain during the rupture stage of the tensile tests. These results suggest that the water network significantly impacts the nucleation and expansion of cavities induced by mechanical deformation. Furthermore, the local mechanical properties of the Nafion membrane are evaluated. 
The results show that the mechanical properties are heterogeneous at the nanoscale and that the cavities nucleate in soft regions of the membrane. A statistical analysis of the local water density of nucleation sites indicates that the polymer-water interfaces are more likely to nucleate cavities. The expansion and coalescence of cavities is facilitated by the high molecular reorganization of the water network, which explains the brittle behavior of membranes with high water content. }, } |
2019 | Journal | Olivia K. Ginty, John T. Moore, Mehdi Eskandari, Patrick Carnahan, Andras Lasso, Matthew A. Jolley, Mark Monaghan, Terry M. Peters (2019). Dynamic, patient-specific mitral valve modelling for planning transcatheter repairs. International Journal of Computer Assisted Radiology and Surgery, 14(7), pp. 1227–1235. (link) (bib) x @article{Ginty2019, year = { 2019 }, volume = { 14 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Dynamic, patient-specific mitral valve modelling for planning transcatheter repairs }, pages = { 1227--1235 }, number = { 7 }, keywords = { 3D printing,Mitral valve,Mitral valve models,Modelling,Surgical simulation,Transcatheter devices }, journal = { International Journal of Computer Assisted Radiology and Surgery }, issn = { 18616429 }, doi = { 10.1007/s11548-019-01998-y }, author = { Ginty and Moore and Eskandari and Carnahan and Lasso and Jolley and Monaghan and Peters }, abstract = { Purpose:: Transcatheter, beating heart repair techniques for mitral valve regurgitation is a very active area of development. However, it is difficult to both simulate and predict the clinical outcomes of mitral repairs, owing to the complexity of mitral valve geometry and the influence of hemodynamics. We aim to produce a workflow for manufacturing dynamic patient-specific models to simulate the mitral valve for transcatheter repair applications. Methods:: In this paper, we present technology and associated workflow, for using transesophageal echocardiography to generate dynamic physical replicas of patient valves. We validate our workflow using six patient datasets representing patients with unique or particularly challenging pathologies as selected by a cardiologist. The dynamic component of the models and their resultant potential as procedure planning tools is due to a dynamic pulse duplicator that permits the evaluation of the valve models experiencing realistic hemodynamics. 
Results:: Early results indicate the workflow has excellent anatomical accuracy and the ability to replicate regurgitation pathologies, as shown by colour Doppler ultrasound and anatomical measurements comparing patients and models. Analysis of all measurements successfully resulted in t critical two-tail {\textgreater} t stat and p values {\textgreater} 0.05, thus demonstrating no statistical difference between the patients and models, owing to high fidelity morphological replication. Conclusions:: Due to the combination of a dynamic environment and patient-specific modelling, this workflow demonstrates a promising technology for simulating the complete morphology of mitral valves undergoing transcatheter repairs. }, } |
2019 | Journal | David Fuentes, Kareem Ahmed, Jonathan S. Lin, Reham Abdel-Wahab, Ahmed O. Kaseb, Manal Hassan, Janio Szklaruk, Ali Morshid, John D. Hazle, Aliya Qayyum, Khaled M. Elsayes (2019). Automated Volumetric Assessment of Hepatocellular Carcinoma Response to Sorafenib: A Pilot Study. Journal of computer assisted tomography, 43(3), pp. 499–506. (link) (bib) x @article{Fuentes2019, year = { 2019 }, volume = { 43 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Automated Volumetric Assessment of Hepatocellular Carcinoma Response to Sorafenib: A Pilot Study }, pages = { 499--506 }, number = { 3 }, journal = { Journal of computer assisted tomography }, issn = { 15323145 }, doi = { 10.1097/RCT.0000000000000866 }, author = { Fuentes and Ahmed and Lin and Abdel-Wahab and Kaseb and Hassan and Szklaruk and Morshid and Hazle and Qayyum and Elsayes }, abstract = { PURPOSE: This pilot study evaluates the feasibility of automated volumetric quantification of hepatocellular carcinoma (HCC) as an imaging biomarker to assess treatment response for sorafenib. METHODS: In this institutional review board-approved, Health Insurance Portability and Accountability Act-compliant retrospective study, a training database of manually labeled background liver, enhancing and nonenhancing tumor tissue was established using pretherapy and first posttherapy multiphasic computed tomography images from a registry of 13 HCC patients. For each patient, Hounsfield density and geometry-based feature images were generated from registered multiphasic computed tomography data sets and used as the input for a random forest-based classifier of enhancing and nonenhancing tumor tissue. Leave-one-out cross-validation of the dice similarity measure was applied to quantify the classifier accuracy. A Cox regression model was used to confirm volume changes as predictors of time to progression (TTP) of target lesions for both manual and automatic methods. 
RESULTS: When compared with manual labels, an overall classification accuracy of dice similarity coefficient of 0.71 for pretherapy and 0.66 posttherapy enhancing tumor labels and 0.45 for pretherapy and 0.59 for posttherapy nonenhancing tumor labels was observed. Automated methods for quantifying volumetric changes in the enhancing lesion agreed with manual methods and were observed as a significant predictor of TTP. CONCLUSIONS: Automated volumetric analysis was determined to be feasible for monitoring HCC response to treatment. The information extracted using automated volumetrics is likely to reproduce labor-intensive manual data and provide a good predictor for TTP. Further work will extend these studies to additional treatment modalities and larger patient populations. }, } |
2019 | Journal | Robert Finnegan, Jason Dowling, Eng Siew Koh, Simon Tang, James Otton, Geoff Delaney, Vikneswary Batumalai, Carol Luo, Pramukh Atluri, Athiththa Satchithanandha, David Thwaites, Lois Holloway (2019). Feasibility of multi-atlas cardiac segmentation from thoracic planning CT in a probabilistic framework. Physics in Medicine and Biology, 64(8), pp. NA (link) (bib) x @article{Finnegan2019, year = { 2019 }, volume = { 64 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85064555945{\&}doi=10.1088{\%}2F1361-6560{\%}2Fab0ea6{\&}partnerID=40{\&}md5=65ca7ad7c68a23ee0cb634955c18a67b }, type = { Journal Article }, title = { Feasibility of multi-atlas cardiac segmentation from thoracic planning CT in a probabilistic framework }, number = { 8 }, keywords = { atlas-based segmentation,computed tomography,heart contouring,image registration,medical image processing,whole heart segmentation }, journal = { Physics in Medicine and Biology }, issn = { 13616560 }, doi = { 10.1088/1361-6560/ab0ea6 }, author = { Finnegan and Dowling and Koh and Tang and Otton and Delaney and Batumalai and Luo and Atluri and Satchithanandha and Thwaites and Holloway }, abstract = { Toxicity to cardiac and coronary structures is an important late morbidity for patients undergoing left-sided breast radiotherapy. Many current studies have relied on estimates of cardiac doses assuming standardised anatomy, with a calculated increase in relative risk of 7.4{\%} per Gy (mean heart dose). To provide individualised estimates for dose, delineation of various cardiac structures on patient images is required. Automatic multi-atlas based segmentation can provide a consistent, robust solution, however there are challenges to this method. We are aiming to develop and validate a cardiac atlas and segmentation framework, with a focus on the limitations and uncertainties in the process. 
We present a probabilistic approach to segmentation, which provides a simple method to incorporate inter-observer variation, as well as a useful tool for evaluating the accuracy and sources of error in segmentation. A dataset consisting of 20 planning computed tomography (CT) images of Australian breast cancer patients with delineations of 17 structures (including whole heart, four chambers, coronary arteries and valves) was manually contoured by three independent observers, following a protocol based on a published reference atlas, with verification by a cardiologist. To develop and validate the segmentation framework a leave-one-out cross-validation strategy was implemented. Performance of the automatic segmentations was evaluated relative to inter-observer variability in manually-derived contours; measures of volume and surface accuracy (Dice similarity coefficient (DSC) and mean absolute surface distance (MASD), respectively) were used to compare automatic segmentation to the consensus segmentation from manual contours. For the whole heart, the resulting segmentation achieved a DSC of 0.944 $\pm$ 0.024, with a MASD of 0.726 $\pm$ 1.363 mm. Quantitative results, together with the analysis of probabilistic labelling, indicate the feasibility of accurate and consistent segmentation of larger structures, whereas this is not the case for many smaller structures, where a major limitation in segmentation accuracy is the interobserver variability in manual contouring. }, } |
2019 | Journal | Steven R. Dolly, Yang Lou, Mark A. Anastasio, Hua Li (2019). Task-based image quality assessment in radiation therapy: Initial characterization and demonstration with computer-simulation study. Physics in Medicine and Biology, 64(14), pp. 19. (link) (bib) x @article{Dolly2019, year = { 2019 }, volume = { 64 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Task-based image quality assessment in radiation therapy: Initial characterization and demonstration with computer-simulation study }, pages = { 19 }, number = { 14 }, keywords = { geometric attribute distribution model,learning-based stochastic object models,radiation therapy,task-based image quality assessment,therapeutic operating characteristic curve }, journal = { Physics in Medicine and Biology }, issn = { 13616560 }, doi = { 10.1088/1361-6560/ab2dc5 }, author = { Dolly and Lou and Anastasio and Li }, abstract = { In the majority of current radiation therapy (RT) applications, image quality is still assessed subjectively or by utilizing physical measures. A novel theory that applies objective task-based image quality assessment in radiation therapy (IQA-in-RT) was recently proposed, in which the area under the therapeutic operating characteristic curve (AUTOC) was employed as the figure-of-merit (FOM) for evaluating RT effectiveness. Although theoretically more appealing than conventional subjective or physical measures, a comprehensive implementation and evaluation of this novel task-based IQA-in-RT theory is required for its further application in improving clinical RT. In this work, a practical and modular IQA-in-RT framework is presented for implementing this theory for the assessment of imaging components on the basis of RT treatment outcomes. 
Computer-simulation studies are conducted to demonstrate the feasibility and utility of the proposed IQA-in-RT framework in optimizing x-ray computed tomography (CT) pre-treatment imaging, including the optimization of CT imaging dose and image reconstruction parameters. The potential advantages of optimizing imaging components in the RT workflow by use of the AUTOC as the FOM are also compared against those of other physical measures. The results demonstrate that optimization using the AUTOC leads to selecting different parameters from those indicated by physical measures, potentially improving RT performance. The sources of systemic randomness and bias that affect the determination of the AUTOC are also analyzed. The presented work provides a practical solution for the further investigation and analysis of the task-based IQA-in-RT theory and advances its applications in improving RT clinical practice and cancer patient care. }, } |
2019 | Journal | Remi Cresson (2019). A framework for remote sensing images processing using deep learning techniques. IEEE Geoscience and Remote Sensing Letters, 16(1), pp. 25–29. (link) (bib) x @article{Cresson2019, year = { 2019 }, volume = { 16 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A framework for remote sensing images processing using deep learning techniques }, pages = { 25--29 }, number = { 1 }, keywords = { Aerial images,Orfeo Toolbox (OTB),TensorFlow (TF),deep learning (DL),neural networks,remote sensing (RS) }, journal = { IEEE Geoscience and Remote Sensing Letters }, issn = { 15580571 }, eprint = { 1807.06535 }, doi = { 10.1109/LGRS.2018.2867949 }, author = { Cresson }, arxivid = { 1807.06535 }, archiveprefix = { arXiv }, abstract = { Deep learning (DL) techniques are becoming increasingly important to solve a number of image processing tasks. Among common algorithms, convolutional neural network- and recurrent neural network-based systems achieve state-of-the-art results on satellite and aerial imagery in many applications. While these approaches are subject to scientific interest, there is currently no operational and generic implementation available at the user level for the remote sensing (RS) community. In this letter, we present a framework enabling the use of DL techniques with RS images and geospatial data. Our solution takes roots in two extensively used open-source libraries, the RS image processing library Orfeo ToolBox and the high-performance numerical computation library TensorFlow. It can apply deep nets without restriction on image size and is computationally efficient, regardless of hardware configuration. }, } |
2019 | Journal | Yangsean Choi, Jinhee Jang, Yoonho Nam, Na Young Shin, Hyun Seok Choi, So Lyung Jung, Kook Jin Ahn, Bum Soo Kim (2019). Relationship between abnormal hyperintensity on T2-weighted images around developmental venous anomalies and magnetic susceptibility of their collecting veins: In-vivo quantitative susceptibility mapping study. Korean Journal of Radiology, 20(4), pp. 662–670. (link) (bib) x @article{Choi2019, year = { 2019 }, volume = { 20 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Relationship between abnormal hyperintensity on T2-weighted images around developmental venous anomalies and magnetic susceptibility of their collecting veins: In-vivo quantitative susceptibility mapping study }, pages = { 662--670 }, number = { 4 }, keywords = { Developmental venous anomaly,Magnetic resonance imaging,Quantitative susceptibility mapping,Vascular malformation }, journal = { Korean Journal of Radiology }, issn = { 12296929 }, doi = { 10.3348/kjr.2018.0685 }, author = { Choi and Jang and Nam and Shin and Choi and Jung and Ahn and Kim }, abstract = { Objective: A developmental venous anomaly (DVA) is a vascular malformation of ambiguous clinical significance. We aimed to quantify the susceptibility of draining veins ($\chi$vein) in DVA and determine its significance with respect to oxygen metabolism using quantitative susceptibility mapping (QSM). Materials and Methods: Brain magnetic resonance imaging of 27 consecutive patients with incidentally detected DVAs were retrospectively reviewed. Based on the presence of abnormal hyperintensity on T2-weighted images (T2WI) in the brain parenchyma adjacent to DVA, the patients were grouped into edema (E+, n = 9) and non-edema (E-, n = 18) groups. A 3T MR scanner was used to obtain fully flow-compensated gradient echo images for susceptibility-weighted imaging with source images used for QSM processing. The $\chi$vein was measured semi-automatically using QSM. 
The normalized $\chi$vein was also estimated. Clinical and MR measurements were compared between the E+ and E- groups using Student's t-test or Mann-Whitney U test. Correlations between the $\chi$vein and area of hyperintensity on T2WI and between $\chi$vein and diameter of the collecting veins were assessed. The correlation coefficient was also calculated using normalized veins. Results: The DVAs of the E+ group had significantly higher $\chi$vein (196.5 ± 27.9 vs. 167.7 ± 33.6, p = 0.036) and larger diameter of the draining veins (p = 0.006), and patients were older (p = 0.006) than those in the E- group. The $\chi$vein was also linearly correlated with the hyperintense area on T2WI (r = 0.633, 95{\%} confidence interval 0.333-0.817, p {\textless} 0.001). Conclusion: DVAs with abnormal hyperintensity on T2WI have higher susceptibility values for draining veins, indicating an increased oxygen extraction fraction that might be associated with venous congestion. }, } |
2019 | Journal | Quan Chen, Shiliang Hu, Peiran Long, Fang Lu, Yujie Shi, Yunpeng Li (2019). A Transfer Learning Approach for Malignant Prostate Lesion Detection on Multiparametric MRI. Technology in cancer research & treatment, 18, pp. 9. (link) (bib) x @article{Chen2019, year = { 2019 }, volume = { 18 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A Transfer Learning Approach for Malignant Prostate Lesion Detection on Multiparametric MRI }, pmid = { 31221034 }, pages = { 9 }, keywords = { AI,convolutional neural network,focal therapy,mpMRI,prostate lesion,transfer learning }, journal = { Technology in cancer research {\&} treatment }, issn = { 15330338 }, doi = { 10.1177/1533033819858363 }, author = { Chen and Hu and Long and Lu and Shi and Li }, abstract = { PURPOSE: In prostate focal therapy, it is important to accurately localize malignant lesions in order to increase biological effect of the tumor region while achieving a reduction in dose to noncancerous tissue. In this work, we proposed a transfer learning-based deep learning approach, for classification of prostate lesions in multiparametric magnetic resonance imaging images. METHODS: Magnetic resonance imaging images were preprocessed to remove bias artifact and normalize the data. Two state-of-the-art deep convolutional neural network models, InceptionV3 and VGG-16, were pretrained on ImageNet data set and retuned on the multiparametric magnetic resonance imaging data set. As lesion appearances differ by the prostate zone that it resides in, separate models were trained. Ensembling was performed on each prostate zone to improve area under the curve. In addition, the predictions from lesions on each prostate zone were scaled separately to increase the area under the curve for all lesions combined. RESULTS: The models were tuned to produce the highest area under the curve on validation data set. 
When it was applied to the unseen test data set, the transferred InceptionV3 model achieved an area under the curve of 0.81 and the transferred VGG-16 model achieved an area under the curve of 0.83. This was the third best score among the 72 methods from 33 participating groups in ProstateX competition. CONCLUSION: The transfer learning approach is a promising method for prostate cancer detection on multiparametric magnetic resonance imaging images. Features learned from ImageNet data set can be useful for medical images. }, } |
2019 | Journal | Luca Buzzatti, Benyameen Keelson, Jildert Apperloo, Thierry Scheerlinck, Jean Pierre Baeyens, Gert Van Gompel, Jef Vandemeulebroucke, Michel de Maeseneer, Johan de Mey, Nico Buls, Erik Cattrysse (2019). Four-dimensional CT as a valid approach to detect and quantify kinematic changes after selective ankle ligament sectioning. Scientific Reports, 9(1), pp. 9. (link) (bib) x @article{Buzzatti2019, year = { 2019 }, volume = { 9 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Four-dimensional CT as a valid approach to detect and quantify kinematic changes after selective ankle ligament sectioning }, pages = { 9 }, number = { 1 }, journal = { Scientific Reports }, issn = { 20452322 }, doi = { 10.1038/s41598-018-38101-5 }, author = { Buzzatti and Keelson and Apperloo and Scheerlinck and Baeyens and {Van Gompel} and Vandemeulebroucke and Maeseneer and Mey and Buls and Cattrysse }, abstract = { The objective of the current study was to explore the potential of dynamic computed tomography to detect kinematic changes, induced by sequential sectioning of the lateral collateral ligaments of the ankle, during full motion sequence of the talocrural joint. A custom-made device was used to induce cyclic controlled ankle inversion movement in one fresh frozen cadaver leg. A 256-slice CT scanner was used to investigate four different scenarios. Scenario 1 with all ligaments intact was first investigated followed by sequential section of the anterior talo-fibular ligament (Scenario 2), the calcaneo-fibular ligament (Scenario 3) and posterior talo-fibular ligament (Scenario 4). Off-line image processing based on semi-automatic segmentation and bone rigid registration was performed. Motion parameters such as translation, rotational angles and orientation and position of the axis of rotation were calculated. Differences between scenarios were calculated. 
Progressive increase of cranio-caudal displacement up to 3.9 mm and flexion up to 10° compared to Scenario 1 were reported. Progressive changes in orientation (up to 20.6°) and position (up to 4.1 mm) of the axis of rotation were also shown. Estimated effective dose of 0.005 mSv (1.9 mGy CTDIvol) was reported. This study demonstrated that kinematic changes due to the absence of ligament integrity can be detected with 4DCT with minimal radiation exposure. Identifying abnormal kinematic patterns could have future application in helping clinicians to choose patients' optimal treatment. Therefore, further studies with bigger in vitro sample sizes and consequent investigations in vivo are recommended to confirm the current findings. }, } |
2019 | Journal | Ryan E. Breighner, Eric A. Bogner, Susan C. Lee, Matthew F. Koff, Hollis G. Potter (2019). Evaluation of Osseous Morphology of the Hip Using Zero Echo Time Magnetic Resonance Imaging. American Journal of Sports Medicine, 47(14), pp. 3460–3468. (link) (bib) x @article{Breighner2019, year = { 2019 }, volume = { 47 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Evaluation of Osseous Morphology of the Hip Using Zero Echo Time Magnetic Resonance Imaging }, pages = { 3460--3468 }, number = { 14 }, keywords = { FAI,MRI,femoroacetabular impingement,imaging }, journal = { American Journal of Sports Medicine }, issn = { 15523365 }, doi = { 10.1177/0363546519878170 }, author = { Breighner and Bogner and Lee and Koff and Potter }, abstract = { Background: Femoroacetabular impingement syndrome (FAIS) is a common disorder of the hip resulting in groin pain and ultimately osteoarthritis. Radiologic assessment of FAI morphologies, which may present with overlapping radiologic features of hip dysplasia, often requires the use of computed tomography (CT) for evaluation of osseous abnormality, owing to the difficulty of direct visualization of cortical and subchondral bone with conventional magnetic resonance imaging (MRI). The use of a zero echo time (ZTE) MRI pulse sequence may obviate the need for CT by rendering bone directly from MRI. Purpose/Hypothesis: The purpose was to explore the application of ZTE MRI to the assessment of osseous FAI and dysplasia morphologies of the hip. It was hypothesized that angular measurements from ZTE images would show significant agreement with measurements obtained from CT images. Study Design: Cohort study (diagnosis); Level of evidence, 2. Methods: Thirty-eight hips from 23 patients were imaged with ZTE MRI and CT. Clinically relevant angular measurements of hip morphology were made in both modalities and compared to assess agreement. 
Measurements included coronal and sagittal center-edge angles, femoral neck-shaft angle, acetabular version (at 1-, 2-, and 3-o'clock positions), T{\"{o}}nnis angle, alpha angle, and modified-beta angle. Interrater agreement was assessed for a subset of 10 hips by 2 raters. Intermodal agreement was assessed on the complete cohort and a single rater. Results: Interrater agreement was demonstrated in both CT and ZTE, with intraclass correlation coefficient values ranging from 0.636 to 0.990 for ZTE and 0.747 to 0.983 for CT, indicating “good” to “excellent” agreement. Intermodal agreement was also shown to be significant, with intraclass correlation coefficients ranging from 0.618 to 0.904. Conclusion: Significant agreement of angular measurements for hip morphology exists between ZTE MRI and CT imaging. ZTE MRI may be an effective method to quantitatively evaluate osseous hip morphology. }, } |
2019 | Journal | Kishore Balasubramanian, N. P. Ananthamoorthy (2019). Analysis of hybrid statistical textural and intensity features to discriminate retinal abnormalities through classifiers. Proceedings of the Institution of Mechanical Engineers, Part H: Journal of Engineering in Medicine, 233(5), pp. 506–514. (link) (bib) x @article{Balasubramanian2019, year = { 2019 }, volume = { 233 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Analysis of hybrid statistical textural and intensity features to discriminate retinal abnormalities through classifiers }, pages = { 506--514 }, number = { 5 }, keywords = { Glaucoma,blood vessel,classifier,clustering,feature selection,fundus image,thresholding }, journal = { Proceedings of the Institution of Mechanical Engineers, Part H: Journal of Engineering in Medicine }, issn = { 20413033 }, doi = { 10.1177/0954411919835856 }, author = { Balasubramanian and Ananthamoorthy }, abstract = { Retinal image analysis relies on the effectiveness of computational techniques to discriminate various abnormalities in the eye like diabetic retinopathy, macular degeneration and glaucoma. The onset of the disease is often unnoticed in case of glaucoma, the effect of which is felt only at a later stage. Diagnosis of such degenerative diseases warrants early diagnosis and treatment. In this work, performance of statistical and textural features in retinal vessel segmentation is evaluated through classifiers like extreme learning machine, support vector machine and Random Forest. The fundus images are initially preprocessed for any noise reduction, image enhancement and contrast adjustment. The two-dimensional Gabor Wavelets and Partition Clustering is employed on the preprocessed image to extract the blood vessels. Finally, the combined hybrid features comprising statistical textural, intensity and vessel morphological features, extracted from the image, are used to detect glaucomatous abnormality through the classifiers. 
A crisp decision can be taken depending on the classifying rates of the classifiers. Public databases RIM-ONE and high-resolution fundus and local datasets are used for evaluation with threefold cross validation. The evaluation is based on performance metrics through accuracy, sensitivity and specificity. The evaluation of hybrid features obtained an overall accuracy of 97{\%} when tested using classifiers. The support vector machine classifier is able to achieve an accuracy of 93.33{\%} on high-resolution fundus, 93.8{\%} on RIM-ONE dataset and 95.3{\%} on local dataset. For extreme learning machine classifier, the accuracy is 95.1{\%} on high-resolution fundus, 97.8{\%} on RIM-ONE and 96.8{\%} on local dataset. An accuracy of 94.5{\%} on high-resolution fundus 92.5{\%} on RIM-ONE and 94.2{\%} on local dataset is obtained for the random forest classifier. Validation of the experiment results indicate that the hybrid features can be deployed in supervised classifiers to discriminate retinal abnormalities effectively. }, } |
2019 | Journal | Hiroyuki Ai, Ryuichi Okada, Midori Sakura, Thomas Wachtler, Hidetoshi Ikeno (2019). Neuroethology of the waggle dance: How followers interact with the waggle dancer and detect spatial information. Insects, 10(10), pp. 16. (link) (bib) x @article{Ai2019, year = { 2019 }, volume = { 10 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Neuroethology of the waggle dance: How followers interact with the waggle dancer and detect spatial information }, pages = { 16 }, number = { 10 }, keywords = { Antenna-mechanosensory center,Brain,Computational analysis,Distance information,Honeybee,Polarized light processing,Sensory processing,Standard brain,Vibration,Waggle dance }, journal = { Insects }, issn = { 20754450 }, doi = { 10.3390/insects10100336 }, author = { Ai and Okada and Sakura and Wachtler and Ikeno }, abstract = { Since the honeybee possesses eusociality, advanced learning, memory ability, and information sharing through the use of various pheromones and sophisticated symbol communication (i.e., the “waggle dance”), this remarkable social animal has been one of the model symbolic animals for biological studies, animal ecology, ethology, and neuroethology. Karl von Frisch discovered the meanings of the waggle dance and called the communication a “dance language.” Subsequent to this discovery, it has been extensively studied how effectively recruits translate the code in the dance to reach the advertised destination and how the waggle dance information conflicts with the information based on their own foraging experience. The dance followers, mostly foragers, detect and interact with the waggle dancer, and are finally recruited to the food source. In this review, we summarize the current state of knowledge on the neural processing underlying this fascinating behavior. }, } |
2019 | Journal | Gina Belmonte, Vincenzo Ciancia, Diego Latella, Mieke Massink (2019). VoxLogicA: A spatial model checker for declarative image analysis. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 11427 LNCS, pp. 281–298. (link) (bib) x @article{Belmonte2018, year = { 2019 }, volume = { 11427 LNCS }, url = { http://arxiv.org/abs/1811.05677 }, title = { {VoxLogicA}: A spatial model checker for declarative image analysis }, pages = { 281--298 }, month = { nov }, keywords = { Closure spaces,Medical Imaging,Model checking,Spatial logics }, journal = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, issn = { 16113349 }, isbn = { 9783030174613 }, eprint = { 1811.05677 }, doi = { 10.1007/978-3-030-17462-0_16 }, author = { Belmonte and Ciancia and Latella and Massink }, arxivid = { 1811.05677 }, archiveprefix = { arXiv }, abstract = { Spatial and spatio-temporal model checking techniques have a wide range of application domains, among which large scale distributed systems and signal and image analysis. We explore a new domain, namely (semi-)automatic contouring in Medical Imaging, introducing the tool VoxLogicA which merges the state-of-the-art library of computational imaging algorithms ITK with the unique combination of declarative specification and optimised execution provided by spatial logic model checking. The result is a rapid, logic based analysis development methodology. The analysis of an existing benchmark of medical images for segmentation of brain tumours shows that simple VoxLogicA analysis can reach state-of-the-art accuracy, competing with best-in-class algorithms, with the advantage of explainability and easy replicability. 
Furthermore, due to a two-orders-of-magnitude speedup compared to the existing general-purpose spatio-temporal model checker topochecker, VoxLogicA enables interactive development of analysis of 3D medical images, which can greatly facilitate the work of professionals in this domain. }, } |
2019 | Journal | Sophie B. Sébille, Anne Sophie Rolland, Matthieu Faillot, Fernando Perez-Garcia, Antoine Colomb-Clerc, Brian Lau, Sylvie Dumas, Sara Fernandez Vidal, Marie Laure Welter, Chantal Francois, Eric Bardinet, Carine Karachi (2019). Normal and pathological neuronal distribution of the human mesencephalic locomotor region. Movement Disorders, 34(2), pp. 218–227. (link) (bib) x @article{Sebille2019, year = { 2019 }, volume = { 34 }, url = { https://onlinelibrary.wiley.com/doi/abs/10.1002/mds.27578 }, title = { Normal and pathological neuronal distribution of the human mesencephalic locomotor region }, pages = { 218--227 }, number = { 2 }, month = { feb }, keywords = { Gait disorders,Parkinson's disease,cuneiform nucleus,pedunculopontine nucleus,progressive supranuclear palsy }, journal = { Movement Disorders }, issn = { 15318257 }, doi = { 10.1002/mds.27578 }, author = { S{\'{e}}bille and Rolland and Faillot and Perez-Garcia and Colomb-Clerc and Lau and Dumas and Vidal and Welter and Francois and Bardinet and Karachi }, abstract = { Background: Deep brain stimulation of the pedunculopontine nucleus has been performed to treat dopamine-resistant gait and balance disorders in patients with degenerative diseases. The outcomes, however, are variable, which may be the result of the lack of a well-defined anatomical target. Objectives: The objectives of this study were to identify the main neuronal populations of the pedunculopontine and the cuneiform nuclei that compose the human mesencephalic locomotor region and to compare their 3-dimensional distribution with those found in patients with Parkinson's disease and progressive supranuclear palsy. Methods: We used high-field MRI, immunohistochemistry, and in situ hybridization to characterize the distribution of the different cell types, and we developed software to merge all data within a common 3-dimensional space. 
Results: We found that cholinergic, GABAergic, and glutamatergic neurons comprised the main cell types of the mesencephalic locomotor region, with the peak densities of cholinergic and GABAergic neurons similarly located within the rostral pedunculopontine nucleus. Cholinergic and noncholinergic neuronal losses were homogeneous in the mesencephalic locomotor region of patients, with the peak density of remaining neurons at the same location as in controls. The degree of denervation of the pedunculopontine nucleus was highest in patients with progressive supranuclear palsy, followed by Parkinson's disease patients with falls. Conclusions: The peak density of cholinergic and GABAergic neurons was located similarly within the rostral pedunculopontine nucleus not only in controls but also in pathological cases. The neuronal loss was homogeneously distributed and highest in the pedunculopontine nucleus of patients with falls, which suggests a potential pathophysiological link. {\textcopyright} 2018 International Parkinson and Movement Disorder Society. }, } |
2019 | Journal | Xuejun Zhang, Xiaomin Tan, Xin Gao, Dongbo Wu, Xiangrong Zhou, Hiroshi Fujita (2019). Non-rigid registration of multi-phase liver CT data using fully automated landmark detection and TPS deformation. Cluster Computing, 22, pp. 15305–15319. (link) (bib) x @article{RN842, year = { 2019 }, volume = { 22 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85044478175{\&}doi=10.1007{\%}2Fs10586-018-2567-3{\&}partnerID=40{\&}md5=43af833a57941962e238b16f43fb845f }, type = { Journal Article }, title = { Non-rigid registration of multi-phase liver CT data using fully automated landmark detection and TPS deformation }, pages = { 15305--15319 }, keywords = { Edge textures,Landmark detection,Liver registration,TPS }, journal = { Cluster Computing }, issn = { 15737543 }, doi = { 10.1007/s10586-018-2567-3 }, author = { Zhang and Tan and Gao and Wu and Zhou and Fujita }, abstract = { In case of the complicated anatomical structure of the liver, landmark points on a three dimensional (3D) liver surface is hardly distinguished as corresponding pairs visually and automated landmark placing will be extremely time saving for liver registration. This paper presents a fully automated landmark detection method to register livers on multi-phase computed tomography (CT) images. Edge texture features and Support Vector Machine (SVM) are applied to detect the discriminated landmarks of the liver, including both surface and internal points. Using the information of liver shape, 3D gray level co-occurrence matrix is calculated into texture features, from which the most informatics there features are selected by our optimization algorithm for choosing a sub-set of features from a high dimensional feature set. 
Then automated landmarks detection begins at scanning surface points on the pre-contrast and portal venous phase images, where positive outputs of the SVM classifier are regarded as initial candidates and final candidates are obtained by eliminating false positives (FPs). Finally, relied on the detected landmarks, thin plate splines (TPS) algorithm is used to register livers. Five surface landmarks, together with internal landmarks of the liver center from every 25 mm slice interval, can be detected automatically with sensitivity of 88.33{\%} and accuracy of 98.5{\%}. Surface-based mean error (SME) is decreased from 3.80 to 2.87 mm on average, while SME value has increased 32.4 and 8.0{\%} on average respectively when comparing with the rigid and B-spline methods. The results demonstrate that edge textures and SVM classifier are effective in the automated landmark detection. Together with TPS algorithm, fully automated liver registration is able to be achieved on multi-phase CT images. }, } |
2019 | Journal | Xinyuan Zhang, Yanqiu Feng, Wufan Chen, Xin Li, Andreia V. Faria, Qianjin Feng, Susumu Mori (2019). Linear Registration of Brain MRI Using Knowledge-Based Multiple Intermediator Libraries. Frontiers in Neuroscience, 13, pp. NA (link) (bib) x @article{RN844, year = { 2019 }, volume = { 13 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85073020740{\&}doi=10.3389{\%}2Ffnins.2019.00909{\&}partnerID=40{\&}md5=eedcc60b9393d11a272628fc4bcf4e67 }, type = { Journal Article }, title = { Linear Registration of Brain MRI Using Knowledge-Based Multiple Intermediator Libraries }, keywords = { MNI space,T1-weighted brain image,dice value,linear registration,mediator selection }, journal = { Frontiers in Neuroscience }, issn = { 1662453X }, doi = { 10.3389/fnins.2019.00909 }, author = { Zhang and Feng and Chen and Li and Faria and Feng and Mori }, abstract = { Linear registration is often the crucial first step for various types of image analysis. Although this is mathematically simple, failure is not uncommon. When investigating the brain by magnetic resonance imaging (MRI), the brain is the target organ for registration but the existence of other tissues, in addition to a variety of fields of view, different brain locations, orientations and anatomical features, poses some serious fundamental challenges. Consequently, a number of different algorithms have been put forward to minimize potential errors. In the present study, we tested a knowledge-based approach that can be combined with any form of registration algorithm. This approach consisted of a library of intermediate images (mediators) with known transformation to the target image. Test images were first registered to all mediators and the best mediator was selected to ensure optimum registration to the target. In order to select the best mediator, we evaluated two similarity criteria: the sum of squared differences and mutual information. 
This approach was applied to 48 mediators and 96 test images. In order to reduce one of the main drawbacks of the approach, increased computation time, we reduced the size of the library by clustering. Our results indicated clear improvement in registration accuracy. }, } |
2019 | Journal | Ruiping Zhang, Lei Zhu, Zhengting Cai, Wei Jiang, Jian Li, Chengwen Yang, Chunxu Yu, Bo Jiang, Wei Wang, Wengui Xu, Xiangfei Chai, Xiaodong Zhang, Yong Tang (2019). Potential feature exploration and model development based on 18F-FDG PET/CT images for differentiating benign and malignant lung lesions. European Journal of Radiology, 121, pp. 9. (link) (bib) x @article{RN808, year = { 2019 }, volume = { 121 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Potential feature exploration and model development based on 18F-FDG PET/CT images for differentiating benign and malignant lung lesions }, pages = { 9 }, keywords = { CT-radiomics features,Lung lesion,PET metabolic parameters,Potential feature }, journal = { European Journal of Radiology }, issn = { 18727727 }, doi = { 10.1016/j.ejrad.2019.108735 }, author = { Zhang and Zhu and Cai and Jiang and Li and Yang and Yu and Jiang and Wang and Xu and Chai and Zhang and Tang }, abstract = { Purpose: The study is to explore potential features and develop classification models for distinguishing benign and malignant lung lesions based on CT-radiomics features and PET metabolic parameters extracted from PET/CT images. Materials and methods: A retrospective study was conducted in baseline 18 F-flurodeoxyglucose positron emission tomography/ computed tomography (18 F-FDG PET/CT) images of 135 patients. The dataset was utilized for feature extraction of CT-radiomics features and PET metabolic parameters based on volume of interest, then went through feature selection and model development with strategy of five-fold cross-validation. Specifically, model development used support vector machine, PET metabolic parameters selection used Akaike's information criterion, and CT-radiomics were reduced by the least absolute shrinkage and selection operator method then forward selection approach. 
The diagnostic performances of CT-radiomics, PET metabolic parameters and combination of both were illustrated by receiver operating characteristic (ROC) curves, and compared by Delong test. Five groups of selected PET metabolic parameters and CT-radiomics were counted, and potential features were found and analyzed with Mann-Whitney U test. Results: The CT-radiomics, PET metabolic parameters, and combination of both among five subsets showed mean area under the curve (AUC) of 0.820 ± 0.053, 0.874 ± 0.081, and 0.887 ± 0.046, respectively. No significant differences in ROC among models were observed through pairwise comparison in each fold (P-value from 0.09 to 0.81, Delong test). The potential features were found to be SurfaceVolumeRatio and SUVpeak (P {\textless} 0.001 of both, U test). Conclusion: The classification models developed by CT-radiomics features and PET metabolic parameters based on PET/CT images have substantial diagnostic capacity on lung lesions. }, } |
2019 | Journal | Qi Zhang, Terry Peters, Aaron Fenster (2019). Layer-based visualization and biomedical information exploration of multi-channel large histological data. Computerized Medical Imaging and Graphics, 72, pp. 34–46. (link) (bib) x @article{RN955, year = { 2019 }, volume = { 72 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85061394368{\&}doi=10.1016{\%}2Fj.compmedimag.2019.01.004{\&}partnerID=40{\&}md5=da16d481a2565d81446681597a01b201 }, type = { Journal Article }, title = { Layer-based visualization and biomedical information exploration of multi-channel large histological data }, pages = { 34--46 }, keywords = { Biomedical information,Large histological data,Layer-based data navigation,Texture extraction and mapping,Visualization,Volume of interest }, journal = { Computerized Medical Imaging and Graphics }, issn = { 18790771 }, doi = { 10.1016/j.compmedimag.2019.01.004 }, author = { Zhang and Peters and Fenster }, abstract = { Background and objective: Modern microscopes can acquire multi-channel large histological data from tissues of human beings or animals, which contain rich biomedical information for disease diagnosis and biological feature analysis. However, due to the large size, fuzzy tissue structure, and complicated multiple elements integrated in the image color space, it is still a challenge for current software systems to effectively calculate histological data, show the inner tissue structures and unveil hidden biomedical information. Therefore, we developed new algorithms and a software platform to address this issue. Methods: This paper presents a multi-channel biomedical data computing and visualization system that can efficiently process large 3D histological images acquired from high-resolution microscopes. A novelty of our system is that it can dynamically display a volume of interest and extract tissue information using a layer-based data navigation scheme. 
During the data exploring process, the actual resolution of the loaded data can be dynamically determined and updated, and data rendering is synchronized in four display windows at each data layer, where 2D textures are extracted from the imaging volume and mapped onto the displayed clipping planes in 3D space. Results: To test the efficiency and scalability of this system, we performed extensive evaluations using several different hardware systems and large histological color datasets acquired from a CryoViz 3D digital system. The experimental results demonstrated that our system can deliver interactive data navigation speed and display detailed imaging information in real time, which is beyond the capability of commonly available biomedical data exploration software platforms. Conclusion: Taking advantage of both CPU (central processing unit) main memory and GPU (graphics processing unit) graphics memory, the presented software platform can efficiently compute, process and visualize very large biomedical data and enhance data information. The performance of this system can satisfactorily address the challenges of navigating and interrogating volumetric multi-spectral large histological image at multiple resolution levels. }, } |
2019 | Journal | Ziv Yaniv, Bradley C. Lowekamp, Hans J. Johnson, Richard Beare (2019). Correction to: SimpleITK Image-Analysis Notebooks: a Collaborative Environment for Education and Reproducible Research (Journal of Digital Imaging, (2018), 31, 3, (290-303), 10.1007/s10278-017-0037-8). Journal of Digital Imaging, 32(6), pp. 1118. (link) (bib) x @article{RN810, year = { 2019 }, volume = { 32 }, url = { https://doi.org/10.1007/s10278-018-0165-9 }, type = { Journal Article }, title = { Correction to: SimpleITK Image-Analysis Notebooks: a Collaborative Environment for Education and Reproducible Research (Journal of Digital Imaging, (2018), 31, 3, (290-303), 10.1007/s10278-017-0037-8) }, pages = { 1118 }, number = { 6 }, journal = { Journal of Digital Imaging }, issn = { 1618727X }, doi = { 10.1007/s10278-018-0165-9 }, author = { Yaniv and Lowekamp and Johnson and Beare }, abstract = { This paper had published originally without open access, but has since been republished with open access. }, } |
2019 | Journal | Qi Xing, Parag Chitnis, Siddhartha Sikdar, Jonia Alshiek, S. Abbas Shobeiri, Qi Wei (2019). M3VR—A multi-stage, multi-resolution, and multi-volumes-of-interest volume registration method applied to 3D endovaginal ultrasound. PLoS ONE, 14(11), pp. NA (link) (bib) x @article{RN861, year = { 2019 }, volume = { 14 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85075267830{\&}doi=10.1371{\%}2Fjournal.pone.0224583{\&}partnerID=40{\&}md5=18cd44cc5e14de2956edb1e316ba170a }, type = { Journal Article }, title = { M3VR—A multi-stage, multi-resolution, and multi-volumes-of-interest volume registration method applied to 3D endovaginal ultrasound }, number = { 11 }, journal = { PLoS ONE }, issn = { 19326203 }, doi = { 10.1371/journal.pone.0224583 }, author = { Xing and Chitnis and Sikdar and Alshiek and {Abbas Shobeiri} and Wei }, abstract = { Heterogeneity of echo-texture and lack of sharply delineated tissue boundaries in diagnostic ultrasound images make three-dimensional (3D) registration challenging, especially when the volumes to be registered are considerably different due to local changes. We implemented a novel computational method that optimally registers volumetric ultrasound image data containing significant and local anatomical differences. It is A Multi-stage, Multi-resolution, and Multi-volumes-of-interest Volume Registration Method. A single region registration is optimized first for a close initial alignment to avoid convergence to a locally optimal solution. Multiple sub-volumes of interest can then be selected as target alignment regions to achieve confident consistency across the volume. Finally, a multi-resolution rigid registration is performed on these sub-volumes associated with different weights in the cost function. We applied the method on 3D endovaginal ultrasound image data acquired from patients during biopsy procedure of the pelvic floor muscle. 
Systematic assessment of our proposed method through cross validation demonstrated its accuracy and robustness. The algorithm can also be applied on medical imaging data of other modalities for which the traditional rigid registration methods would fail. }, } |
2019 | Journal | Tianwu Xie, Azadeh Akhavanallaf, Habib Zaidi (2019). Construction of patient-specific computational models for organ dose estimation in radiological imaging. Medical Physics, 46(5), pp. 2403–2411. (link) (bib) x @article{RN849, year = { 2019 }, volume = { 46 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85063261918{\&}doi=10.1002{\%}2Fmp.13471{\&}partnerID=40{\&}md5=7e6f21940c6d8f7bbd4b733b927a0667 }, type = { Journal Article }, title = { Construction of patient-specific computational models for organ dose estimation in radiological imaging }, pages = { 2403--2411 }, number = { 5 }, keywords = { Monte Carlo simulations,computational models,radiation dose,radiological imaging }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1002/mp.13471 }, author = { Xie and Akhavanallaf and Zaidi }, abstract = { Purpose: Diagnostic imaging procedures require optimization depending on the medical task at hand, the apparatus being used, and patient physical and anatomical characteristics. The assessment of the radiation dose and associated risks plays a key role in safety and quality management for radiation protection purposes. In this work, we aim at developing a methodology for personalized organ-level dose assessment in x-ray computed tomography (CT) imaging. Methods: Regional voxel models representing reference patient-specific computational phantoms were generated through image segmentation of CT images for four patients. The best-fitting anthropomorphic phantoms were selected from a previously developed comprehensive phantom library according to patient's anthropometric parameters, then registered to the anatomical masks (skeleton, lung, and body contour) of patients to produce a patient-specific whole-body phantom. 
Well-established image registration metrics including Jaccard's coefficients for each organ, organ mass, body perimeter, organ-surface distance, and effective diameter are compared between the reference patient model, registered model, and anchor phantoms. A previously validated Monte Carlo code is utilized to calculate the absorbed dose in target organs along with the effective dose delivered to patients. The calculated absorbed doses from the reference patient models are then compared with the produced personalized model, anchor phantom, and those reported by commercial dose monitoring systems. Results: The evaluated organ-surface distance and body effective diameter metrics show a mean absolute difference between patient regional voxel models, serving as reference, and patient-specific models around 4.4{\%} and 4.5{\%}, respectively. Organ-level radiation doses of patient-specific models are in good agreement with those of the corresponding patient regional voxel models with a mean absolute difference of 9.1{\%}. The mean absolute difference of organ doses for the best-fitting model extracted from the phantom library and Radimetrics™ commercial dose tracking software are 15.5{\%} and 41.1{\%}, respectively. Conclusion: The results suggest that the proposed methodology improves the accuracy of organ-level dose estimation in CT, especially for extreme cases [high body mass index (BMI) and large skeleton]. Patient-specific radiation dose calculation and risk assessment can be performed using the proposed methodology for both monitoring of cumulative radiation exposure of patients and epidemiological studies. Further validation using a larger database is warranted. }, } |
2019 | Journal | Alberto Traverso, Michal Kazmierski, Zhenwei Shi, Petros Kalendralis, Mattea Welch, Henrik Dahl Nissen, David Jaffray, Andre Dekker, Leonard Wee (2019). Stability of radiomic features of apparent diffusion coefficient (ADC) maps for locally advanced rectal cancer in response to image pre-processing. Physica Medica, 61, pp. 44–51. (link) (bib) x @article{RN813, year = { 2019 }, volume = { 61 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Stability of radiomic features of apparent diffusion coefficient (ADC) maps for locally advanced rectal cancer in response to image pre-processing }, pages = { 44--51 }, keywords = { Apparent diffusion coefficient,Diffusion weighted imaging,Locally advanced rectal carcinoma,Magnetic resonance imaging,Radiomic feature reproducibility }, journal = { Physica Medica }, issn = { 1724191X }, doi = { 10.1016/j.ejmp.2019.04.009 }, author = { Traverso and Kazmierski and Shi and Kalendralis and Welch and Nissen and Jaffray and Dekker and Wee }, abstract = { Quantitative imaging features (radiomics) extracted from apparent diffusion coefficient (ADC) maps of rectal cancer patients can provide additional information to support treatment decision. Most available radiomic computational packages allow extraction of hundreds to thousands of features. However, two major factors can influence the reproducibility of radiomic features: interobserver variability, and imaging filtering applied prior to features extraction. In this exploratory study we seek to determine to what extent various commonly-used features are reproducible with regards to the mentioned factors using ADC maps from two different clinics (56 patients). Features derived from intensity distribution histograms are less sensitive to manual tumour delineation differences, noise in ADC images, pixel size resampling and intensity discretization. Shape features appear to be strongly affected by delineation quality. 
On the whole, textural features appear to be poorly or moderately reproducible with respect to the image pre-processing perturbations we reproduced. }, } |
2019 | Journal | Roman Shkarin, Andrei Shkarin, Svetlana Shkarina, Angelica Cecilia, Roman A. Surmenev, Maria A. Surmeneva, Venera Weinhardt, Tilo Baumbach, Ralf Mikut (2019). Quanfima: An open source Python package for automated fiber analysis of biomaterials. PLoS ONE, 14(4), pp. NA (link) (bib) x @article{RN854, year = { 2019 }, volume = { 14 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85064242516{\&}doi=10.1371{\%}2Fjournal.pone.0215137{\&}partnerID=40{\&}md5=4418e4939d68c3c8368a16476e37b7fb }, type = { Journal Article }, title = { Quanfima: An open source Python package for automated fiber analysis of biomaterials }, number = { 4 }, journal = { PLoS ONE }, issn = { 19326203 }, doi = { 10.1371/journal.pone.0215137 }, author = { Shkarin and Shkarin and Shkarina and Cecilia and Surmenev and Surmeneva and Weinhardt and Baumbach and Mikut }, abstract = { Hybrid 3D scaffolds composed of different biomaterials with fibrous structure or enriched with different inclusions (i.e., nano- and microparticles) have already demonstrated their positive effect on cell integration and regeneration. The analysis of fibers in hybrid biomaterials, especially in a 3D space is often difficult due to their various diameters (from micro to nanoscale) and compositions. Though biomaterials processing workflows are implemented, there are no software tools for fiber analysis that can be easily integrated into such workflows. Due to the demand for reproducible science with Jupyter notebooks and the broad use of the Python programming language, we have developed the new Python package quanfima offering a complete analysis of hybrid biomaterials, that include the determination of fiber orientation, fiber and/or particle diameter and porosity. Here, we evaluate the provided tensor-based approach on a range of generated datasets under various noise conditions. 
Also, we show its application to the X-ray tomography datasets of polycaprolactone fibrous scaffolds pure and containing silicate-substituted hydroxyapatite microparticles, hydrogels enriched with bioglass contained strontium and alpha-tricalcium phosphate microparticles for bone tissue engineering and porous cryogel 3D scaffold for pancreatic cell culturing. The results obtained with the help of the developed package demonstrated high accuracy and performance of orientation, fibers and microparticles diameter and porosity analysis. }, } |
2019 | Journal | Patrick Schelb, Simon Kohl, Jan Philipp Radtke, Manuel Wiesenfarth, Philipp Kickingereder, Sebastian Bickelhaupt, Tristan Anselm Kuder, Albrecht Stenzinger, Markus Hohenfellner, Heinz Peter Schlemmer, Klaus H. Maier-Hein, David Bonekamp (2019). Classification of cancer at prostate MRI: Deep Learning versus Clinical PI-RADS Assessment. Radiology, 293(3), pp. 607–617. (link) (bib) x @article{RN809, year = { 2019 }, volume = { 293 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Classification of cancer at prostate MRI: Deep Learning versus Clinical PI-RADS Assessment }, pages = { 607--617 }, number = { 3 }, journal = { Radiology }, issn = { 15271315 }, doi = { 10.1148/radiol.2019190938 }, author = { Schelb and Kohl and Radtke and Wiesenfarth and Kickingereder and Bickelhaupt and Kuder and Stenzinger and Hohenfellner and Schlemmer and Maier-Hein and Bonekamp }, abstract = { Background: Men suspected of having clinically significant prostate cancer (sPC) increasingly undergo prostate MRI. The potential of deep learning to provide diagnostic support for human interpretation requires further evaluation. Purpose: To compare the performance of clinical assessment to a deep learning system optimized for segmentation trained with T2-weighted and diffusion MRI in the task of detection and segmentation of lesions suspicious for sPC. Materials and Methods: In this retrospective study, T2-weighted and diffusion prostate MRI sequences from consecutive men examined with a single 3.0-T MRI system between 2015 and 2016 were manually segmented. Ground truth was provided by combined targeted and extended systematic MRI-transrectal US fusion biopsy, with sPC defined as International Society of Urological Pathology Gleason grade group greater than or equal to 2. 
By using split-sample validation, U-Net was internally validated on the training set (80{\%} of the data) through cross validation and subsequently externally validated on the test set (20{\%} of the data). U-Net-derived sPC probability maps were calibrated by matching sextant-based cross-validation performance to clinical performance of Prostate Imaging Reporting and Data System (PI-RADS). Performance of PI-RADS and U-Net were compared by using sensitivities, specificities, predictive values, and Dice coefficient. Results: A total of 312 men (median age, 64 years; interquartile range [IQR], 58-71 years) were evaluated. The training set consisted of 250 men (median age, 64 years; IQR, 58-71 years) and the test set of 62 men (median age, 64 years; IQR, 60-69 years). In the test set, PI-RADS cutoffs greater than or equal to 3 versus cutoffs greater than or equal to 4 on a per-patient basis had sensitivity of 96{\%} (25 of 26) versus 88{\%} (23 of 26) at specificity of 22{\%} (eight of 36) versus 50{\%} (18 of 36). U-Net at probability thresholds of greater than or equal to 0.22 versus greater than or equal to 0.33 had sensitivity of 96{\%} (25 of 26) versus 92{\%} (24 of 26) (both P . .99) with specificity of 31{\%} (11 of 36) versus 47{\%} (17 of 36) (both P . .99), not statistically different from PI-RADS. Dice coefficients were 0.89 for prostate and 0.35 for MRI lesion segmentation. In the test set, coincidence of PI-RADS greater than or equal to 4 with U-Net lesions improved the positive predictive value from 48{\%} (28 of 58) to 67{\%} (24 of 36) for U-Net probability thresholds greater than or equal to 0.33 (P = .01), while the negative predictive value remained unchanged (83{\%} [25 of 30] vs 83{\%} [43 of 52]; P . .99). Conclusion: U-Net trained with T2-weighted and diffusion MRI achieves similar performance to clinical Prostate Imaging Reporting and Data System assessment. }, } |
2019 | Journal | Hedieh Sajedi, Nastaran Pardakhti (2019). Age Prediction Based on Brain MRI Image: A Survey. Journal of Medical Systems, 43(8), pp. NA (link) (bib) x @article{RN846, year = { 2019 }, volume = { 43 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85068766678{\&}doi=10.1007{\%}2Fs10916-019-1401-7{\&}partnerID=40{\&}md5=5d6cacfa42ec287f82160fe89c70fe3a }, type = { Journal Article }, title = { Age Prediction Based on Brain MRI Image: A Survey }, pmid = { 31297614 }, number = { 8 }, keywords = { Age prediction,BAE,Brain MRI,Brain age,Chronological age,Deep Learning,Image processing,Machine Learning }, journal = { Journal of Medical Systems }, issn = { 1573689X }, doi = { 10.1007/s10916-019-1401-7 }, author = { Sajedi and Pardakhti }, abstract = { Human age prediction is an interesting and applicable issue in different fields. It can be based on various criteria such as face image, DNA methylation, chest plate radiographs, knee radiographs, dental images and etc. Most of the age prediction researches have mainly been based on images. Since the image processing and Machine Learning (ML) techniques have grown up, the investigations were led to use them in age prediction problem. The implementations would be used in different fields, especially in medical applications. Brain Age Estimation (BAE) has attracted more attention in recent years and it would be so helpful in early diagnosis of some neurodegenerative diseases such as Alzheimer, Parkinson, Huntington, etc. BAE is performed on Magnetic Resonance Imaging (MRI) images to compute the brain ages. Studies based on brain MRI shows that there is a relation between accelerated aging and accelerated brain atrophy. This refers to the effects of neurodegenerative diseases on brain structure while making the whole of it older. 
This paper reviews and summarizes the main approaches for age prediction based on brain MRI images including preprocessing methods, useful tools used in different research works and the estimation algorithms. We categorize the BAE methods based on two factors, first the way of processing MRI images, which includes pixel-based, surface-based, or voxel-based methods and second, the generation of ML algorithms that includes traditional or Deep Learning (DL) methods. The modern techniques as DL methods help MRI based age prediction to get results that are more accurate. In recent years, more precise and statistical ML approaches have been utilized with the help of related tools for simplifying computations and getting accurate results. Pros and cons of each research and the challenges in each work are expressed and some guidelines and deliberations for future research are suggested. }, } |
2019 | Journal | Elisabeth Pfaehler, Alex Zwanenburg, Johan R. de Jong, Ronald Boellaard (2019). RACAT: An open source and easy to use radiomics calculator tool. PLoS ONE, 14(2), pp. NA (link) (bib) x @article{RN856, year = { 2019 }, volume = { 14 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85061901064{\&}doi=10.1371{\%}2Fjournal.pone.0212223{\&}partnerID=40{\&}md5=3929eeb04d5b73fc66f936910e38e854 }, type = { Journal Article }, title = { RACAT: An open source and easy to use radiomics calculator tool }, number = { 2 }, journal = { PLoS ONE }, issn = { 19326203 }, doi = { 10.1371/journal.pone.0212223 }, author = { Pfaehler and Zwanenburg and Jong and Boellaard }, abstract = { Purpose The widely known field ‘Radiomics' aims to provide an extensive image based phenotyping of e.g. tumors using a wide variety of feature values extracted from medical images. Therefore, it is of utmost importance that feature values calculated by different institutes follow the same feature definitions. For this purpose, the imaging biomarker standardization initiative (IBSI) provides detailed mathematical feature descriptions, as well as (mathematical) test phantoms and corresponding reference feature values. We present here an easy to use radiomic feature calculator, RaCaT, which provides the calculation of a large number of radiomic features for all kind of medical images which are in compliance with the standard. Methods The calculator is implemented in C++ and comes as a standalone executable. Therefore, it can be easily integrated in any programming language, but can also be called from the command line. No programming skills are required to use the calculator. The software architecture is highly modularized so that it is easily extendible. The user can also download the source code, adapt it if needed and build the calculator from source. The calculated feature values are compliant with the ones provided by the IBSI standard. 
Source code, example files for the software configuration, and documentation can be found online on GitHub (https://github.com/ellipfaehlerUMCG/RaCat). Results The comparison with the standard values shows that all calculated features as well as image preprocessing steps, comply with the IBSI standard. The performance is also demonstrated on clinical examples. Conclusions The authors successfully implemented an easy to use Radiomics calculator that can be called from any programming language or from the command line. Image preprocessing and feature settings and calculations can be adjusted by the user. }, } |
2019 | Journal | Steffie M.B. Peters, Niels R. van der Werf, Marcel Segbers, Floris H.P. van Velden, Roel Wierts, Koos (J ).A.K. Blokland, Mark W. Konijnenberg, Sergiy V. Lazarenko, Eric P. Visser, Martin Gotthardt (2019). Towards standardization of absolute SPECT/CT quantification: a multi-center and multi-vendor phantom study. EJNMMI Physics, 6(1), pp. 14. (link) (bib) x @article{RN807, year = { 2019 }, volume = { 6 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Towards standardization of absolute SPECT/CT quantification: a multi-center and multi-vendor phantom study }, pages = { 14 }, number = { 1 }, keywords = { SPECT/CT,absolute quantification,performance evaluation,recovery coefficient }, journal = { EJNMMI Physics }, issn = { 21977364 }, doi = { 10.1186/s40658-019-0268-5 }, author = { Peters and Werf and Segbers and Velden and Wierts and Blokland and Konijnenberg and Lazarenko and Visser and Gotthardt }, abstract = { Abstract: Absolute quantification of radiotracer distribution using SPECT/CT imaging is of great importance for dosimetry aimed at personalized radionuclide precision treatment. However, its accuracy depends on many factors. Using phantom measurements, this multi-vendor and multi-center study evaluates the quantitative accuracy and inter-system variability of various SPECT/CT systems as well as the effect of patient size, processing software and reconstruction algorithms on recovery coefficients (RC). Methods: Five SPECT/CT systems were included: Discovery™ NM/CT 670 Pro (GE Healthcare), Precedence™ 6 (Philips Healthcare), Symbia Intevo™, and Symbia™ T16 (twice) (Siemens Healthineers). Three phantoms were used based on the NEMA IEC body phantom without lung insert simulating body mass indexes (BMI) of 25, 28, and 47 kg/m2. Six spheres (0.5–26.5 mL) and background were filled with 0.1 and 0.01 MBq/mL 99mTc-pertechnetate, respectively. 
Volumes of interest (VOI) of spheres were obtained by a region growing technique using a 50{\%} threshold of the maximum voxel value corrected for background activity. RC, defined as imaged activity concentration divided by actual activity concentration, were determined for maximum (RCmax) and mean voxel value (RCmean) in the VOI for each sphere diameter. Inter-system variability was expressed as median absolute deviation (MAD) of RC. Acquisition settings were standardized. Images were reconstructed using vendor-specific 3D iterative reconstruction algorithms with institute-specific settings used in clinical practice and processed using a standardized, in-house developed processing tool based on the SimpleITK framework. Additionally, all data were reconstructed with a vendor-neutral reconstruction algorithm (Hybrid Recon™; Hermes Medical Solutions). Results: RC decreased with decreasing sphere diameter for each system. Inter-system variability (MAD) was 16 and 17{\%} for RCmean and RCmax, respectively. Standardized reconstruction decreased this variability to 4 and 5{\%}. High BMI hampers quantification of small lesions ({\textless} 10 ml). Conclusion: Absolute SPECT quantification in a multi-center and multi-vendor setting is feasible, especially when reconstruction protocols are standardized, paving the way for a standard for absolute quantitative SPECT. }, } |
2019 | Journal | Alex M. Pagnozzi, Jurgen Fripp, Stephen E. Rose (2019). Quantifying deep grey matter atrophy using automated segmentation approaches: A systematic review of structural MRI studies. NeuroImage, 201, pp. 20. (link) (bib) x @article{RN768, year = { 2019 }, volume = { 201 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Quantifying deep grey matter atrophy using automated segmentation approaches: A systematic review of structural MRI studies }, pages = { 20 }, keywords = { Deep grey matter,Magnetic resonance imaging,Segmentation,Subcortical anatomies }, journal = { NeuroImage }, issn = { 10959572 }, doi = { 10.1016/j.neuroimage.2019.116018 }, author = { Pagnozzi and Fripp and Rose }, abstract = { The deep grey matter (DGM) nuclei of the brain play a crucial role in learning, behaviour, cognition, movement and memory. Although automated segmentation strategies can provide insight into the impact of multiple neurological conditions affecting these structures, such as Multiple Sclerosis (MS), Huntington's disease (HD), Alzheimer's disease (AD), Parkinson's disease (PD) and Cerebral Palsy (CP), there are a number of technical challenges limiting an accurate automated segmentation of the DGM. Namely, the insufficient contrast of T1 sequences to completely identify the boundaries of these structures, as well as the presence of iso-intense white matter lesions or extensive tissue loss caused by brain injury. Therefore in this systematic review, 269 eligible studies were analysed and compared to determine the optimal approaches for addressing these technical challenges. The automated approaches used among the reviewed studies fall into three broad categories, atlas-based approaches focusing on the accurate alignment of atlas priors, algorithmic approaches which utilise intensity information to a greater extent, and learning-based approaches that require an annotated training set. 
Studies that utilise freely available software packages such as FIRST, FreeSurfer and LesionTOADS were also eligible, and their performance compared. Overall, deep learning approaches achieved the best overall performance, however these strategies are currently hampered by the lack of large-scale annotated data. Improving model generalisability to new datasets could be achieved in future studies with data augmentation and transfer learning. Multi-atlas approaches provided the second-best performance overall, and may be utilised to construct a “silver standard” annotated training set for deep learning. To address the technical challenges, providing robustness to injury can be improved by using multiple channels, highly elastic diffeomorphic transformations such as LDDMM, and by following atlas-based approaches with an intensity driven refinement of the segmentation, which has been done with the Expectation Maximisation (EM) and level sets methods. Accounting for potential lesions should be achieved with a separate lesion segmentation approach, as in LesionTOADS. Finally, to address the issue of limited contrast, R2*, T2* and QSM sequences could be used to better highlight the DGM due to its higher iron content. Future studies could look to additionally acquire these sequences by retaining the phase information from standard structural scans, or alternatively acquiring these sequences for only a training set, allowing models to learn the “improved” segmentation from T1-sequences alone. }, } |
2019 | Journal | Jan Oliver Neumann, Benito Campos, B. Younes, Martin Jakobs, Andreas Unterberg, Karl Kiening, Alexander Hubert (2019). Evaluation of three automatic brain vessel segmentation methods for stereotactical trajectory planning. Computer Methods and Programs in Biomedicine, 182, pp. 8. (link) (bib) x @article{RN794, year = { 2019 }, volume = { 182 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Evaluation of three automatic brain vessel segmentation methods for stereotactical trajectory planning }, pages = { 8 }, keywords = { Decision support,Stereotaxy,Vessel segmentation }, journal = { Computer Methods and Programs in Biomedicine }, issn = { 18727565 }, doi = { 10.1016/j.cmpb.2019.105037 }, author = { Neumann and Campos and Younes and Jakobs and Unterberg and Kiening and Hubert }, abstract = { Background and objective: Stereotactical procedures require exact trajectory planning to avoid blood vessels in the trajectory path. Innovation in imaging and image recognition techniques have facilitated the automatic detection of blood vessels during the planning process and may improve patient safety in the future. To assess the feasibility of a vessel detection and warning system using currently available imaging and vessel segmentation techniques. Methods: Image data were acquired from post-contrast, isovolumetric T1-weighted sequences (T1CE) and time.-of-flight MR angiography at 3T or 7T from a total of nine subjects. Vessel segmentation by a combination of a vessel-enhancement filter with subsequent level-set segmentation was evaluated using three different methods (Vesselness, FastMarching and LevelSet) in 45 stereotactic trajectories. Segmentation results were compared to a gold-standard of manual segmentation performed jointly by two human experts. 
Results: The LevelSet method performed best with a mean interclass correlation coefficient (ICC) of 0.76 [0.73, 0.81] compared to the FastMarching method with ICC 0.70 [0.67, 0.73] respectively. The Vesselness algorithm achieved clearly inferior overall performance with a mean ICC of 0.56 [0.53, 0.59]. The differences in mean ICC between all segmentation methods were statistically significant (p {\textless} 0.001 with post-hoc p {\textless} 0.026). The LevelSet method performed likewise good in MPRAGE and 3T-TOF images and excellent in 7T-TOF image data. The negative predictive value (NPV) was very high ({\textgreater}97{\%}) for all methods and modalities. Positive predictive values (PPV) were found in the overall range of 65–90{\%} likewise depending on algorithm and modality. This pattern reflects the disposition of all segmentation methods – in case of misclassification - to produce preferentially false-positive than false-negative results. In a clinical setting, two to three potential collision warnings would be given per trajectory on average with a PPV of around 50{\%}. Conclusions: It is feasible to integrate a clinically meaningful vessel detection and collision warning system into stereotactical planning software. Both, T1CE and MRA sequences are suitable as image data for such an application. }, } |
2019 | Journal | Davide Micieli, Triestino Minniti, Giuseppe Gorini (2019). NeuTomPy toolbox, a Python package for tomographic data processing and reconstruction. SoftwareX, 9, pp. 260–264. (link) (bib) x @article{RN817, year = { 2019 }, volume = { 9 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { NeuTomPy toolbox, a Python package for tomographic data processing and reconstruction }, pages = { 260--264 }, keywords = { Neutron imaging,Tomographic reconstruction software,Tomography }, journal = { SoftwareX }, issn = { 23527110 }, doi = { 10.1016/j.softx.2019.01.005 }, author = { Micieli and Minniti and Gorini }, abstract = { In this article we present the NeuTomPy Toolbox, a new Python package for tomographic data processing and reconstruction. The toolbox includes pre-processing algorithms, artifacts removal and a wide range of iterative reconstruction methods as well as the Filtered Back Projection algorithm. The NeuTomPy toolbox was conceived primarily for neutron tomography datasets and developed to support the need of users and researchers to compare state-of-the-art reconstruction methods and choose the optimal data processing workflow for their data. In fact, in several cases sparse-view datasets are acquired to reduce scan time during a neutron tomography experiment. Hence, there is great interest in improving quality of the reconstructed images by means of iterative methods and advanced image-processing algorithms. The toolbox has a modular design, multi-threading capabilities and it supports Windows, Linux and Mac OS operating systems. The NeuTomPy toolbox is open source and it is released under the GNU General Public License v3, encouraging researchers and developers to contribute. In this paper we present an overview of the main toolbox functionalities and finally we show a typical usage example. }, } |
2019 | Journal | Andreas Mang, Amir Gholami, Christos Davatzikos, George Biros (2019). Claire: A distributed-memory solver for constrained large deformation diffeomorphic image registration. SIAM Journal on Scientific Computing, 41(5), pp. C548–C584. (link) (bib) x @article{RN862, year = { 2019 }, volume = { 41 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85074644073{\&}doi=10.1137{\%}2F18M1207818{\&}partnerID=40{\&}md5=e3c350f33ddb25e3d2f751778e14fc3c }, type = { Journal Article }, title = { Claire: A distributed-memory solver for constrained large deformation diffeomorphic image registration }, pages = { C548--C584 }, number = { 5 }, keywords = { Diffeomorphic image registration,Distributed-memory algorithm,KKT preconditioner,LDDMM,Newton-Krylov method,Optimal control,PDE-constrained optimization }, journal = { SIAM Journal on Scientific Computing }, issn = { 10957197 }, eprint = { 1808.04487 }, doi = { 10.1137/18M1207818 }, author = { Mang and Gholami and Davatzikos and Biros }, arxivid = { 1808.04487 }, archiveprefix = { arXiv }, abstract = { With this work we release $\backslash$ttC $\backslash$ttL $\backslash$ttA $\backslash$ttI $\backslash$ttR $\backslash$ttE, a distributed-memory implementation of an effective solver for constrained large deformation diffeomorphic image registration problems in three dimensions. We consider an optimal control formulation. We invert for a stationary velocity field that parameterizes the deformation map. Our solver is based on a globalized, preconditioned, inexact reduced space Gauss-Newton-Krylov scheme. We exploit state-of-the-art techniques in scientific computing to develop an effective solver that scales to thousands of distributed memory nodes on high-end clusters. We present the formulation, discuss algorithmic features, describe the software package, and introduce an improved preconditioner for the reduced space Hessian to speed up the convergence of our solver. 
We test registration performance on synthetic and real data. We demonstrate registration accuracy on several neuroimaging datasets. We compare the performance of our scheme against different flavors of the $\backslash$ttD $\backslash$tte $\backslash$ttm $\backslash$tto $\backslash$ttn $\backslash$tts algorithm for diffeomorphic image registration. We study convergence of our preconditioner and our overall algorithm. We report scalability results on state-of-the-art supercomputing platforms. We demonstrate that we can solve registration problems for clinically relevant data sizes in two to four minutes on a standard compute node with 20 cores, attaining excellent data fidelity. With the present work we achieve a speedup of (on average) 5$\backslash$times with a peak performance of up to 17$\backslash$times compared to our former work. }, } |
2019 | Journal | Yue Lu, Yu Ma, Hui Wang, Yuan Wang (2019). Multi-atlaslabel fusion based on U-Net. Chinese Journal of Liquid Crystals and Displays, 34(11), pp. 1090–1103. (link) (bib) x @article{RN785, year = { 2019 }, volume = { 34 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Multi-atlaslabel fusion based on U-Net }, pages = { 1090--1103 }, number = { 11 }, keywords = { Hippocampus,Label fusion,Multi-atlas,U-Net }, journal = { Chinese Journal of Liquid Crystals and Displays }, issn = { 10072780 }, doi = { 10.3788/YJYXS20193411.1091 }, author = { Lu and Ma and Wang and Wang }, abstract = { In order to effectively improve the accuracy of multi-atlas segmentation algorithm of hippocampus, U-Net convolutional neural network is applied to label fusion of multi-atlas. The algorithm in the atlases selection performs mutual information and gradient similarity calculation, avoids the interference of the surrounding tissue structure on the atlas selection, selects the floating image group which is more suitable for the target map. In the pre-processing stage, extracting the region of interest centered on hippocampus can effectively reduce the size of data. In the registration process, re-sampling is used instead of the coarse registration, which reduces the time, and then uses the diffeomorphic demons algorithm, which has good smoothness, continuity and topological retentiveness. In the label fusion stage, an improved U-Net network based on deep learning theory for multi-atlas MRI hippocampal segmentation algorithm is proposed. The experimental results show that the segmentation accuracy of the improved algorithm is about 5{\%} higher than that of the traditional algorithm, the algorithm time is reduced by about 50{\%}. The improved U-Net network based multi-atlas hippocampal segmentation algorithm has the characteristics of high precision and high efficiency for segmentation of the hippocampus in target image. }, } |
2019 | Journal | Xuesong Lu, Yunfei Zha, Yuchuan Qiao, Defeng Wang (2019). Feature-Based Deformable Registration Using Minimal Spanning Tree for Prostate MR Segmentation. IEEE Access, 7, pp. 138645–138656. (link) (bib) x @article{RN860, year = { 2019 }, volume = { 7 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85077958932{\&}doi=10.1109{\%}2FACCESS.2019.2943485{\&}partnerID=40{\&}md5=daba943919bc91c3fa945bebda516afa }, type = { Journal Article }, title = { Feature-Based Deformable Registration Using Minimal Spanning Tree for Prostate MR Segmentation }, pages = { 138645--138656 }, keywords = { Deformable registration,local self-similarity,minimal spanning tree,patch-based label fusion,prostate segmentation,$\alpha$-mutual information }, journal = { IEEE Access }, issn = { 21693536 }, doi = { 10.1109/ACCESS.2019.2943485 }, author = { Lu and Zha and Qiao and Wang }, abstract = { Automatic and accurate segmentation of the prostate is still a challenging task due to intensity inhomogeneity and complicated deformation of MR images. To tackle these problems with multi-atlas segmentation, in this paper, we propose a new metric for image registration and new descriptor for label fusion. First, to reduce the amount of edges in entropic graph, a modified {\$}\backslashalpha {\$} -mutual information ( {\$}\backslashalpha {\$} -MI) based on fast minimal spanning tree (MST) is implemented for deformable registration. Second, localized {\$}\backslashalpha {\$} -MI allowing for the spatial information is proposed with the stochastic gradient optimization, and the feature space is encoded by a sparse auto-encoder. Finally, a multi-scale descriptor utilizing local self-similarity is integrated into the patch-based label fusion to obtain final segmentation. Experiments were performed on two subsets of totally 46 T2-weighted prostate MR images from 46 patients. 
Compared to $\alpha$-MI based on $k$-nearest neighbor graph, the registration time of $\alpha$-MI based on fast MST can be reduced by almost half. The median Dice overlap of registration using localized $\alpha$-MI on one subset is shown to improve significantly from 0.725 to 0.764 ($p = 1.14 \times 10^{-5}$), compared to using $\alpha$-MI without the spatial information.
2019 | Journal | Karen L\'opez-Linares, Inmaculada Garc\'ia, Ainhoa Garc\'ia, Camilo Cortes, Gemma Piella, Iván Mac\'ia, Jér\^ome Noailly, Miguel A. González Ballester (2019). Image-Based 3D Characterization of Abdominal Aortic Aneurysm Deformation After Endovascular Aneurysm Repair. Frontiers in Bioengineering and Biotechnology, 7, pp. NA (link) (bib) x @article{RN841, year = { 2019 }, volume = { 7 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85075353420{\&}doi=10.3389{\%}2Ffbioe.2019.00267{\&}partnerID=40{\&}md5=a232028ba988f3c1b4c51cbbb0913dc4 }, type = { Journal Article }, title = { Image-Based 3D Characterization of Abdominal Aortic Aneurysm Deformation After Endovascular Aneurysm Repair }, keywords = { abdominal aortic aneurysm,biomarker,biomechanics,computed tomography angiography,deformation,follow-up,prognosis,strain analysis }, journal = { Frontiers in Bioengineering and Biotechnology }, issn = { 22964185 }, doi = { 10.3389/fbioe.2019.00267 }, author = { L{\'{o}}pez-Linares and Garc{\'{i}}a and Garc{\'{i}}a and Cortes and Piella and Mac{\'{i}}a and Noailly and {Gonz{\'{a}}lez Ballester} }, abstract = { An abdominal aortic aneurysm (AAA) is a focal dilation of the abdominal aorta, that if not treated, tends to grow and may rupture. The most common treatment for AAAs is the endovascular aneurysm repair (EVAR), which requires that patients undergo Computed Tomography Angiography (CTA)-based post-operative lifelong surveillance due to the possible appearance of complications. These complications may again lead to AAA dilation and rupture. However, there is a lack of advanced quantitative image-analysis tools to support the clinicians in the follow-up. Currently, the approach is to evaluate AAA diameter changes along time to infer the progress of the patient and the post-operative risk of AAA rupture. 
An increased AAA diameter is usually associated with a higher rupture risk, but there are some small AAAs that rupture, whereas other larger aneurysms remain stable. This means that the diameter-based rupture risk assessment is not suitable for all the cases, and there is increasing evidence that the biomechanical behavior of the AAA may provide additional valuable information regarding the progression of the disease and the risk of rupture. Hence, we propose a promising methodology for post-operative CTA time-series registration and subsequent aneurysm biomechanical strain analysis. From these strains, quantitative image-based descriptors are extracted using a principal component analysis of the tensile and compressive strain fields. Evaluated on 22 patients, our approach yields a mean area under the curve of 88.6{\%} when correlating the strain-based quantitative descriptors with the long-term patient prognosis. This suggests that the strain information directly extracted from the CTA images is able to capture the biomechanical behavior of the aneurysm without relying on finite element modeling and simulation. Furthermore, the extracted descriptors set the basis for possible future imaging biomarkers that may be used in clinical practice. Apart from the diameter, these biomarkers may be used to assess patient prognosis and to enable informed decision making after an EVAR intervention, especially in difficult uncertain cases. }, } |
2019 | Journal | Jon D. Klingensmith, Addison L. Elliott, Amy H. Givan, Zechariah D. Faszold, Cory L. Mahan, Adam M. Doedtman (2019). Development and evaluation of a method for segmentation of cardiac, subcutaneous, and visceral adipose tissue from Dixon magnetic resonance images. Journal of Medical Imaging, 6(01), pp. 1. (link) (bib) x @article{RN818, year = { 2019 }, volume = { 6 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Development and evaluation of a method for segmentation of cardiac, subcutaneous, and visceral adipose tissue from Dixon magnetic resonance images }, pages = { 1 }, number = { 01 }, journal = { Journal of Medical Imaging }, issn = { 2329-4310 }, doi = { 10.1117/1.jmi.6.1.014004 }, author = { Klingensmith and Elliott and Givan and Faszold and Mahan and Doedtman }, abstract = { Magnetic resonance imaging (MRI) has evolved into the gold standard for quantifying excess adiposity, but reliable, efficient use in longitudinal studies requires analysis of large numbers of images. The objective of this study is to develop and evaluate a segmentation method designed to identify cardiac, subcutaneous, and visceral adipose tissue (VAT) in Dixon MRI scans. The proposed method is evaluated using 10 scans from volunteer females 18- to 35-years old, with body mass indexes between 30 and 39.99 kg / m2. Cross-sectional area (CSA) for cardiac adipose tissue (CAT), subcutaneous adipose tissue (SAT), and VAT, is compared to manually-traced results from three observers. Comparisons of CSA are made in 191 images for CAT, 394 images for SAT, and 50 images for VAT. The segmentation correlated well with respect to average observer CSA with Pearson correlation coefficient (R2) values of 0.80 for CAT, 0.99 for SAT, and 0.99 for VAT. The proposed method provides accurate segmentation of CAT, SAT, and VAT and provides an option to support longitudinal studies of obesity intervention. }, } |
2019 | Journal | Tsuyoshi Ito (2019). Effects of different segmentation methods on geometric morphometric data collection from primate skulls. Methods in Ecology and Evolution, 10(11), pp. 1972–1984. (link) (bib) x @article{RN811, year = { 2019 }, volume = { 10 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Effects of different segmentation methods on geometric morphometric data collection from primate skulls }, pages = { 1972--1984 }, number = { 11 }, keywords = { computed tomography,geometric morphometrics,measurement error,repeatability,segmentation }, journal = { Methods in Ecology and Evolution }, issn = { 2041210X }, doi = { 10.1111/2041-210X.13274 }, author = { Ito }, abstract = { An increasing number of studies are analysing the shapes of objects using geometric morphometrics with tomographic data, which are often segmented and transformed to three-dimensional (3D) surface models before measurement. This study aimed to evaluate the effects of different image segmentation methods on geometric morphometric data collection using computed tomography data collected from non-human primate skulls. Three segmentation methods based on a visually selected threshold, a half-maximum height protocol and a gradient and watershed algorithm were compared. For each method, the efficiency of surface reconstruction, the accuracy of landmark placement and the level of variation in shape and size compared with various levels of biological variation were evaluated. The visual-based method inflated the surface in high-density anatomical regions, whereas the half-maximum height protocol resulted in a large number of artificial holes and erosion. However, the gradient-based method mitigated these issues and generated the most efficient surface model. The segmentation method used had a much smaller effect on shape and size variation than interspecific and inter-individual differences. 
However, this effect was statistically significant and not negligible when compared with intra-individual (fluctuating asymmetric) variation. Although the gradient-based method is not widely used in geometric morphometric analyses, it may be one of promising options for reconstructing 3D surfaces. When evaluating small variations, such as fluctuating asymmetry, care should be taken around combining 3D data that were obtained using different segmentation methods. }, } |
2019 | Journal | Pascal Hagenmuller, Frederic Flin, Marie Dumont, François Tuzet, Isabel Peinke, Philippe Lapalus, Anne Dufour, Jacques Roulle, Laurent Pézard, Didier Voisin, Edward Ando, Sabine Rolland Du Roscoat, Pascal Charrier (2019). Motion of dust particles in dry snow under temperature gradient metamorphism. Cryosphere, 13(9), pp. 2345–2359. (link) (bib) x @article{RN812, year = { 2019 }, volume = { 13 }, url = { https://doi.org/10.5194/tc-13-2345-2019 }, type = { Journal Article }, title = { Motion of dust particles in dry snow under temperature gradient metamorphism }, pages = { 2345--2359 }, number = { 9 }, journal = { Cryosphere }, issn = { 19940424 }, doi = { 10.5194/tc-13-2345-2019 }, author = { Hagenmuller and Flin and Dumont and Tuzet and Peinke and Lapalus and Dufour and Roulle and P{\'{e}}zard and Voisin and Ando and {Rolland Du Roscoat} and Charrier }, abstract = { The deposition of light-absorbing particles (LAPs) such as mineral dust and black carbon on snow is responsible for a highly effective climate forcing, through darkening of the snow surface and associated feedbacks. The interplay between post-depositional snow transformation (metamorphism) and the dynamics of LAPs in snow remains largely unknown. We obtained time series of X-ray tomography images of dust-contaminated samples undergoing dry snow metamorphism at around −2 °C. They provide the first observational evidence that temperature gradient metamorphism induces dust particle motion in snow, while no movement is observed under isothermal conditions. Under temperature gradient metamorphism, dust particles can enter the ice matrix due to sublimation-condensation processes and spread down mainly by falling into the pore space. Overall, such motions might reduce the radiative impact of dust in snow, in particular in arctic regions where temperature gradient metamorphism prevails. }, } |
2019 | Journal | Amanda Farias Gomes, Danieli Moura Brasil, Amaro Ilídio Vespasiano Silva, Deborah Queiroz Freitas, Francisco Haiter-Neto, Francisco Carlos Groppo (2019). Accuracy of ITK-SNAP software for 3D analysis of a non-regular topography structure. Oral Radiology, NA pp. NA (link) (bib) x @article{RN957, year = { 2019 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85068856165{\&}doi=10.1007{\%}2Fs11282-019-00397-y{\&}partnerID=40{\&}md5=951d995522aefad42b00463ceb33e155 }, type = { Journal Article }, title = { Accuracy of ITK-SNAP software for 3D analysis of a non-regular topography structure }, keywords = { Cone beam computed tomography,Cross-sectional anatomy,Software,Three-dimensional imaging }, journal = { Oral Radiology }, issn = { 16139674 }, doi = { 10.1007/s11282-019-00397-y }, author = { Gomes and Brasil and Silva and Freitas and Haiter-Neto and Groppo }, abstract = { Objectives: To evaluate the accuracy of ITK-SNAP software for measuring volumes of a non-regular shape structure, using cone beam computed tomography (CBCT) scans, besides for developing a mathematical model to correct the software measurement error in case it existed. Methods: A phantom made by moulding a rubber duck's head was filled with total (38,000 mm3) and partial volumes of water (7000 mm3, 14,000 mm3, 21,000 mm3, 28,000 mm3 and 35,000 mm3), which constituted the gold standards. The sound phantom and the phantom filled with different volumes of water were scanned in a Picasso Trio CBCT unit set at 80 kVp, 3.7 mA, 0.2 mm3 voxel and 12 × 8.5 cm field of view. Semi-automatic segmentation was performed with ITK-SNAP 3.0 software by two trained oral radiologists. Linear regression analyzed the relation between ITK-SNAP calculated volumes and the gold standard. Intraclass correlation coefficient was applied to analyze the reproducibility of the method. Significance level was set at 5{\%}. 
Results: Linear regression analysis showed a significant relationship between ITK-SNAP volumes and the gold standard (F = 22,537.3, p {\textless} 0.0001), with an R2 of 0.9993. The average error found was 4.7 (± 4.3) {\%}. To minimize this error, a mathematical model was developed and provided a reduction of it. ICC revealed excellent intra-examiner agreements for both examiners 1 (ICC = 0.9991, p {\textless} 0.0001) and 2 (ICC = 0.9989, p {\textless} 0.0001). Likewise, inter-examiner agreement was excellent (ICC = 0.9991, p {\textless} 0.0001). Conclusion: The software showed to be accurate for evaluating non-regular shape structures. The mathematical model developed reduced an already small error on the software's measurements. }, } |
2019 | Journal | S. Giordanengo, A. Vignati, A. Attili, M. Ciocca, M. Donetti, F. Fausti, L. Manganaro, F. M. Milian, S. Molinelli, V. Monaco, G. Russo, R. Sacchi, M. Varasteh Anvar, R. Cirio (2019). RIDOS: A new system for online computation of the delivered dose distributions in scanning ion beam therapy. Physica Medica, 60, pp. 139–149. (link) (bib) x @article{RN786, year = { 2019 }, volume = { 60 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { RIDOS: A new system for online computation of the delivered dose distributions in scanning ion beam therapy }, pages = { 139--149 }, keywords = { Dose delivery,GPU,Online dose computation,Pencil beam scanning }, journal = { Physica Medica }, issn = { 1724191X }, doi = { 10.1016/j.ejmp.2019.03.029 }, author = { Giordanengo and Vignati and Attili and Ciocca and Donetti and Fausti and Manganaro and Milian and Molinelli and Monaco and Russo and Sacchi and {Varasteh Anvar} and Cirio }, abstract = { Purpose: To describe a new system for scanned ion beam therapy, named RIDOS (Real-time Ion DOse planning and delivery System), which performs real time delivered dose verification integrating the information from a clinical beam monitoring system with a Graphic Processing Unit (GPU) based dose calculation in patient Computed Tomography. Methods: A benchmarked dose computation algorithm for scanned ion beams has been parallelized and adapted to run on a GPU architecture. A workstation equipped with a NVIDIA GPU has been interfaced through a National Instruments PXI-crate with the dose delivery system of the Italian National Center of Oncological Hadrontherapy (CNAO) to receive in real-time the measured beam parameters. Data from a patient monitoring system are also collected to associate the respiratory phases with each spot during the delivery of the dose. 
Using both measured and planned spot properties, RIDOS evaluates during the few seconds of inter-spill time the cumulative delivered and prescribed dose distributions and compares them through a fast $\gamma$-index algorithm. Results: The accuracy of the GPU-based algorithms was assessed against the CPU-based ones and the differences were found below 1‰. The cumulative planned and delivered doses are computed at the end of each spill in about 300 ms, while the dose comparison takes approximatively 400 ms. The whole operation provides the results before the next spill starts. Conclusions: RIDOS system is able to provide a fast computation of the delivered dose in the inter-spill time of the CNAO facility and allows to monitor online the dose deposition accuracy all along the treatment. }, } |
2019 | Journal | V. Giannini, S. Mazzetti, I. Bertotto, C. Chiarenza, S. Cauda, E. Delmastro, C. Bracco, A. Di Dia, F. Leone, E. Medico, A. Pisacane, D. Ribero, M. Stasi, D. Regge (2019). Predicting locally advanced rectal cancer response to neoadjuvant therapy with 18 F-FDG PET and MRI radiomics features. European Journal of Nuclear Medicine and Molecular Imaging, 46(4), pp. 878–888. (link) (bib) x @article{RN852, year = { 2019 }, volume = { 46 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85060031543{\&}doi=10.1007{\%}2Fs00259-018-4250-6{\&}partnerID=40{\&}md5=4bf80d097b9dd71dfebe905f5b3b38d8 }, type = { Journal Article }, title = { Predicting locally advanced rectal cancer response to neoadjuvant therapy with 18 F-FDG PET and MRI radiomics features }, pages = { 878--888 }, number = { 4 }, keywords = { 18 F-FDG PET/CT imaging,Locally advanced rectal cancer,Magnetic resonance imaging,Prediction of treatment response,Radiomics,Texture features }, journal = { European Journal of Nuclear Medicine and Molecular Imaging }, issn = { 16197089 }, doi = { 10.1007/s00259-018-4250-6 }, author = { Giannini and Mazzetti and Bertotto and Chiarenza and Cauda and Delmastro and Bracco and {Di Dia} and Leone and Medico and Pisacane and Ribero and Stasi and Regge }, abstract = { Purpose: Pathological complete response (pCR) following neoadjuvant chemoradiotherapy or radiotherapy in locally advanced rectal cancer (LARC) is reached in approximately 15–30{\%} of cases, therefore it would be useful to assess if pretreatment of 18 F-FDG PET/CT and/or MRI texture features can reliably predict response to neoadjuvant therapy in LARC. Methods: Fifty-two patients were dichotomized as responder (pR+) or non-responder (pR-) according to their pathological tumor regression grade (TRG) as follows: 22 as pR+ (nine with TRG = 1, 13 with TRG = 2) and 30 as pR- (16 with TRG = 3, 13 with TRG = 4 and 1 with TRG = 5). 
First-order parameters and 21 second-order texture parameters derived from the Gray-Level Co-Occurrence matrix were extracted from semi-automatically segmented tumors on T2w MRI, ADC maps, and PET/CT acquisitions. The role of each texture feature in predicting pR+ was assessed with monoparametric and multiparametric models. Results: In the mono-parametric approach, PET homogeneity reached the maximum AUC (0.77; sensitivity = 72.7{\%} and specificity = 76.7{\%}), while PET glycolytic volume and ADC dissimilarity reached the highest sensitivity (both 90.9{\%}). In the multiparametric analysis, a logistic regression model containing six second-order texture features (five from PET and one from T2w MRI) yields the highest predictivity in distinguish between pR+ and pR- patients (AUC = 0.86; sensitivity = 86{\%}, and specificity = 83{\%} at the Youden index). Conclusions: If preliminary results of this study are confirmed, pretreatment PET and MRI could be useful to personalize patient treatment, e.g., avoiding toxicity of neoadjuvant therapy in patients predicted pR-. }, } |
2019 | Journal | Maximilian N. Diefenbach, Jakob Meineke, Stefan Ruschke, Thomas Baum, Alexandra Gersing, Dimitrios C. Karampinos (2019). On the sensitivity of quantitative susceptibility mapping for measuring trabecular bone density. Magnetic Resonance in Medicine, 81(3), pp. 1739–1754. (link) (bib) x @article{RN814, year = { 2019 }, volume = { 81 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { On the sensitivity of quantitative susceptibility mapping for measuring trabecular bone density }, pages = { 1739--1754 }, number = { 3 }, keywords = { susceptibility mapping,trabecular bone density }, journal = { Magnetic Resonance in Medicine }, issn = { 15222594 }, doi = { 10.1002/mrm.27531 }, author = { Diefenbach and Meineke and Ruschke and Baum and Gersing and Karampinos }, abstract = { Purpose: To develop a methodological framework to simultaneously measure R2* and magnetic susceptibility in trabecularized yellow bone marrow and to investigate the sensitivity of Quantitative Susceptibility Mapping (QSM) for measuring trabecular bone density using a non-UTE multi-gradient echo sequence. Methods: The ankle of 16 healthy volunteers and two patients was scanned using a time-interleaved multi-gradient-echo (TIMGRE) sequence. After field mapping based on water–fat separation methods and background field removal based on the Laplacian boundary value method, three different QSM dipole inversion schemes were implemented. Mean susceptibility values in regions of different trabecular bone density in the calcaneus were compared to the corresponding values in the R2* maps, bone volume to total volume ratios (BV/TV) estimated from high resolution imaging (in 14 subjects), and CT attenuation (in two subjects). In addition, numerical simulations were performed in a simplified trabecular bone model of randomly positioned spherical bone inclusions to verify and compare the scaling of R2* and susceptibility with BV/TV. 
Results: Differences in calcaneus trabecularization were well depicted in susceptibility maps, in good agreement with high-resolution MR and CT images. Simulations and in vivo scans showed a linear relationship of measured susceptibility with BV/TV and R2*. The ankle in vivo results showed a strong linear correlation between susceptibility and R2* (R2 = 0.88, p {\textless} 0.001) with a slope and intercept of −0.004 and 0.2 ppm, respectively. Conclusions: A method for multi-parametric mapping, including R2* -mapping and QSM was developed for measuring trabecularized yellow bone marrow, showing good sensitivity of QSM for measuring trabecular bone density. }, } |
2019 | Journal | Shusil Dangi, Cristian A. Linte, Ziv Yaniv (2019). A distance map regularized CNN for cardiac cine MR image segmentation. Medical Physics, 46(12), pp. 5637–5651. (link) (bib) x @article{RN833, year = { 2019 }, volume = { 46 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85074595228{\&}doi=10.1002{\%}2Fmp.13853{\&}partnerID=40{\&}md5=e49d5b643280b5d657eba92082261710 }, type = { Journal Article }, title = { A distance map regularized CNN for cardiac cine MR image segmentation }, pages = { 5637--5651 }, number = { 12 }, keywords = { cardiac segmentation,convolutional neural network,magnetic resonance imaging,multi-task learning,regularization,task uncertainty weighting }, journal = { Medical Physics }, issn = { 00942405 }, eprint = { 1901.01238 }, doi = { 10.1002/mp.13853 }, author = { Dangi and Linte and Yaniv }, arxivid = { 1901.01238 }, archiveprefix = { arXiv }, abstract = { Purpose: Cardiac image segmentation is a critical process for generating personalized models of the heart and for quantifying cardiac performance parameters. Fully automatic segmentation of the left ventricle (LV), the right ventricle (RV), and the myocardium from cardiac cine MR images is challenging due to variability of the normal and abnormal anatomy, as well as the imaging protocols. This study proposes a multi-task learning (MTL)-based regularization of a convolutional neural network (CNN) to obtain accurate segmenation of the cardiac structures from cine MR images. Methods: We train a CNN network to perform the main task of semantic segmentation, along with the simultaneous, auxiliary task of pixel-wise distance map regression. The network also predicts uncertainties associated with both tasks, such that their losses are weighted by the inverse of their corresponding uncertainties. As a result, during training, the task featuring a higher uncertainty is weighted less and vice versa. 
The proposed distance map regularizer is a decoder network added to the bottleneck layer of an existing CNN architecture, facilitating the network to learn robust global features. The regularizer block is removed after training, so that the original number of network parameters does not change. The trained network outputs per-pixel segmentation when a new patient cine MR image is provided as an input. Results: We show that the proposed regularization method improves both binary and multi-class segmentation performance over the corresponding state-of-the-art CNN architectures. The evaluation was conducted on two publicly available cardiac cine MRI datasets, yielding average Dice coefficients of 0.84 ± 0.03 and 0.91 ± 0.04. We also demonstrate improved generalization performance of the distance map regularized network on cross-dataset segmentation, showing as much as 42{\%} improvement in myocardium Dice coefficient from 0.56 ± 0.28 to 0.80 ± 0.14. Conclusions: We have presented a method for accurate segmentation of cardiac structures from cine MR images. Our experiments verify that the proposed method exceeds the segmentation performance of three existing state-of-the-art methods. Furthermore, several cardiac indices that often serve as diagnostic biomarkers, specifically blood pool volume, myocardial mass, and ejection fraction, computed using our method are better correlated with the indices computed from the reference, ground truth segmentation. Hence, the proposed method has the potential to become a non-invasive screening and diagnostic tool for the clinical assessment of various cardiac conditions, as well as a reliable aid for generating patient specific models of the cardiac anatomy for therapy planning, simulation, and guidance. }, } |
2019 | Journal | Oscar Cuadros Linares, Jonas Bianchi, Dirceu Raveli, João Batista Neto, Bernd Hamann (2019). Mandible and skull segmentation in cone beam computed tomography using super-voxels and graph clustering. Visual Computer, 35(10), pp. 1461–1474. (link) (bib) x @article{RN843, year = { 2019 }, volume = { 35 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045951942{\&}doi=10.1007{\%}2Fs00371-018-1511-0{\&}partnerID=40{\&}md5=a294525f068f2ad564fa94a0fcd267bf }, type = { Journal Article }, title = { Mandible and skull segmentation in cone beam computed tomography using super-voxels and graph clustering }, pages = { 1461--1474 }, number = { 10 }, keywords = { Bone segmentation,Cone beam computed tomography,Graph clustering,Mandible,Skull,Super-voxels }, journal = { Visual Computer }, issn = { 01782789 }, doi = { 10.1007/s00371-018-1511-0 }, author = { {Cuadros Linares} and Bianchi and Raveli and {Batista Neto} and Hamann }, abstract = { Cone beam computed tomography (CBCT) is a medical imaging technique employed for diagnosis and treatment of patients with cranio-maxillofacial deformities. CBCT 3D reconstruction and segmentation of bones such as mandible or maxilla are essential procedures in surgical and orthodontic treatments. However, CBCT image processing may be impaired by features such as low contrast, inhomogeneity, noise and artifacts. Besides, values assigned to voxels are relative Hounsfield units unlike traditional computed tomography (CT). Such drawbacks render CBCT segmentation a difficult and time-consuming task, usually performed manually with tools designed for medical image processing. We present an interactive two-stage method for the segmentation of CBCT: (i) we first perform an automatic segmentation of bone structures with super-voxels, allowing a compact graph representation of the 3D data; (ii) next, a user-placed seed process guides a graph partitioning algorithm, splitting the extracted bones into mandible and skull. 
We have evaluated our segmentation method in three different scenarios and compared the results with ground truth data of the mandible and the skull. Results show that our method produces accurate segmentation and is robust to changes in parameters. We also compared our method with two similar segmentation strategy and showed that it produces more accurate segmentation. Finally, we evaluated our method for CT data of patients with deformed or missing bones and the segmentation was accurate for all data. The segmentation of a typical CBCT takes in average 5 min, which is faster than most techniques currently available. }, } |
2019 | Journal | Roberto Cassetta, Pierluigi Piersimoni, Marco Riboldi, Valentina Giacometti, Vladmir Bashkirov, Guido Baroni, Caesar Ordonez, George Coutrakon, Reinhard Schulte (2019). Accuracy of low-dose proton CT image registration for pretreatment alignment verification in reference to planning proton CT. Journal of Applied Clinical Medical Physics, 20(4), pp. 83–90. (link) (bib) x @article{RN850, year = { 2019 }, volume = { 20 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85063859861{\&}doi=10.1002{\%}2Facm2.12565{\&}partnerID=40{\&}md5=cf8e867163571978728a5b92c8fc3fab }, type = { Journal Article }, title = { Accuracy of low-dose proton CT image registration for pretreatment alignment verification in reference to planning proton CT }, pages = { 83--90 }, number = { 4 }, keywords = { deformable image registration,image reconstruction,proton CT,rigid image registration }, journal = { Journal of Applied Clinical Medical Physics }, issn = { 15269914 }, doi = { 10.1002/acm2.12565 }, author = { Cassetta and Piersimoni and Riboldi and Giacometti and Bashkirov and Baroni and Ordonez and Coutrakon and Schulte }, abstract = { Purpose: Proton CT (pCT) has the ability to reduce inherent uncertainties in proton treatment by directly measuring the relative proton stopping power with respect to water, thereby avoiding the uncertain conversion of X-ray CT Hounsfield unit to relative stopping power and the deleterious effect of X- ray CT artifacts. The purpose of this work was to further evaluate the potential of pCT for pretreatment positioning using experimental pCT data of a head phantom. Methods: The performance of a 3D image registration algorithm was tested with pCT reconstructions of a pediatric head phantom. 
A planning pCT simulation scan of the phantom was obtained with 200 MeV protons and reconstructed with a 3D filtered back projection (FBP) algorithm followed by iterative reconstruction and a representative pretreatment pCT scan was reconstructed with FBP only to save reconstruction time. The pretreatment pCT scan was rigidly transformed by prescribing random errors with six degrees of freedom or deformed by the deformation field derived from a head and neck cancer patient to the pretreatment pCT reconstruction, respectively. After applying the rigid or deformable image registration algorithm to retrieve the original pCT image before transformation, the accuracy of the registration was assessed. To simulate very low-dose imaging for patient setup, the proton CT images were reconstructed with 100{\%}, 50{\%}, 25{\%}, and 12.5{\%} of the total number of histories of the original planning pCT simulation scan, respectively. Results: The residual errors in image registration were lower than 1 mm and 1° of magnitude regardless of the anatomic directions and imaging dose. The mean residual errors ranges found for rigid image registration were from −0.29 ± 0.09 to 0.51 ± 0.50 mm for translations and from −0.05 ± 0.13 to 0.08 ± 0.08 degrees for rotations. The percentages of sub-millimetric errors found, for deformable image registration, were between 63.5{\%} and 100{\%}. Conclusion: This experimental head phantom study demonstrated the potential of low-dose pCT imaging for 3D image registration. Further work is needed to confirm the value pCT for pretreatment image-guided proton therapy. }, } |
2019 | Journal | Tobias Buchacker, Christian Mühlfeld, Christoph Wrede, Willi L. Wagner, Richard Beare, Matt McCormick, Roman Grothausmann (2019). Assessment of the Alveolar Capillary Network in the Postnatal Mouse Lung in 3D Using Serial Block-Face Scanning Electron Microscopy. Frontiers in Physiology, 10, pp. NA (link) (bib) x @article{RN840, year = { 2019 }, volume = { 10 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85076709753{\&}doi=10.3389{\%}2Ffphys.2019.01357{\&}partnerID=40{\&}md5=06fa9637c496f5d61d2f175943a787c0 }, type = { Journal Article }, title = { Assessment of the Alveolar Capillary Network in the Postnatal Mouse Lung in 3D Using Serial Block-Face Scanning Electron Microscopy }, keywords = { 3D reconstruction,capillary network,lung,segmentation,serial block-face scanning electron microscopy }, journal = { Frontiers in Physiology }, issn = { 1664042X }, doi = { 10.3389/fphys.2019.01357 }, author = { Buchacker and M{\"{u}}hlfeld and Wrede and Wagner and Beare and McCormick and Grothausmann }, abstract = { The alveolar capillary network (ACN) has a large surface area that provides the basis for an optimized gas exchange in the lung. It needs to adapt to morphological changes during early lung development and alveolarization. Structural alterations of the pulmonary vasculature can lead to pathological functional conditions such as in bronchopulmonary dysplasia and various other lung diseases. To understand the development of the ACN and its impact on the pathogenesis of lung diseases, methods are needed that enable comparative analyses of the complex three-dimensional structure of the ACN at different developmental stages and under pathological conditions. In this study a newborn mouse lung was imaged with serial block-face scanning electron microscopy (SBF-SEM) to investigate the ACN and its surrounding structures before the alveolarization process begins. 
Most parts but not all of the examined ACN contain two layers of capillaries, which were repeatedly connected with each other. A path from an arteriole to a venule was extracted and straightened to allow cross-sectional visualization of the data along the path within a plane. This allows a qualitative characterization of the structures that erythrocytes pass on their way through the ACN. One way to define regions of the ACN supplied by specific arterioles is presented and used for analyses. Pillars, possibly intussusceptive, were found in the vasculature but no specific pattern was observed in regard to parts of the saccular septa. This study provides 3D information with a resolution of about 150 nm on the microscopic structure of a newborn mouse lung and outlines some of the potentials and challenges of SBF-SEM for 3D analyses of the ACN. }, } |
2019 | Journal | Luisa Sánchez Brea, Danilo Andrade De Jesus, Muhammad Faizan Shirazi, Michael Pircher, Theo van Walsum, Stefan Klein (2019). Review on retrospective procedures to correct retinal motion artefacts in OCT imaging. Applied Sciences (Switzerland), 9(13), pp. NA (link) (bib) x @article{RN848, year = { 2019 }, volume = { 9 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85068857722{\&}doi=10.3390{\%}2Fapp9132700{\&}partnerID=40{\&}md5=609d603321731686ea1ae522250c7c6f }, type = { Journal Article }, title = { Review on retrospective procedures to correct retinal motion artefacts in OCT imaging }, number = { 13 }, keywords = { Image registration,Optical coherence tomography,Retinal motion artefacts }, journal = { Applied Sciences (Switzerland) }, issn = { 20763417 }, doi = { 10.3390/app9132700 }, author = { Brea and {De Jesus} and Shirazi and Pircher and Walsum and Klein }, abstract = { Motion artefacts from involuntary changes in eye fixation remain a major imaging issue in optical coherence tomography (OCT). This paper reviews the state-of-the-art of retrospective procedures to correct retinal motion and axial eye motion artefacts in OCT imaging. Following an overview of motion induced artefacts and correction strategies, a chronological survey of retrospective approaches since the introduction of OCT until the current days is presented. Pre-processing, registration, and validation techniques are described. The review finishes by discussing the limitations of the current techniques and the challenges to be tackled in future developments. }, } |
2019 | Journal | Wafa Boukellouz, Abdelouahab Moussaoui, Abdelmalik Taleb-Ahmed, Christine Boydev (2019). Multiatlas Fusion with a Hybrid CT Number Correction Technique for Subject-Specific Pseudo-CT Estimation in the Context of MRI-Only Radiation Therapy. Journal of Medical Imaging and Radiation Sciences, 50(3), pp. 425–440. (link) (bib) x @article{RN845, year = { 2019 }, volume = { 50 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85065894779{\&}doi=10.1016{\%}2Fj.jmir.2019.03.184{\&}partnerID=40{\&}md5=710b20f7941f0fb981b12e9164a50e6a }, type = { Journal Article }, title = { Multiatlas Fusion with a Hybrid CT Number Correction Technique for Subject-Specific Pseudo-CT Estimation in the Context of MRI-Only Radiation Therapy }, pages = { 425--440 }, number = { 3 }, keywords = { MR-only radiotherapy,Pseudo-CT,brain,hybrid CT number correction,multiatlas fusion }, journal = { Journal of Medical Imaging and Radiation Sciences }, issn = { 18767982 }, doi = { 10.1016/j.jmir.2019.03.184 }, author = { Boukellouz and Moussaoui and Taleb-Ahmed and Boydev }, abstract = { Objective: To propose a hybrid multiatlas fusion and correction approach to estimate a pseudo–computed tomography (pCT) image from T2-weighted brain magnetic resonance (MR) images in the context of MRI-only radiotherapy. Materials and Methods: A set of eleven pairs of T2-weighted MR and CT brain images was included. Using leave-one-out cross-validation, atlas MR images were registered to the target MRI with multimetric, multiresolution deformable registration. The subsequent deformations were applied to the atlas CT images, producing uncorrected pCT images. Afterward, a three-dimensional hybrid CT number correction technique was used. This technique uses information about MR intensity, spatial location, and tissue label from segmented MR images with the fuzzy c-means algorithm and combines them in a weighted fashion to correct Hounsfield unit values of the uncorrected pCT images. 
The corrected pCT images were then fused into a final pCT image. Results: The proposed hybrid approach proved to be performant in correcting Hounsfield unit values in terms of qualitative and quantitative measures. Average correlation was 0.92 and 0.91 for the proposed approach by taking the mean and the median, respectively, compared with 0.86 for the uncorrected unfused version. Average values of dice similarity coefficient for bone were 0.68 and 0.72 for the fused corrected pCT images by taking the mean and the median, respectively, compared with 0.65 for the uncorrected unfused version indicating a significant bone estimation improvement. Conclusion: A hybrid fusion and correction method is presented to estimate a pCT image from T2-weighted brain MR images. }, } |
2019 | Journal | Yehuda K. Ben-Zikri, Ziv R. Yaniv, Karl Baum, Cristian A. Linte (2019). A marker-free registration method for standing X-ray panorama reconstruction for hip-knee-ankle axis deformity assessment. Computer Methods in Biomechanics and Biomedical Engineering: Imaging and Visualization, 7(4), pp. 464–478. (link) (bib) x @article{RN816, year = { 2019 }, volume = { 7 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A marker-free registration method for standing X-ray panorama reconstruction for hip-knee-ankle axis deformity assessment }, pages = { 464--478 }, number = { 4 }, keywords = { Long-limb X-ray,axial deformity at the knee,hip-knee-ankle angle,image registration,panorama reconstruction,segmentation }, journal = { Computer Methods in Biomechanics and Biomedical Engineering: Imaging and Visualization }, issn = { 21681171 }, doi = { 10.1080/21681163.2018.1537859 }, author = { Ben-Zikri and Yaniv and Baum and Linte }, abstract = { Accurate measurement of knee alignment, quantified by the hip-knee-ankle (HKA) angle (varus-valgus), serves as an essential biomarker in the diagnosis of various orthopaedic conditions and selection of appropriate therapies. Such angular deformities are assessed from standing X-ray panoramas. However, the limited field-of-view of traditional X-ray imaging systems necessitates the acquisition of several sector images to capture an individual's standing posture, and their subsequent ‘stitching' to reconstruct a panoramic image. Such panoramas are typically constructed manually by an X-ray imaging technician, often using various external markers attached to the individual's clothing and visible in two adjacent sector images. 
To eliminate human error, user-induced variability, improve consistency and reproducibility, and reduce the time associated with the traditional manual ‘stitching' protocol, here we propose an automatic panorama construction method that only relies on anatomical features reliably detected in the images, eliminating the need for any external markers or manual input from the technician. The method first performs a rough segmentation of the femur and the tibia, then the sector images are registered by evaluating a distance metric between the corresponding bones along their medial edge. The identified translations are then used to generate the standing panorama image. The method was evaluated on 95 patient image datasets from a database of X-ray images acquired across 10 clinical sites as part of the screening process for a multi-site clinical trial. The panorama reconstruction parameters yielded by the proposed method were compared to those used for the manual panorama construction, which served as gold-standard. The horizontal translation differences were 0.43 ± 1.95 mm and 0.26 ± 1.43 mm for the femur and tibia respectively, while the vertical translation differences were 3.76 ± 22.35 mm and 1.85 ± 6.79 mm for the femur and tibia, respectively. Our results showed no statistically significant differences between the HKA angles measured using the automated vs. the manually generated panoramas, and also led to similar decisions with regards to the patient inclusion/exclusion in the clinical trial. Thus, the proposed method was shown to provide comparable performance to manual panorama construction, with increased efficiency, consistency and robustness. }, } |
2019 | Journal | Bhim M. Adhikari, Neda Jahanshad, Dinesh Shukla, Jessica Turner, Dominik Grotegerd, Udo Dannlowski, Harald Kugel, Jennifer Engelen, Bruno Dietsche, Axel Krug, Tilo Kircher, Els Fieremans, Jelle Veraart, Dmitry S. Novikov, Premika S.W. Boedhoe, Ysbrand D. van der Werf, Odile A. van den Heuvel, Jonathan Ipser, Anne Uhlmann, Dan J. Stein, Erin Dickie, Aristotle N. Voineskos, Anil K. Malhotra, Fabrizio Pizzagalli, Vince D. Calhoun, Lea Waller, Ilja M. Veer, Hernik Walter, Robert W. Buchanan, David C. Glahn, L. Elliot Hong, Paul M. Thompson, Peter Kochunov (2019). A resting state fMRI analysis pipeline for pooling inference across diverse cohorts: an ENIGMA rs-fMRI protocol. Brain Imaging and Behavior, 13(5), pp. 1453–1467. (link) (bib) x @article{RN826, year = { 2019 }, volume = { 13 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A resting state fMRI analysis pipeline for pooling inference across diverse cohorts: an ENIGMA rs-fMRI protocol }, pages = { 1453--1467 }, number = { 5 }, keywords = { ENIGMA EPI template,Large multi-site studies,Processing pipelines }, journal = { Brain Imaging and Behavior }, issn = { 19317565 }, doi = { 10.1007/s11682-018-9941-x }, author = { Adhikari and Jahanshad and Shukla and Turner and Grotegerd and Dannlowski and Kugel and Engelen and Dietsche and Krug and Kircher and Fieremans and Veraart and Novikov and Boedhoe and Werf and Heuvel and Ipser and Uhlmann and Stein and Dickie and Voineskos and Malhotra and Pizzagalli and Calhoun and Waller and Veer and Walter and Buchanan and Glahn and Hong and Thompson and Kochunov }, abstract = { Large-scale consortium efforts such as Enhancing NeuroImaging Genetics through Meta-Analysis (ENIGMA) and other collaborative efforts show that combining statistical data from multiple independent studies can boost statistical power and achieve more accurate estimates of effect sizes, contributing to more reliable and reproducible research. 
A meta- analysis would pool effects from studies conducted in a similar manner, yet to date, no such harmonized protocol exists for resting state fMRI (rsfMRI) data. Here, we propose an initial pipeline for multi-site rsfMRI analysis to allow research groups around the world to analyze scans in a harmonized way, and to perform coordinated statistical tests. The challenge lies in the fact that resting state fMRI measurements collected by researchers over the last decade vary widely, with variable quality and differing spatial or temporal signal-to-noise ratio (tSNR). An effective harmonization must provide optimal measures for all quality data. Here we used rsfMRI data from twenty-two independent studies with approximately fifty corresponding T1-weighted and rsfMRI datasets each, to (A) review and aggregate the state of existing rsfMRI data, (B) demonstrate utility of principal component analysis (PCA)-based denoising and (C) develop a deformable ENIGMA EPI template based on the representative anatomy that incorporates spatial distortion patterns from various protocols and populations. }, } |
2019 | In Collection | Daniel Tward, Xu Li, Bingxing Huo, Brian Lee, Partha Mitra, Michael Miller (2019). 3D Mapping of Serial Histology Sections with Anomalies Using a Novel Robust Deformable Registration Algorithm. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 162–173. (link) (bib) x @incollection{Tward2019, year = { 2019 }, volume = { 11846 LNCS }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85075564799{\&}doi=10.1007{\%}2F978-3-030-33226-6{\_}18{\&}partnerID=40{\&}md5=09c3ae4bdd0efa79e25ca5a9e196c9eb }, type = { Serial }, title = { 3D Mapping of Serial Histology Sections with Anomalies Using a Novel Robust Deformable Registration Algorithm }, pages = { 162--173 }, keywords = { Histology,Image registration,Neuroimaging }, issn = { 16113349 }, isbn = { 9783030332259 }, doi = { 10.1007/978-3-030-33226-6_18 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Tward and Li and Huo and Lee and Mitra and Miller }, abstract = { The neuroimaging field is moving toward micron scale and molecular features in digital pathology and animal models. These require mapping to common coordinates for annotation, statistical analysis, and collaboration. An important example, the BRAIN Initiative Cell Census Network, is generating 3D brain cell atlases in mouse, and ultimately primate and human. We aim to establish RNAseq profiles from single neurons and nuclei across the mouse brain, mapped to Allen Common Coordinate Framework (CCF). Imaging includes (Forumala Presented). 500 tape-transfer cut 20 (Forumala Presented). m thick Nissl-stained slices per brain. In key areas 100 {\&}{\#}x0024;{\&}{\#}x0024;$\backslash$upmu {\&}{\#}x0024;{\&}{\#}x0024; m thick slices with 0.5–2 mm diameter circular regions punched out for snRNAseq are imaged. 
These contain abnormalities including contrast changes and missing tissue, two challenges not jointly addressed in diffeomorphic image registration. Existing methods for mapping 3D images to histology require manual steps unacceptable for high throughput, or are sensitive to damaged tissue. Our approach jointly: registers 3D CCF to 2D slices, models contrast changes, estimates abnormality locations. Our registration uses 4 unknown deformations: 3D diffeomorphism, 3D affine, 2D diffeomorphism per-slice, 2D rigid per-slice. Contrast changes are modeled using unknown cubic polynomials per-slice. Abnormalities are estimated using Gaussian mixture modeling. The Expectation Maximization algorithm is used iteratively, with E step: compute posterior probabilities of abnormality, M step: registration and intensity transformation minimizing posterior-weighted sum-of-square-error. We produce per-slice anatomical labels using Allen Institute's ontology, and publicly distribute results online, with several typical and abnormal slices shown here. This work has further applications in digital pathology, and 3D brain mapping with stroke, multiple sclerosis, or other abnormalities. }, } |
2019 | In Collection | Roman Pryamonosov, Alexander Danilov (2019). Robustness analysis of coronary arteries segmentation. In Smart Innovation, Systems and Technologies, pp. 331–344. (link) (bib) x @incollection{Pryamonosov2019, year = { 2019 }, volume = { 133 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85060769579{\&}doi=10.1007{\%}2F978-3-030-06228-6{\_}26{\&}partnerID=40{\&}md5=2732253554b40b79957430c0178be429 }, type = { Serial }, title = { Robustness analysis of coronary arteries segmentation }, pages = { 331--344 }, keywords = { Cardiovascular applications,Computed tomography,Contrast enhanced,Coronary arteries,Image segmentation,Personalized medicine }, issn = { 21903026 }, isbn = { 9783030062279 }, doi = { 10.1007/978-3-030-06228-6_26 }, booktitle = { Smart Innovation, Systems and Technologies }, author = { Pryamonosov and Danilov }, abstract = { Segmentation of medical scans is the first and fundamental stage of numerical modeling of the human cardiovascular system. In this chapter, we analyze the results of coronary arteries segmentation using our approach for ten contrast-enhanced Computer Tomography Angiography datasets with different image quality and contrast phases. The segmentation is also affected by the patient anatomy, the shape and the scope of images. Our results show that the contrast phase timing is crucial for successful automatic segmentation. These factors form restrictions on the input data for automatic segmentation algorithms. Nevertheless, user guidance such as manual seeding and setting of thresholds can be used to significantly improve segmentation results and weaken the input restrictions. }, } |
2019 | In Collection | Renzo Phellan, Thomas Lindner, Michael Helle, Alexandre X. Falc\~ao, Nils D. Forkert (2019). The Effect of Labeling Duration and Temporal Resolution on Arterial Transit Time Estimation Accuracy in 4D ASL MRA Datasets - A Flow Phantom Study. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 141–148. (link) (bib) x @incollection{Phellan2019, year = { 2019 }, volume = { 11794 LNCS }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85075765108{\&}doi=10.1007{\%}2F978-3-030-33327-0{\_}17{\&}partnerID=40{\&}md5=34fc3b4595995b6c0ddd47325b464be7 }, type = { Serial }, title = { The Effect of Labeling Duration and Temporal Resolution on Arterial Transit Time Estimation Accuracy in 4D ASL MRA Datasets - A Flow Phantom Study }, pages = { 141--148 }, keywords = { Arterial transit time,Blood flow,Hemodynamic analysis,Model fitting }, issn = { 16113349 }, isbn = { 9783030333263 }, doi = { 10.1007/978-3-030-33327-0_17 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Phellan and Lindner and Helle and Falc{\~{a}}o and Forkert }, abstract = { Medical imaging modalities, such as four-dimensional arterial spin label magnetic resonance angiography (4D ASL MRA), can acquire blood flow data of the cerebrovascular system. These datasets are useful to determine criteria of normality and diagnose, study, and follow-up on the treatment progress of cerebrovascular diseases. In particular, variations in the arterial transit time (ATT) are related to hemodynamic impairment as a consequence of vascular diseases. In order to obtain accurate ATT estimations, the acquisition parameters of the applied image modality need to be properly tuned. In case of 4D ASL MRA, two important acquisition parameters are the blood labeling duration and the temporal resolution. 
This paper evaluates the effect of different settings for the two mentioned parameters on the accuracy of the ATT estimation in 4D ASL MRA datasets. Six 4D ASL MRA datasets of a pipe containing a mixture of glycerine and water, circulated with constant flow rate using a pump, are acquired with different labeling duration and temporal resolution. A mathematical model is then fitted to the observed signal in order to estimate the ATT. The results indicate that the lowest average absolute error between the ground-truth and estimated ATT is achieved when the longest labeling duration of 1000 ms and the highest temporal resolution of 60 ms are used. The insight obtained from the experiments using a flow phantom, under controlled conditions, can be extended to tune acquisition parameters of 4D ASL MRA datasets of human subjects. }, } |
2019 | In Collection | Kyoung Jin Noh, Sang Jun Park, Soochahn Lee (2019). Fine-scale vessel extraction in fundus images by registration with fluorescein angiography. In D Shen, P T Yap, T Liu, T M Peters, A Khan, L H Staib, C Essert, S Zhou, editor, Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 779–787. (link) (bib) x @incollection{RN836, year = { 2019 }, volume = { 11764 LNCS }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85075630951{\&}doi=10.1007{\%}2F978-3-030-32239-7{\_}86{\&}partnerID=40{\&}md5=e5b88e05907541abbf2b36b425c9f544 }, type = { Serial }, title = { Fine-scale vessel extraction in fundus images by registration with fluorescein angiography }, publisher = { Springer }, pages = { 779--787 }, keywords = { Filamentary vessels,Fine-scale vessel segmentation,Fluorescein angiography,Fundus images,Registration }, issn = { 16113349 }, isbn = { 9783030322380 }, editor = { Shen and Yap and Liu and Peters and Khan and Staib and Essert and Zhou }, doi = { 10.1007/978-3-030-32239-7_86 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Noh and Park and Lee }, abstract = { We present a new framework for fine-scale vessel segmentation from fundus images through registration and segmentation of corresponding fluorescein angiography (FA) images. In FA, fluorescent dye is used to highlight the vessels and increase their contrast. Since these highlights are temporally dispersed among multiple FA frames, we first register the FA frames and aggregate the per-frame segmentations to construct a detailed vessel mask. The constructed FA vessel mask is then registered to the fundus image based on an initial fundus vessel mask. Postprocessing is performed to refine the final vessel mask. 
Registration of FA frames, as well as registration of FA vessel mask to the fundus image, are done by similar hierarchical coarse-to-fine frameworks, both comprising rigid and non-rigid registration. Two CNNs with identical network structures, both trained on public datasets but with different settings, are used for vessel segmentation. The resulting final vessel segmentation contains fine-scale, filamentary vessels extracted from FA and corresponding to the fundus image. We provide quantitative evaluation as well as qualitative examples which support the robustness and the accuracy of the proposed method. }, } |
2019 | In Collection | Shusil Dangi, Ziv Yaniv, Cristian A. Linte (2019). Left Ventricle Segmentation and Quantification from Cardiac Cine MR Images via Multi-task Learning. In S Li, K McLeod, A Young, K Rhode, M Pop, J Zhao, M Sermesant, T Mansi, editor, Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 21–31. (link) (bib) x @incollection{RN838, year = { 2019 }, volume = { 11395 LNCS }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85064043381{\&}doi=10.1007{\%}2F978-3-030-12029-0{\_}3{\&}partnerID=40{\&}md5=89d8de887e5c7b9bb24cf49448d058fe }, type = { Serial }, title = { Left Ventricle Segmentation and Quantification from Cardiac Cine MR Images via Multi-task Learning }, publisher = { Springer Verlag }, pages = { 21--31 }, issn = { 16113349 }, isbn = { 9783030120283 }, eprint = { 1809.10221 }, editor = { Li and McLeod and Young and Rhode and Pop and Zhao and Sermesant and Mansi }, doi = { 10.1007/978-3-030-12029-0_3 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Dangi and Yaniv and Linte }, arxivid = { 1809.10221 }, archiveprefix = { arXiv }, abstract = { Segmentation of the left ventricle and quantification of various cardiac contractile functions is crucial for the timely diagnosis and treatment of cardiovascular diseases. Traditionally, the two tasks have been tackled independently. Here we propose a convolutional neural network based multi-task learning approach to perform both tasks simultaneously, such that, the network learns better representation of the data with improved generalization performance. Probabilistic formulation of the problem enables learning the task uncertainties during the training, which are used to automatically compute the weights for the tasks. 
We performed a five fold cross-validation of the myocardium segmentation obtained from the proposed multi-task network on 97 patient 4-dimensional cardiac cine-MRI datasets available through the STACOM LV segmentation challenge against the provided gold-standard myocardium segmentation, obtaining a Dice overlap of (Formula Presented) and mean surface distance of (Formula Presented) mm, while simultaneously estimating the myocardial area with mean absolute difference error of (Formula Presented). }, } |
2019 | In Conf. Proceedings | Ibraheem Al-Dhamari, Sabine Bauer, Eva Keller, Dietrich Paulus (2019). Automatic detection of cervical spine ligaments origin and insertion points. In Proceedings - International Symposium on Biomedical Imaging, pp. 48–51, New York. (link) (bib) x @inproceedings{AlDhamari, year = { 2019 }, volume = { 2019-April }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85073909464{\&}doi=10.1109{\%}2FISBI.2019.8759223{\&}partnerID=40{\&}md5=1c62e6194c979866476e3863b6d3f513 }, type = { Conference Proceedings }, title = { Automatic detection of cervical spine ligaments origin and insertion points }, series = { IEEE International Symposium on Biomedical Imaging }, publisher = { IEEE }, pages = { 48--51 }, keywords = { Acir,Asgd,Atlas-segmentation,Detection,Origin and insertion points,Registration,Spine }, issn = { 19458452 }, isbn = { 9781538636411 }, doi = { 10.1109/ISBI.2019.8759223 }, booktitle = { Proceedings - International Symposium on Biomedical Imaging }, author = { Al-Dhamari and Bauer and Keller and Paulus }, address = { New York }, abstract = { Creating patient-specific simulation models helps to make customised implant or treatment plans. To create such models, exact locations of the Origin and Insertion Points of the Ligaments (OIPL) are required. Locating these OIPL is usually done manually through a time-consuming procedure.A fast method to detect these OIPL automatically using spine atlas-based segmentation is proposed in this paper. The average detection rate is 96.16{\%} with a standard deviation of 3.45. The required time to detect these points is approximately 5 seconds. The proposed method can be generalised to detect any other important points or features related to a specific vertebra.The method is implemented as an open-source plugin for 3D Slicer. The method and the datasets can be downloaded for free from a public server. }, } |
2019 | In Conf. Proceedings | Natan Andrade, Fabio Augusto Faria, Fábio Augusto Menocci Cappabianco (2019). A Practical Review on Medical Image Registration: From Rigid to Deep Learning Based Approaches. In Proceedings - 31st Conference on Graphics, Patterns and Images, SIBGRAPI 2018, pp. 463–470, New York. (link) (bib) x @inproceedings{Andrade2018, year = { 2019 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85062231809{\&}doi=10.1109{\%}2FSIBGRAPI.2018.00066{\&}partnerID=40{\&}md5=14da8e5ef491a8566fbbd2a58e32c72b }, type = { Book Section }, title = { A Practical Review on Medical Image Registration: From Rigid to Deep Learning Based Approaches }, series = { SIBGRAPI - Brazilian Symposium on Computer Graphics and Image Processing }, publisher = { IEEE }, pages = { 463--470 }, keywords = { Deep Learning,Image Registration,Medical Imaging }, isbn = { 9781538692646 }, doi = { 10.1109/SIBGRAPI.2018.00066 }, booktitle = { Proceedings - 31st Conference on Graphics, Patterns and Images, SIBGRAPI 2018 }, author = { Andrade and Faria and Cappabianco }, address = { New York }, abstract = { The large variety of medical image modalities (e.g. Computed Tomography, Magnetic Resonance Imaging, and Positron Emission Tomography) acquired from the same body region of a patient together with recent advances in computer architectures with faster and larger CPUs and GPUs allows a new, exciting, and unexplored world for image registration area. A precise and accurate registration of images makes possible understanding the etiology of diseases, improving surgery planning and execution, detecting otherwise unnoticed health problem signals, and mapping functionalities of the brain. 
The goal of this paper is to present a review of the state-of-the-art in medical image registration starting from the preprocessing steps, covering the most popular methodologies of the literature and finish with the more recent advances and perspectives from the application of Deep Learning architectures. }, } |
2019 | In Conf. Proceedings | Maximilian Weber, Daniel Wild, Jurgen Wallner, Jan Egger (2019). A Client/Server based Online Environment for the Calculation of Medical Segmentation Scores. In Proceedings of the Annual International Conference of the IEEE Engineering in Medicine and Biology Society, EMBS, pp. 3463–3467. (link) (bib) x @inproceedings{Weber, year = { 2019 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85077880308{\&}doi=10.1109{\%}2FEMBC.2019.8856481{\&}partnerID=40{\&}md5=289fafd6a7830c42d4e57271141c9bce }, type = { Conference Proceedings }, title = { A Client/Server based Online Environment for the Calculation of Medical Segmentation Scores }, pages = { 3463--3467 }, issn = { 1557170X }, isbn = { 9781538613115 }, doi = { 10.1109/EMBC.2019.8856481 }, booktitle = { Proceedings of the Annual International Conference of the IEEE Engineering in Medicine and Biology Society, EMBS }, author = { Weber and Wild and Wallner and Egger }, abstract = { Image segmentation plays a major role in medical imaging. Especially in radiology, the detection and development of tumors and other diseases can be supported by image segmentation applications. Tools that provide image segmentation and calculation of segmentation scores are not available at any time for every device due to the size and scope of functionalities they offer. These tools need huge periodic updates and do not properly work on old or weak systems. However, medical use-cases often require fast and accurate results. A complex and slow software can lead to additional stress and thus unnecessary errors. The aim of this contribution is the development of a cross-platform tool for medical segmentation use-cases. The goal is a device-independent and always available possibility for medical imaging including manual segmentation and metric calculation. The result is Studierfenster (studierfenster.at), a web-tool for manual segmentation and segmentation metric calculation. 
In this contribution, the focus lies on the segmentation metric calculation part of the tool. It provides the functionalities of calculating directed and undirected Hausdorff Distance (HD) and Dice Similarity Coefficient (DSC) scores for two uploaded volumes, filtering for specific values, searching for specific values in the calculated metrics and exporting filtered metric lists in different file formats. }, } |
2019 | In Conf. Proceedings | Zhengru Shen, Marco Spruit (2019). LOCATE: A web application to link open-source clinical software with literature. In HEALTHINF 2019 - 12th International Conference on Health Informatics, Proceedings; Part of 12th International Joint Conference on Biomedical Engineering Systems and Technologies, BIOSTEC 2019, pp. 294–301. (link) (bib) x @inproceedings{Shena, year = { 2019 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85064637892{\&}partnerID=40{\&}md5=e5fa660669c558180b42f873a3ef3683 }, type = { Conference Proceedings }, title = { LOCATE: A web application to link open-source clinical software with literature }, pages = { 294--301 }, keywords = { Github Repository,Literature,Open-source Clinical Software,Web Application }, isbn = { 9789897583537 }, doi = { 10.5220/0007378702940301 }, booktitle = { HEALTHINF 2019 - 12th International Conference on Health Informatics, Proceedings; Part of 12th International Joint Conference on Biomedical Engineering Systems and Technologies, BIOSTEC 2019 }, author = { Shen and Spruit }, abstract = { Nowadays, the effective utilization of open-source software could significantly boost both clinical research and practices, especially in resource-poor countries. However, the plethora of open-source clinical software has left many people unable to quickly locate the appropriate one for their needs. Commonly available software quality metrics and software documentation, such as downloads, forks, stars, and readme files, are useful selection criteria, but they only indicate the software quality from the perspective of IT experts. This paper proposes a method that offers additional insights on the performance and effectiveness of clinical software. It links open-source clinical software with relevant scientific literature, such as papers that use case studies of clinical software to reveal the strength and weakness of a given software from the clinical perspective. 
To interactively present the open-source clinical software and their related literature, we have developed the LOCATE web application that enables users to explore related literature for a given opensource clinical software. Moreover, the peer-review cycle of the application allows users to improve the application by confirming, adding or removing related literature. An evaluation experiment of the five most popular open-source clinical tools demonstrates the potential usefulness of LOCATE. }, } |
2019 | In Conf. Proceedings | Santiago González Izard, Óscar Alonso Plaza, Ramiro Sánchez Torres, Juan Antonio Juanes Méndez, Francisco José García-Peñalvo (2019). NextMed, Augmented and Virtual Reality platform for 3D medical imaging visualization: Explanation of the software platform developed for 3D models visualization related with medical images using Augmented and Virtual Reality technology. In ACM International Conference Proceeding Series, pp. 459–468. (link) (bib) x @inproceedings{Izard, year = { 2019 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85075443653{\&}doi=10.1145{\%}2F3362789.3362936{\&}partnerID=40{\&}md5=4c4690e277fd6b868cb7be97c738a3ed }, type = { Conference Proceedings }, title = { NextMed, Augmented and Virtual Reality platform for 3D medical imaging visualization: Explanation of the software platform developed for 3D models visualization related with medical images using Augmented and Virtual Reality technology }, pages = { 459--468 }, keywords = { Augmented Reality,Automatic Segmentation,Medical Imaging,Virtual Reality }, isbn = { 9781450371919 }, doi = { 10.1145/3362789.3362936 }, booktitle = { ACM International Conference Proceeding Series }, author = { Izard and Plaza and Torres and M{\'{e}}ndez and Garc{\'{i}}a-Pe{\~{n}}alvo }, abstract = { The visualization of the radiological results with more advanced techniques than the current ones, such as Augmented Reality and Virtual Reality technologies, represent a great advance for medical professionals, by eliminating their imagination capacity as an indispensable requirement for the understanding of medical images. The problem is that for its application it is necessary to segment the anatomical areas of interest, and this currently involves the intervention of the human being. 
The Nextmed project is presented as a complete solution that includes DICOM images import, automatic segmentation of certain anatomical structures, 3D mesh generation of the segmented area, visualization engine with Augmented Reality and Virtual Reality, all thanks to different software platforms that have been implemented and detailed, including results obtained from real patients. We will focus on the visualization platform using both Augmented and Virtual Reality technologies to allow medical professionals to work with 3d model representation of medical images in a different way taking advantage of new technologies. }, } |
2019 | In Conf. Proceedings | Beatriz Paniagua, Jack Prothero, Jean-Baptiste Vimort, Antonio Carlos O. Ruellas, James S. Marron, Lucia Cevidanes, Erika Benavides, Matthew M. McCormick, Pablo Hernandez-Cerdan (2019). Advanced statistical analysis to classify high dimensionality textural probability-distribution matrices. In NA pp. 42. (bib) x @inproceedings{Paniagua2019, year = { 2019 }, title = { Advanced statistical analysis to classify high dimensionality textural probability-distribution matrices }, publisher = { SPIE-Intl Soc Optical Eng }, pages = { 42 }, month = { mar }, issn = { 0277-786X }, isbn = { 9781510625532 }, doi = { 10.1117/12.2507978 }, author = { Paniagua and Prothero and Vimort and Ruellas and Marron and Cevidanes and Benavides and McCormick and Hernandez-Cerdan }, } |
2019 | In Conf. Proceedings | M. Vera, Y. Huérfano, E. Gelvez, O. Valbuena, J. Salazar, V. Molina, M. I. Vera, W. Salazar, F. Sáenz (2019). Segmentation of brain tumors using a semi-automatic computational strategy. In Journal of Physics: Conference Series, pp. NA (link) (bib) x @inproceedings{RN855, year = { 2019 }, volume = { 1160 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85062482878{\&}doi=10.1088{\%}2F1742-6596{\%}2F1160{\%}2F1{\%}2F012002{\&}partnerID=40{\&}md5=03a70ac3ff75e964350486d67a57b008 }, type = { Conference Proceedings }, title = { Segmentation of brain tumors using a semi-automatic computational strategy }, publisher = { Institute of Physics Publishing }, number = { 1 }, issn = { 17426596 }, isbn = { 17426588 (ISSN) }, editor = { [object Object],[object Object],[object Object] }, doi = { 10.1088/1742-6596/1160/1/012002 }, booktitle = { Journal of Physics: Conference Series }, author = { Vera and Hu{\'{e}}rfano and Gelvez and Valbuena and Salazar and Molina and Vera and Salazar and S{\'{a}}enz }, abstract = { In this work, a semi-automatic computational strategy is proposed for brain tumor segmentation. The filtering (erosion + gaussian filters), segmentation (level set technique) and quantification (BT volume) stages are applied to magnetic resonance imaging in order to generate the three-dimensional morphology of brain tumors. The Jaccard's Similarity Index is considered to contrast manual segmentation with semi-automatic segmentations of brain tumor. In this sense, the highest Jaccard's Similarity Index provides the best parameters of the techniques that constitute the semi-automatic computational strategy. Results are promising, showing an excellent correlation between these segmentations. The volume is used for the brain tumors characterization. }, } |
2019 | In Conf. Proceedings | Fakrul Islam Tushar, Basel Alyafi, Md Kamrul Hasan, Lavsen Dahal (2019). Brain tissue segmentation using neuronet with different pre-processing techniques. In 2019 Joint 8th International Conference on Informatics, Electronics and Vision, ICIEV 2019 and 3rd International Conference on Imaging, Vision and Pattern Recognition, icIVPR 2019 with International Conference on Activity and Behavior Computing, ABC 2019, pp. 223–227. (link) (bib) x @inproceedings{RN835, year = { 2019 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85074060440{\&}doi=10.1109{\%}2FICIEV.2019.8858515{\&}partnerID=40{\&}md5=8ce695907c0589166ae123e5259efcf0 }, type = { Conference Proceedings }, title = { Brain tissue segmentation using neuronet with different pre-processing techniques }, publisher = { Institute of Electrical and Electronics Engineers Inc. }, pages = { 223--227 }, keywords = { Brain tissue segmentation,Dice Similarity Coefficient (DSC),Fully Convolution Network (FCN),IBSR18,Magnetic resonance imaging (MRI),NeuroNet,Residual Network (ResNet) }, isbn = { 9781728107868 }, doi = { 10.1109/ICIEV.2019.8858515 }, booktitle = { 2019 Joint 8th International Conference on Informatics, Electronics and Vision, ICIEV 2019 and 3rd International Conference on Imaging, Vision and Pattern Recognition, icIVPR 2019 with International Conference on Activity and Behavior Computing, ABC 2019 }, author = { Tushar and Alyafi and Hasan and Dahal }, abstract = { Automatic segmentation of brain Magnetic Resonance Imaging (MRI) images is one of the vital steps for quantitative analysis of brain for further inspection. In this paper, NeuroNet has been adopted to segment the brain tissues (white matter (WM), grey matter (GM) and cerebrospinal fluid (CSF)) which uses Residual Network (ResNet) in encoder and Fully Convolution Network (FCN) in the decoder. 
To achieve the best performance, various hyper-parameters have been tuned, while, network parameters (kernel and bias) were initialized using the NeuroNet pre-trained model. Different pre-processing pipelines have also been introduced to get a robust trained model. The model has been trained and tested on IBSR18 data-set. To validate the research outcome, performance was measured quantitatively using Dice Similarity Coefficient (DSC) and is reported on average as 0.84 for CSF, 0.94 for GM, and 0.94 for WM. The outcome of the research indicates that for the IBSR18 data-set, pre-processing and proper tuning of hyper-parameters for NeuroNet model have improvement in DSC for the brain tissue segmentation. }, } |
2019 | In Conf. Proceedings | Jagjeet Nain, Johannes Mueller (2019). Improving band to band registration accuracy of SEVIRI level 1.5 products. In Image and Signal Processing for Remote Sensing XXV 2019, pp. 2. (link) (bib) x @inproceedings{RN857, year = { 2019 }, volume = { 11155 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85078146492{\&}doi=10.1117{\%}2F12.2532730{\&}partnerID=40{\&}md5=9a2bd6db58344dccac47650014f184cb }, type = { Conference Proceedings }, title = { Improving band to band registration accuracy of SEVIRI level 1.5 products }, publisher = { SPIE }, pages = { 2 }, issn = { 1996756X }, isbn = { 9781510630130 }, editor = { [object Object],[object Object],[object Object] }, doi = { 10.1117/12.2532730 }, booktitle = { Image and Signal Processing for Remote Sensing XXV 2019 }, author = { Nain and Mueller }, } |
2019 | In Conf. Proceedings | Muhammad Jawad, Vladimir Molchanov, Lars Linsen (2019). Coordinated image- and feature-space visualization for interactive magnetic resonance spectroscopy imaging data analysis. In VISIGRAPP 2019 - Proceedings of the 14th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications, pp. 118–128. (link) (bib) x @inproceedings{RN859, year = { 2019 }, volume = { 3 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85064760921{\&}partnerID=40{\&}md5=85bea0be829568fc67dd653faf50fc47 }, type = { Conference Proceedings }, title = { Coordinated image- and feature-space visualization for interactive magnetic resonance spectroscopy imaging data analysis }, publisher = { SciTePress }, pages = { 118--128 }, keywords = { Coordinated views,Medical visualization,Multidimensional data visualization,Spectral imaging analysis }, isbn = { 9789897583544 }, editor = { [object Object],[object Object],[object Object] }, doi = { 10.5220/0007571801180128 }, booktitle = { VISIGRAPP 2019 - Proceedings of the 14th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications }, author = { Jawad and Molchanov and Linsen }, abstract = { Magnetic Resonance Spectroscopy Imaging (MRSI) is a medical imaging method that measures per voxel a spectrum of signal intensities. It allows for the analysis of chemical compositions within the scanned tissue, which is particularly useful for tumor classification and measuring its infiltration of healthy tissue. Common analysis approaches consider one metabolite concentration at a time to produce intensity maps in the image space, which does not consider all relevant information at hand. We propose a system that uses coordinated views between image-space visualizations and visual representations of the spectral (or feature) space. 
Coordinated interaction allows for analyzing both aspects and relating the analysis results back to the other for further investigations. We demonstrate how our system can be used to analyze brain tumors. }, } |
2019 | In Conf. Proceedings | Pablo Hernandez-Cerdan, Beatriz Paniagua, Jack Prothero, James S. Marron, Eric Livingston, Ted Bateman, Matthew M. McCormick (2019). Methods for quantitative characterization of bone injury from computed-tomography images. In Medical Imaging 2019: Biomedical Applications in Molecular, Structural, and Functional Imaging, pp. 40. (link) (bib) x @inproceedings{RN858, year = { 2019 }, volume = { 10953 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85068414398{\&}doi=10.1117{\%}2F12.2513007{\&}partnerID=40{\&}md5=919e41c5d51d55066df5c354c9c554f4 }, type = { Conference Proceedings }, title = { Methods for quantitative characterization of bone injury from computed-tomography images }, publisher = { SPIE }, pages = { 40 }, issn = { 0277-786X }, isbn = { 9781510625532 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2513007 }, booktitle = { Medical Imaging 2019: Biomedical Applications in Molecular, Structural, and Functional Imaging }, author = { Hernandez-Cerdan and Paniagua and Prothero and Marron and Livingston and Bateman and McCormick }, } |
2019 | In Conf. Proceedings | Neel Dey, Shijie Li, Katharina Bermond, Rainer Heintzmann, Christine A. Curcio, Thomas Ach, Guido Gerig (2019). Multi-modal image fusion for multispectral super-resolution in microscopy. In Medical Imaging 2019: Image Processing, pp. 12. (link) (bib) x @inproceedings{RN837, year = { 2019 }, volume = { 10949 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85068325681{\&}doi=10.1117{\%}2F12.2512598{\&}partnerID=40{\&}md5=bb0775b42af9f68ef3a088e55974d94e }, type = { Conference Proceedings }, title = { Multi-modal image fusion for multispectral super-resolution in microscopy }, publisher = { SPIE }, pages = { 12 }, issn = { 0277-786X }, isbn = { 9781510625457 }, editor = { [object Object],[object Object],[object Object],[object Object] }, doi = { 10.1117/12.2512598 }, booktitle = { Medical Imaging 2019: Image Processing }, author = { Dey and Li and Bermond and Heintzmann and Curcio and Ach and Gerig }, abstract = { Spectral imaging is a ubiquitous tool in modern biochemistry. Despite acquiring dozens to thousands of spectral channels, existing technology cannot capture spectral images at the same spatial resolution as structural microscopy. Due to partial voluming and low light exposure, spectral images are often difficult to interpret and analyze. This highlights a need to upsample the low-resolution spectral image by using spatial information contained in the high-resolution image, thereby creating a fused representation with high specificity both spatially and spectrally. In this paper, we propose a framework for the fusion of co-registered structural and spectral microscopy images to create super-resolved representations of spectral images. As a first application, we super-resolve spectral images of ex-vivo retinal tissue imaged with confocal laser scanning microscopy, by using spatial information from structured illumination microscopy. 
Second, we super-resolve mass spectroscopic images of mouse brain tissue, by using spatial information from high-resolution histology images. We present a systematic validation of model assumptions crucial towards maintaining the original nature of spectra and the applicability of super-resolution. Goodness-of-fit for spectral predictions are evaluated through functional R2 values, and the spatial quality of the super-resolved images are evaluated using normalized mutual information. {\textcopyright} COPYRIGHT SPIE. Downloading of the abstract is permitted for personal use only. }, } |
2019 | In Conf. Proceedings | Ander Arbelaiz, Aitor Moreno, I\~nigo Barandiaran, Alejandro Garc\'ia-Alonso (2019). Progressive ray-casting volume rendering with WebGL for visual assessment of air void distribution in quality control. In Proceedings - Web3D 2019: 24th International ACM Conference on 3D Web Technology, pp. NA (link) (bib) x @inproceedings{RN847, year = { 2019 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85071158969{\&}doi=10.1145{\%}2F3329714.3338131{\&}partnerID=40{\&}md5=2a6a9a2b43279419c898a8f0a38d80a6 }, type = { Conference Proceedings }, title = { Progressive ray-casting volume rendering with WebGL for visual assessment of air void distribution in quality control }, publisher = { Association for Computing Machinery, Inc }, keywords = { Industrial application,Progressive rendering,Quality control,Ray-casting,Ubiquitous platforms,Void segmentation,Volume rendering,Web,WebGL }, isbn = { 9781450367981 }, editor = { [object Object] }, doi = { 10.1145/3329714.3338131 }, booktitle = { Proceedings - Web3D 2019: 24th International ACM Conference on 3D Web Technology }, author = { Arbelaiz and Moreno and Barandiaran and Garc{\'{i}}a-Alonso }, abstract = { Due to a lack of ubiquitous tools for volume data visualization, 3D rendering of volumetric content is shared and distributed as 2D media (video and static images). This work shows how using open web technologies (HTML5, JavaScript,WebGL and SVG), high quality volume rendering is achievable in an interactive manner with any WebGL-enabled device. In the web platform, real-time volume rendering algorithms are constrained to small datasets. This work presents a WebGL progressive ray-casting volume rendering approach that allows the interactive visualization of larger datasets with a higher rendering quality. This approach is better suited for devices with low compute capacity such as tablets and mobile devices. 
As a validation case, the presented method is used in an industrial quality inspection use case to visually assess the air void distribution of a plastic injection mould component in the web browser. }, } |
2018 | Book chapter | M Schiwarth, J Weissenböck, B Plank, B Fröhler, C Heinzl, J Kastner (2018). NA in Visual analysis of void and reinforcement characteristics in X-ray computed tomography dataset series of fiber-reinforced polymers, IOP Publishing Ltd, pp. NA IOP Conference Series-Materials Science and Engineering, Vol. 406. (link) (bib) x @inbook{Schiwarth2018, year = { 2018 }, volume = { 406 }, url = { {\%}3CGo to }, type = { Book Section }, title = { Visual analysis of void and reinforcement characteristics in X-ray computed tomography dataset series of fiber-reinforced polymers }, series = { IOP Conference Series-Materials Science and Engineering }, publisher = { IOP Publishing Ltd }, doi = { 10.1088/1757-899X/406/1/012014 }, booktitle = { 13th International Conference on Textile Composites }, author = { Schiwarth and Weissenb{\"{o}}ck and Plank and Fr{\"{o}}hler and Heinzl and Kastner }, address = { Bristol }, } |
2018 | Book chapter | Y Suter, C Rummel, R Wiest, M Reyes (2018). NA in Fast and Uncertainty-Aware Cerebral Cortex Morphometry Estimation Using Random Forest Regression, IEEE, pp. 1052–1055, IEEE International Symposium on Biomedical Imaging. (link) (bib) x @inbook{Suter2018, year = { 2018 }, url = { {\%}3CGo to }, type = { Book Section }, title = { Fast and Uncertainty-Aware Cerebral Cortex Morphometry Estimation Using Random Forest Regression }, series = { IEEE International Symposium on Biomedical Imaging }, publisher = { IEEE }, pages = { 1052--1055 }, isbn = { 978-1-5386-3636-7 }, booktitle = { 2018 IEEE 15th International Symposium on Biomedical Imaging }, author = { Suter and Rummel and Wiest and Reyes }, address = { New York }, } |
2018 | Book chapter | Jack Prothero, Matthew McCormick, Beatriz Paniagua, Jean-Baptiste Vimort, Antonio Carlos Ruellas, J.S. Marron, Lucia Cevidanes, Erika Benavides (2018). NA in Detection of bone loss via subchondral bone analysis, Edited by B Gimi, A Krol, Spie-Int Soc Optical Engineering, pp. 25, Proceedings of SPIE, Vol. 10578, ISBN: 0277-786X. (link) (bib) x @inbook{Vimort2018, year = { 2018 }, volume = { 10578 }, url = { {\%}3CGo to }, type = { Book Section }, title = { Detection of bone loss via subchondral bone analysis }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 25 }, issn = { 0277-786X }, isbn = { 9781510616455 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2293654 }, booktitle = { Medical Imaging 2018: Biomedical Applications in Molecular, Structural, and Functional Imaging }, author = { Prothero and McCormick and Paniagua and Vimort and Ruellas and Marron and Cevidanes and Benavides }, address = { Bellingham }, abstract = { {\textcopyright} 2018 SPIE. To date, there is no single sign, symptom, or test that can clearly diagnose early stages of Temporomandibular Joint Osteoarthritis (TMJ OA). However, it has been observed that changes in the bone occur in early stages of this disease, involving structural changes both in the texture and morphometry of the bone marrow and the subchondral cortical plate. In this paper we present a tool to detect and highlight subtle variations in subchondral bone structure obtained from high resolution Cone Beam Computed Tomography (hr-CBCT) in order to help with detecting early TMJ OA. The proposed tool was developed in ITK and 3DSlicer and it has been disseminated as open-source software tools. We have validated both our texture analysis and morphometry analysis biomarkers for detection of TMJ OA comparing hr-CBCT to $\mu$CT. 
Our initial statistical results using the multidimensional features computed with our tool indicate that it is possible to classify areas of demonstrated loss of trabecular bone in both $\mu$CT and hr-CBCT. This paper describes the first steps to alleviate the current inability of radiological changes to diagnose TMJ OA before morphological changes are too advanced by quantifying subchondral bone biomarkers. This paper indicates that texture based and morphometry based biomarkers have the potential to identify OA patients at risk for further bone destruction. }, } |
2018 | Book chapter | Andrew Harris, Jessica Kishimoto, Aaron Fenster, Sandrine de Ribaupierre, Lori Gardi (2018). NA in Automated registration and stitching of multiple 3D ultrasound images for monitoring neonatal intraventricular hemorrhage, Edited by N Duric, B C Byram, Spie-Int Soc Optical Engineering, pp. 42, Proceedings of SPIE, Vol. 10580, ISBN: 16057422. (link) (bib) x @inbook{Harris2018, year = { 2018 }, volume = { 10580 }, url = { {\%}3CGo to }, type = { Book Section }, title = { Automated registration and stitching of multiple 3D ultrasound images for monitoring neonatal intraventricular hemorrhage }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 42 }, issn = { 16057422 }, isbn = { 9781510616493 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2292925 }, booktitle = { Medical Imaging 2018: Ultrasonic Imaging and Tomography }, author = { Harris and Kishimoto and Fenster and Ribaupierre and Gardi }, address = { Bellingham }, abstract = { {\textcopyright} 2018 SPIE. Dilatation of the cerebral ventricles is a common condition in preterm neonates with intraventricular hemorrhage (IVH). Post Hemorrhagic Ventricular Dilatation (PHVD) can lead to lifelong neurological impairment caused by ischemic injury due to increased intracranial pressure, and without treatment can lead to death. Previously, we have developed and validated a 3D ultrasound (US) system to monitor the progression of ventricle volumes (VV) in IVH patients; however, many patients with severe PHVD have ventricles so large they cannot be imaged within a single 3D US image. This limits the utility of atlas based segmentation algorithms required to measure VV as parts of the ventricles are in separate 3D US images, and thus, an already challenging segmentation becomes increasingly difficult to solve. 
Without a more automated segmentation, the clinical utility of 3D US ventricle volumes cannot be fully realized due to the large number of images and patients required to validate the technique in clinical trials. Here, we describe the initial results of an automated `stitching' algorithm used to register and combine multiple 3D US images of the ventricles of patients with PHVD. Our registration results show that we were able to register these images with an average target registration error (TRE) of 4.25$\pm$1.95 mm. }, } |
2018 | Book chapter | David Tessier, Derek J. Gillies, Lori Gardi, Ashley Mercado, Aaron Fenster (2018). NA in Ring navigation: an ultrasound-guided technique using real-time motion compensation for prostate biopsies, Edited by B Fei, R J Webster, Spie-Int Soc Optical Engineering, pp. 52, Proceedings of SPIE, Vol. 10576, ISBN: 16057422. (link) (bib) x @inbook{Gillies2018, year = { 2018 }, volume = { 10576 }, url = { {\%}3CGo to }, type = { Book Section }, title = { Ring navigation: an ultrasound-guided technique using real-time motion compensation for prostate biopsies }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 52 }, issn = { 16057422 }, isbn = { 9781510616417 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2292922 }, booktitle = { Medical Imaging 2018: Image-Guided Procedures, Robotic Interventions, and Modeling }, author = { Tessier and Gillies and Gardi and Mercado and Fenster }, address = { Bellingham }, abstract = { Prostate cancer has the second highest noncutaneous cancer incidence in men. Three-dimensional (3D) transrectal ultrasound (TRUS) fused with a magnetic resonance image (MRI) is used to guide prostate biopsy as an alternative technique to conventional 2D TRUS sextant biopsy. The TRUS-MRI fusion technique can provide intraoperative needle guidance to suspicious cancer tissues identified on MRI, increasing the targeting capabilities of a physician. Currently, 3D TRUS-MR guided biopsy suffers from image and target misalignment caused by various forms of prostate motion. Thus, we previously developed a real-time motion compensation algorithm to align 2D and 3D TRUS images with an update rate around an ultrasound system frame rate. During clinical implementation, observations of image misalignment occurred when obtaining tissue samples near the left and right boundaries of the prostate. 
To minimize transducer translation on the rectal wall and avoid prostate motion and deformation, we are proposing the use of a 3D model-based ring navigation procedure. This navigation keeps the transducer positioned towards the centroid of the prostate when guiding the tracked biopsy gun to targets. Prostate biopsy was performed on three patients while using real-time motion compensation in the background. Our navigation approach was compared to a conventional 2D TRUS-guided procedure using approximately 20 2D and 3D TRUS image pairs and resulted in median {\{}[{\}}first quartile, third quartile] registration errors of 2.0 {\{}[{\}}1.3,2.5] mm and 3.4 {\{}[{\}}1.5, 8.2] mm, respectively. Using our navigation approach, registration error and variability were reduced, potentially suggesting a more robust technique when performing continuous motion compensation. }, } |
2018 | Book chapter | Jorge Roberto Lopes dos Santos, Heron Werner, Alberto Raposo, Jan Hurtado, Vinicius Arcoverde, Gerson Ribeiro (2018). NA in A Proposal for Combining Ultrasound, Magnetic Resonance Imaging and Force Feedback Technology, During the Pregnancy, to Physically Feel the Fetus, Edited by V G Duffy, Springer International Publishing AG, pp. 502–512, Lecture Notes in Computer Science, Vol. 10917 LNCS, ISBN: 16113349. (link) (bib) x @inbook{Santos2018, year = { 2018 }, volume = { 10917 LNCS }, url = { {\%}3CGo to }, type = { Book Section }, title = { A Proposal for Combining Ultrasound, Magnetic Resonance Imaging and Force Feedback Technology, During the Pregnancy, to Physically Feel the Fetus }, series = { Lecture Notes in Computer Science }, publisher = { Springer International Publishing AG }, pages = { 502--512 }, keywords = { Fetus,Haptics,Interaction,MRI,Ultrasound }, issn = { 16113349 }, isbn = { 9783319913964 }, editor = { Duffy }, doi = { 10.1007/978-3-319-91397-1_40 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { dos Santos and Werner and Raposo and Hurtado and Arcoverde and Ribeiro }, address = { Cham }, abstract = { Evolutions in image-scanning technology have led to vast improvements in the fetal assessment. Ultrasound (US) is the main technology for fetal evaluation. Magnetic resonance imaging (MRI) is generally used when US cannot provide high-quality images. This paper presents an interactive bidirectional actuated human-machine interface proposal developed by the combination of a haptic device system (force-feedback technology) and a non-invasive medical image technology. }, } |
2018 | Book chapter | Tatyana V. Danilova, Alexey O. Manturov, Gleb O. Mareev, Oleg V. Mareev, Innokentiy K. Alaytsev (2018). NA in Creation of anatomical models from CT data, Edited by V L Derbov, D E Postnov, Spie-Int Soc Optical Engineering, pp. 18, Proceedings of SPIE, Vol. 10717, ISBN: 16057422. (link) (bib) x @inbook{Alaytsev2018, year = { 2018 }, volume = { 10717 }, url = { {\%}3CGo to }, type = { Book Section }, title = { Creation of anatomical models from CT data }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 18 }, issn = { 16057422 }, isbn = { 9781510620032 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2309318 }, booktitle = { Saratov Fall Meeting 2017: Laser Physics and Photonics Xviii; and Computational Biophysics and Analysis of Biomedical Data Iv }, author = { Danilova and Manturov and Mareev and Mareev and Alaytsev }, address = { Bellingham }, abstract = { Computed tomography is a great source of biomedical data because it allows a detailed exploration of complex anatomical structures. Some structures are not visible on CT scans, and some are hard to distinguish due to partial volume effect. CT datasets require preprocessing before using them as anatomical models in a simulation system. The work describes segmentation and data transformation methods for an anatomical model creation from the CT data. The result models may be used for visual and haptic rendering and drilling simulation in a virtual surgery system. {\textcopyright} 2018 SPIE. }, } |
2018 | Book chapter | Dario Augusto Borges Oliveira, Matheus Palhares Viana (2018). NA in Lung nodule synthesis using cnn-based latent data representation, Edited by A Gooya, O Goksel, I Oguz, N Burgos, Springer International Publishing Ag, pp. 111–118, Lecture Notes in Computer Science, Vol. 11037 LNCS, ISBN: 16113349. (link) (bib) x @inbook{RN822, year = { 2018 }, volume = { 11037 LNCS }, url = { {\%}3CGo to }, type = { Book Section }, title = { Lung nodule synthesis using cnn-based latent data representation }, series = { Lecture Notes in Computer Science }, publisher = { Springer International Publishing Ag }, pages = { 111--118 }, keywords = { Convolutional neural networks,Generative models,Lung nodule false positive reduction,Multivariate Gaussian mixture models,Nodules synthesis }, issn = { 16113349 }, isbn = { 9783030005351 }, editor = { [object Object],[object Object],[object Object],[object Object] }, doi = { 10.1007/978-3-030-00536-8_12 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Oliveira and Viana }, address = { Cham }, abstract = { Convolutional neural networks (CNNs) have been widely used to address various image analysis problems at the cost of intensive computational load and large amounts of annotated training data. When it comes to Medical Imaging, annotation is often complicated and/or expensive, and innovative methods for dealing with small or very imbalanced training sets are mostly welcome. In this context, this paper proposes a novel approach for efficiently synthesizing volumetric patch data from a small amount of samples using their latent data. Our method consists of two major steps. 
First, we train a 3D CNN auto-encoder for unsupervised learning of volumetric latent data by means of multivariate Gaussian mixture models (GMMs): while the encoder finds latent representations of volumes using GMMs, the decoder uses the estimated GMMs parameters to reconstruct the volume observed in the input. Then, we modify latent data of samples at training time to generate similar, but different, new samples: we run non-rigid registrations between patches decoded from real latent data and patches decoded from modified latent data, and warp the corresponding original image patches using the resulting displacement fields. We evaluated our method in the context of lung nodules synthesis using the publicly available LUNA challenge dataset, and generated new realistic samples out of real lung nodules, preserving their original texture and neighbouring anatomical structures. Our results demonstrate that 3D CNNs trained using our synthesis method were able to consistently deliver lower lung nodule false positive rates, which indicates an improvement in the networks discriminant power. }, } |
2018 | Book chapter | I. A. Illan, J. Ramirez, J. M. Gorriz, K. Pinker, A. Meyer-Baese (2018). NA in Reproducible evaluation of registration algorithms for movement correction in dynamic contrast enhancing magnetic resonance imaging for breast cancer diagnosis, Edited by D Stoyanov, Z Taylor, B Kainz, G Maicas, R R Beichel, Springer International Publishing Ag, pp. 124–131, Lecture Notes in Computer Science, Vol. 11040 LNCS, ISBN: 16113349. (link) (bib) x @inbook{RN821, year = { 2018 }, volume = { 11040 LNCS }, url = { {\%}3CGo to }, type = { Book Section }, title = { Reproducible evaluation of registration algorithms for movement correction in dynamic contrast enhancing magnetic resonance imaging for breast cancer diagnosis }, series = { Lecture Notes in Computer Science }, publisher = { Springer International Publishing Ag }, pages = { 124--131 }, keywords = { DCE-MRI,Diffeomorphism,Medical image processing,Non-affine registration,Optical flow,Registration,Reproducibility }, issn = { 16113349 }, isbn = { 9783030009458 }, editor = { [object Object],[object Object],[object Object],[object Object],[object Object] }, doi = { 10.1007/978-3-030-00946-5_14 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Illan and Ramirez and Gorriz and Pinker and Meyer-Baese }, address = { Cham }, abstract = { Accurate methods for computer aided diagnosis of breast cancer increase accuracy of detection and provide support to physicians in detecting challenging cases. In dynamic contrast enhancing magnetic resonance imaging (DCE-MRI), motion artifacts can appear as a result of patient displacements. Non-linear deformation algorithms for breast image registration provide with a solution to the correspondence problem in contrast with affine models. 
In this study we evaluate 3 popular non-linear registration algorithms: MIRTK, Demons, SyN Ants, and compare to the affine baseline. We propose automatic measures for reproducible evaluation on the DCE-MRI breast-diagnosis TCIA-database, based on edge detection and clustering algorithms, and provide a rank of the methods according to these measures. }, } |
2018 | Journal | Ziv Yaniv, Bradley C Lowekamp, Hans J Johnson, Richard Beare (2018). SimpleITK image-analysis notebooks: a collaborative environment for education and reproducible research. Journal of digital imaging, 31(3), pp. 290–303. (bib) x @article{yaniv2018simpleitka, year = { 2018 }, volume = { 31 }, title = { SimpleITK image-analysis notebooks: a collaborative environment for education and reproducible research }, publisher = { Springer International Publishing }, pages = { 290--303 }, number = { 3 }, journal = { Journal of digital imaging }, author = { Yaniv and Lowekamp and Johnson and Beare }, } |
2018 | Journal | Yi Hong, Lauren J. O'Donnell, Peter Savadjiev, Fan Zhang, Demian Wassermann, Ofer Pasternak, Hans Johnson, Jane Paulsen, Jean Paul Vonsattel, Nikos Makris, Carl F. Westin, Yogesh Rathi (2018). Genetic load determines atrophy in hand cortico-striatal pathways in presymptomatic Huntington's disease. Human Brain Mapping, 39(10), pp. 3871–3883. (bib) x @article{hong2018genetic, year = { 2018 }, volume = { 39 }, title = { Genetic load determines atrophy in hand cortico-striatal pathways in presymptomatic Huntington's disease }, pages = { 3871--3883 }, number = { 10 }, keywords = { CAG-repeats,cortico-striatal pathways,diffusion MRI,isotropic volume fraction,prodromal-HD }, journal = { Human Brain Mapping }, issn = { 10970193 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Hong et al/Human Brain Mapping/Hong et al. - 2018 - Genetic load determines atrophy in hand cortico-striatal pathways in presymptomatic Huntington's disease.pdf:pdf }, doi = { 10.1002/hbm.24217 }, author = { Hong and O'Donnell and Savadjiev and Zhang and Wassermann and Pasternak and Johnson and Paulsen and Vonsattel and Makris and Westin and Rathi }, abstract = { Huntington's disease (HD) is an inherited neurodegenerative disorder that causes progressive breakdown of striatal neurons. Standard white matter integrity measures like fractional anisotropy and mean diffusivity derived from diffusion tensor imaging were analyzed in prodromal-HD subjects; however, they studied either a whole brain or specific subcortical white matter structures with connections to cortical motor areas. In this work, we propose a novel analysis of a longitudinal cohort of 243 prodromal-HD individuals and 88 healthy controls who underwent two or more diffusion MRI scans as part of the PREDICT-HD study. We separately trace specific white matter fiber tracts connecting the striatum (caudate and putamen) with four cortical regions corresponding to the hand, face, trunk, and leg motor areas. 
A multi-tensor tractography algorithm with an isotropic volume fraction compartment allows estimating diffusion of fast-moving extra-cellular water in regions containing crossing fibers and provides quantification of a microstructural property related to tissue atrophy. The tissue atrophy rate is separately analyzed in eight cortico-striatal pathways as a function of CAG-repeats (genetic load) by statistically regressing out age effect from our cohort. The results demonstrate a statistically significant increase in isotropic volume fraction (atrophy) bilaterally in hand fiber connections to the putamen with increasing CAG-repeats, which connects the genetic abnormality (CAG-repeats) to an imaging-based microstructural marker of tissue integrity in specific white matter pathways in HD. Isotropic volume fraction measures in eight cortico-striatal pathways are also correlated significantly with total motor scores and diagnostic confidence levels, providing evidence of their relevance to HD clinical presentation. }, } |
2018 | Journal | Ziv Yaniv, Bradley C. Lowekamp, Hans J. Johnson, Richard Beare (2018). SimpleITK Image-Analysis Notebooks: a Collaborative Environment for Education and Reproducible Research. Journal of Digital Imaging, 31(3), pp. 290–303. (bib) x @article{yaniv2018simpleitk, year = { 2018 }, volume = { 31 }, title = { SimpleITK Image-Analysis Notebooks: a Collaborative Environment for Education and Reproducible Research }, publisher = { Springer International Publishing }, pmid = { 29181613 }, pages = { 290--303 }, number = { 3 }, keywords = { Image analysis,Open-source software,Python,R,Registration,Segmentation }, journal = { Journal of Digital Imaging }, issn = { 1618727X }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Yaniv et al/Journal of Digital Imaging/Yaniv et al. - 2018 - SimpleITK Image-Analysis Notebooks a Collaborative Environment for Education and Reproducible Research.pdf:pdf }, doi = { 10.1007/s10278-017-0037-8 }, author = { Yaniv and Lowekamp and Johnson and Beare }, abstract = { Modern scientific endeavors increasingly require team collaborations to construct and interpret complex computational workflows. This work describes an image-analysis environment that supports the use of computational tools that facilitate reproducible research and support scientists with varying levels of software development skills. The Jupyter notebook web application is the basis of an environment that enables flexible, well-documented, and reproducible workflows via literate programming. Image-analysis software development is made accessible to scientists with varying levels of programming experience via the use of the SimpleITK toolkit, a simplified interface to the Insight Segmentation and Registration Toolkit. Additional features of the development environment include user friendly data sharing using online data repositories and a testing framework that facilitates code maintenance. 
SimpleITK provides a large number of examples illustrating educational and research-oriented image analysis workflows for free download from GitHub under an Apache 2.0 license: github.com/InsightSoftwareConsortium/SimpleITK-Notebooks. }, } |
2018 | Journal | Ali Ghayoor, Jatin G. Vaidya, Hans J. Johnson (2018). Robust automated constellation-based landmark detection in human brain imaging. NeuroImage, 170, pp. 471–481. (bib) x @article{Johnson2018, year = { 2018 }, volume = { 170 }, title = { Robust automated constellation-based landmark detection in human brain imaging }, pages = { 471--481 }, keywords = { Automated landmark detection,Morphometric measures,Principal component analysis,Statistical shape models }, journal = { NeuroImage }, issn = { 10959572 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Ghayoor, Vaidya, Johnson/NeuroImage/Ghayoor, Vaidya, Johnson - 2018 - Robust automated constellation-based landmark detection in human brain imaging.pdf:pdf }, doi = { 10.1016/j.neuroimage.2017.04.012 }, author = { Ghayoor and Vaidya and Johnson }, abstract = { A robust fully automated algorithm for identifying an arbitrary number of landmark points in the human brain is described and validated. The proposed method combines statistical shape models with trained brain morphometric measures to estimate midbrain landmark positions reliably and accurately. Gross morphometric constraints provided by automatically identified eye centers and the center of the head mass are shown to provide robust initialization in the presence of large rotations in the initial head orientation. Detection of primary midbrain landmarks is used as the foundation from which extended detection of an arbitrary set of secondary landmarks in different brain regions is achieved by applying a linear model estimation and principal component analysis. This estimation model sequentially uses the knowledge of each additional detected landmark as an improved foundation for improved prediction of the next landmark location. 
The accuracy and robustness of the presented method was evaluated by comparing the automatically generated results to two manual raters on 30 identified landmark points extracted from each of 30 T1-weighted magnetic resonance images. For the landmarks with unambiguous anatomical definitions, the average discrepancy between the algorithm results and each human observer differed by less than 1 mm from the average inter-observer variability when the algorithm was evaluated on imaging data collected from the same site as the model building data. Similar results were obtained when the same model was applied to a set of heterogeneous image volumes from seven different collection sites representing 3 scanner manufacturers. This method is reliable for general application in large-scale multi-site studies that consist of a variety of imaging data with different orientations, spacings, origins, and field strengths. }, } |
2018 | Journal | Tufve Nyholm, Stina Svensson, Sebastian Andersson, Joakim Jonsson, Maja Sohlin, Christian Gustafsson, Elisabeth Kjellén, Karin Söderström, Per Albertsson, Lennart Blomqvist, Björn Zackrisson, Lars E. Olsson, Adalsteinn Gunnlaugsson (2018). MR and CT data with multiobserver delineations of organs in the pelvic area-Part of the Gold Atlas project:. Medical Physics, 45(3), pp. 1295–1300. (link) (bib) x @article{Nyholm2018, year = { 2018 }, volume = { 45 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { MR and CT data with multiobserver delineations of organs in the pelvic area-Part of the Gold Atlas project: }, pages = { 1295--1300 }, number = { 3 }, keywords = { CT,MRI,open dataset,organs at risk,radiotherapy }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1002/mp.12748 }, author = { Nyholm and Svensson and Andersson and Jonsson and Sohlin and Gustafsson and Kjell{\'{e}}n and S{\"{o}}derstr{\"{o}}m and Albertsson and Blomqvist and Zackrisson and Olsson and Gunnlaugsson }, abstract = { Purpose: We describe a public dataset with MR and CT images of patients performed in the same position with both multiobserver and expert consensus delineations of relevant organs in the male pelvic region. The purpose was to provide means for training and validation of segmentation algorithms and methods to convert MR to CT like data, i.e., so called synthetic CT (sCT). Acquisition and validation methods: T1-and T2-weighted MR images as well as CT data were collected for 19 patients at three different departments. Five experts delineated nine organs for each patient based on the T2-weighted MR images. An automatic method was used to fuse the delineations. Starting from each fused delineation, a consensus delineation was agreed upon by the five experts for each organ and patient. Segmentation overlap between user delineations with respect to the consensus delineations was measured to describe the spread of the collected data. 
Finally, open-source software was used to create deformation vector fields describing the relation between MR and CT images to further increase the usability of the dataset. Data format and usage notes: The dataset has been made publicly available to be used for academic purposes, and can be accessed from https://zenodo.org/record/583096. Potential applications: The dataset provides a useful source for training and validation of segmentation algorithms as well as methods to convert MR to CT-like data (sCT). To give some examples: The T2-weighted MR images with their consensus delineations can directly be used as a template in an existing atlas-based segmentation engine; the expert delineations are useful to validate the performance of a segmentation algorithm as they provide a way to measure variability among users which can be compared with the result of an automatic segmentation; and the pairwise deformably registered MR and CT images can be a source for an atlas-based sCT algorithm or for validation of an sCT algorithm. }, } |
2018 | Journal | Dženan Zukić, Darrin W. Byrd, Paul E. Kinahan, Andinet Enquobahrie (2018). Calibration Software for Quantitative PET/CT Imaging Using Pocket Phantoms. Tomography (Ann Arbor, Mich.), 4(3), pp. 148–158. (link) (bib) x @article{Zukic2018, year = { 2018 }, volume = { 4 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Calibration Software for Quantitative PET/CT Imaging Using Pocket Phantoms }, pages = { 148--158 }, number = { 3 }, keywords = { PET imaging,bias,calibration,correction,phantom }, journal = { Tomography (Ann Arbor, Mich.) }, issn = { 2379139X }, doi = { 10.18383/j.tom.2018.00020 }, author = { Zuki{\'{c}} and Byrd and Kinahan and Enquobahrie }, abstract = { Multicenter clinical trials that use positron emission tomography (PET) imaging frequently rely on stable bias in imaging biomarkers to assess drug effectiveness. Many well-documented factors cause variability in PET intensity values. Two of the largest scanner-dependent errors are scanner calibration and reconstructed image resolution variations. For clinical trials, an increase in measurement error significantly increases the number of patient scans needed. We aim to provide a robust quality assurance system using portable PET/computed tomography "pocket" phantoms and automated image analysis algorithms with the goal of reducing PET measurement variability. A set of the "pocket" phantoms was scanned with patients, affixed to the underside of a patient bed. Our software analyzed the obtained images and estimated the image parameters. The analysis consisted of 2 steps, automated phantom detection and estimation of PET image resolution and global bias. Performance of the algorithm was tested under variations in image bias, resolution, noise, and errors in the expected sphere size. A web-based application was implemented to deploy the image analysis pipeline in a cloud-based infrastructure to support multicenter data acquisition, under Software-as-a-Service (SaaS) model. 
The automated detection algorithm localized the phantom reliably. Simulation results showed stable behavior when image properties and input parameters were varied. The PET "pocket" phantom has the potential to reduce and/or check for standardized uptake value measurement errors. }, } |
2018 | Journal | Duo Zhang, P. Hendrik Pretorius, Michael Ghaly, Qi Zhang, Michael A. King, Greta S.P. Mok (2018). Evaluation of different respiratory gating schemes for cardiac SPECT. Journal of Nuclear Cardiology, NA pp. NA (link) (bib) x @article{Zhang2018, year = { 2018 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85051860976{\&}doi=10.1007{\%}2Fs12350-018-1392-7{\&}partnerID=40{\&}md5=58fd21a00fccc01830e89c4949b3bcee }, type = { Journal Article }, title = { Evaluation of different respiratory gating schemes for cardiac SPECT }, keywords = { Cardiac perfusion,Respiratory gating,SPECT/CT,Simulation }, journal = { Journal of Nuclear Cardiology }, issn = { 15326551 }, doi = { 10.1007/s12350-018-1392-7 }, author = { Zhang and Pretorius and Ghaly and Zhang and King and Mok }, abstract = { Background: Respiratory gating reduces motion blurring in cardiac SPECT. Here we aim to evaluate the performance of three respiratory gating strategies using a population of digital phantoms with known truth and clinical data. Methods: We analytically simulated 60 projections for 10 XCAT phantoms with 99mTc-sestamibi distributions using three gating schemes: equal amplitude gating (AG), equal count gating (CG), and equal time gating (TG). Clinical list-mode data for 10 patients who underwent 99mTc-sestamibi scans were also processed using the 3 gating schemes. Reconstructed images in each gate were registered to a reference gate, averaged and reoriented to generate the polar plots. For simulations, image noise, relative difference (RD) of averaged count for each of the 17 segment, and relative defect size difference (RSD) were analyzed. For clinical data, image intensity profile and FWHM were measured across the left ventricle wall. Results: For simulations, AG and CG methods showed significantly lower RD and RSD compared to TG, while noise variation was more non-uniform through different gates for AG. In the clinical study, AG and CG had smaller FWHM than TG. 
Conclusions: AG and CG methods show better performance for motion reduction and are recommended for clinical respiratory gating SPECT implementation. }, } |
2018 | Journal | Wei Cheng Yan, Pooya Davoodi, Sanjairaj Vijayavenkataraman, Yuan Tian, Wei Cheng Ng, Jerry Y.H. Fuh, Kim Samirah Robinson, Chi Hwa Wang (2018). 3D bioprinting of skin tissue: From pre-processing to final product evaluation. Advanced Drug Delivery Reviews, 132, pp. 270–295. (link) (bib) x @article{Yan2018, year = { 2018 }, volume = { 132 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { 3D bioprinting of skin tissue: From pre-processing to final product evaluation }, pages = { 270--295 }, keywords = { 3D bioprinting,Artificial skin,Skin tissue engineering,Tissue engineering }, journal = { Advanced Drug Delivery Reviews }, issn = { 18728294 }, doi = { 10.1016/j.addr.2018.07.016 }, author = { Yan and Davoodi and Vijayavenkataraman and Tian and Ng and Fuh and Robinson and Wang }, abstract = { Bioprinted skin tissue has the potential for aiding drug screening, formulation development, clinical transplantation, chemical and cosmetic testing, as well as basic research. Limitations of conventional skin tissue engineering approaches have driven the development of biomimetic skin equivalent via 3D bioprinting. A key hope for bioprinting skin is the improved tissue authenticity over conventional skin equivalent construction, enabling the precise localization of multiple cell types and appendages within a construct. The printing of skin faces challenges broadly associated with general 3D bioprinting, including the selection of cell types and biomaterials, and additionally requires in vitro culture formats that allow for growth at an air-liquid interface. This paper provides a thorough review of current 3D bioprinting technologies used to engineer human skin constructs and presents the overall pipelines of designing a biomimetic artificial skin via 3D bioprinting from the design phase (i.e. pre-processing phase) through the tissue maturation phase (i.e. 
post-processing) and into final product evaluation for drug screening, development, and drug delivery applications. }, } |
2018 | Journal | Sanjairaj Vijayavenkataraman, Wei Cheng Yan, Wen Feng Lu, Chi Hwa Wang, Jerry Ying Hsi Fuh (2018). 3D bioprinting of tissues and organs for regenerative medicine. Advanced Drug Delivery Reviews, 132, pp. 296–332. (link) (bib) x @article{Vijayavenkataraman2018, year = { 2018 }, volume = { 132 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { 3D bioprinting of tissues and organs for regenerative medicine }, pages = { 296--332 }, keywords = { 3D bioprinting,3D printing,Bioprinting,Organ printing,Regenerative medicine,Tissue engineering }, journal = { Advanced Drug Delivery Reviews }, issn = { 18728294 }, doi = { 10.1016/j.addr.2018.07.004 }, author = { Vijayavenkataraman and Yan and Lu and Wang and Fuh }, abstract = { 3D bioprinting is a pioneering technology that enables fabrication of biomimetic, multiscale, multi-cellular tissues with highly complex tissue microenvironment, intricate cytoarchitecture, structure-function hierarchy, and tissue-specific compositional and mechanical heterogeneity. Given the huge demand for organ transplantation, coupled with limited organ donors, bioprinting is a potential technology that could solve this crisis of organ shortage by fabrication of fully-functional whole organs. Though organ bioprinting is a far-fetched goal, there has been a considerable and commendable progress in the field of bioprinting that could be used as transplantable tissues in regenerative medicine. This paper presents a first-time review of 3D bioprinting in regenerative medicine, where the current status and contemporary issues of 3D bioprinting pertaining to the eleven organ systems of the human body including skeletal, muscular, nervous, lymphatic, endocrine, reproductive, integumentary, respiratory, digestive, urinary, and circulatory systems were critically reviewed. 
The implications of 3D bioprinting in drug discovery, development, and delivery systems are also briefly discussed, in terms of in vitro drug testing models, and personalized medicine. While there is a substantial progress in the field of bioprinting in the recent past, there is still a long way to go to fully realize the translational potential of this technology. Computational studies for study of tissue growth or tissue fusion post-printing, improving the scalability of this technology to fabricate human-scale tissues, development of hybrid systems with integration of different bioprinting modalities, formulation of new bioinks with tuneable mechanical and rheological properties, mechanobiological studies on cell-bioink interaction, 4D bioprinting with smart (stimuli-responsive) hydrogels, and addressing the ethical, social, and regulatory issues concerning bioprinting are potential futuristic focus areas that would aid in successful clinical translation of this technology. }, } |
2018 | Journal | Subashini Srinivasan, Brian A. Hargreaves, Bruce L. Daniel (2018). Fat-based registration of breast dynamic contrast enhanced water images. Magnetic Resonance in Medicine, 79(4), pp. 2408–2414. (link) (bib) x @article{Srinivasan2018, year = { 2018 }, volume = { 79 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Fat-based registration of breast dynamic contrast enhanced water images }, pages = { 2408--2414 }, number = { 4 }, keywords = { breast deformable motion,fat-based registration }, journal = { Magnetic Resonance in Medicine }, issn = { 15222594 }, doi = { 10.1002/mrm.26851 }, author = { Srinivasan and Hargreaves and Daniel }, abstract = { Purpose: In this study, a 3D fat-based deformable registration algorithm was developed for registering dynamic contrast-enhanced breast images. Methods: The mutual information similarity measure with free-form deformation motion correction in rapidly enhancing lesions can introduce motion. However, in Dixon-based fat-water separated acquisitions, the nonenhancing fat signal can directly be used to estimate deformable motion, which can be later used to deform the water images. Qualitative comparison of the fat-based registration method to a water-based registration method, and to the unregistered images, was performed by two experienced readers. Quantitative analysis of the registration was evaluated by estimating the mean-squared signal difference on the fat images. Results: Using a scale of 0 (no motion) to 2 ({\textgreater} 4 voxels of motion), the average image quality score of the fat-based registered images was 0.5 ± 0.6, water-based registration was 0.8 ± 0.8, and the unregistered dataset was 1.6 ± 0.6. The mean-squared-signal-difference metric on the fat images was significantly lower for fat-based registered images compared with both water-based registered and unregistered images. 
Conclusions: Fat-based registration of breast dynamic contrast-enhanced images is a promising technique for performing deformable motion correction of breast without introducing new motion. Magn Reson Med 79:2408–2414, 2018. {\textcopyright} 2017 International Society for Magnetic Resonance in Medicine. }, } |
2018 | Journal | Roozbeh Shams, Yiming Xiao, Francois Hebert, Matthew Abramowitz, Rupert Brooks, Hassan Rivaz (2018). Assessment of Rigid Registration Quality Measures in Ultrasound-Guided Radiotherapy. IEEE Transactions on Medical Imaging, 37(2), pp. 428–437. (link) (bib) x @article{Shams2018, year = { 2018 }, volume = { 37 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85030780937{\&}doi=10.1109{\%}2FTMI.2017.2755695{\&}partnerID=40{\&}md5=c0b386f770628888db0e4e521b9905ba }, type = { Journal Article }, title = { Assessment of Rigid Registration Quality Measures in Ultrasound-Guided Radiotherapy }, pages = { 428--437 }, number = { 2 }, keywords = { Radiotherapy,bootstrapping,image registration,motion management,quality management,supervised learning }, journal = { IEEE Transactions on Medical Imaging }, issn = { 1558254X }, doi = { 10.1109/TMI.2017.2755695 }, author = { Shams and Xiao and Hebert and Abramowitz and Brooks and Rivaz }, abstract = { Image guidance has become the standard of care for patient positioning in radiotherapy, where image registration is often a critical step to help manage patient motion. However, in practice, verification of registration quality is often adversely affected by difficulty in manual inspection of 3-D images and time constraint, thus affecting the therapeutic outcome. Therefore, we proposed to employ both bootstrapping and the supervised learning methods of linear discriminant analysis and random forest to help robustly assess registration quality in ultrasound-guided radiotherapy. We validated both approaches using phantom and real clinical ultrasound images, and showed that both performed well for the task. While learning-based techniques offer better accuracy and shorter evaluation time, bootstrapping requires no prior training and has a higher sensitivity. }, } |
2018 | Journal | Daniel Schmitz, Sascha E.A. Muenzing, Martin Schober, Nicole Schubert, Martina Minnerop, Thomas Lippert, Katrin Amunts, Markus Axer (2018). Derivation of Fiber Orientations From Oblique Views Through Human Brain Sections in 3D-Polarized Light Imaging. Frontiers in Neuroanatomy, 12, pp. 15. (link) (bib) x @article{Schmitz2018, year = { 2018 }, volume = { 12 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Derivation of Fiber Orientations From Oblique Views Through Human Brain Sections in 3D-Polarized Light Imaging }, pages = { 15 }, keywords = { 3D-PLI,Fiber architecture,Modeling,Neuroimaging,White matter anatomy }, journal = { Frontiers in Neuroanatomy }, issn = { 16625129 }, doi = { 10.3389/fnana.2018.00075 }, author = { Schmitz and Muenzing and Schober and Schubert and Minnerop and Lippert and Amunts and Axer }, abstract = { 3D-Polarized Light Imaging (3D-PLI) enables high-resolution three-dimensional mapping of the nerve fiber architecture in unstained histological brain sections based on the intrinsic birefringence of myelinated nerve fibers. The interpretation of the measured birefringent signals comes with conjointly measured information about the local fiber birefringence strength and the fiber orientation. In this study, we present a novel approach to disentangle both parameters from each other based on a weighted least squares routine (ROFL) applied to oblique polarimetric 3D-PLI measurements. This approach was compared to a previously described analytical method on simulated and experimental data obtained from a post mortem human brain. Analysis of the simulations revealed in case of ROFL a distinctly increased level of confidence to determine steep and flat fiber orientations with respect to the brain sectioning plane. 
Based on analysis of histological sections of a human brain dataset, it was demonstrated that ROFL provides a coherent characterization of cortical, subcortical, and white matter regions in terms of fiber orientation and birefringence strength, within and across sections. Oblique measurements combined with ROFL analysis opens up new ways to determine physical brain tissue properties by means of 3D-PLI microscopy. }, } |
2018 | Journal | María Soledad Ramírez, Francisco José García-Peñalvo (2018). Co-creation and open innovation: Systematic literature review. Comunicar, 26(54), pp. 9–18. (link) (bib) x @article{Ramirez2018, year = { 2018 }, volume = { 26 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Co-creation and open innovation: Systematic literature review }, pages = { 9--18 }, number = { 54 }, keywords = { Citizen science,Collaboration,Innovation,Knowledge,Knowledge co-creation,Open science,Openness,Validation }, journal = { Comunicar }, issn = { 19883293 }, doi = { 10.3916/C54-2018-01 }, author = { Ram{\'{i}}rez and Garc{\'{i}}a-Pe{\~{n}}alvo }, abstract = { Open science, as a common good, opens possibilities for the development of nations, through innovations and collaborative constructions, which help to democratize knowledge. Advances in this area are still emerging, and the open science, co-creation of knowledge and open innovation triangle, is presented as an opportunity to generate an original contribution from research to open educational theory and practices. The study analyzed the articles that addressed this triangle, in order to identify the contexts and challenges that arise in open innovation and the co-creation of knowledge to promote open science. The method was a systematic literature review (SLR) of 168 articles published in open access format, from January 2014 to May 2017 in the Web of Science and Scopus databases. In the validation process, the York University criteria were used: inclusion and exclusion, relevance of the pertinent studies, evaluation of the quality / validity of included studies and description of data / basic studies. The findings showed that the most-widely publicized contexts were in the United States and Brazil, in the business and academic sectors (closely followed by the social sector), and the challenges were open to innovation, opening and research. 
The research concludes that the context and practices of collaboration are substantial elements for innovation and open science. }, } |
2018 | Journal | Sebastian Primpke, Marisa Wirth, Claudia Lorenz, Gunnar Gerdts (2018). Reference database design for the automated analysis of microplastic samples based on Fourier transform infrared (FTIR) spectroscopy. Analytical and Bioanalytical Chemistry, 410(21), pp. 5131–5141. (link) (bib) x @article{Primpke2018, year = { 2018 }, volume = { 410 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Reference database design for the automated analysis of microplastic samples based on Fourier transform infrared (FTIR) spectroscopy }, pages = { 5131--5141 }, number = { 21 }, keywords = { Database,FTIR,Imaging,Infrared,Microplastics,Spectroscopy }, journal = { Analytical and Bioanalytical Chemistry }, issn = { 16182650 }, doi = { 10.1007/s00216-018-1156-x }, author = { Primpke and Wirth and Lorenz and Gerdts }, abstract = { The identification of microplastics becomes increasingly challenging with decreasing particle size and increasing sample heterogeneity. The analysis of microplastic samples by Fourier transform infrared (FTIR) spectroscopy is a versatile, bias-free tool to succeed at this task. In this study, we provide an adaptable reference database, which can be applied to single-particle identification as well as methods like chemical imaging based on FTIR microscopy. The large datasets generated by chemical imaging can be further investigated by automated analysis, which does, however, require a carefully designed database. The novel database design is based on the hierarchical cluster analysis of reference spectra in the spectral range from 3600 to 1250 cm−1. The hereby generated database entries were optimized for the automated analysis software with defined reference datasets. The design was further tested for its customizability with additional entries. The final reference database was extensively tested on reference datasets and environmental samples. 
Data quality by means of correct particle identification and depiction significantly increased compared to that of previous databases, proving the applicability of the concept and highlighting the importance of this work. Our novel database provides a reference point for data comparison with future and previous microplastic studies that are based on different databases. [Figure not available: see fulltext.]. }, } |
2018 | Journal | H. Prasetio, J. Wölfelschneider, M. Ziegler, M. Serpa, B. Witulla, C. Bert (2018). Dose calculation and verification of the Vero gimbal tracking treatment delivery. Physics in Medicine and Biology, 63(3), pp. 16. (link) (bib) x @article{Prasetio2018, year = { 2018 }, volume = { 63 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Dose calculation and verification of the Vero gimbal tracking treatment delivery }, pages = { 16 }, number = { 3 }, keywords = { MU distribution,dose reconstruction,dynamic tumor tracking,film dosimetry,gimbal tracking }, journal = { Physics in Medicine and Biology }, issn = { 13616560 }, doi = { 10.1088/1361-6560/aaa617 }, author = { Prasetio and W{\"{o}}lfelschneider and Ziegler and Serpa and Witulla and Bert }, abstract = { The Vero linear accelerator delivers dynamic tumor tracking (DTT) treatment using a gimbal motion. However, the availability of treatment planning systems (TPS) to simulate DTT is limited. This study aims to implement and verify the gimbal tracking beam geometry in the dose calculation. Gimbal tracking was implemented by rotating the reference CT outside the TPS according to the ring, gantry, and gimbal tracking position obtained from the tracking log file. The dose was calculated using these rotated CTs. The geometric accuracy was verified by comparing calculated and measured film response using a ball bearing phantom. The dose was verified by comparing calculated 2D dose distributions and film measurements in a ball bearing and a homogeneous phantom using a gamma criterion of 2{\%}/2 mm. The effect of implementing the gimbal tracking beam geometry in a 3D patient data dose calculation was evaluated using dose volume histograms (DVH). Geometrically, the gimbal tracking implementation accuracy was {\textless}0.94 mm. The isodose lines agreed with the film measurement. 
The largest dose difference of 9.4{\%} was observed at maximum tilt positions with an isocenter and target separation of 17.51 mm. Dosimetrically, gamma passing rates were {\textgreater}98.4{\%}. The introduction of the gimbal tracking beam geometry in the dose calculation shifted the DVH curves by 0.05{\%}-1.26{\%} for the phantom geometry and by 5.59{\%} for the patient CT dataset. This study successfully demonstrates a method to incorporate the gimbal tracking beam geometry into dose calculations. By combining CT rotation and MU distribution according to the log file, the TPS was able to simulate the Vero tracking treatment dose delivery. The DVH analysis from the gimbal tracking dose calculation revealed changes in the dose distribution during gimbal DTT that are not visible with static dose calculations. }, } |
2018 | Journal | Jonas Pichat, Juan Eugenio Iglesias, Tarek Yousry, Sébastien Ourselin, Marc Modat (2018). A Survey of Methods for 3D Histology Reconstruction. Medical Image Analysis, 46, pp. 73–105. (link) (bib) x @article{Pichat2018, year = { 2018 }, volume = { 46 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A Survey of Methods for 3D Histology Reconstruction }, pages = { 73--105 }, keywords = { 3D reconstruction,Histology,MRI,Medical imaging,Registration }, journal = { Medical Image Analysis }, issn = { 13618423 }, doi = { 10.1016/j.media.2018.02.004 }, author = { Pichat and Iglesias and Yousry and Ourselin and Modat }, abstract = { Histology permits the observation of otherwise invisible structures of the internal topography of a specimen. Although it enables the investigation of tissues at a cellular level, it is invasive and breaks topology due to cutting. Three-dimensional (3D) reconstruction was thus introduced to overcome the limitations of single-section studies in a dimensional scope. 3D reconstruction finds its roots in embryology, where it enabled the visualisation of spatial relationships of developing systems and organs, and extended to biomedicine, where the observation of individual, stained sections provided only partial understanding of normal and abnormal tissues. However, despite bringing visual awareness, recovering realistic reconstructions is elusive without prior knowledge about the tissue shape. 3D medical imaging made such structural ground truths available. In addition, combining non-invasive imaging with histology unveiled invaluable opportunities to relate macroscopic information to the underlying microscopic properties of tissues through the establishment of spatial correspondences; image registration is one technique that permits the automation of such a process and we describe reconstruction methods that rely on it. 
It is thereby possible to recover the original topology of histology and lost relationships, gain insight into what affects the signals used to construct medical images (and characterise them), or build high resolution anatomical atlases. This paper reviews almost three decades of methods for 3D histology reconstruction from serial sections, used in the study of many different types of tissue. We first summarise the process that produces digitised sections from a tissue specimen in order to understand the peculiarity of the data, the associated artefacts and some possible ways to minimise them. We then describe methods for 3D histology reconstruction with and without the help of 3D medical imaging, along with methods of validation and some applications. We finally attempt to identify the trends and challenges that the field is facing, many of which are derived from the cross-disciplinary nature of the problem as it involves the collaboration between physicists, histopathologists, computer scientists and physicians. }, } |
2018 | Journal | Renzo Phellan, Thomas Lindner, Michael Helle, Alexandre X. Falcao, Nils Daniel Forkert (2018). Automatic Temporal Segmentation of Vessels of the Brain Using 4D ASL MRA Images. IEEE Transactions on Biomedical Engineering, 65(7), pp. 1486–1494. (link) (bib) x @article{Phellan2018, year = { 2018 }, volume = { 65 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Automatic Temporal Segmentation of Vessels of the Brain Using 4D ASL MRA Images }, pages = { 1486--1494 }, number = { 7 }, keywords = { Angiography,magnetic resonance angiography,temporal segmentation,vessel enhancement,vessel segmentation }, journal = { IEEE Transactions on Biomedical Engineering }, issn = { 15582531 }, doi = { 10.1109/TBME.2017.2759730 }, author = { Phellan and Lindner and Helle and Falcao and Forkert }, abstract = { Objective: Automatic vessel segmentation can be used to process the considerable amount of data generated by four-dimensional arterial spin labeling magnetic resonance angiography (4D ASL MRA) images. Previous segmentation approaches for dynamic series of images propose either reducing the series to a temporal average (tAIP) or maximum intensity projection (tMIP) prior to vessel segmentation, or a separate segmentation of each image. This paper introduces a method that combines both approaches to overcome the specific drawbacks of each technique. Methods: Vessels in the tAIP are enhanced by using the ranking orientation responses of path operators and multiscale vesselness enhancement filters. Then, tAIP segmentation is performed using a seed-based algorithm. In parallel, this algorithm is also used to segment each frame of the series and identify small vessels, which might have been lost in the tAIP segmentation. The results of each individual time frame segmentation are fused using an or boolean operation. Finally, small vessels found only in the fused segmentation are added to the tAIP segmentation. 
Results: In a quantitative analysis using ten 4D ASL MRA image series from healthy volunteers, the proposed combined approach reached an average Dice coefficient of 0.931, being more accurate than the corresponding tMIP, tAIP, and single time frame segmentation methods with statistical significance. Conclusion: The novel combined vessel segmentation strategy can be used to obtain improved vessel segmentation results from 4D ASL MRA and other dynamic series of images. Significance: Improved vessel segmentation of 4D ASL MRA allows a fast and accurate assessment of cerebrovascular structures. }, } |
2018 | Journal | Bruno Paun, Bart Bijnens, Andrew C. Cook, Timothy J. Mohun, Constantine Butakoff (2018). Quantification of the detailed cardiac left ventricular trabecular morphogenesis in the mouse embryo. Medical Image Analysis, 49, pp. 89–104. (link) (bib) x @article{Paun2018, year = { 2018 }, volume = { 49 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Quantification of the detailed cardiac left ventricular trabecular morphogenesis in the mouse embryo }, pages = { 89--104 }, keywords = { 3D fractal analysis,Cardiac embryology,Cardiac morphogenesis,Cardiac trabeculations,High resolution episcopic microscopy }, journal = { Medical Image Analysis }, issn = { 13618423 }, doi = { 10.1016/j.media.2018.08.001 }, author = { Paun and Bijnens and Cook and Mohun and Butakoff }, abstract = { During embryogenesis, a mammalian heart develops from a simple tubular shape into a complex 4-chamber organ, going through four distinct phases: early primitive tubular heart, emergence of trabeculations, trabecular remodeling and development of the compact myocardium. In this paper we propose a framework for standardized and subject-independent 3D regional myocardial complexity analysis, applied to analysis of the development of the mouse left ventricle. We propose a standardized subdivision of the myocardium into 3D overlapping regions (in our case 361) and a novel visualization of myocardial complexity, whereupon we: 1) extend the fractal dimension, commonly applied to image slices, to 3D and 2) use volume occupied by the trabeculations in each region together with their surface area, in order to quantify myocardial complexity. The latter provides an intuitive characterization of the complexity, given that compact myocardium will tend to occupy a larger volume with little surface area while high surface area with low volume will correspond to highly trabeculated areas. 
Using 50 mouse embryo images at 5 different gestational ages (10 subjects per gestational age), we demonstrate how the proposed representation and complexity measures describe the development of LV myocardial complexity. The mouse embryo data was acquired using high resolution episcopic microscopy. The complexity analysis per region was carried out using: 3D fractal dimension, myocardial volume, myocardial surface area and ratio between the two. The analysis of gestational ages was performed on embryos of 14.5, 15.5, 16.5, 17.5 and 18.5 embryonic days, and demonstrated that the regional complexity of the trabeculations increases longitudinally from the base to the apex, with a maximum around the middle. The overall complexity decreases with gestational age, being most complex at 14.5. Circumferentially, at ages 14.5, 15.5 and 16.5, the trabeculations show similar complexity everywhere except for the anteroseptal and inferolateral area of the wall, where it is smaller. At 17.5 days, the regions of high complexity become more localized towards the inferoseptal and anterolateral parts of the wall. At 18.5 days, the high complexity area exhibits further localization at the inferoseptal and anterior part of the wall. }, } |
2018 | Journal | Nathan Heath Patterson, Michael Tuck, Adam Lewis, Alexis Kaushansky, Jeremy L. Norris, Raf Van De Plas, Richard M. Caprioli (2018). Next Generation Histology-Directed Imaging Mass Spectrometry Driven by Autofluorescence Microscopy. Analytical Chemistry, 90(21), pp. 12404–12413. (link) (bib) x @article{Patterson2018, year = { 2018 }, volume = { 90 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Next Generation Histology-Directed Imaging Mass Spectrometry Driven by Autofluorescence Microscopy }, pages = { 12404--12413 }, number = { 21 }, journal = { Analytical Chemistry }, issn = { 15206882 }, doi = { 10.1021/acs.analchem.8b02885 }, author = { Patterson and Tuck and Lewis and Kaushansky and Norris and {Van De Plas} and Caprioli }, abstract = { Histology-directed imaging mass spectrometry (IMS) is a spatially targeted IMS acquisition method informed by expert annotation that provides rapid molecular characterization of select tissue structures. The expert annotations are usually determined on digital whole slide images of histological stains where the staining preparation is incompatible with optimal IMS preparation, necessitating serial sections: one for annotation, one for IMS. Registration is then used to align staining annotations onto the IMS tissue section. Herein, we report a next-generation histology-directed platform implementing IMS-compatible autofluorescence (AF) microscopy taken prior to any staining or IMS. The platform enables two histology-directed workflows, one that improves the registration process between two separate tissue sections using automated, computational monomodal AF-to-AF microscopy image registration, and a registration-free approach that utilizes AF directly to identify ROIs and acquire IMS on the same section. 
The registration approach is fully automated and delivers state of the art accuracy in histology-directed workflows for transfer of annotations ($\sim$3--10 $\mu$m based on 4 organs from 2 species) while the direct AF approach is registration-free, allowing targeting of the finest structures visible by AF microscopy. We demonstrate the platform in biologically relevant case studies of liver stage malaria and human kidney disease with spatially targeted acquisition of sparsely distributed (composing less than one tenth of 1{\%} of the tissue section area) malaria infected mouse hepatocytes and glomeruli in the human kidney case study. }, } |
2018 | Journal | Tapan P. Patel, N. Venkatesh Prajna, Sina Farsiu, Nita G. Valikodath, Leslie M. Niziol, Lakshey Dudeja, Kyeong Hwan Kim, Maria A. Woodward (2018). Novel Image-Based Analysis for Reduction of Clinician-Dependent Variability in Measurement of the Corneal Ulcer Size. Cornea, 37(3), pp. 331–339. (link) (bib) x @article{Patel2018, year = { 2018 }, volume = { 37 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Novel Image-Based Analysis for Reduction of Clinician-Dependent Variability in Measurement of the Corneal Ulcer Size }, pmid = { 29256985 }, pages = { 331--339 }, number = { 3 }, keywords = { corneal ulcer,interexaminer variability,random forest segmentation,semiautomated measurement }, journal = { Cornea }, issn = { 15364798 }, doi = { 10.1097/ICO.0000000000001488 }, author = { Patel and Prajna and Farsiu and Valikodath and Niziol and Dudeja and Kim and Woodward }, abstract = { Purpose: To assess variability in corneal ulcer measurements between ophthalmologists and reduce clinician-dependent variability using semiautomated segmentation of the ulcer from photographs. Methods: Three ophthalmologists measured 50 patients' eyes for epithelial defects (EDs) and the stromal infiltrate (SI) size using slit-lamp (SL) calipers. SL photographs were obtained. An algorithm was developed for semiautomatic segmenting of the ED and SI in the photographs. Semiautomatic segmentation was repeated 3 times by different users (2 ophthalmologists and 1 trainee). Clinically significant variability was assessed with intraclass correlation coefficients (ICCs) and the percentage of pairwise measurements differing by ≥0.5 mm. Semiautomatic segmentation measurements were compared with manual delineation of the image by a corneal specialist (gold standard) using Dice similarity coefficients. Results: Ophthalmologists' reliability in measurements by SL calipers had an ICC from 0.84 to 0.88 between examiners. 
Measurements by semiautomatic segmentation had an ICC from 0.96 to 0.98. SL measures of ulcers by clinical versus semiautomatic segmentation measures differed by ≥0.5 mm in 24{\%} to 38{\%} versus 8{\%} to 28{\%} (ED height); 30{\%} to 52{\%} versus 12{\%} to 34{\%} (ED width); 26{\%} to 38{\%} versus 10{\%} to 32{\%} (SI height); and 38{\%} to 58{\%} versus 14{\%} to 34{\%} (SI width), respectively. Average Dice similarity coefficients between manual and repeated semiautomatic segmentation ranged from 0.83 to 0.86 for the ED and 0.78 to 0.83 for the SI. Conclusions: Variability exists when measuring corneal ulcers, even among ophthalmologists. Photography and computerized methods for quantifying the ulcer size could reduce variability while remaining accurate and impact quantitative measurement endpoints. }, } |
2018 | Journal | Alexander Kostenko, Vladyslav Andriiashen, Kees Joost Batenburg (2018). Registration-based multi-orientation tomography. Optics Express, 26(22), pp. 28982. (link) (bib) x @article{Kostenko2018, year = { 2018 }, volume = { 26 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Registration-based multi-orientation tomography }, pages = { 28982 }, number = { 22 }, journal = { Optics Express }, issn = { 1094-4087 }, doi = { 10.1364/oe.26.028982 }, author = { Kostenko and Andriiashen and Batenburg }, abstract = { We propose a combination of an experimental approach and a reconstruction technique that leads to reduction of artefacts in X-ray computer tomography of strongly attenuating objects. Through fully automatic data alignment, data generated in multiple experiments with varying object orientations are combined. Simulations and experiments show that the solutions computed using algebraic methods based on multiple acquisitions can achieve a dramatic improvement in the reconstruction quality, even when each acquisition generates a reduced number of projections. The approach does not require any advanced setup components making it ideal for laboratory-based X-ray tomography. }, } |
2018 | Journal | Jihun Kim, Yang Kyun Park, David Edmunds, Kevin Oh, Gregory C. Sharp, Brian Winey (2018). Kilovoltage projection streaming-based tracking application (KiPSTA): First clinical implementation during spine stereotactic radiation surgery. Advances in Radiation Oncology, 3(4), pp. 682–692. (link) (bib) x @article{Kim2018, year = { 2018 }, volume = { 3 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85050957688{\&}doi=10.1016{\%}2Fj.adro.2018.06.002{\&}partnerID=40{\&}md5=80de6701682b693312a12d441a486b1c }, type = { Journal Article }, title = { Kilovoltage projection streaming-based tracking application (KiPSTA): First clinical implementation during spine stereotactic radiation surgery }, pages = { 682--692 }, number = { 4 }, journal = { Advances in Radiation Oncology }, issn = { 24521094 }, doi = { 10.1016/j.adro.2018.06.002 }, author = { Kim and Park and Edmunds and Oh and Sharp and Winey }, abstract = { Purpose: This study aimed to develop a linac-mounted kilovoltage (kV) projection streaming-based tracking method for vertebral targets during spine stereotactic radiation surgery and evaluate the clinical feasibility of the proposed spine tracking method. Methods and materials: Using real-time kV projection streaming within XVI (Elekta XVI), kV–projection-based tracking was applied to the target vertebral bodies. Two-dimensional in-plane patient translation was calculated via an image registration between digitally reconstructed radiographs (DRRs) and kV projections. DRR was generated from the cone beam computed tomography (CBCT) scan, which was obtained immediately before the tracking session. During a tracking session, each kV projection was streamed for an intensity gradient-based image with similar metric-based registration to the offset DRR. The ground truth displacement for each kV beam angle was calculated at the beam isocenter using the 6 degrees-of-freedom transformation that was obtained by a CBCT-CBCT rigid registration. 
The resulting translation by the DRR-projection registration was compared with the ground truth displacement. The proposed tracking method was evaluated retrospectively and online, using 7 and 5 spine patients, respectively. Results: The accuracy and precision of spine tracking for in-plane patient motion were 0.5 ± 0.2 and 0.2 ± 0.1 mm. The magnitude of patient motion that was estimated using the CBCT-CBCT rigid registration was (0.5 ± 0.4, 0.4 ± 0.3, 0.3 ± 0.3) mm and (0.3 ± 0.4, 0.2 ± 0.2, 0.5 ± 0.6) mm for all tracking sessions. The intrafraction motion was within 2 mm for all CBCT scans considered. Conclusions: This study demonstrated that the proposed spine tracking method can track intrafraction motion with sub-millimeter accuracy and precision, and sub-second latency. }, } |
2018 | Journal | K. D. Joshi, T. E. Marchant (2018). Iterative peak combination: A robust technique for identifying relevant features in medical image histograms. Biomedical Physics and Engineering Express, 4(1), pp. 10. (link) (bib) x @article{Joshi2018, year = { 2018 }, volume = { 4 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Iterative peak combination: A robust technique for identifying relevant features in medical image histograms }, pages = { 10 }, number = { 1 }, keywords = { cone beam CT,histogram matching,image histogram,image processing }, journal = { Biomedical Physics and Engineering Express }, issn = { 20571976 }, doi = { 10.1088/2057-1976/aa929d }, author = { Joshi and Marchant }, abstract = { Histogram-based methods can be used to analyse and transform medical images. Histogram specification is one such method which has been widely used to transform the histograms of cone beam CT (CBCT) images to match those of corresponding CT images. However, when the derived transformation is applied to the CBCT image pixels, significant artefacts can be produced. We propose the iterative peak combination algorithm, a novel and robust method for automatically identifying relevant features in medical image histograms. The procedure is conceptually simple and can be applied equally well to both CT and CBCT image histograms. We also demonstrate how iterative peak combination can be used to transform CBCT images in such a way as to improve the Hounsfield Unit (HU) calibration of CBCT image pixel values, without introducing additional artefacts. We analyse 36 pelvis CBCT images and show that the average difference in fat tissue pixel values between CT images and CBCT images processed using the iterative peak combination algorithm is 23.7 HU, compared to 136.7 HU in unprocessed CBCT images and 50.9 HU in CBCT images processed using histogram specification. }, } |
2018 | Journal | Daniel Høyer Iversen, Lasse Løvstakken, Geirmund Unsgård, Ingerid Reinertsen (2018). Automatic intraoperative estimation of blood flow direction during neurosurgical interventions. International Journal of Computer Assisted Radiology and Surgery, 13(5), pp. 693–701. (link) (bib) x @article{Iversen2018, year = { 2018 }, volume = { 13 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Automatic intraoperative estimation of blood flow direction during neurosurgical interventions }, pages = { 693--701 }, number = { 5 }, keywords = { Blood flow,Intraoperative,Neurosurgery,Ultrasound }, journal = { International Journal of Computer Assisted Radiology and Surgery }, issn = { 18616429 }, doi = { 10.1007/s11548-018-1711-0 }, author = { Iversen and L{\o}vstakken and Unsg{\aa}rd and Reinertsen }, abstract = { Purpose: In neurosurgery, reliable information about blood vessel anatomy and flow direction is important to identify, characterize, and avoid damage to the vasculature. Due to ultrasound Doppler angle dependencies and the complexity of the vascular architecture, clinically valuable 3-D flow direction information is currently not available. In this paper, we aim to clinically validate and demonstrate the intraoperative use of a fully automatic method for estimation of 3-D blood flow direction from freehand 2-D Doppler ultrasound. Methods: A 3-D vessel model is reconstructed from 2-D Doppler ultrasound and used to determine the vessel architecture. The blood flow direction is then estimated automatically using the model in combination with Doppler velocity data. To enable testing and validation during surgery, the method was implemented as part of the open-source navigation system CustusX (www.custusx.org). Results: Ten patients were included prospectively. Data from four patients were processed postoperatively, and data from six patients were processed intraoperatively. 
In total, the blood flow direction was estimated for 48 different blood vessels with a success rate of 98{\%}. Conclusions: In this work, we have shown that the proposed method is suitable for fully automatic estimation of the blood flow direction in intracranial vessels during neurosurgical interventions. The method has the potential to make the understanding of the complex vascular anatomy and flow pattern more intuitive for the surgeon. The method is compatible with intraoperative use, and results can be presented within the limited time frame where they still are of clinical interest. }, } |
2018 | Journal | Meng Yin Ho, Wei Lung Tseng, Furen Xiao (2018). Estimation of the Craniectomy Surface Area by Using Postoperative Images. International Journal of Biomedical Imaging, 2018, pp. 8. (link) (bib) x @article{Ho2018, year = { 2018 }, volume = { 2018 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Estimation of the Craniectomy Surface Area by Using Postoperative Images }, pages = { 8 }, journal = { International Journal of Biomedical Imaging }, issn = { 16874196 }, doi = { 10.1155/2018/5237693 }, author = { Ho and Tseng and Xiao }, abstract = { Decompressive craniectomy (DC) is a neurosurgical procedure performed to relieve the intracranial pressure engendered by brain swelling. However, no easy and accurate method exists for determining the craniectomy surface area. In this study, we implemented and compared three methods of estimating the craniectomy surface area for evaluating the decompressive effort. We collected 118 sets of preoperative and postoperative brain computed tomography images from patients who underwent craniectomy procedures between April 2009 and April 2011. The surface area associated with each craniectomy was estimated using the marching cube and quasi-Monte Carlo methods. The surface area was also estimated using a simple AC method, in which the area is calculated by multiplying the craniectomy length (A) by its height (C). The estimated surface area ranged from 9.46 to 205.32 cm2, with a median of 134.80 cm2. The root-mean-square deviation (RMSD) between the marching cube and quasi-Monte Carlo methods was 7.53 cm2. Furthermore, the RMSD was 14.45 cm2 between the marching cube and AC methods and 12.70 cm2 between the quasi-Monte Carlo and AC methods. Paired t-tests indicated no statistically significant difference between these methods. The marching cube and quasi-Monte Carlo methods yield similar results. The results calculated using the AC method are also clinically acceptable for estimating the DC surface area. 
Our results can facilitate additional studies on the association of decompressive effort with the effect of craniectomy. }, } |
2018 | Journal | Zhijie He, Hongyang Lu, Xiaojiao Yang, Li Zhang, Yi Wu, Wenxiu Niu, Li Ding, Guili Wang, Shanbao Tong, Jie Jia (2018). Hypoperfusion induced by preconditioning treadmill training in hyper-early reperfusion after cerebral ischemia: A laser speckle imaging study. IEEE Transactions on Biomedical Engineering, 65(1), pp. 219–223. (link) (bib) x @article{He2018, year = { 2018 }, volume = { 65 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Hypoperfusion induced by preconditioning treadmill training in hyper-early reperfusion after cerebral ischemia: A laser speckle imaging study }, pages = { 219--223 }, number = { 1 }, keywords = { Cerebral blood flow,Cerebral ischemia,Exercise preconditioning,Laser speckle contrast imaging,Reperfusion injury }, journal = { IEEE Transactions on Biomedical Engineering }, issn = { 15582531 }, doi = { 10.1109/TBME.2017.2695229 }, author = { He and Lu and Yang and Zhang and Wu and Niu and Ding and Wang and Tong and Jia }, abstract = { Exercise preconditioning induces neuroprotective effects during cerebral ischemia and reperfusion, which involves the recovery of cerebral blood flow (CBF). Mechanisms underlying the neuroprotective effects of reestablished CBF following ischemia and reperfusion are unclear. The present study investigated CBF in hyper-early stage of reperfusion by laser speckle contrast imaging, a full-field high-resolution optical imaging technique. Rats with or without treadmill training were subjected to middle cerebral artery occlusion followed by reperfusion. CBF in arteries, veins, and capillaries in hyper-early stage of reperfusion (1, 2, and 3 h after reperfusion) and in subacute stage (24 h after reperfusion) were measured. Neurological scoring and 2,3,5-triphenyltetrazolium chloride staining were further applied to determine the neuroprotective effects of exercise preconditioning. 
In hyper-early stage of reperfusion, CBF in the rats with exercise preconditioning was reduced significantly in arteries and veins, respectively, compared to rats with no exercise preconditioning. Capillary CBF remained stable in the hyper-early stage of reperfusion, though it increased significantly 24 h after reperfusion in the rats with exercise preconditioning. As a neuroprotective strategy, exercise preconditioning reduced the blood perfusion of arteries and veins in the hyper-early stage of reperfusion, which indicated intervention-induced neuroprotective hypoperfusion after reperfusion onset. }, } |
2018 | Journal | R. Han, T. De Silva, M. Ketcha, A. Uneri, J. H. Siewerdsen (2018). A momentum-based diffeomorphic demons framework for deformable MR-CT image registration. Physics in Medicine and Biology, 63(21), pp. 18. (link) (bib) x @article{Han2018, year = { 2018 }, volume = { 63 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A momentum-based diffeomorphic demons framework for deformable MR-CT image registration }, pmid = { 30353886 }, pages = { 18 }, number = { 21 }, keywords = { deformable registration,image registration,neurosurgery navigation }, journal = { Physics in Medicine and Biology }, issn = { 13616560 }, doi = { 10.1088/1361-6560/aae66c }, author = { Han and {De Silva} and Ketcha and Uneri and Siewerdsen }, abstract = { Neuro-navigated procedures require a high degree of geometric accuracy but are subject to geometric error from complex deformation in the deep brain - e.g. regions about the ventricles due to egress of cerebrospinal fluid (CSF) upon neuroendoscopic approach or placement of a ventricular shunt. We report a multi-modality, diffeomorphic, deformable registration method using momentum-based acceleration of the Demons algorithm to solve the transformation relating preoperative MRI and intraoperative CT as a basis for high-precision guidance. The registration method (pMI-Demons) extends the mono-modality, diffeomorphic form of the Demons algorithm to multi-modality registration using pointwise mutual information (pMI) as a similarity metric. The method incorporates a preprocessing step to nonlinearly stretch CT image values and incorporates a momentum-based approach to accelerate convergence. 
Registration performance was evaluated in phantom and patient images: first, the sensitivity of performance to algorithm parameter selection (including update and displacement field smoothing, histogram stretch, and the momentum term) was analyzed in a phantom study over a range of simulated deformations; and second, the algorithm was applied to registration of MR and CT images for four patients undergoing minimally invasive neurosurgery. Performance was compared to two previously reported methods (free-form deformation using mutual information (MI-FFD) and symmetric normalization using mutual information (MI-SyN)) in terms of target registration error (TRE), Jacobian determinant (J), and runtime. The phantom study identified optimal or nominal settings of algorithm parameters for translation to clinical studies. In the phantom study, the pMI-Demons method achieved comparable registration accuracy to the reference methods and strongly reduced outliers in TRE (p {\textless} 0.001 in Kolmogorov-Smirnov test). Similarly, in the clinical study: median TRE = 1.54 mm (0.83-1.66 mm interquartile range, IQR) for pMI-Demons compared to 1.40 mm (1.02-1.67 mm IQR) for MI-FFD and 1.64 mm (0.90-1.92 mm IQR) for MI-SyN. The pMI-Demons and MI-SyN methods yielded diffeomorphic transformations (J {\textgreater} 0) that preserved topology, whereas MI-FFD yielded unrealistic (J {\textless} 0) deformations subject to tissue folding and tearing. Momentum-based acceleration gave a ∼35{\%} speedup of the pMI-Demons method, providing registration runtime of 10.5 min (reduced to 2.2 min on GPU), compared to 15.5 min for MI-FFD and 34.7 min for MI-SyN. The pMI-Demons method achieved registration accuracy comparable to MI-FFD and MI-SyN, maintained diffeomorphic transformation similar to MI-SyN, and accelerated runtime in a manner that facilitates translation to image-guided neurosurgery. }, } |
2018 | Journal | Steven R. Dolly, Yang Lou, Mark A. Anastasio, Hua Li (2018). Learning-based stochastic object models for characterizing anatomical variations. Physics in Medicine and Biology, 63(6), pp. 18. (link) (bib) x @article{Dolly2018, year = { 2018 }, volume = { 63 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Learning-based stochastic object models for characterizing anatomical variations }, pages = { 18 }, number = { 6 }, journal = { Physics in Medicine and Biology }, issn = { 13616560 }, doi = { 10.1088/1361-6560/aab000 }, author = { Dolly and Lou and Anastasio and Li }, abstract = { It is widely known that the optimization of imaging systems based on objective, task-based measures of image quality via computer-simulation requires the use of a stochastic object model (SOM). However, the development of computationally tractable SOMs that can accurately model the statistical variations in human anatomy within a specified ensemble of patients remains a challenging task. Previously reported numerical anatomic models lack the ability to accurately model inter-patient and inter-organ variations in human anatomy among a broad patient population, mainly because they are established on image data corresponding to a few of patients and individual anatomic organs. This may introduce phantom-specific bias into computer-simulation studies, where the study result is heavily dependent on which phantom is used. In certain applications, however, databases of high-quality volumetric images and organ contours are available that can facilitate this SOM development. In this work, a novel and tractable methodology for learning a SOM and generating numerical phantoms from a set of volumetric training images is developed. 
The proposed methodology learns geometric attribute distributions (GAD) of human anatomic organs from a broad patient population, which characterize both centroid relationships between neighboring organs and anatomic shape similarity of individual organs among patients. By randomly sampling the learned centroid and shape GADs with the constraints of the respective principal attribute variations learned from the training data, an ensemble of stochastic objects can be created. The randomness in organ shape and position reflects the learned variability of human anatomy. To demonstrate the methodology, a SOM of an adult male pelvis is computed and examples of corresponding numerical phantoms are created. }, } |
2018 | Journal | Thomas De Schryver, Manuel Dierick, Marjolein Heyndrickx, Jeroen Van Stappen, Marijn A. Boone, Luc Van Hoorebeke, Matthieu N. Boone (2018). Motion compensated micro-CT reconstruction for in-situ analysis of dynamic processes. Scientific Reports, 8(1), pp. 10. (link) (bib) x @article{DeSchryver2018, year = { 2018 }, volume = { 8 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Motion compensated micro-CT reconstruction for in-situ analysis of dynamic processes }, pages = { 10 }, number = { 1 }, journal = { Scientific Reports }, issn = { 20452322 }, doi = { 10.1038/s41598-018-25916-5 }, author = { {De Schryver} and Dierick and Heyndrickx and {Van Stappen} and Boone and {Van Hoorebeke} and Boone }, abstract = { This work presents a framework to exploit the synergy between Digital Volume Correlation (DVC) and iterative CT reconstruction to enhance the quality of high-resolution dynamic X-ray CT (4D-$\mu$CT) and obtain quantitative results from the acquired dataset in the form of 3D strain maps which can be directly correlated to the material properties. Furthermore, we show that the developed framework is capable of strongly reducing motion artifacts even in a dataset containing a single 360° rotation. }, } |
2018 | Journal | Pallab Datta, Ananya Barui, Yang Wu, Veli Ozbolat, Kazim K. Moncal, Ibrahim T. Ozbolat (2018). Essential steps in bioprinting: From pre- to post-bioprinting. Biotechnology Advances, 36(5), pp. 1481–1504. (link) (bib) x @article{Datta2018, year = { 2018 }, volume = { 36 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Essential steps in bioprinting: From pre- to post-bioprinting }, pmid = { 29909085 }, pages = { 1481--1504 }, number = { 5 }, keywords = { Biofabrication,Bioink,Bioprinter,Bioprinting,Droplet-based bioprinting,Extrusion-based bioprinting,Laser-based bioprinting }, journal = { Biotechnology Advances }, issn = { 07349750 }, doi = { 10.1016/j.biotechadv.2018.06.003 }, author = { Datta and Barui and Wu and Ozbolat and Moncal and Ozbolat }, abstract = { An increasing demand for directed assembly of biomaterials has inspired the development of bioprinting, which facilitates the assembling of both cellular and acellular inks into well-arranged three-dimensional (3D) structures for tissue fabrication. Although great advances have been achieved in the recent decade, there still exist issues to be addressed. Herein, a review has been systematically performed to discuss the considerations in the entire procedure of bioprinting. Though bioprinting is advancing at a rapid pace, it is seen that the whole process of obtaining tissue constructs from this technique involves multiple-stages, cutting across various technology domains. These stages can be divided into three broad categories: pre-bioprinting, bioprinting and post-bioprinting. Each stage can influence others and has a bearing on the performance of fabricated constructs. For example, in pre-bioprinting, tissue biopsy and cell expansion techniques are essential to ensure a large number of cells are available for mass organ production. Similarly, medical imaging is needed to provide high resolution designs, which can be faithfully bioprinted. 
In the bioprinting stage, compatibility of biomaterials is needed to be matched with solidification kinetics to ensure constructs with high cell viability and fidelity are obtained. On the other hand, there is a need to develop bioprinters, which have high degrees of freedom of movement, perform without failure concerns for several hours and are compact, and affordable. Finally, maturation of bioprinted cells are governed by conditions provided during the post-bioprinting process. This review, for the first time, puts all the bioprinting stages in perspective of the whole process of bioprinting, and analyzes their current state-of-the art. It is concluded that bioprinting community will recognize the relative importance and optimize the parameter of each stage to obtain the desired outcomes. }, } |
2018 | Journal | Antonios Danelakis, Theoharis Theoharis, Dimitrios A. Verganelakis (2018). Survey of automated multiple sclerosis lesion segmentation techniques on magnetic resonance imaging. Computerized Medical Imaging and Graphics, 70, pp. 83–100. (link) (bib) x @article{Danelakis2018, year = { 2018 }, volume = { 70 }, type = { Journal Article }, title = { Survey of automated multiple sclerosis lesion segmentation techniques on magnetic resonance imaging }, pages = { 83--100 }, keywords = { Automated segmentation,Brain MRI,Multiple sclerosis,Survey }, journal = { Computerized Medical Imaging and Graphics }, issn = { 18790771 }, doi = { 10.1016/j.compmedimag.2018.10.002 }, author = { Danelakis, Antonios and Theoharis, Theoharis and Verganelakis, Dimitrios A. }, abstract = { Multiple sclerosis (MS) is a chronic disease. It affects the central nervous system and its clinical manifestation can variate. Magnetic Resonance Imaging (MRI) is often used to detect, characterize and quantify MS lesions in the brain, due to the detailed structural information that it can provide. Manual detection and measurement of MS lesions in MRI data is time-consuming, subjective and prone to errors. Therefore, multiple automated methodologies for MRI-based MS lesion segmentation have been proposed. Here, a review of the state-of-the-art of automatic methods available in the literature is presented. The current survey provides a categorization of the methodologies in existence in terms of their input data handling, their main strategy of segmentation and their type of supervision. The strengths and weaknesses of each category are analyzed and explicitly discussed. The positive and negative aspects of the methods are highlighted, pointing out the future trends and, thus, leading to possible promising directions for future research. In addition, a further clustering of the methods, based on the databases used for their evaluation, is provided. 
The aforementioned clustering achieves a reliable comparison among methods evaluated on the same databases. Despite the large number of methods that have emerged in the field, there is as yet no commonly accepted methodology that has been established in clinical practice. Future challenges such as the simultaneous exploitation of more sophisticated MRI protocols and the hybridization of the most promising methods are expected to further improve the performance of the segmentation. }, } |
2018 | Journal | Weifu Chen, Mingquan Lin, Eli Gibson, Matthew Bastian-Jordan, Derek W. Cool, Zahra Kassam, Huageng Liang, Guocan Feng, Aaron D. Ward, Bernard Chiu (2018). A self-tuned graph-based framework for localization and grading prostate cancer lesions: An initial evaluation based on multiparametric magnetic resonance imaging. Computers in Biology and Medicine, 96, pp. 252–265. (link) (bib) x @article{Chen2018, year = { 2018 }, volume = { 96 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A self-tuned graph-based framework for localization and grading prostate cancer lesions: An initial evaluation based on multiparametric magnetic resonance imaging }, pages = { 252--265 }, keywords = { Laplacian regularized regression model,Multiparametric MRI (mpMRI),Multiple kernel learning,Prostate Imaging and Reporting Data System (PI-RAD,Prostate cancer }, journal = { Computers in Biology and Medicine }, issn = { 18790534 }, doi = { 10.1016/j.compbiomed.2018.03.017 }, author = { Chen and Lin and Gibson and Bastian-Jordan and Cool and Kassam and Liang and Feng and Ward and Chiu }, abstract = { Multiparametric magnetic resonance imaging (mpMRI) has been established as the state-of-the-art examination for the detection and localization of prostate cancer lesions. Prostate Imaging-Reporting and Data System (PI-RADS) has been established as a scheme to standardize the reporting of mpMRI findings. Although lesion delineation and PI-RADS ratings could be performed manually, human delineation and ratings are subjective and time-consuming. In this article, we developed and validated a self-tuned graph-based model for PI-RADS rating prediction. 34 features were obtained at the pixel level from T2-weighted (T2W), apparent diffusion coefficient (ADC) and dynamic contrast enhanced (DCE) images, from which PI-RADS scores were predicted. Two major innovations were involved in this self-tuned graph-based model. 
First, graph-based approaches are sensitive to the choice of the edge weight. The proposed model tuned the edge weights automatically based on the structure of the data, thereby obviating empirical edge weight selection. Second, the feature weights were tuned automatically to give heavier weights to features important for PI-RADS rating estimation. The proposed framework was evaluated for its lesion localization performance in mpMRI datasets of 12 patients. In the evaluation, the PI-RADS score distribution map generated by the algorithm and from the observers' ratings were binarized by thresholds of 3 and 4. The sensitivity, specificity and accuracy obtained in these two threshold settings ranged from 65 to 77{\%}, 86 to 93{\%} and 85 to 88{\%} respectively, which are comparable to results obtained in previous studies in which non-clinical T2 maps were available. The proposed algorithm took 10s to estimate the PI-RADS score distribution in an axial image. The efficiency achievable suggests that this technique can be developed into a prostate MR analysis system suitable for clinical use after a thorough validation involving more patients. }, } |
2018 | Journal | Damiano Caruso, Marta Zerunian, Maria Ciolina, Domenico de Santis, Marco Rengo, Mumtaz H. Soomro, Gaetano Giunta, Silvia Conforto, Maurizio Schmid, Emanuele Neri, Andrea Laghi (2018). Haralick's texture features for the prediction of response to therapy in colorectal cancer: a preliminary study. Radiologia Medica, 123(3), pp. 161–167. (link) (bib) x @article{Caruso2018, year = { 2018 }, volume = { 123 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85033363764{\&}doi=10.1007{\%}2Fs11547-017-0833-8{\&}partnerID=40{\&}md5=507a4f33003fadbdd5ac25f4d11e4428 }, type = { Journal Article }, title = { Haralick's texture features for the prediction of response to therapy in colorectal cancer: a preliminary study }, pages = { 161--167 }, number = { 3 }, keywords = { Colorectal cancer,Haralick's texture analysis,Response to therapy,T2-weighted MRI }, journal = { Radiologia Medica }, issn = { 18266983 }, doi = { 10.1007/s11547-017-0833-8 }, author = { Caruso and Zerunian and Ciolina and Santis and Rengo and Soomro and Giunta and Conforto and Schmid and Neri and Laghi }, abstract = { Purpose: Haralick features Texture analysis is a recent oncologic imaging biomarker used to assess quantitatively the heterogeneity within a tumor. The aim of this study is to evaluate which Haralick's features are the most feasible in predicting tumor response to neoadjuvant chemoradiotherapy (CRT) in colorectal cancer. Materials and Methods: After MRI and histological assessment, eight patients were enrolled and divided into two groups based on response to neoadjuvant CRT in complete responders (CR) and non-responders (NR). Oblique Axial T2-weighted MRI sequences before CRT were analyzed by two radiologists in consensus drawing a ROI around the tumor. 14 over 192 Haralick's features were extrapolated from normalized gray-level co-occurrence matrix in four different directions. 
A dedicated statistical analysis was performed to evaluate distribution of the extracted Haralick's features computing mean and standard deviation. Results: Pretreatment MRI examination showed significant value (p {\textless} 0.05) of 5 over 14 computed Haralick texture. In particular, the significant features are the following: concerning energy, contrast, correlation, entropy and inverse difference moment. Conclusions: Five Haralick's features showed significant relevance in the prediction of response to therapy in colorectal cancer and might be used as additional imaging biomarker in the oncologic management of colorectal patients. }, } |
2018 | Journal | Amélie Bonnet-Garnier, Ki\^en Ki\^eu, Tiphaine Aguirre-Lavin, Krisztina Tar, Pierre Flores, Zichuan Liu, Nathalie Peynot, Martine Chebrout, András Dinnyés, Véronique Duranthon, Nathalie Beaujean (2018). Three-dimensional analysis of nuclear heterochromatin distribution during early development in the rabbit. Chromosoma, 127(3), pp. 387–403. (link) (bib) x @article{BonnetGarnier2018, year = { 2018 }, volume = { 127 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Three-dimensional analysis of nuclear heterochromatin distribution during early development in the rabbit }, pages = { 387--403 }, number = { 3 }, keywords = { 3D-FISH,Centromeres,Embryos,Epigenetic modifications,Satellite sequences }, journal = { Chromosoma }, issn = { 14320886 }, doi = { 10.1007/s00412-018-0671-z }, author = { Bonnet-Garnier and Ki{\^{e}}u and Aguirre-Lavin and Tar and Flores and Liu and Peynot and Chebrout and Dinny{\'{e}}s and Duranthon and Beaujean }, abstract = { Changes to the spatial organization of specific chromatin domains such as constitutive heterochromatin have been studied extensively in somatic cells. During early embryonic development, drastic epigenetic reprogramming of both the maternal and paternal genomes, followed by chromatin remodeling at the time of embryonic genome activation (EGA), have been observed in the mouse. Very few studies have been performed in other mammalian species (human, bovine, or rabbit) and the data are far from complete. During this work, we studied the three-dimensional organization of pericentromeric regions during the preimplantation period in the rabbit using specific techniques (3D-FISH) and tools (semi-automated image analysis). We observed that the pericentromeric regions (identified with specific probes for Rsat I and Rsat II genomic sequences) changed their shapes (from pearl necklaces to clusters), their nuclear localizations (from central to peripheral), as from the 4-cell stage. 
This reorganization goes along with histone modification changes and a reduced number of interactions with the nucleolar precursor body surface. Altogether, our results suggest that the 4-cell stage may be a crucial window for events necessary before major EGA, which occurs during the 8-cell stage in the rabbit. }, } |
2018 | Journal | Shant Malkasian, Logan Hubbard, Brian Dertli, Jungnam Kwon, Sabee Molloi (2018). Quantification of vessel-specific coronary perfusion territories using minimum-cost path assignment and computed tomography angiography: Validation in a swine model. Journal of Cardiovascular Computed Tomography, 12(5), pp. 425–435. (bib) x @article{Malkasian2018, year = { 2018 }, volume = { 12 }, title = { Quantification of vessel-specific coronary perfusion territories using minimum-cost path assignment and computed tomography angiography: Validation in a swine model }, publisher = { Elsevier Inc. }, pages = { 425--435 }, number = { 5 }, month = { sep }, keywords = { Angiography,Cardiovascular disease,Computerized tomography,Coronary artery disease,Imaging,Myocardium }, journal = { Journal of Cardiovascular Computed Tomography }, issn = { 1876861X }, doi = { 10.1016/j.jcct.2018.06.006 }, author = { Malkasian and Hubbard and Dertli and Kwon and Molloi }, abstract = { Background: As combined morphological and physiological assessment of coronary artery disease (CAD) is necessary to reliably resolve CAD severity, the objective of this study was to validate an automated minimum-cost path assignment (MCP) technique which enables accurate, vessel-specific assignment of the left (LCA) and right (RCA) coronary perfusion territories using computed tomography (CT) angiography data for both left and right ventricles. Methods: Six swine were used to validate the MCP technique. In each swine, a dynamic acquisition comprised of twenty consecutive volume scans was acquired with a 320-slice CT scanner following peripheral injection of contrast material. From this acquisition the MCP technique was used to automatically assign LCA and RCA perfusion territories for the left and right ventricles, independently. Each animal underwent another dynamic CT acquisition following direct injection of contrast material into the LCA or RCA. 
Using this acquisition, reference standard LCA and RCA perfusion territories were isolated from the myocardial blush. The accuracy of the MCP technique was evaluated by quantitatively comparing the MCP-derived LCA and RCA perfusion territories to these reference standard territories. Results: All MCP perfusion territory masses (MassMCP) and all reference standard perfusion territory masses (MassRS) in the left ventricle were related by MassMCP = 0.99MassRS+0.35 g (r = 1.00). MassMCP and MassRS in the right ventricle were related by MassMCP = 0.94MassRS+0.39 g (r = 0.96). Conclusion: The MCP technique was validated in a swine animal model and has the potential to be used for accurate, vessel-specific assignment of LCA and RCA perfusion territories in both the left and right ventricular myocardium using CT angiography data. In order to provide a comprehensive morphological and physiological cardiac imaging datasets, the minimum-cost path assignment (MCP) method has been validated, using a swine animal model and computed tomography (CT) imaging. MCP has been shown to accurately quantify left and right coronary artery perfusion territories in the left ventricle, right ventricle and whole heart. The MCP technique provides clinicians and researchers in cardiovascular imaging with a means to accurately and automatically determine coronary-specific perfusion territories in the left and right heart, and could also be used to assess myocardium at-risk distal to a stenosis. }, } |
2018 | Journal | Markus D. Herrmann, David A. Clunie, Andriy Fedorov, Sean W. Doyle, Steven Pieper, Veronica Klepeis, Long P. Le, George L. Mutter, David S. Milstone, Thomas J. Schultz, Ron Kikinis, Gopal K. Kotecha, David H. Hwang, Katherine P. Andriole, A. John Iafrate, James A. Brink, Giles W. Boland, Keith J. Dreyer, Mark Michalski, Jeffrey A. Golden, David N. Louis, Jochen K. Lennerz (2018). Implementing the DICOM standard for digital pathology. Journal of Pathology Informatics, 9(1), pp. NA (bib) x @article{Herrmann2018, year = { 2018 }, volume = { 9 }, title = { Implementing the DICOM standard for digital pathology }, publisher = { Wolters Kluwer Medknow Publications }, pmid = { 30533276 }, number = { 1 }, month = { jan }, keywords = { Computational pathology,DICOMweb,Image compression,Slide scanning,Whole slide imaging }, journal = { Journal of Pathology Informatics }, issn = { 21533539 }, doi = { 10.4103/jpi.jpi_42_18 }, author = { Herrmann and Clunie and Fedorov and Doyle and Pieper and Klepeis and Le and Mutter and Milstone and Schultz and Kikinis and Kotecha and Hwang and Andriole and {John Iafrate} and Brink and Boland and Dreyer and Michalski and Golden and Louis and Lennerz }, abstract = { Background: Digital Imaging and Communications in Medicine (DICOM{\textregistered}) is the standard for the representation, storage, and communication of medical images and related information. A DICOM file format and communication protocol for pathology have been defined; however, adoption by vendors and in the field is pending. Here, we implemented the essential aspects of the standard and assessed its capabilities and limitations in a multisite, multivendor healthcare network. Methods: We selected relevant DICOM attributes, developed a program that extracts pixel data and pixel-related metadata, integrated patient and specimen-related metadata, populated and encoded DICOM attributes, and stored DICOM files. 
We generated the files using image data from four vendor-specific image file formats and clinical metadata from two departments with different laboratory information systems. We validated the generated DICOM files using recognized DICOM validation tools and measured encoding, storage, and access efficiency for three image compression methods. Finally, we evaluated storing, querying, and retrieving data over the web using existing DICOM archive software. Results: Whole slide image data can be encoded together with relevant patient and specimen-related metadata as DICOM objects. These objects can be accessed efficiently from files or through RESTful web services using existing software implementations. Performance measurements show that the choice of image compression method has a major impact on data access efficiency. For lossy compression, JPEG achieves the fastest compression/decompression rates. For lossless compression, JPEG-LS significantly outperforms JPEG 2000 with respect to data encoding and decoding speed. Conclusion: Implementation of DICOM allows efficient access to image data as well as associated metadata. By leveraging a wealth of existing infrastructure solutions, the use of DICOM facilitates enterprise integration and data exchange for digital pathology. }, } |
2018 | Journal | Francisco P.M. Oliveira, Ana Paula Moreira, Alexandre De Mendon\cca, Ana Verdelho, Carolina Xavier, Dalila Barroca, Joana Rio, Eva Cardoso, \^Angela Cruz, Antero Abrunhosa, Miguel Castelo-Branco (2018). Can 11 C-PiB-PET relative delivery R 1 or 11 C-PiB-PET perfusion replace 18 F-FDG-PET in the assessment of brain neurodegeneration?. Journal of Alzheimer's Disease, 65(1), pp. 89–97. (bib) x @article{Oliveira2018a, year = { 2018 }, volume = { 65 }, title = { Can 11 C-PiB-PET relative delivery R 1 or 11 C-PiB-PET perfusion replace 18 F-FDG-PET in the assessment of brain neurodegeneration? }, publisher = { IOS Press }, pages = { 89--97 }, number = { 1 }, keywords = { 11 C-PIB,18 F-FDG,Alzheimer's disease,compartmental models,neurodegeneration,perfusion }, journal = { Journal of Alzheimer's Disease }, issn = { 18758908 }, doi = { 10.3233/JAD-180274 }, author = { Oliveira and Moreira and {De Mendon{\c{c}}a} and Verdelho and Xavier and Barroca and Rio and Cardoso and Cruz and Abrunhosa and Castelo-Branco }, abstract = { Background: Pittsburgh Compound B (PiB) positron emission tomography (PET) is used to visualize in vivo amyloid plaques in the brain. Frequently the PiB examinations are complemented with a fluorodeoxyglucose (FDG) PET scan to further assess neurodegeneration. Objective: Our goal is to identify alternative correlates of FDG images by assessing which kinetic methods originate PiB derived relative delivery ratio (R 1) images that can be correlated with the FDG images, and to compare them with PiB perfusion (pPiB) images obtained from the early-phase of PiB acquisition. Methods: We selected 52 patients with cognitive impairment who underwent a dynamic PiB and FDG acquisitions. To compute the R 1 images, two simplified reference tissue models (SRTM and SRTM2) and two multi-linear reference tissue models (MRTM and MRTM2) were used. The pPiB images were obtained in two different time intervals. 
Results: All six types of images were of good quality and highly correlated with the FDG images (mean voxelwise within-subjects r {\textgreater} 0.92). The higher correlation was found for FDG-R 1 (MRTM). Regarding the voxelwise regional correlation, the higher mean all brain correlations was r = 0.825 for FDG-R 1 (MRTM) and statistically significant in the whole brain analysis. Conclusion: All R 1 and pPiB images here tested have potential to assess the metabolic impact of neurodegeneration almost as reliably as the FDG images. However, this is not enough to validate these images for a single-subject analysis compared with the FDG image, and thus they cannot yet be used clinically to replace the FDG image before such evaluation. }, } |
2018 | Journal | Francisco Oliveira, Antoine Leuzy, Jo\~ao Castelhano, Konstantinos Chiotis, Steen Gregers Hasselbalch, Juha Rinne, Alexandre Mendon\cca, Markus Otto, Alberto Lle\'o, Isabel Santana, Jarkko Johansson, Sarah Anderl-Straub, Christine Arnim, Ambros Beer, Rafael Blesa, Juan Fortea, Herukka Sanna-Kaisa, Erik Portelius, Josef Pannee, Henrik Zetterberg, Kaj Blennow, Ana P. Moreira, Antero Abrunhosa, Agneta Nordberg, Miguel Castelo-Branco (2018). Data driven diagnostic classification in Alzheimer's disease based on different reference regions for normalization of PiB-PET images and correlation with CSF concentrations of A$\beta$ species. NeuroImage: Clinical, 20, pp. 603–610. (bib) x @article{Oliveira2018, year = { 2018 }, volume = { 20 }, title = { Data driven diagnostic classification in Alzheimer's disease based on different reference regions for normalization of PiB-PET images and correlation with CSF concentrations of A$\beta$ species }, publisher = { Elsevier Inc. }, pages = { 603--610 }, month = { jan }, journal = { NeuroImage: Clinical }, issn = { 22131582 }, doi = { 10.1016/j.nicl.2018.08.023 }, author = { Oliveira and Leuzy and Castelhano and Chiotis and Hasselbalch and Rinne and Mendon{\c{c}}a and Otto and Lle{\'{o}} and Santana and Johansson and Anderl-Straub and Arnim and Beer and Blesa and Fortea and Sanna-Kaisa and Portelius and Pannee and Zetterberg and Blennow and Moreira and Abrunhosa and Nordberg and Castelo-Branco }, abstract = { Positron emission tomography (PET) neuroimaging with the Pittsburgh Compound{\_}B (PiB) is widely used to assess amyloid plaque burden. Standard quantification approaches normalize PiB-PET by mean cerebellar gray matter uptake. Previous studies suggested similar pons and white-matter uptake in Alzheimer's disease (AD) and healthy controls (HC), but lack exhaustive comparison of normalization across the three regions, with data-driven diagnostic classification. 
We aimed to compare the impact of distinct reference regions in normalization, measured by data-driven statistical analysis, and correlation with cerebrospinal fluid (CSF) amyloid $\beta$ (A$\beta$) species concentrations. 243 individuals with clinical diagnosis of AD, HC, mild cognitive impairment (MCI) and other dementias, from the Biomarkers for Alzheimer's/Parkinson's Disease (BIOMARKAPD) initiative were included. PiB-PET images and CSF concentrations of A$\beta$38, A$\beta$40 and A$\beta$42 were submitted to classification using support vector machines. Voxel-wise group differences and correlations between normalized PiB-PET images and CSF A$\beta$ concentrations were calculated. Normalization by cerebellar gray matter and pons yielded identical classification accuracy of AD (accuracy-96{\%}, sensitivity-96{\%}, specificity-95{\%}), and significantly higher than A$\beta$ concentrations (best accuracy 91{\%}). Normalization by the white-matter showed decreased extent of statistically significant multivoxel patterns and was the only method not outperforming CSF biomarkers, suggesting statistical inferiority. A$\beta$38 and A$\beta$40 correlated negatively with PiB-PET images normalized by the white-matter, corroborating previous observations of correlations with non-AD-specific subcortical changes in white-matter. In general, when using the pons as reference region, higher voxel-wise group differences and stronger correlation with A$\beta$42, the A$\beta$42/A$\beta$40 or A$\beta$42/A$\beta$38 ratios were found compared to normalization based on cerebellar gray matter. }, } |
2018 | Journal | Pádraig Looney, Gordon N. Stevenson, Kypros H. Nicolaides, Walter Plasencia, Malid Molloholli, Stavros Natsis, Sally L. Collins (2018). Fully automated, real-time 3D ultrasound segmentation to estimate first trimester placental volume using deep learning. JCI insight, 3(11), pp. NA (bib) x @article{Looney2018, year = { 2018 }, volume = { 3 }, title = { Fully automated, real-time 3D ultrasound segmentation to estimate first trimester placental volume using deep learning }, publisher = { NLM (Medline) }, number = { 11 }, month = { jun }, keywords = { Diagnostic imaging,Obstetrics/gynecology,Reproductive Biology }, journal = { JCI insight }, issn = { 23793708 }, doi = { 10.1172/jci.insight.120178 }, author = { Looney and Stevenson and Nicolaides and Plasencia and Molloholli and Natsis and Collins }, abstract = { We present a new technique to fully automate the segmentation of an organ from 3D ultrasound (3D-US) volumes, using the placenta as the target organ. Image analysis tools to estimate organ volume do exist but are too time consuming and operator dependant. Fully automating the segmentation process would potentially allow the use of placental volume to screen for increased risk of pregnancy complications. The placenta was segmented from 2,393 first trimester 3D-US volumes using a semiautomated technique. This was quality controlled by three operators to produce the "ground-truth" data set. A fully convolutional neural network (OxNNet) was trained using this ground-truth data set to automatically segment the placenta. OxNNet delivered state-of-the-art automatic segmentation. The effect of training set size on the performance of OxNNet demonstrated the need for large data sets. The clinical utility of placental volume was tested by looking at predictions of small-for-gestational-age babies at term. The receiver-operating characteristics curves demonstrated almost identical results between OxNNet and the ground-truth). 
Our results demonstrated good similarity to the ground-truth and almost identical clinical results for the prediction of SGA. }, } |
2018 | Journal | Guang Li, August Sun, Xingyu Nie, Jason Moody, Kirk Huang, Shirong Zhang, Satyam Sharma, Joseph Deasy (2018). Introduction of a pseudo demons force to enhance deformation range for robust reconstruction of super-resolution time-resolved 4DMRI. Medical Physics, 45(11), pp. 5197–5207. (bib) x @article{Li2018, year = { 2018 }, volume = { 45 }, title = { Introduction of a pseudo demons force to enhance deformation range for robust reconstruction of super-resolution time-resolved 4DMRI }, publisher = { John Wiley and Sons Ltd. }, pages = { 5197--5207 }, number = { 11 }, month = { nov }, keywords = { deformable image registration (DIR),image-guided radiotherapy (IGRT),multi-breath motion assessment,time-resolved four-dimensional magnetic resonance }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1002/mp.13179 }, author = { Li and Sun and Nie and Moody and Huang and Zhang and Sharma and Deasy }, abstract = { Purpose: The purpose of this study was to enhance the deformation range of demons-based deformable image registration (DIR) for large respiration-induced organ motion in the reconstruction of time-resolved four-dimensional magnetic resonance imaging (TR-4DMRI) for multi-breath motion simulation. Methods: A demons-based DIR algorithm was modified to enhance the deformation range for TR-4DMRI reconstruction using the super-resolution approach. A pseudo demons force was introduced to accelerate the coarse deformation in a multi-resolution (n = 3) DIR approach. The intensity gradient of a voxel was applied to its neighboring (5 × 5 × 5) voxels with a weight of Gaussian probability profile ($\sigma$ = 1 voxel) to extend the demons force, especially on those voxels that have little intensity gradience but high-intensity difference. A digital 4DMRI phantom with 3–8 cm diaphragmatic motions was used for DIR comparison. 
Six volunteers were scanned with two high-resolution (highR: 2 × 2 × 2 mm3) breath-hold (BH) 3DMR images at full inhalation (BHI) and full exhalation (BHE) and low-resolution (lowR: 5 × 5 × 5 mm3) free-breathing (FB) 3DMR cine images (2 Hz) under an IRB-approved protocol. A cross-consistency check (CCC) (BHI→FB←BHE), with voxel intensity correlation (VIC) and inverse consistency error (ICE), was introduced for cross-verification of TR-4DMRI reconstruction. Results: Using the digital phantom, the maximum deformable magnitude is doubled using the modified DIR from 3 to 6 cm at the diaphragm. In six human subjects, the first 15-iteration DIR using the pseudo force deforms 200 ± 150{\%} more than the original force, and succeeds in all 12 cases, whereas the original demons-based DIR failed in 67{\%} of tested cases. Using the pseudo force, high VIC ({\textgreater}0.9) and small ICE (1.6 ± 0.6 mm) values are observed for DIR of BHI{\&}BHE, BHI→FB, and BHE→FB. The CCC identifies four questionable cases, in which two cases need further DIR refinement, without missing true negative. Conclusions: The introduction of a pseudo demons force enhances the largest deformation magnitude up to 6 cm. The cross-consistency check ensures the quality of TR-4DMRI reconstruction. Further investigation is ongoing to fully characterize TR-4DMRI for potential multi-breathing-cycle radiotherapy simulation. }, } |
2018 | Journal | Paolo Zaffino, Delia Ciardo, Patrik Raudaschl, Karl Fritscher, Rosalinda Ricotti, Daniela Alterio, Giulia Marvaso, Cristiana Fodor, Guido Baroni, Francesco Amato, Roberto Orecchia, Barbara Alicja Jereczek-Fossa, Gregory C. Sharp, Maria Francesca Spadea (2018). Multi atlas based segmentation: Should we prefer the best atlas group over the group of best atlases?. Physics in Medicine and Biology, 63(12), pp. 9. (link) (bib) x @article{RN820, year = { 2018 }, volume = { 63 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Multi atlas based segmentation: Should we prefer the best atlas group over the group of best atlases? }, pages = { 9 }, number = { 12 }, keywords = { atlas selection,convolutional neural network,medical image segmentation,multi atlas based segmentation,oracle selection }, journal = { Physics in Medicine and Biology }, issn = { 13616560 }, doi = { 10.1088/1361-6560/aac712 }, author = { Zaffino and Ciardo and Raudaschl and Fritscher and Ricotti and Alterio and Marvaso and Fodor and Baroni and Amato and Orecchia and Jereczek-Fossa and Sharp and Spadea }, abstract = { Multi atlas based segmentation (MABS) uses a database of atlas images, and an atlas selection process is used to choose an atlas subset for registration and voting. In the current state of the art, atlases are chosen according to a similarity criterion between the target subject and each atlas in the database. In this paper, we propose a new concept for atlas selection that relies on selecting the best performing group of atlases rather than the group of highest scoring individual atlases. Experiments were performed using CT images of 50 patients, with contours of brainstem and parotid glands. The dataset was randomly split into two groups: 20 volumes were used as an atlas database and 30 served as target subjects for testing. Classic oracle selection, where atlases are chosen by the highest dice similarity coefficient (DSC) with the target, was performed. 
This was compared to oracle group selection, where all the combinations of atlas subgroups were considered and scored by computing DSC with the target subject. Subsequently, convolutional neural networks were designed to predict the best group of atlases. The results were also compared with the selection strategy based on normalized mutual information (NMI). Oracle group was proven to be significantly better than classic oracle selection (p {\textless} 10-5). Atlas group selection led to a median ± interquartile DSC of 0.740 ± 0.084, 0.718 ± 0.086 and 0.670 ± 0.097 for brainstem and left/right parotid glands respectively, outperforming NMI selection 0.676 ± 0.113, 0.632 ± 0.104 and 0.606 ± 0.118 (p {\textless} 0.001) as well as classic oracle selection. The implemented methodology is a proof of principle that selecting the atlases by considering the performance of the entire group of atlases instead of each single atlas leads to higher segmentation accuracy, being even better then current oracle strategy. This finding opens a new discussion about the most appropriate atlas selection criterion for MABS. }, } |
2018 | Journal | M. Ben Youssef, F. Lavergne, K. Sab, K. Miled, J. Neji (2018). Upscaling the elastic stiffness of foam concrete as a three-phase composite material. Cement and Concrete Research, 110, pp. 13–23. (link) (bib) x @article{RN872, year = { 2018 }, volume = { 110 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85046623861{\&}doi=10.1016{\%}2Fj.cemconres.2018.04.021{\&}partnerID=40{\&}md5=2e128f4fc86a6df7726434940ab8ceb7 }, type = { Journal Article }, title = { Upscaling the elastic stiffness of foam concrete as a three-phase composite material }, pages = { 13--23 }, keywords = { Foam concrete,Homogenization,Microstructure,Numerical simulations,Young modulus }, journal = { Cement and Concrete Research }, issn = { 00088846 }, doi = { 10.1016/j.cemconres.2018.04.021 }, author = { {Ben Youssef}, M. and Lavergne, F. and Sab, K. and Miled, K. and Neji, J. }, abstract = { The stiffness of foam concrete depends primarily on the added porosity. Nevertheless, by performing 3D elastic numerical simulations on artificial unit cells in the frame of periodic homogenization, it is shown that describing foam concrete as a porous material is not sufficient to explain the experimental measurements of the Young modulus for added porosity higher than 40{\%}. Indeed, introducing sand as a third phase enables to recover accurate estimates of the Young modulus. Furthermore, for highly porous concrete foams, it is shown that the stress concentrates in thin members deprived of stiff sand particles, thus leading to a softer overall stiffness. }, } |
2018 | Journal | Tianwu Xie, Paolo Zanotti-Fregonara, Agathe Edet-Sanson, Habib Zaidi (2018). Patient-specific computational model and dosimetry calculations for PET/CT of a patient pregnant with twins. Journal of Nuclear Medicine, 59(9), pp. 1451–1458. (link) (bib) x @article{RN867, year = { 2018 }, volume = { 59 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053206243{\&}doi=10.2967{\%}2Fjnumed.117.205286{\&}partnerID=40{\&}md5=2ee14ea7a0f6a85f10442957bf4c6b2e }, type = { Journal Article }, title = { Patient-specific computational model and dosimetry calculations for PET/CT of a patient pregnant with twins }, pages = { 1451--1458 }, number = { 9 }, keywords = { CT,Fetus,Monte Carlo simulation,Pregnant female models,Radiation dosimetry }, journal = { Journal of Nuclear Medicine }, issn = { 2159662X }, doi = { 10.2967/jnumed.117.205286 }, author = { Xie and Zanotti-Fregonara and Edet-Sanson and Zaidi }, abstract = { The radiation dose delivered to pregnant patients during radiologic imaging procedures raises health concerns because the developing embryo and fetus are considered to be highly radiosensitive. To appropriately weigh the diagnostic benefits against the radiation risks, the radiologist needs reasonably accurate and detailed estimates of the fetal dose. Expanding our previously developed series of computational phantoms for pregnant women, we here describe a personalized model for twin pregnancy, based on an actual clinical scan. Methods: The model is based on a standardized hybrid pregnant female and fetus phantom and on a clinical case of a patient who underwent an 18F-FDG PET/CT scan while expecting twins at 25 weeks' gestation. This model enabled us to produce a realistic physical representation of the pregnant patient and to estimate the maternal and fetal organ doses from the 18FFDG and CT components. The Monte Carlo N-Particle Extended general-purpose code was used for radiation transport simulation. 
Results: The 18F-FDG doses for the 2 fetuses were 3.78 and 3.99 mGy, and the CT doses were 0.76 and 0.70 mGy, respectively. Therefore, the relative contribution of 18F-FDG and CT to the total dose to the fetuses was about 84{\%} and 16{\%}, respectively. Meanwhile, for 18F-FDG, the calculated personalized absorbed dose was about 40{\%}-50{\%} higher than the doses reported by other dosimetry computer software tools. Conclusion: Our approach to constructing personalized computational models allows estimation of a patient-specific radiation dose, even in cases with unusual anatomic features such as a twin pregnancy. Our results also show that, even in twins, the fetal organ doses from both 18F-FDG and CT present a certain variability linked to the anatomic characteristics. The CT fetal dose is smaller than the 18F-FDG PET dose. }, } |
2018 | Journal | Davis M. Vigneault, Amir Pourmorteza, Marvin L. Thomas, David A. Bluemke, J. Alison Noble (2018). SiSSR: Simultaneous subdivision surface registration for the quantification of cardiac function from computed tomography in canines. Medical Image Analysis, 46, pp. 215–228. (link) (bib) x @article{RN876, year = { 2018 }, volume = { 46 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85044919491{\&}doi=10.1016{\%}2Fj.media.2018.03.009{\&}partnerID=40{\&}md5=24f381dd27d1b120828f01ad803dafe5 }, type = { Journal Article }, title = { SiSSR: Simultaneous subdivision surface registration for the quantification of cardiac function from computed tomography in canines }, pages = { 215--228 }, keywords = { Cardiac computed tomography,Loop subdivision surface,Personalized cardiac mesh generation,Regional cardiac function }, journal = { Medical Image Analysis }, issn = { 13618423 }, doi = { 10.1016/j.media.2018.03.009 }, author = { Vigneault and Pourmorteza and Thomas and Bluemke and Noble }, abstract = { Recent improvements in cardiac computed tomography (CCT) allow for whole-heart functional studies to be acquired at low radiation dose ({\textless}2mSv) and high-temporal resolution ({\textless}100ms) in a single heart beat. Although the extraction of regional functional information from these images is of great clinical interest, there is a paucity of research into the quantification of regional function from CCT, contrasting with the large body of work in echocardiography and cardiac MR. Here we present the Simultaneous Subdivision Surface Registration (SiSSR) method: a fast, semi-automated image analysis pipeline for quantifying regional function from contrast-enhanced CCT. For each of thirteen adult male canines, we construct an anatomical reference mesh representing the left ventricular (LV) endocardium, obviating the need for a template mesh to be manually sculpted and initialized. 
We treat this generated mesh as a Loop subdivision surface, and adapt a technique previously described in the context of 3-D echocardiography to register these surfaces to the endocardium efficiently across all cardiac frames simultaneously. Although previous work performs the registration at a single resolution, we observe that subdivision surfaces naturally suggest a multiresolution approach, leading to faster convergence and avoiding local minima. We additionally make two notable changes to the cost function of the optimization, explicitly encouraging plausible biological motion and high mesh quality. Finally, we calculate an accepted functional metric for CCT from the registered surfaces, and compare our results to an alternate state-of-the-art CCT method. }, } |
2018 | Journal | Joshua A. Taillon, Christopher Pellegrinelli, Yi Lin Huang, Eric D. Wachsman, Lourdes G. Salamanca-Riba (2018). Improving microstructural quantification in FIB/SEM nanotomography. Ultramicroscopy, 184, pp. 24–38. (link) (bib) x @article{RN889, year = { 2018 }, volume = { 184 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85027830512{\&}doi=10.1016{\%}2Fj.ultramic.2017.07.017{\&}partnerID=40{\&}md5=ec5c5bcb14a5795c82946322610e647d }, type = { Journal Article }, title = { Improving microstructural quantification in FIB/SEM nanotomography }, pages = { 24--38 }, keywords = { 3D reconstruction,Focused ion beam,Microstructure quantification,Scanning electron microscopy,Tortuosity,Triple phase boundaries }, journal = { Ultramicroscopy }, issn = { 18792723 }, doi = { 10.1016/j.ultramic.2017.07.017 }, author = { Taillon and Pellegrinelli and Huang and Wachsman and Salamanca-Riba }, abstract = { FIB/SEM nanotomography (FIB-nt) is a powerful technique for the determination and quantification of the three-dimensional microstructure in subsurface features. Often times, the microstructure of a sample is the ultimate determiner of the overall performance of a system, and a detailed understanding of its properties is crucial in advancing the materials engineering of a resulting device. While the FIB-nt technique has developed significantly in the 15 years since its introduction, advanced nanotomographic analysis is still far from routine, and a number of challenges remain in data acquisition and post-processing. In this work, we present a number of techniques to improve the quality of the acquired data, together with easy-to-implement methods to obtain “advanced” microstructural quantifications. The techniques are applied to a solid oxide fuel cell cathode of interest to the electrochemistry community, but the methodologies are easily adaptable to a wide range of material systems. 
Finally, results from an analyzed sample are presented as a practical example of how these techniques can be implemented. }, } |
2018 | Journal | Pawel Siciarz, Boyd McCurdy, Faiez Alshafa, Peter Greer, Joan Hatton, Philip Wright (2018). Evaluation of CT to CBCT non-linear dense anatomical block matching registration for prostate patients. Biomedical Physics and Engineering Express, 4(4), pp. 15. (link) (bib) x @article{RN795, year = { 2018 }, volume = { 4 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Evaluation of CT to CBCT non-linear dense anatomical block matching registration for prostate patients }, pages = { 15 }, number = { 4 }, keywords = { CT-CBCT deformable image registration,dense anatomical block matching,prostate cancer,radiation therapy }, journal = { Biomedical Physics and Engineering Express }, issn = { 20571976 }, doi = { 10.1088/2057-1976/aacada }, author = { Siciarz and McCurdy and Alshafa and Greer and Hatton and Wright }, abstract = { Deformable image registration (DIR) is a rapidly developing discipline in the field of medical imaging that has found numerous applications in modern radiation therapy. To be used in the clinical environment, DIR requires an accurate and robust algorithm supported by the careful evaluation. The purpose of this study was to evaluate the performance of the non-linear Dense Anatomical Block Matching (DABM) algorithm for CT-CBCT image registration of prostate cancer patients. Pre-treatment CT (pCT) images of five prostate patients that underwent intensity modulated radiation therapy (IMRT) were selected for this work. Mid-treatment CBCT data sets acquired during radiotherapy course were used to help validate the algorithm performance and benchmark against other commonly used DIR algorithms. Rigid alignment was followed by the DIR of considered images. 
After registration, structures (PTV, GTV, Bladder and Rectum) delineated on the pCT were deformed using the obtained deformation vector fields (DVFs), then propagated to the CBCT images and compared to the analogous contours delineated on the CBCT by an experienced radiation oncologist. The accuracy of image registration was assessed by several quantitative metrics: Dice Similarity Coefficient (DSC), Hausdorff Distances (HD; average and 95th percentile), Center of the Mass Shift (COM) as well as by physician validation. The topology of the obtained deformation vector fields was analyzed by the Jacobian determinant. The accuracy of the inverted DFVs was investigated by the application of the Inverse Consistency Error (ICE). The performance of the DABM algorithm was quantitatively compared to Rigid, Affine and B-spline algorithms. Results indicate that for all the patients and anatomical structures considered here, both the accuracy and the consistency of the DABM algorithm are considerably better than the other evaluated registration methods. Generated DVFs have a well-preserved topology and small ICEs. Presented findings show that DABM is a promising alternative to the existing common strategies for CT-CBCT image registration and its application in the adaptive radiation therapy of the pelvic region. }, } |
2018 | Journal | Eungjune Shim, Youngjun Kim, Deukhee Lee, Byung Hoon Lee, Sungkyung Woo, Kunwoo Lee (2018). 2D-3D registration for 3D analysis of lower limb alignment in a weight-bearing condition. Applied Mathematics, 33(1), pp. 59–70. (link) (bib) x @article{RN787, year = { 2018 }, volume = { 33 }, url = { https://doi.org/10.1007/s11766-018-3459-2 }, type = { Journal Article }, title = { 2D-3D registration for 3D analysis of lower limb alignment in a weight-bearing condition }, pages = { 59--70 }, number = { 1 }, keywords = { 2D-3D registration,3D analysis,CT,X-ray,simulated annealing }, journal = { Applied Mathematics }, issn = { 10051031 }, doi = { 10.1007/s11766-018-3459-2 }, author = { Shim and Kim and Lee and Lee and Woo and Lee }, abstract = { X-ray imaging is the conventional method for diagnosing the orthopedic condition of a patient. Computerized Tomography(CT) scanning is another diagnostic method that provides patient's 3D anatomical information. However, both methods have limitations when diagnosing the whole leg; X-ray imaging does not provide 3D information, and normal CT scanning cannot be performed with a standing posture. Obtaining 3D data regarding the whole leg in a standing posture is clinically important because it enables 3D analysis in the weight bearing condition. Based on these clinical needs, a hardware-based bi-plane X-ray imaging system has been developed; it uses two orthogonal X-ray images. However, such methods have not been made available in general clinics because of the high cost. Therefore, we proposed a widely adaptive method for 2D X-ray image and 3D CT scan data. By this method, it is possible to three-dimensionally analyze the whole leg in standing posture. The optimal position that generates the most similar image is the captured X-ray image. The algorithm verifies the similarity using the performance of the proposed method by simulation-based experiments. Then, we analyzed the internal-external rotation angle of the femur using real patient data. 
Approximately 10.55 degrees of internal rotations were found relative to the defined anterior-posterior direction. In this paper, we present a useful registration method using the conventional X-ray image and 3D CT scan data to analyze the whole leg in the weight-bearing condition. }, } |
2018 | Journal | Mostafa Salem, Mariano Cabezas, Sergi Valverde, Deborah Pareto, Arnau Oliver, Joaquim Salvi, \`Alex Rovira, Xavier Llad\'o (2018). A supervised framework with intensity subtraction and deformation field features for the detection of new T2-w lesions in multiple sclerosis. NeuroImage: Clinical, 17, pp. 607–615. (link) (bib) x @article{RN790, year = { 2018 }, volume = { 17 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A supervised framework with intensity subtraction and deformation field features for the detection of new T2-w lesions in multiple sclerosis }, pages = { 607--615 }, keywords = { Automatic new lesion detection,Brain,MRI,Machine learning,Multiple sclerosis }, journal = { NeuroImage: Clinical }, issn = { 22131582 }, doi = { 10.1016/j.nicl.2017.11.015 }, author = { Salem and Cabezas and Valverde and Pareto and Oliver and Salvi and Rovira and Llad{\'{o}} }, abstract = { Introduction Longitudinal magnetic resonance imaging (MRI) analysis has an important role in multiple sclerosis diagnosis and follow-up. The presence of new T2-w lesions on brain MRI scans is considered a prognostic and predictive biomarker for the disease. In this study, we propose a supervised approach for detecting new T2-w lesions using features from image intensities, subtraction values, and deformation fields (DF). Methods One year apart multi-channel brain MRI scans were obtained for 60 patients, 36 of them with new T2-w lesions. Images from both temporal points were preprocessed and co-registered. Afterwards, they were registered using multi-resolution affine registration, allowing their subtraction. In particular, the DFs between both images were computed with the Demons non-rigid registration algorithm. Afterwards, a logistic regression model was trained with features from image intensities, subtraction values, and DF operators. We evaluated the performance of the model following a leave-one-out cross-validation scheme. 
Results In terms of detection, we obtained a mean Dice similarity coefficient of 0.77 with a true-positive rate of 74.30{\%} and a false-positive detection rate of 11.86{\%}. In terms of segmentation, we obtained a mean Dice similarity coefficient of 0.56. The performance of our model was significantly higher than state-of-the-art methods. Conclusions The performance of the proposed method shows the benefits of using DF operators as features to train a supervised learning model. Compared to other methods, the proposed model decreases the number of false-positives while increasing the number of true-positives, which is relevant for clinical settings. }, } |
2018 | Journal | Prakasham Rumajogee, Svetlana Altamentova, Lijun Li, Junyi Li, Jian Wang, Alan Kuurstra, Mohamad Khazaei, Stephanie Beldick, Ravi S. Menon, Derek Van der Kooy, Michael G. Fehlings (2018). Exogenous neural precursor cell transplantation results in structural and functional recovery in a hypoxic-ischemic hemiplegic mouse model. eNeuro, 5(5), pp. NA (link) (bib) x @article{RN869, year = { 2018 }, volume = { 5 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85057951115{\&}doi=10.1523{\%}2FENEURO.0369-18.2018{\&}partnerID=40{\&}md5=904cdf49f286dcf83962f118f9096771 }, type = { Journal Article }, title = { Exogenous neural precursor cell transplantation results in structural and functional recovery in a hypoxic-ischemic hemiplegic mouse model }, number = { 5 }, keywords = { Cerebral palsy,Hypoxic-ischemia,Myelination,Neural precursor cells,Oligodendrocytes,White matter injury }, journal = { eNeuro }, issn = { 23732822 }, doi = { 10.1523/ENEURO.0369-18.2018 }, author = { Rumajogee and Altamentova and Li and Li and Wang and Kuurstra and Khazaei and Beldick and Menon and {Van der Kooy} and Fehlings }, abstract = { Cerebral palsy (CP) is a common pediatric neurodevelopmental disorder, frequently resulting in motor and developmental deficits and often accompanied by cognitive impairments. A regular pathobiological hallmark of CP is oligodendrocyte maturation impairment resulting in white matter (WM) injury and reduced axonal myelination. Regeneration therapies based on cell replacement are currently limited, but neural precursor cells (NPCs), as cellular support for myelination, represent a promising regeneration strategy to treat CP, although the transplantation parameters (e.g., timing, dosage, mechanism) remain to be determined. We optimized a hemiplegic mouse model of neonatal hypoxia-ischemia that mirrors the pathobiological hallmarks of CP and transplanted NPCs into the corpus callosum (CC), a major white matter structure impacted in CP patients. 
The NPCs survived, engrafted, and differentiated morphologically in male and female mice. Histology and MRI showed repair of lesioned structures. Furthermore, electrophysiology revealed functional myelination of the CC (e.g., restoration of conduction velocity), while cylinder and CatWalk tests demonstrated motor recovery of the affected forelimb. Endogenous oligodendrocytes, recruited in the CC following transplantation of exogenous NPCs, are the principal actors in this recovery process. The lack of differentiation of the transplanted NPCs is consistent with enhanced recovery due to an indirect mechanism, such as a trophic and/or “bio-bridge” support mediated by endogenous oligodendrocytes. Our work establishes that transplantation of NPCs represents a viable therapeutic strategy for CP treatment, and that the enhanced recovery is mediated by endogenous oligodendrocytes. This will further our understanding and contribute to the improvement of cellular therapeutic strategies. }, } |
2018 | Journal | Oliver Rübel, Benjamin P. Bowen (2018). BASTet: Shareable and Reproducible Analysis and Visualization of Mass Spectrometry Imaging Data via OpenMSI. IEEE Transactions on Visualization and Computer Graphics, 24(1), pp. 1025–1035. (link) (bib) x @article{RN888, year = { 2018 }, volume = { 24 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028711599{\&}doi=10.1109{\%}2FTVCG.2017.2744479{\&}partnerID=40{\&}md5=ed08c67415ad9ee80b1a99e0116908fa }, type = { Journal Article }, title = { BASTet: Shareable and Reproducible Analysis and Visualization of Mass Spectrometry Imaging Data via OpenMSI }, pages = { 1025--1035 }, number = { 1 }, keywords = { Analysis Workflows,Data management,Data provenance,Data sharing,Mass spectrometry imaging,Visualization }, journal = { IEEE Transactions on Visualization and Computer Graphics }, issn = { 10772626 }, doi = { 10.1109/TVCG.2017.2744479 }, author = { R{\"{u}}bel and Bowen }, abstract = { Mass spectrometry imaging (MSI) is a transformative imaging method that supports the untargeted, quantitative measurement of the chemical composition and spatial heterogeneity of complex samples with broad applications in life sciences, bioenergy, and health. While MSI data can be routinely collected, its broad application is currently limited by the lack of easily accessible analysis methods that can process data of the size, volume, diversity, and complexity generated by MSI experiments. The development and application of cutting-edge analytical methods is a core driver in MSI research for new scientific discoveries, medical diagnostics, and commercial-innovation. However, the lack of means to share, apply, and reproduce analyses hinders the broad application, validation, and use of novel MSI analysis methods. 
To address this central challenge, we introduce the Berkeley Analysis and Storage Toolkit (BASTet), a novel framework for shareable and reproducible data analysis that supports standardized data and analysis interfaces, integrated data storage, data provenance, workflow management, and a broad set of integrated tools. Based on BASTet, we describe the extension of the OpenMSI mass spectrometry imaging science gateway to enable web-based sharing, reuse, analysis, and visualization of data analyses and derived data products. We demonstrate the application of BASTet and OpenMSI in practice to identify and compare characteristic substructures in the mouse brain based on their chemical composition measured via MSI. }, } |
2018 | Journal | Darlan Bruno Pontes Quintanilha, Arist\'ofanes Corr\^ea Silva, Anselmo Cardoso De Paiva, Marcelo Gattass (2018). Mixture of dynamic textures applied to temporal analysis of lung lesions. Journal of Computational and Theoretical Nanoscience, 15(6-7), pp. 1839–1852. (link) (bib) x @article{RN884, year = { 2018 }, volume = { 15 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85057328183{\&}doi=10.1166{\%}2Fjctn.2018.7321{\&}partnerID=40{\&}md5=332bb09efa18d59dcf58b44028959bc7 }, type = { Journal Article }, title = { Mixture of dynamic textures applied to temporal analysis of lung lesions }, pages = { 1839--1852 }, number = { 6-7 }, keywords = { Dynamic texture mixture,Lung cancer,Medical image,Temporal analysis,Tissue change detection }, journal = { Journal of Computational and Theoretical Nanoscience }, issn = { 15461963 }, doi = { 10.1166/jctn.2018.7321 }, author = { Quintanilha and Silva and {De Paiva} and Gattass }, abstract = { Lung cancer is still one of the most popular types of cancer worldwide. The analysis of consecutive timely spaced CT scans is a useful tool to analyze the lesion's malignant behavior during treatment or for the follow up of lesions undetermined as to its nature (malignant or benign). This paper aims to propose a method to obtain more detailed information about the lesion changes through complementary studies on the biological activity of pulmonary nodules. The methodology presented uses mixture of dynamic textures to cluster different tissues of lung lesions, in accordance with the density change over time, and to describe regional changes using similarity measures. The study was conducted on two chest computed tomography databases. The Public Lung Database (PLD), which has lesions that undergo evaluation for drug therapy and a private database of lung lesions of undetermined diagnosis. The lesions from the public database had areas with density variations in the range from 0.01{\%} to 110.53{\%}. 
Lesions from the private database showed regions with density variations from 0.11{\%} to 46.94{\%} range. The density change analysis proposed, that is done by regionally different locations, provides more detailed information about their change over time. Lesions considered volumetrically stable may contain locations that suffer noticeable changes, as well as lesions with large volumetric growth may not show a significant change in density. }, } |
2018 | Journal | Nirvedh H. Meshram, Tomy Varghese (2018). GPU accelerated multilevel lagrangian carotid strain imaging. IEEE Transactions on Ultrasonics, Ferroelectrics, and Frequency Control, 65(8), pp. 1370–1379. (link) (bib) x @article{RN871, year = { 2018 }, volume = { 65 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85047652346{\&}doi=10.1109{\%}2FTUFFC.2018.2841346{\&}partnerID=40{\&}md5=635a26fdcb2c9b17367aeddc67197444 }, type = { Journal Article }, title = { GPU accelerated multilevel lagrangian carotid strain imaging }, pages = { 1370--1379 }, number = { 8 }, keywords = { Carotid strain imaging,compute unified device architecture (CUDA),elastography,graphics processing unit (GPU),ultrasound }, journal = { IEEE Transactions on Ultrasonics, Ferroelectrics, and Frequency Control }, issn = { 08853010 }, doi = { 10.1109/TUFFC.2018.2841346 }, author = { Meshram and Varghese }, abstract = { A multilevel Lagrangian carotid strain imaging algorithm is analyzed to identify computational bottlenecks for implementation on a graphics processing unit (GPU). Displacement tracking including regularization was found to be the most computationally expensive aspect of this strain imaging algorithm taking about 2.2 h for an entire cardiac cycle. This intensive displacement tracking was essential to obtain Lagrangian strain tensors. However, most of the computational techniques used for displacement tracking are parallelizable, and hence GPU implementation is expected to be beneficial. A new scheme for subsample displacement estimation referred to as a multilevel global peak finder was also developed since the Nelder-Mead simplex optimization technique used in the CPU implementation was not suitable for GPU implementation. GPU optimizations to minimize thread divergence and utilization of shared and texture memories were also implemented. This enables efficient use of the GPU computational hardware and memory bandwidth. 
Overall, an application speedup of 168.75 × was obtained enabling the algorithm to finish in about 50 s for a cardiac cycle. Last, comparison of GPU and CPU implementations demonstrated no significant difference in the quality of displacement vector and strain tensor estimation with the two implementations up to a 5{\%} interframe deformation. Hence, a GPU implementation is feasible for clinical adoption and opens opportunity for other computationally intensive techniques. }, } |
2018 | Journal | Tim McGraw, Alejandro Guayaquil-Sosa (2018). Hybrid rendering of exploded views for medical image atlas visualization. Computer Methods in Biomechanics and Biomedical Engineering: Imaging and Visualization, 6(6), pp. 668–677. (link) (bib) x @article{RN960, year = { 2018 }, volume = { 6 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85024392095{\&}doi=10.1080{\%}2F21681163.2017.1343686{\&}partnerID=40{\&}md5=627a45812ac92f205dffd7dce1423f77 }, type = { Journal Article }, title = { Hybrid rendering of exploded views for medical image atlas visualization }, pages = { 668--677 }, number = { 6 }, keywords = { Biomedical visualisation,brain atlas,volume rendering }, journal = { Computer Methods in Biomechanics and Biomedical Engineering: Imaging and Visualization }, issn = { 21681171 }, doi = { 10.1080/21681163.2017.1343686 }, author = { McGraw and Guayaquil-Sosa }, abstract = { Medical image atlases contain much information about human anatomy, but learning the shapes of anatomical regions and making sense of the overall structure defined in the atlas can be problematic. Atlases may contain hundreds of regions with complex shapes which can be tightly packed together. This makes visualisation difficult since the shapes can fit together in complex ways and visually obscure each other. In this work, we describe a technique which enables interactive exploration of medical image atlases that permits the hierarchical structure of the atlas and the content of an underlying medical image to be investigated simultaneously. Our method enables a user to create visualizations of the atlas similar to the exploded views used in technical illustrations to show the structure of mechanical assemblies. These views are constrained by the geometry of the atlas and the hierarchical structure to reduce the complexity of user interaction. We also enable the user to explode the atlas meshes themselves. 
The atlas meshes are registered with a medical image which is displayed on the cut surfaces of the meshes using raycasting. Results from the AAL human brain atlas are presented and discussed. }, } |
2018 | Journal | Ping Lu, Livia Barazzetti, Vimal Chandran, Kate Gavaghan, Stefan Weber, Nicolas Gerber, Mauricio Reyes (2018). Highly accurate facial nerve segmentation refinement from CBCT/CT imaging using a super-resolution classification approach. IEEE Transactions on Biomedical Engineering, 65(1), pp. 178–188. (link) (bib) x @article{RN961, year = { 2018 }, volume = { 65 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85047089305{\&}doi=10.1109{\%}2FTBME.2017.2697916{\&}partnerID=40{\&}md5=1e4c788c488d0055ad983d484d1cb30c }, type = { Journal Article }, title = { Highly accurate facial nerve segmentation refinement from CBCT/CT imaging using a super-resolution classification approach }, pages = { 178--188 }, number = { 1 }, keywords = { CBCT,CT,Cochlear implantation,Facial nerve,Micro-CT,Segmentation,Superresolution,Supervised learning }, journal = { IEEE Transactions on Biomedical Engineering }, issn = { 15582531 }, doi = { 10.1109/TBME.2017.2697916 }, author = { Lu and Barazzetti and Chandran and Gavaghan and Weber and Gerber and Reyes }, abstract = { Facial nerve segmentation is of considerable importance for preoperative planning of cochlear implantation. However, it is strongly influenced by the relatively low resolution of the cone-beam computed tomography (CBCT) images used in clinical practice. In this paper, we propose a super-resolution classification method, which refines a given initial segmentation of the facial nerve to a subvoxel classification level from CBCT/CT images. The super-resolution classification method learns the mapping from low-resolution CBCT/CT images to high-resolution facial nerve label images, obtained from manual segmentation on micro-CT images. We present preliminary results on dataset, 15 ex vivo samples scanned including pairs of CBCT/CT scans and high-resolution micro-CT scans, with a leave-one-out evaluation, and manual segmentations on micro-CT images as ground truth. 
Our experiments achieved a segmentation accuracy with a Dice coefficient of 0.818 ± 0.052, surface-to-surface distance of 0.121 ± 0.030 mm, and Hausdorff distance of 0.715 ± 0.169 mm. We compared the proposed technique to two other semiautomated segmentation software tools, ITK-SNAP and GeoS, and show the ability of the proposed approach to yield subvoxel levels of accuracy in delineating the facial nerve. }, } |
2018 | Journal | Anton S. Kornilov, Ilia V. Safonov (2018). An overview of watershed algorithm implementations in open source libraries. Journal of Imaging, 4(10), pp. NA (link) (bib) x @article{RN866, year = { 2018 }, volume = { 4 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85059431965{\&}doi=10.3390{\%}2Fjimaging4100123{\&}partnerID=40{\&}md5=3ef917586e8b50f8e99994a08d52d2c9 }, type = { Journal Article }, title = { An overview of watershed algorithm implementations in open source libraries }, number = { 10 }, keywords = { Computational complexity,Flooding,Memory consumption,Open source software,Processing speed,Rain falling,Watershed segmentation }, journal = { Journal of Imaging }, issn = { 2313433X }, doi = { 10.3390/jimaging4100123 }, author = { Kornilov and Safonov }, abstract = { Watershed is a widespread technique for image segmentation. Many researchers apply the method implemented in open source libraries without a deep understanding of its characteristics and limitations. In the review, we describe benchmarking outcomes of six open-source marker-controlled watershed implementations for the segmentation of 2D and 3D images. Even though the considered solutions are based on the same algorithm by flooding having O(n)computational complexity, these implementations have significantly different performance. In addition, building of watershed lines grows processing time. High memory consumption is one more bottleneck for dealing with huge volumetric images. Sometimes, the usage of more optimal software is capable of mitigating the issues with the long processing time and insufficient memory space. We assume parallel processing is capable of overcoming the current limitations. However, the development of concurrent approaches for the watershed segmentation remains a challenging problem. }, } |
2018 | Journal | Taha S. Koltukluoǧlu, Pablo J. Blanco (2018). Boundary control in computational haemodynamics. Journal of Fluid Mechanics, 847, pp. 329–364. (link) (bib) x @article{RN873, year = { 2018 }, volume = { 847 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85047266553{\&}doi=10.1017{\%}2Fjfm.2018.329{\&}partnerID=40{\&}md5=50aee576fbf3bbc2426b262c16341404 }, type = { Journal Article }, title = { Boundary control in computational haemodynamics }, pages = { 329--364 }, keywords = { blood flow,control theory,variational methods }, journal = { Journal of Fluid Mechanics }, issn = { 14697645 }, doi = { 10.1017/jfm.2018.329 }, author = { Koltukluoǧlu and Blanco }, abstract = { In this work, a data assimilation method is proposed following an optimise-then-discretise approach, and is applied in the context of computational haemodynamics. The methodology aims to make use of phase-contrast magnetic resonance imaging to perform optimal flow control in computational fluid dynamic simulations. Flow matching between observations and model predictions is performed in luminal regions, excluding near-wall areas, improving the near-wall flow reconstruction to enhance the estimation of related quantities such as wall shear stresses. The proposed approach remarkably improves the flow field at the aortic root and reveals a great potential for predicting clinically relevant haemodynamic phenomenology. This work presents model validation against an analytical solution using the standard 3-D Hagen-Poiseuille flow, and validation with real data involving the flow control problem in a glass replica of a human aorta imaged with a 3T magnetic resonance scanner. In vitro experiments consist of both a numerically generated reference flow solution, which is considered as the ground truth, as well as real flow MRI data obtained from phase-contrast flow acquisitions. 
The validation against the in vitro flow MRI experiments is performed for different flow regimes and model parameters including different mesh refinements. }, } |
2018 | Journal | Artur Klepaczko, Piotr Szczypiński, Michał Strzelecki, Ludomir Stefańczyk (2018). Simulation of phase contrast angiography for renal arterial models. BioMedical Engineering Online, 17(1), pp. NA (link) (bib) x @article{RN877, year = { 2018 }, volume = { 17 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045392503{\&}doi=10.1186{\%}2Fs12938-018-0471-y{\&}partnerID=40{\&}md5=eb7e35093af28b9fca81c4e33c818b84 }, type = { Journal Article }, title = { Simulation of phase contrast angiography for renal arterial models }, number = { 1 }, keywords = { Blood flow simulation,Kidney vasculature modeling,MRI simulation,Phase contrast angiography,Vessel segmentation }, journal = { BioMedical Engineering Online }, issn = { 1475925X }, doi = { 10.1186/s12938-018-0471-y }, author = { Klepaczko and Szczypi{\'{n}}ski and Strzelecki and Stefa{\'{n}}czyk }, abstract = { Background: With the development of versatile magnetic resonance acquisition techniques there arises a need for more advanced imaging simulation tools to enable adequate image appearance prediction, measurement sequence design and testing thereof. Recently, there is a growing interest in phase contrast angiography (PCA) sequence due to the capabilities of blood flow quantification that it offers. Moreover, as it is a non-contrast enhanced protocol, it has become an attractive option in areas, where usage of invasive contrast agents is not indifferent for the imaged tissue. Monitoring of the kidney function is an example of such an application. Results: We present a computer framework for simulation of the PCA protocol, both conventional and accelerated with echo-planar imaging (EPI) readout, and its application to the numerical models of kidney vasculatures. Eight patient-specific renal arterial trees were reconstructed following vessel segmentation in real computed tomography angiograms. In addition, a synthetic model was designed using a vascular tree growth simulation algorithm. 
The results embrace a series of synthetic PCA images of the renal arterial trees giving insight into the image formation and quantification of kidney hemodynamics. Conclusions: The designed simulation framework enables quantification of the PCA measurement error in relation to ground-truth flow velocity data. The mean velocity measurement error for the reconstructed renal arterial trees range from 1.5 to 12.8{\%} of the aliasing velocity value, depending on image resolution and flip angle. No statistically significant difference was observed between measurements obtained using EPI with a number of echos (NETL) = 4 and conventional PCA. In case of higher NETL factors peak velocity values can be underestimated up to 34{\%}. }, } |
2018 | Journal | Pablo Hernandez-Cerdan, Bradley W. Mansel, Andrew Leis, Leif Lundin, Martin A.K. Williams (2018). Structural Analysis of Polysaccharide Networks by Transmission Electron Microscopy: Comparison with Small-Angle X-ray Scattering. Biomacromolecules, 19(3), pp. 989–995. (link) (bib) x @article{RN882, year = { 2018 }, volume = { 19 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85043595060{\&}doi=10.1021{\%}2Facs.biomac.7b01773{\&}partnerID=40{\&}md5=395df47da135ea25e2ffd52248d04f49 }, type = { Journal Article }, title = { Structural Analysis of Polysaccharide Networks by Transmission Electron Microscopy: Comparison with Small-Angle X-ray Scattering }, pages = { 989--995 }, number = { 3 }, journal = { Biomacromolecules }, issn = { 15264602 }, doi = { 10.1021/acs.biomac.7b01773 }, author = { Hernandez-Cerdan and Mansel and Leis and Lundin and Williams }, abstract = { Polysaccharide gels assembled from the anionic biopolymers pectin and carrageenan have been studied using transmission electron microscopy (TEM). Gels were formed in several different ways: for pectin, hydrogen bonding was used to form junction zones between strands, whereas for carrageenan systems, several different ion types were used to form ionotropic networks. Using this approach, several distinct network architectures were realized. In addition to preparing gelled samples for electron microscopy, a set of samples was taken without performing the additional treatment necessitated by the TEM measurements, and these were studied directly by small-angle X-ray scattering (SAXS). Taking careful consideration of the relative merits of different image sizes and available processing techniques, the real-space images acquired by TEM were used via radial integration of the Fourier transform to produce simulated scattering patterns. 
These intensity-versus-wavevector plots were compared with the results of SAXS experiments carried out on the unadulterated gels using synchrotron radiation. Although information regarding chain thicknesses and flexibilities was found to be modified by labeling and changes in the dielectric constant and mechanical properties of the surroundings in the TEM, the studies carried out here show that careful protocols can produce data sets where information acquired above ∼20 nm is broadly consistent with that obtained by SAXS studies carried out on unadulterated samples. The fact that at larger length scale the structure of these water-rich networks seems largely preserved in the TEM samples suggests that three-dimensional (3D) TEM tomography experiments carried out with careful sample preparation will be valuable tools for measuring network architecture and connectivity; information that is lost in SAXS owing to the intrinsic averaging nature of the technique. }, } |
2018 | Journal | Christina Gillmann, Thomas Wischgoll, Bernd Hamann, Hans Hagen (2018). Accurate and reliable extraction of surfaces from image data using a multi-dimensional uncertainty model. Graphical Models, 99, pp. 13–21. (link) (bib) x @article{RN868, year = { 2018 }, volume = { 99 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85051412806{\&}doi=10.1016{\%}2Fj.gmod.2018.07.004{\&}partnerID=40{\&}md5=5d10dfa161a616c746fa9eae0597d362 }, type = { Journal Article }, title = { Accurate and reliable extraction of surfaces from image data using a multi-dimensional uncertainty model }, pages = { 13--21 }, keywords = { Parameter space exploration,Surface extraction,Uncertainty visualization }, journal = { Graphical Models }, issn = { 15240703 }, doi = { 10.1016/j.gmod.2018.07.004 }, author = { Gillmann and Wischgoll and Hamann and Hagen }, abstract = { Surface extraction is an important step in the image processing pipeline to estimate the size and shape of an object. Unfortunately, state of the art surface extraction algorithms form a straight forward extraction based on a pre-defined value that can lead to surfaces, that are not accurate. Furthermore, most isosurface extraction algorithms lack the ability to communicate uncertainty originating from the image data. This can lead to a rejection of such algorithms in many applications. To solve this problem, we propose a methodology to extract and optimize surfaces from image data based on a defined uncertainty model. To identify optimal parameters, the presented method defines a parameter space that is evaluated and rates each extraction run based on the remaining surface uncertainty. The resulting surfaces can be explored intuitively in an interactive framework. We applied our methodology to a variety of datasets to demonstrate the quality of the resulting surfaces. }, } |
2018 | Journal | Christina Gillmann, Robin G.C. Maack, Tobias Post, Thomas Wischgoll, Hans Hagen (2018). An Uncertainty-aware Workflow for Keyhole Surgery Planning using Hierarchical Image Semantics. Visual Informatics, 2(1), pp. 26–36. (link) (bib) x @article{RN878, year = { 2018 }, volume = { 2 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85066739022{\&}doi=10.1016{\%}2Fj.visinf.2018.04.004{\&}partnerID=40{\&}md5=373c953331ead7b18b936c470ef6d581 }, type = { Journal Article }, title = { An Uncertainty-aware Workflow for Keyhole Surgery Planning using Hierarchical Image Semantics }, pages = { 26--36 }, number = { 1 }, keywords = { Keyhole Surgery Planning,Uncertainty Visualization,Visual Analytics Workflow }, journal = { Visual Informatics }, issn = { 2468502X }, doi = { 10.1016/j.visinf.2018.04.004 }, author = { Gillmann and Maack and Post and Wischgoll and Hagen }, abstract = { Keyhole surgeries become increasingly important in clinical daily routine as they help minimizing the damage of a patient's healthy tissue. The planning of keyhole surgeries is based on medical imaging and an important factor that influences the surgeries' success. Due to the image reconstruction process, medical image data contains uncertainty that exacerbates the planning of a keyhole surgery. In this paper we present a visual workflow that helps clinicians to examine and compare different surgery paths as well as visualizing the patients' affected tissue. The analysis is based on the concept of hierarchical image semantics, that segment the underlying image data with respect to the input images' uncertainty and the users understanding of tissue composition. Users can define arbitrary surgery paths that they need to investigate further. The defined paths can be queried by a rating function to identify paths that fulfill user-defined properties. The workflow allows a visual inspection of the affected tissues and its substructures. 
Therefore, the workflow includes a linked view system indicating the three-dimensional location of selected surgery paths as well as how these paths affect the patients tissue. To show the effectiveness of the presented approach, we applied it to the planning of a keyhole surgery of a brain tumor removal and a kneecap surgery. }, } |
2018 | Journal | Mahsa Dadar, Vladimir S. Fonov, D. Louis Collins (2018). A comparison of publicly available linear MRI stereotaxic registration techniques. NeuroImage, 174, pp. 191–200. (link) (bib) x @article{RN875, year = { 2018 }, volume = { 174 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85044150830{\&}doi=10.1016{\%}2Fj.neuroimage.2018.03.025{\&}partnerID=40{\&}md5=a707203bafd861c84c210833597b8ad5 }, type = { Journal Article }, title = { A comparison of publicly available linear MRI stereotaxic registration techniques }, pages = { 191--200 }, keywords = { Linear registration,MRI,Quality control }, journal = { NeuroImage }, issn = { 10959572 }, doi = { 10.1016/j.neuroimage.2018.03.025 }, author = { Dadar and Fonov and Collins }, abstract = { Introduction: Linear registration to a standard space is one of the major steps in processing and analyzing magnetic resonance images (MRIs) of the brain. Here we present an overview of linear stereotaxic MRI registration and compare the performance of 5 publicly available and extensively used linear registration techniques in medical image analysis. Methods: A set of 9693 T1-weighted MR images were obtained for testing from 4 datasets: ADNI, PREVENT-AD, PPMI, and HCP, two of which have multi-center and multi-scanner data and three of which have longitudinal data. Each individual native image was linearly registered to the MNI ICBM152 average template using five versions of MRITOTAL from MINC tools, FLIRT from FSL, two versions of Elastix, spm{\_}affreg from SPM, and ANTs linear registration techniques. Quality control (QC) images were generated from the registered volumes and viewed by an expert rater to assess the quality of the registrations. The QC image contained 60 sub-images (20 of each of axial, sagittal, and coronal views at different levels throughout the brain) overlaid with contours of the ICBM152 template, enabling the expert rater to label the registration as acceptable or unacceptable. 
The performance of the registration techniques was then compared across different datasets. In addition, the effect of image noise, intensity non-uniformity, age, head size, and atrophy on the performance of the techniques was investigated by comparing differences between age, scaling factor, ventricle volume, brain volume, and white matter hyperintensity (WMH) volumes between passed and failed cases for each method. Results: The average registration failure rate among all datasets was 27.41{\%}, 27.14{\%}, 12.74{\%}, 13.03{\%}, 0.44{\%} for the five versions of MRITOTAL techniques, 8.87{\%} for ANTs, 11.11{\%} for FSL, 12.35{\%} for Elastix Affine, 24.40{\%} for Elastix Similarity, and 30.66{\%} for SPM. There were significant effects of signal to noise ratio, image intensity non-uniformity estimates, as well as age, head size, and atrophy related changes between passed and failed registrations. Conclusion: Our experiments show that the Revised BestLinReg had the best performance among the evaluated registration techniques while all techniques performed worse for images with higher levels of noise and non-uniformity as well as atrophy related changes. }, } |
2018 | Journal | Kuang Che Chang Chien, Han Yen Tu, Ching Huang Hsieh, Chau Jern Cheng, Chun Yen Chang (2018). Regional fringe analysis for improving depth measurement in phase-shifting fringe projection profilometry. Measurement Science and Technology, 29(1), pp. NA (link) (bib) x @article{RN885, year = { 2018 }, volume = { 29 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85039165127{\&}doi=10.1088{\%}2F1361-6501{\%}2Faa94a5{\&}partnerID=40{\&}md5=2a3721b607941509869568960082b198 }, type = { Journal Article }, title = { Regional fringe analysis for improving depth measurement in phase-shifting fringe projection profilometry }, number = { 1 }, keywords = { Phase-shifting,classifcation,depth measurement,fringe projection proflometry,fuzzy analysis,region-based segmentation }, journal = { Measurement Science and Technology }, issn = { 13616501 }, doi = { 10.1088/1361-6501/aa94a5 }, author = { Chien and Tu and Hsieh and Cheng and Chang }, abstract = { This study proposes a regional fringe analysis (RFA) method to detect the regions of a target object in captured shifted images to improve depth measurement in phase-shifting fringe projection profilometry (PS-FPP). In the RFA method, region-based segmentation is exploited to segment the de-fringed image of a target object, and a multi-level fuzzy-based classification with five presented features is used to analyze and discriminate the regions of an object from the segmented regions, which were associated with explicit fringe information. Then, in the experiment, the performance of the proposed method is tested and evaluated on 26 test cases made of five types of materials. The qualitative and quantitative results demonstrate that the proposed RFA method can effectively detect the desired regions of an object to improve depth measurement in the PS-FPP system. }, } |
2018 | Journal | Andre Bongers, Eric Hau, Han Shen (2018). Short Diffusion Time Diffusion-Weighted Imaging With Oscillating Gradient Preparation as an Early Magnetic Resonance Imaging Biomarker for Radiation Therapy Response Monitoring in Glioblastoma: A Preclinical Feasibility Study. International Journal of Radiation Oncology Biology Physics, 102(4), pp. 1014–1023. (link) (bib) x @article{RN865, year = { 2018 }, volume = { 102 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85041512823{\&}doi=10.1016{\%}2Fj.ijrobp.2017.12.280{\&}partnerID=40{\&}md5=acf090aa80c436bcbeed0e2695f55e04 }, type = { Journal Article }, title = { Short Diffusion Time Diffusion-Weighted Imaging With Oscillating Gradient Preparation as an Early Magnetic Resonance Imaging Biomarker for Radiation Therapy Response Monitoring in Glioblastoma: A Preclinical Feasibility Study }, pages = { 1014--1023 }, number = { 4 }, journal = { International Journal of Radiation Oncology Biology Physics }, issn = { 1879355X }, doi = { 10.1016/j.ijrobp.2017.12.280 }, author = { Bongers and Hau and Shen }, abstract = { Purpose: To investigate a novel alternative diffusion-weighted imaging (DWI) approach using oscillating gradients preparation (OGSE) to obtain much shorter effective diffusion times ($\Delta$eff) for tumor response monitoring by apparent diffusion coefficient (ADC) mapping in a glioblastoma mouse model. Methods and Materials: Twenty-four BALB/c nude mice inoculated with U87 glioblastoma cells were randomized into a control group and an irradiation group, which underwent a 15-day fractioned radiation therapy (RT) course with 2 Gy/d. Therapy response was assessed by mapping of ADCs at 6 time points using an in-house implementation of a cos-OGSE DWI sequence with $\Delta$eff = 1.25 ms and compared with a standard pulsed gradient DWI protocol (PGSE) with typical clinical diffusion time $\Delta$eff = 18 ms. 
Longitudinal ADC changes in tumor and contralateral white matter (WM) were statistically assessed using repeated-measures analysis of variance and post hoc (Sidak) testing. Results: On short $\Delta$eff OGSE maps tumor ADC was generally 30{\%}-50{\%} higher than in surrounding WM. Areas correlated well with histology. Tumor identification was generally more difficult on PGSE maps owing to nonsignificant WM/tumor contrast. During RT, OGSE maps also showed significant tumor ADC increase (approximately 15{\%}) in response to radiation, consistently seen after 14-Gy RT dose. The clinical reference (PGSE) showed lower sensitivity to radiation changes, and no significant response across the radiation group and time course could be detected. Conclusion: Our short $\Delta$eff DWI method using OGSE better reflected histologically defined tumor areas and enabled more consistent and earlier detection of microstructural radiation changes than conventional methods. Oscillating gradients preparation offers significant potential as a robust microstructural RT response biomarker, potentially helping to shift important therapy decisions to earlier stages in the RT time course. }, } |
2018 | Journal | Richard Beare, Bradley Lowekamp, Ziv Yaniv (2018). Image segmentation, registration and characterization in R with simpleITK. Journal of Statistical Software, 86(8), pp. 1–35. (link) (bib) x @article{RN819, year = { 2018 }, volume = { 86 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Image segmentation, registration and characterization in R with simpleITK }, pages = { 1--35 }, number = { 8 }, keywords = { Image processing,Image registration,Image segmentation,Medical imaging,R }, journal = { Journal of Statistical Software }, issn = { 15487660 }, doi = { 10.18637/jss.v086.i08 }, author = { Beare and Lowekamp and Yaniv }, abstract = { Many types of medical and scientific experiments acquire raw data in the form of images. Various forms of image processing and image analysis are used to transform the raw image data into quantitative measures that are the basis of subsequent statistical analysis. In this article we describe the SimpleITK R package. SimpleITK is a simplified interface to the insight segmentation and registration toolkit (ITK). ITK is an open source C++ toolkit that has been actively developed over the past 18 years and is widely used by the medical image analysis community. SimpleITK provides packages for many interpreter environments, including R. Currently, it includes several hundred classes for image analysis including a wide range of image input and output, filtering operations, and higher level components for segmentation and registration. Using SimpleITK, development of complex combinations of image and statistical analysis procedures is feasible. This article includes several examples of computational image analysis tasks implemented using SimpleITK, including spherical marker localization, multi-modal image registration, segmentation evaluation, and cell image analysis. }, } |
2018 | Journal | Tobias N. Andersen, Tron A. Darvann, Shumei Murakami, Per Larsen, Yurie Senda, Anders Bilde, Christian V. Buchwald, Sven Kreiborg (2018). Accuracy and precision of manual segmentation of the maxillary sinus in MR images - A method study. British Journal of Radiology, 91(1085), pp. NA (link) (bib) x @article{RN886, year = { 2018 }, volume = { 91 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045980566{\&}doi=10.1259{\%}2Fbjr.20170663{\&}partnerID=40{\&}md5=fb09baba9be82130126233860365e1f2 }, type = { Journal Article }, title = { Accuracy and precision of manual segmentation of the maxillary sinus in MR images - A method study }, number = { 1085 }, journal = { British Journal of Radiology }, issn = { 00071285 }, doi = { 10.1259/bjr.20170663 }, author = { Andersen and Darvann and Murakami and Larsen and Senda and Bilde and Buchwald and Kreiborg }, abstract = { Objective: To assess the accuracy and precision of segmentation of the maxillary sinus in MR images to evaluate the potential usefulness of this modality in longitudinal studies of sinus development. Methods: A total of 15 healthy subjects who had been both craniofacial CT and MR scanned were included and the 30 maxillary sinus volumes were evaluated using segmentation. Two of the authors did segmentation of MRI and one of these authors did double segmentation. Agreement in results between CT and MRI as well as inter- and intraexaminer errors were evaluated by statistical and three-dimensional analysis. Results: The intraclass correlation coefficient for volume measurements for both method error, inter- and intraexaminer agreement were {\textgreater} 0.9 [maximal 95{\%} confidence interval of 0.989-0.997, p {\textless} 0.001] and the limit of agreement for all parameters were {\textless} 5.1{\%}. 
Segmentation errors were quantified in terms of overlap [Dice Coefficient (DICE) {\textgreater} 0.9 = excellent agreement] and border distance [95{\%} percentile Hausdorff Distance (HD) {\textless} 2 mm = acceptable agreement]. The results were replicable and not influenced by systematic errors. Conclusion: We found a high accuracy and precision of manual segmentation of the maxillary sinus in MR images. The largest mean errors were found close to the orbit and the teeth. Advances in knowledge: MRI can be used for 3D models of the paranasal sinuses with equally good results as CT and allows longitudinal follow-up of sinus development. }, } |
2018 | Journal | Richa Agarwal, Oliver Diaz, Xavier Lladó, Albert Gubern-Mérida, Joan C. Vilanova, Robert Martí (2018). Lesion Segmentation in Automated 3D Breast Ultrasound: Volumetric Analysis. Ultrasonic Imaging, 40(2), pp. 97–112. (link) (bib) x @article{RN788, year = { 2018 }, volume = { 40 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Lesion Segmentation in Automated 3D Breast Ultrasound: Volumetric Analysis }, pages = { 97--112 }, number = { 2 }, keywords = { ABUS (Automated Breast Ultrasound),breast cancer,lesion segmentation,temporal,volumetric analysis,watershed }, journal = { Ultrasonic Imaging }, issn = { 01617346 }, doi = { 10.1177/0161734617737733 }, author = { Agarwal and Diaz and Llad{\'{o}} and Gubern-M{\'{e}}rida and Vilanova and Mart{\'{i}} }, abstract = { Mammography is the gold standard screening technique in breast cancer, but it has some limitations for women with dense breasts. In such cases, sonography is usually recommended as an additional imaging technique. A traditional sonogram produces a two-dimensional (2D) visualization of the breast and is highly operator dependent. Automated breast ultrasound (ABUS) has also been proposed to produce a full 3D scan of the breast automatically with reduced operator dependency, facilitating double reading and comparison with past exams. When using ABUS, lesion segmentation and tracking changes over time are challenging tasks, as the three-dimensional (3D) nature of the images makes the analysis difficult and tedious for radiologists. The goal of this work is to develop a semi-automatic framework for breast lesion segmentation in ABUS volumes which is based on the Watershed algorithm. The effect of different de-noising methods on segmentation is studied showing a significant impact (p {\textless} 0.05) on the performance using a dataset of 28 temporal pairs resulting in a total of 56 ABUS volumes. 
The volumetric analysis is also used to evaluate the performance of the developed framework. A mean Dice Similarity Coefficient of 0.69 ± 0.11 with a mean False Positive ratio 0.35 ± 0.14 has been obtained. The Pearson correlation coefficient between the segmented volumes and the corresponding ground truth volumes is r2 = 0.960 (p = 0.05). Similar analysis, performed on 28 temporal (prior and current) pairs, resulted in a good correlation coefficient r2 = 0.967 (p {\textless} 0.05) for prior and r2 = 0.956 (p {\textless} 0.05) for current cases. The developed framework showed prospects to help radiologists to perform an assessment of ABUS lesion volumes, as well as to quantify volumetric changes during lesions diagnosis and follow-up. }, } |
2018 | In Collection | David Kügler, Martin Andrade Jastrzebski, Anirban Mukhopadhyay (2018). Instrument pose estimation using registration for otobasis surgery. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 105–114. (link) (bib) x @incollection{Kuegler2018, year = { 2018 }, volume = { 10883 LNCS }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85048674870{\&}doi=10.1007{\%}2F978-3-319-92258-4{\_}10{\&}partnerID=40{\&}md5=e5ef96e3629df844b717127296fb9d5c }, type = { Serial }, title = { Instrument pose estimation using registration for otobasis surgery }, pages = { 105--114 }, issn = { 16113349 }, isbn = { 9783319922577 }, doi = { 10.1007/978-3-319-92258-4_10 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { K{\"{u}}gler and Jastrzebski and Mukhopadhyay }, abstract = { Clinical outcome of several Minimally Invasive Surgeries (MIS) heavily depend on the accuracy of intraoperative pose estimation of the surgical instrument from intraoperative x-rays. The estimation consists of finding the tool in a given set of x-rays and extracting the necessary data to recreate the tool's pose for further navigation - resulting in severe consequences of incorrect estimation. Though state-of-the-art MIS literature has exploited image registration as a tool for instrument pose estimation, lack of practical considerations in previous study design render their conclusion ineffective from a clinical standpoint. One major issue of such a study is the lack of Ground Truth in clinical data -as there are no direct ways of measuring the ground truth pose and indirect estimation accumulates error. 
A systematic way to overcome this problem is to generate Digitally Reconstructed Radiographs (DRR), however, such procedure generates data which are free from measuring errors (e.g. noise, number of projections), resulting claims of registration performance inconclusive. Generalization of registration performance across different instruments with different Degrees of Freedom (DoF) has not been studied as well. By marrying a rigorous study design involving several clinical scenarios with, for example, several optimizers, metrics and others parameters for image registration, this paper bridges this gap effectively. Although the pose estimation error scales inversely with instrument size, we show image registration generalizes well for different instruments and DoF. In particular, it is shown that increasing the number of x-ray projections can reduce the pose estimation error significantly across instruments - which might lead to the acquisition of several x-rays for pose estimation in a clinical workflow. }, } |
2018 | In Collection | Kaia Achim, Hernando Martínez Vergara, Jean Baptiste Pettit (2018). Spatial transcriptomics: Constructing a single-cell resolution transcriptome-wide expression atlas. In Methods in Molecular Biology, pp. 111–125. (link) (bib) x @incollection{Achim2018, year = { 2018 }, volume = { 1649 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85033693516{\&}doi=10.1007{\%}2F978-1-4939-7213-5{\_}7{\&}partnerID=40{\&}md5=697ad0e6f4701bd81a55352e26ae2fbb }, type = { Serial }, title = { Spatial transcriptomics: Constructing a single-cell resolution transcriptome-wide expression atlas }, pages = { 111--125 }, keywords = { Gene expression,Image registration,Single-cell mRNA-seq,Spatial transcriptomics }, issn = { 10643745 }, doi = { 10.1007/978-1-4939-7213-5_7 }, booktitle = { Methods in Molecular Biology }, author = { Achim and Vergara and Pettit }, abstract = { The method described here aims at the construction of a single-cell resolution gene expression atlas for an animal or tissue, combining in situ hybridization (ISH) and single-cell mRNA-sequencing (scRNAseq). A high resolution and medium-coverage gene expression atlas of an animal or tissue of interest can be obtained by performing a series of ISH experiments, followed by a process of image registration and gene expression averaging. Using the overlapping fraction of the genes, concomitantly obtained scRNAseq data can be fitted into the spatial context of the gene expression atlas, complementing the coverage by genes. }, } |
2018 | In Collection | Ewelina Świątek-Najwer, Magdalena Żuk, Marcin Majak, Michał Popek (2018). The rigid registration of CT and scanner dataset for computer aided surgery. In Lecture Notes in Computational Vision and Biomechanics, pp. 345–353. (link) (bib) x @incollection{RN880, year = { 2018 }, volume = { 27 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85032361988{\&}doi=10.1007{\%}2F978-3-319-68195-5{\_}38{\&}partnerID=40{\&}md5=f66e1b53e39dbae26c9578f6ea426d78 }, type = { Serial }, title = { The rigid registration of CT and scanner dataset for computer aided surgery }, publisher = { Springer Netherlands }, pages = { 345--353 }, issn = { 22129413 }, isbn = { 22129391 (ISSN) }, doi = { 10.1007/978-3-319-68195-5_38 }, booktitle = { Lecture Notes in Computational Vision and Biomechanics }, author = { {\'{S}}wi{\c{a}}tek-Najwer and {\.{Z}}uk and Majak and Popek }, abstract = { The main aim of this work was to perform rigid registration of Computed Tomography (CT) and scanner datasets. The surgeon applies CT and scanner datasets in computer aided surgery and performs registration in order to visualize the location of surgical instrument on screen. It is well known fact that the registration procedure is crucial for efficient computer aiding of surgery. Selected algorithm should take into account types of datasets, required accuracy and time of calculations. The algorithms are classified basing on the various criteria: e.g. precision (coarse and fine registration), types of pointset (set of pair of corresponding points – so called point-point method, unorganized sets of points – so called surface registration). The paper presents exemplary results of applying the following algorithms: Landmark Transform (point-point registration), two methods of uninitialized Iterative Closest Point type (surface registration) and a hybrid method. The evaluated factors were: distance error (mean, minimal and maximal value) and running time of algorithm. 
The algorithms were tested on various datasets: (1) two similar datasets from Computed Tomography (one is geometrically transformed), (2) Computed Tomography dataset and cloud of points recorded using 3D Artec Space Spider scanner. In the first case the mean error values equaled: 102.08 mm – 121.70 mm for uninitialized ICPs methods, 0.005 mm for Landmark Transform method, and 0.0003 mm for hybrid method. The slowest algorithms in our tests were ICPs methods, faster was hybrid algorithm, and the fastest was Landmark Transform method. In the second case the distance errors were evaluated in four selected points, and the smallest errors were: 23.21 mm for uninitialized ICPs method, 0.69 mm for Landmark Transform, 9.03 for hybrid method. All algorithms were relatively slow for these large datasets, the fastest was Landmark Transform. In the second part of research we analysed the Target Registration Error (TRE) for fused Computed Tomography and scanner-recorded dataset. The TRE values equaled 0.7 mm - 2.8 mm. The results of CT – scanner datasets registration highly depend on the similarity of sets, especially their overlapping, but also their resolutions and uniformities. }, } |
2018 | In Collection | Massimiliano Mercuri, Andrew J. Narracott, Dr Hose, Cemil Göksu (2018). An automatic method for aortic segmentation based on level-set methods using multiple seed points. In Lecture Notes in Computational Vision and Biomechanics, pp. 875–882. (link) (bib) x @incollection{RN881, year = { 2018 }, volume = { 27 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85032380517{\&}doi=10.1007{\%}2F978-3-319-68195-5{\_}95{\&}partnerID=40{\&}md5=332ed513ca2cfe55ca8c7c67df29dd9d }, type = { Serial }, title = { An automatic method for aortic segmentation based on level-set methods using multiple seed points }, publisher = { Springer Netherlands }, pages = { 875--882 }, keywords = { Automatic segmentation, Level-set method,Thoracic Aortic Aneurysm (TAA),Virtual aortic surgery planning }, issn = { 22129413 }, isbn = { 22129391 (ISSN) }, doi = { 10.1007/978-3-319-68195-5_95 }, booktitle = { Lecture Notes in Computational Vision and Biomechanics }, author = { Mercuri and Narracott and Hose and G{\"{o}}ksu }, abstract = { Thoracic Aortic Aneurysm (TAA) is an enlargement of the aortic lumen at chest level. An accurate assessment of the geometry of the enlarged vessel is crucial when planning vascular interventions. This study developed an automatic method to extract aortic geometry and supra-aortic vessels from computerized tomography (CT) images. The proposed method consists of a fast-marching level-set method for detection of the initial aortic region from multiple seed points automatically selected along the pre-extracted vessel centerline, and a level-set method for extraction of the detailed aortic geometry from the initial aortic region. The automatic method was implemented inside Endosize (Therenva, Rennes), a commercially available software used for planning minimally invasive techniques. The performance of the algorithm was compare with the existing Endosize segmentation method (based on the region growing approach). 
For this comparison a CT dataset from an open source data file system (Osirix Advanced Imaging in 3D, 2016) was used. Results showed that, whilst the segmentation time increased (956 s for the new method, 0.308 s for the existing one), the new method produced a more accurate aortic segmentation, particularly in the region of supra-aortic branches. Further work to examine the efficacy of the proposed method should include a statistical study of performance across many datasets. }, } |
2018 | In Collection | Maria H. Listewnik, Hanna Piwowarska-Bilska, Krzysztof Safranow, Jacek Iwanowski, Maria Laszczy\'nska, Maria Chosia, Marek Ostrowski, Bo\.zena Birkenfeld, Przemys\law Mazurek (2018). CT–SPECT analyzer - A tool for CT and SPECT data fusion and volumetric visualization. In M Choras, R S Choras, editor, Advances in Intelligent Systems and Computing, pp. 11–18. (link) (bib) x @incollection{RN879, year = { 2018 }, volume = { 681 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85031410415{\&}doi=10.1007{\%}2F978-3-319-68720-9{\_}2{\&}partnerID=40{\&}md5=333a82c9184f554ff151234b1617f13b }, type = { Serial }, title = { CT–SPECT analyzer - A tool for CT and SPECT data fusion and volumetric visualization }, publisher = { Springer Verlag }, pages = { 11--18 }, keywords = { CT,Data fusion,SPECT,Volumetric visualization }, issn = { 21945357 }, isbn = { 9783319687193 }, editor = { Choras, M and Choras, R S }, doi = { 10.1007/978-3-319-68720-9_2 }, booktitle = { Advances in Intelligent Systems and Computing }, author = { Listewnik and Piwowarska-Bilska and Safranow and Iwanowski and Laszczy{\'{n}}ska and Chosia and Ostrowski and Birkenfeld and Mazurek }, abstract = { Data fusion and specific visualization of CT and SPECT are important for diagnosis and research purposes. Selected problems are considered in the paper and are related to the developed CT–SPECT Analyzer software. Hierarchical mapping with SPECT priority for maximum value of rays is applied in this software. Three variants of color mappings are presented. Some practical aspects related to low quality of CT are considered also. The most promising is the rainbow gradient with gamma curve adjustment. }, } |
2018 | In Collection | Pieter Thomas Boonen, Nico Buls, Gert Van Gompel, Yannick De Brucker, Dimitri Aerden, Johan De Mey, Jef Vandemeulebroucke (2018). Automated quantification of blood flow velocity from time-resolved CT angiography. In S Lee, E Trucco, L Maier-Hein, S Moriconi, S Albarqouni, P Jannin, S Balocco, G Zahnd, D Mateus, Z Taylor, S Demirci, D Stoyanov, R Sznitman, A Martel, V Cheplygina, E Granger, L Duong, editor, Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 11–18. (link) (bib) x @incollection{RN959, year = { 2018 }, volume = { 11043 LNCS }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85055830741{\&}doi=10.1007{\%}2F978-3-030-01364-6{\_}2{\&}partnerID=40{\&}md5=9917ad93753ce090a308f5318500b9e3 }, type = { Serial }, title = { Automated quantification of blood flow velocity from time-resolved CT angiography }, publisher = { Springer Verlag }, pages = { 11--18 }, keywords = { Artery segmentation,Blood velocity,Lower extremities,Peripheral arterial disease,Time-resolved CTA }, issn = { 16113349 }, isbn = { 9783030013639 }, editor = { Lee, S and Trucco, E and Maier-Hein, L and Moriconi, S and Albarqouni, S and Jannin, P and Balocco, S and Zahnd, G and Mateus, D and Taylor, Z and Demirci, S and Stoyanov, D and Sznitman, R and Martel, A and Cheplygina, V and Granger, E and Duong, L }, doi = { 10.1007/978-3-030-01364-6_2 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Boonen and Buls and {Van Gompel} and {De Brucker} and Aerden and {De Mey} and Vandemeulebroucke }, abstract = { Contrast-enhanced computed tomography angiography (CE-CTA) provides valuable, non-invasive assessment of lower extremity peripheral arterial disease (PAD). 
The advent of wide beam CT scanners has enabled multiple CT acquisitions over the same structure at a high frame rate, facilitating time-resolved CTA acquisitions. In this study, we investigate the technical feasibility of automatically quantifying the bolus arrival time and blood velocity in the arteries below the knee from time-resolved CTA. Our approach is based on arterial segmentation and local estimation of the bolus arrival time. The results are compared to values obtained through manual reading of the datasets and show good agreement. Based on a small patient study, we explore initial utility of these quantitative measures for the diagnosis of lower extremity PAD. }, } |
2018 | In Collection | Ankur Biswas, Santi P. Maity, Paritosh Bhattacharya (2018). Optimal Geometric Active Contours: Application to Human Brain Segmentation. In J K Mandal, D Sinha, editor, Communications in Computer and Information Science, pp. 646–657. (link) (bib) x @incollection{RN975, year = { 2018 }, volume = { 836 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053181661{\&}doi=10.1007{\%}2F978-981-13-1343-1{\_}53{\&}partnerID=40{\&}md5=65a97c1936282996fbe6ce8dfeb09631 }, type = { Serial }, title = { Optimal Geometric Active Contours: Application to Human Brain Segmentation }, publisher = { Springer Verlag }, pages = { 646--657 }, keywords = { Dice similarity coefficient,Geometric active contour,Jaccard similarity coefficient,Level set,Magnetic resonance imaging (MRI) }, issn = { 18650929 }, isbn = { 9789811313424 }, editor = { Mandal, J K and Sinha, D }, doi = { 10.1007/978-981-13-1343-1_53 }, booktitle = { Communications in Computer and Information Science }, author = { Biswas and Maity and Bhattacharya }, abstract = { An efficient Segmentation of lateral ventricles plays a vital role in quantitatively analyzing the global and regional information in magnetic resonance imaging (MRI) of human brain. In this paper, a semi automatic segmentation methodology to support the study of efficient pathologies of the lateral ventricles along with white matter and gray matter of human brain is proposed. The segmentation is executed using an optimal geometric active contour with level set methods. A nominal anatomical knowledge is incorporated into the methodology in order to choose the most probable surfaces of the lateral ventricles of human brain, even if they are disconnected, and to eliminate addition of non ventricle cerebrospinal fluid (CSF) regions. The proposed segmentation method is applied to multislice MRI data and compared with region growing algorithms. 
The results Dice similarity coefficient 0.955, Jaccard similarity coefficient 0.815 demonstrates the reliability and efficiency. }, } |
2018 | In Conf. Proceedings | M. Schiwarth, J. Weissenböck, B. Plank, B. Fröhler, C. Heinzl, J. Kastner (2018). Visual analysis of void and reinforcement characteristics in X-ray computed tomography dataset series of fiber-reinforced polymers. In IOP Conference Series: Materials Science and Engineering, pp. NA Bristol. (link) (bib) x @inproceedings{Schiwarth2018, year = { 2018 }, volume = { 406 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85054221856{\&}doi=10.1088{\%}2F1757-899X{\%}2F406{\%}2F1{\%}2F012014{\&}partnerID=40{\&}md5=e1757458a87eb396d4743ad19f658d5a }, type = { Book Section }, title = { Visual analysis of void and reinforcement characteristics in X-ray computed tomography dataset series of fiber-reinforced polymers }, series = { IOP Conference Series-Materials Science and Engineering }, publisher = { Iop Publishing Ltd }, number = { 1 }, issn = { 1757899X }, doi = { 10.1088/1757-899X/406/1/012014 }, booktitle = { IOP Conference Series: Materials Science and Engineering }, author = { Schiwarth and Weissenb{\"{o}}ck and Plank and Fr{\"{o}}hler and Heinzl and Kastner }, address = { Bristol }, abstract = { Fiber-reinforced polymers (FRPs) are of great importance in various industries because of their superior properties as compared to conventional materials, their versatile processing, and their wide application possibilities. To fulfil the high-quality standards in its respective applications, industrial 3D X-ray computed tomography (XCT) is increasingly used. It enables an accurate, non-destructive characterization of material features such as inclusions, voids, fibers, or other reinforcements, which is of core importance for material and component design. 
In this work we present FeatureAnalyzer, a generalization of the previously introduced PorosityAnalyzer tool, which allows to analyze dataset series as generated for exploring the parameter space of image processing workflows (including pre-filtering, segmentation, postprocessing or quantification) applied to XCT datasets of fiber-reinforced polymers. With a scatter plot matrix (SPLOM), the characteristics of the features of interest may be examined in more detail regarding the used input and output parameters. Individual results may be selected in the SPLOM and analyzed using 2D slice views and 3D renderings. For this work, three different samples (sample {\#}1 - {\#}3) were scanned by means of XCT and were evaluated by using FeatureAnalyzer. The samples {\#}1 and {\#}2 have a porosity value of approximately 1.7 vol. {\%}. By using the FeatureAnalyzer in combination with SPLOM, the threshold parameters could be analyzed before the over-segmentation of voids occurs. Additional evaluations by parallel coordinates clearly show, that sample {\#}2 has a higher number of spherical voids in the center of the specimen compared to sample {\#}1. By evaluating the resin content of sample {\#}3, the individual layer thickness could be measured. The source code of the tool is available on Github: https://github.com/3dct/open-iA/ }, } |
2018 | In Conf. Proceedings | James Fishbaugh, Laura Pascal, Luke Fischer, Tung Nguyen, Celso Boen, Joao Goncalves, Guido Gerig, Beatriz Paniagua (2018). Estimating shape correspondence for populations of objects with complex topology. In Proceedings - International Symposium on Biomedical Imaging, pp. 1010–1013, New York. (link) (bib) x @inproceedings{Fishbaugh2018a, year = { 2018 }, volume = { 2018-April }, url = { https://ieeexplore.ieee.org/document/8363742/ }, type = { Book Section }, title = { Estimating shape correspondence for populations of objects with complex topology }, series = { IEEE International Symposium on Biomedical Imaging }, publisher = { Ieee }, pages = { 1010--1013 }, month = { apr }, keywords = { Complex topology,Diffeomorphic shape registration,Orthognathic surgery,Statistical shape analysis,Statistical shape modeling }, issn = { 19458452 }, isbn = { 9781538636367 }, doi = { 10.1109/ISBI.2018.8363742 }, booktitle = { Proceedings - International Symposium on Biomedical Imaging }, author = { Fishbaugh and Pascal and Fischer and Nguyen and Boen and Goncalves and Gerig and Paniagua }, address = { New York }, abstract = { Statistical shape analysis captures the geometric properties of a given set of shapes, obtained from medical images, by means of statistical methods. Orthognathic surgery is a type of craniofacial surgery that is aimed at correcting severe skeletal deformities in the mandible and maxilla. Methods assuming spherical topology cannot represent the class of anatomical structures exhibiting complex geometries and topologies, including the mandible. In this paper we propose methodology based on non-rigid deformations of 3D geometries to be applied to objects with thin, complex structures. We are able to accurately and quantitatively characterize bone healing at the osteotomy site as well as condylar remodeling for three orthognathic surgery cases, demonstrating the effectiveness of the proposed methodology. }, } |
2018 | In Conf. Proceedings | Yannick Suter, Christian Rummel, Roland Wiest, Mauricio Reyes (2018). Fast and uncertainty-aware cerebral cortex morphometry estimation using random forest regression. In Proceedings - International Symposium on Biomedical Imaging, pp. 1052–1055, New York. (link) (bib) x @inproceedings{Suter2018, year = { 2018 }, volume = { 2018-April }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85048099086{\&}doi=10.1109{\%}2FISBI.2018.8363752{\&}partnerID=40{\&}md5=e22ed68e255aa6c6af3b32100df7276a }, type = { Book Section }, title = { Fast and uncertainty-aware cerebral cortex morphometry estimation using random forest regression }, series = { IEEE International Symposium on Biomedical Imaging }, publisher = { Ieee }, pages = { 1052--1055 }, keywords = { Cortical curvature,Cortical thickness,Human brain morphometry,Machine learning,Random forest regression }, issn = { 19458452 }, isbn = { 9781538636367 }, doi = { 10.1109/ISBI.2018.8363752 }, booktitle = { Proceedings - International Symposium on Biomedical Imaging }, author = { Suter and Rummel and Wiest and Reyes }, address = { New York }, abstract = { The cortical thickness and curvature of the human brain have proven to be valuable markers to detect and monitor neurodegenerative diseases [1]. Since the computational burden of currently available tools for brain morphometry is very high, this analysis often is only used for retrospective studies and not routinely in the clinics. A first attempt at a clinical use of cortical morphology is reported in [2]. We present an experiment for fast morphometry estimations using Random Forest (RF) regression [3] directly from MR imaging data. An uncertainty-aware voxel-wise, parcellation-wise, and multioutput model was built to estimate the thickness and mean curvature of the human cerebral cortex in 15 minutes instead of many hours for mesh-based tools. 
Preliminary results on a healthy controls database with 315 subjects show a substantial bias for the voxel-wise prediction, but high scan-rescan robustness, the proposed multi-output-parcellation prediction demonstrates the feasibility of the approach. }, } |
2018 | In Conf. Proceedings | Ruben Medina, Sebastian Bautista, Villie Morocho (2018). Accuracy of connected confidence left ventricle segmentation in 3-D multi-slice computerized tomography images. In 2017 IEEE 2nd Ecuador Technical Chapters Meeting, ETCM 2017, pp. 1–6, New York. (link) (bib) x @inproceedings{Medina2017, year = { 2018 }, volume = { 2017-Janua }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045738426{\&}doi=10.1109{\%}2FETCM.2017.8247499{\&}partnerID=40{\&}md5=31a12ff7f15e6264e843b9c8cc503a7c }, type = { Book }, title = { Accuracy of connected confidence left ventricle segmentation in 3-D multi-slice computerized tomography images }, series = { 2017 Ieee Second Ecuador Technical Chapters Meeting }, publisher = { Ieee }, pages = { 1--6 }, keywords = { Connected confidence,Left ventricle segmentation,Multi-slice computerized tomography,Software platform }, isbn = { 9781538638941 }, doi = { 10.1109/ETCM.2017.8247499 }, booktitle = { 2017 IEEE 2nd Ecuador Technical Chapters Meeting, ETCM 2017 }, author = { Medina and Bautista and Morocho }, address = { New York }, abstract = { Cardiovascular diseases are the main cause of death in the World. This fact has motivated different actions for prevention, diagnosis and monitoring of cardiovascular diseases. In this work, the accuracy of a connected confidence left ventricle segmentation method is performed. This task is accomplished using a software platform for left ventricle segmentation of 3-D cardiac Multi-Slice Computerized Tomography (MSCT) images that is also described. The software platform has as a goal performing research about efficient methods for cardiac image segmentation and quantification. The accuracy assessment of the segmentation method is performed by comparing the estimated segmentation with respect to segmentations manually traced by cardiologists. 
Results show that the segmentation method provides Dice Similarity coefficients higher than 0.90 with low computational cost. The obtained segmentation is able to include within the left ventricular lumen the papillary trabeculae muscles, enabling further accurate estimation of the left ventricular mass. }, } |
2018 | In Conf. Proceedings | Roman Grothausmann, Christian Mühlfeld, Matthias Ochs, Lars Knudsen (2018). Shape and Facet Analyses of Alveolar Airspaces of the Lung. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 49–64. (bib) x @inproceedings{Grothausmann2018, year = { 2018 }, volume = { 11167 LNCS }, title = { Shape and Facet Analyses of Alveolar Airspaces of the Lung }, publisher = { Springer Verlag }, pages = { 49--64 }, issn = { 16113349 }, isbn = { 9783030047467 }, doi = { 10.1007/978-3-030-04747-4_5 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Grothausmann and M{\"{u}}hlfeld and Ochs and Knudsen }, abstract = { Changes in lung volume during the breathing cycle and also lung diseases are likely to deform even the smallest airspace units, the alveoli. This study reports general ideas to investigate such changes with 3D digital image processing. It comprises morphological characterizations like volume and surface, an evaluation of the angle distribution between facets formed by the septal walls, the number of neighboring alveoli and a shape analysis of the alveolar airspace. The software used is open-source and custom programs are available at: http://github.com/romangrothausmann/. }, } |
2018 | In Conf. Proceedings | Beatriz Paniagua, Hina Shah, Pablo Hernandez-Cerdan, Francois Budin, Deepak Chittajallu, Rick Walter, Andre Mol, Asma Khan, Jean-Baptiste Vimort (2018). Automatic quantification framework to detect cracks in teeth. In Medical Imaging 2018: Biomedical Applications in Molecular, Structural, and Functional Imaging, pp. 55. (link) (bib) x @inproceedings{Paniagua2018, year = { 2018 }, volume = { 10578 }, url = { https://www.spiedigitallibrary.org/conference-proceedings-of-spie/10578/2293603/Automatic-quantification-framework-to-detect-cracks-in-teeth/10.1117/12.2293603.full }, title = { Automatic quantification framework to detect cracks in teeth }, publisher = { SPIE }, pages = { 55 }, month = { mar }, issn = { 0277-786X }, isbn = { 9781510616455 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2293603 }, booktitle = { Medical Imaging 2018: Biomedical Applications in Molecular, Structural, and Functional Imaging }, author = { Paniagua and Shah and Hernandez-Cerdan and Budin and Chittajallu and Walter and Mol and Khan and Vimort }, abstract = { Studies show that cracked teeth are the third most common cause for tooth loss in industrialized countries. If detected early and accurately, patients can retain their teeth for a longer time. Most cracks are not detected early because of the discontinuous symptoms and lack of good diagnostic tools. Currently used imaging modalities like Cone Beam Computed Tomography (CBCT) and intraoral radiography often have low sensitivity and do not show cracks clearly. This paper introduces a novel method that can detect, quantify, and localize cracks automatically in high resolution CBCT (hr-CBCT) scans of teeth using steerable wavelets and learning methods. These initial results were created using hr-CBCT scans of a set of healthy teeth and of teeth with simulated longitudinal cracks. The cracks were simulated using multiple orientations. 
The crack detection was trained on the most significant wavelet coefficients at each scale using a bagged classifier of Support Vector Machines. Our results show high discriminative specificity and sensitivity of this method. The framework aims to be automatic, reproducible, and open-source. Future work will focus on the clinical validation of the proposed techniques on different types of cracks ex-vivo. We believe that this work will ultimately lead to improved tracking and detection of cracks allowing for longer lasting healthy teeth. }, } |
2018 | In Conf. Proceedings | Marek Wodzinski, Andrzej Skalski (2018). Rigid Registration Method for Medical Volumes with Large Deformations and Missing Data. In International Conference on Systems, Signals, and Image Processing, pp. NA (link) (bib) x @inproceedings{RN870, year = { 2018 }, volume = { 2018-June }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053151071{\&}doi=10.1109{\%}2FIWSSIP.2018.8439679{\&}partnerID=40{\&}md5=c8fc60e15ec9db845e0390f82b94df27 }, type = { Conference Proceedings }, title = { Rigid Registration Method for Medical Volumes with Large Deformations and Missing Data }, publisher = { IEEE Computer Society }, keywords = { Image Registration,Initial Alignment,Medical Imaging,Rigid Registration }, issn = { 21578702 }, isbn = { 9781538669792 }, editor = { [object Object],[object Object],[object Object] }, doi = { 10.1109/IWSSIP.2018.8439679 }, booktitle = { International Conference on Systems, Signals, and Image Processing }, author = { Wodzinski and Skalski }, abstract = { A rigid registration is a crucial initial step for a correct deformable medical image registration. In this work, we propose rigid registration method resistant to large deformations and missing data. The proposed method is based on the bones segmentation, feature matching and outliers elimination inspired by traditional computer vision approach. The method is compared to other state-of-the-art algorithms, the iterative closest point and intensity-based registration using widely available dataset. The proposed algorithm does not fail into local minima and reconstructs correct deformations for average vector length greater than 150 mm and data overlap ratio less than 50{\%}, where currently applied methods fail. The algorithm is evaluated using angle and magnitude errors between corresponding deformation vectors, Hausdorff distance between bone segmentations and resistance to fail into local minima. }, } |
2018 | In Conf. Proceedings | Jiayu Sun, Shekhar S. Chandra (2018). SPIFFY: A simpler image viewer for medical imaging. In Proceedings of 2018 IEEE 4th Information Technology and Mechatronics Engineering Conference, ITOEC 2018, pp. 297–301. (link) (bib) x @inproceedings{RN864, year = { 2018 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85068362296{\&}doi=10.1109{\%}2FITOEC.2018.8740656{\&}partnerID=40{\&}md5=1c3ff161aa23226e508668105edd9d2f }, type = { Conference Proceedings }, title = { SPIFFY: A simpler image viewer for medical imaging }, publisher = { Institute of Electrical and Electronics Engineers Inc. }, pages = { 297--301 }, keywords = { Cognitive walkthrough evaluation,Human-computer interaction,Medical imaging,Visualizing software }, isbn = { 9781538653739 }, editor = { [object Object] }, doi = { 10.1109/ITOEC.2018.8740656 }, booktitle = { Proceedings of 2018 IEEE 4th Information Technology and Mechatronics Engineering Conference, ITOEC 2018 }, author = { Sun and Chandra }, abstract = { Medical imaging visualization technology takes a significant role in the medical community. With the assistance of medical imaging visualization applications, huge convenience has been brought into clinical diagnosis, monitoring, and treatment. It allows doctors and researchers to see inside the human body, to identify medical problems, and to diagnose diseases. This article presents a lightweight, fast and user-friendly image viewer for medical imaging called SPIFFY. Some developing methodologies with the integration of VTK, ITK, and Qt will be presented in this article. Besides, the minimalist user interface(UI) design of SPIFFY with an application of Human-Computer Interaction(HCI) psychology principles will also be introduced. Moreover, this article will identify the benefits provided by SPIFFY and present a benchmark against some existing medical visualization applications. 
Experiments using cognitive walkthrough evaluation shows that SPIFFY provides both high effectiveness and efficiency. }, } |
2018 | In Conf. Proceedings | S. Rosati, C. M. Gianfreda, G. Balestra, V. Giannini, S. Mazzetti, D. Regge (2018). Radiomics to predict response to neoadjuvant chemotherapy in rectal cancer: Influence of simultaneous feature selection and classifier optimization. In 2018 IEEE Life Sciences Conference, LSC 2018, pp. 65–68. (link) (bib) x @inproceedings{RN863, year = { 2018 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85060201982{\&}doi=10.1109{\%}2FLSC.2018.8572194{\&}partnerID=40{\&}md5=2339615f5270587ffef9440d34bd5ca9 }, type = { Conference Proceedings }, title = { Radiomics to predict response to neoadjuvant chemotherapy in rectal cancer: Influence of simultaneous feature selection and classifier optimization }, publisher = { Institute of Electrical and Electronics Engineers Inc. }, pages = { 65--68 }, keywords = { Feature selection,Genetic algorithms,Rectal cancer,Response to chemoradiotherapy,SVM optimization }, isbn = { 9781538667095 }, doi = { 10.1109/LSC.2018.8572194 }, booktitle = { 2018 IEEE Life Sciences Conference, LSC 2018 }, author = { Rosati and Gianfreda and Balestra and Giannini and Mazzetti and Regge }, abstract = { According to the guidelines, patients with locally advanced colorectal cancer undergo neoadjuvant chemotherapy. However, response to therapy is reached only up to 30{\%} of cases. Therefore, it would be important to predict response to therapy before treatment. In this study, we demonstrated that the simultaneous optimization of feature subset and classifier parameters on different imaging datasets (T2w, DWI and PET) could improve classification performance. On a dataset of 51 patients (21 responders, 30 non responders), we obtained an accuracy of 90{\%}, 84{\%} and 76{\%} using three optimized SVM classifiers fed with selected features from PET, T2w and ADC images, respectively. }, } |
2018 | In Conf. Proceedings | Pedro Pablo Cespedes Sanchez, Horacio Legal Ayala, Diego Pinto Roa, Gabriel Alberto Gimenez, Lorenzo Lopez, Jose Luis Vazquez Noguera (2018). Intrapatient multimodal medical image registration of brain CT-MRI 3D: an approach based on metaheuristics. In 14th International Symposium on Medical Information Processing and Analysis, SIPAIM 2018, pp. 8. (link) (bib) x @inproceedings{RN883, year = { 2018 }, volume = { 10975 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85060546679{\&}doi=10.1117{\%}2F12.2506687{\&}partnerID=40{\&}md5=5125dd9c739585e2fc9a96de92ac84ae }, type = { Conference Proceedings }, title = { Intrapatient multimodal medical image registration of brain CT-MRI 3D: an approach based on metaheuristics }, publisher = { SPIE }, pages = { 8 }, issn = { 1996756X }, isbn = { 9781510626058 }, editor = { [object Object],[object Object],[object Object] }, doi = { 10.1117/12.2506687 }, booktitle = { 14th International Symposium on Medical Information Processing and Analysis, SIPAIM 2018 }, author = { {Cespedes Sanchez} and {Legal Ayala} and {Pinto Roa} and Gimenez and Lopez and {Vazquez Noguera} }, } |
2018 | In Conf. Proceedings | Damian Borys, Wojciech Serafin, Mariusz Frackiewicz, Krzysztof Psiuk-Maksymowicz, Henryk Palus (2018). A Phantom Study of New Bias Field Correction Method Combining N3 and KHM for MRI Imaging. In Proceedings - 14th International Conference on Signal Image Technology and Internet Based Systems, SITIS 2018, pp. 314–319. (link) (bib) x @inproceedings{RN874, year = { 2018 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85065920171{\&}doi=10.1109{\%}2FSITIS.2018.00055{\&}partnerID=40{\&}md5=db972e30f4168923938969ac8921de9b }, type = { Conference Proceedings }, title = { A Phantom Study of New Bias Field Correction Method Combining N3 and KHM for MRI Imaging }, publisher = { Institute of Electrical and Electronics Engineers Inc. }, pages = { 314--319 }, keywords = { Intensity nonuniformity correction,MRI,Phantom }, isbn = { 9781538693858 }, editor = { [object Object],[object Object],[object Object],[object Object],[object Object],[object Object] }, doi = { 10.1109/SITIS.2018.00055 }, booktitle = { Proceedings - 14th International Conference on Signal Image Technology and Internet Based Systems, SITIS 2018 }, author = { Borys and Serafin and Frackiewicz and Psiuk-Maksymowicz and Palus }, abstract = { Signal inhomogeneity in MRI can influence significantly automatic data processing like segmentation, etc. or even affect the diagnostic procedure. In this work, a new method of intensity nonuniformity correction is presented. Our idea was to replace FCM clustering by k-harmonic means in the method that uses a standard N3 correction procedure. The algorithm was tested with MRI dataset acquired from a phantom object using a breast MRI coil to simulate real conditions during the study. Results were compared with five other methods using two indexes-integral uniformity and standard deviation of the signal inside the object. 
For the presented and improved method, the lowest integral uniformity and the reasonable low signal deviation were obtained. }, } |
2018 | In Conf. Proceedings | Ankur Biswas, P. Bhattacharya, S. P. Maity (2018). An efficient volumetric segmentation of cerebral lateral ventricles. In Procedia Computer Science, pp. 561–568. (link) (bib) x @inproceedings{RN974, year = { 2018 }, volume = { 133 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85051328846{\&}doi=10.1016{\%}2Fj.procs.2018.07.084{\&}partnerID=40{\&}md5=e21c6be1fb5f5e12be609a506f661f8b }, type = { Conference Proceedings }, title = { An efficient volumetric segmentation of cerebral lateral ventricles }, publisher = { Elsevier B.V. }, pages = { 561--568 }, keywords = { Geodesic active contour,Lateral Ventricle,MR Images,Volumetric,level set methods }, issn = { 18770509 }, isbn = { 18770509 (ISSN) }, editor = { [object Object],[object Object] }, doi = { 10.1016/j.procs.2018.07.084 }, booktitle = { Procedia Computer Science }, author = { Biswas and Bhattacharya and Maity }, abstract = { Human brain is a set of four communicating network of ventricles heaving with cerebrospinal fluid (CSF) which is located inside the brain parenchyma. An efficient segmentation of cerebral lateral ventricles one in each hemisphere can support the study of efficient pathologies for successful conclusion of various diseases. In this paper, an efficient and fast energy optimised technique for volumetric segmentation of lateral ventricles from MR images of human brain is proposed which is based on geodesic active contours using level set method. The proposed approach consists of mainly four main stages: 1. Preprocessing stage, 2. Presegmentation stage, 3. Contour Evolution with Energy optimisation stage, 4. Termination stage. Experiments on multislice MRI data obtained dice coefficient of 0.955, jaccard coefficient of 0.915 and other surface distance measures demonstrate the advantages of the proposed approach in both accuracy and efficiency. }, } |
2017 | Book | R Medina, S Bautista, V Morocho (2017). Accuracy of Connected Confidence Left Ventricle Segmentation in 3-D Multi-Slice Computerized Tomography Images, IEEE, 2017. (link) (bib) x @book{Medina2017, year = { 2017 }, url = { {\%}3CGo to }, type = { Book }, title = { Accuracy of Connected Confidence Left Ventricle Segmentation in 3-D Multi-Slice Computerized Tomography Images }, series = { 2017 {IEEE} Second Ecuador Technical Chapters Meeting }, publisher = { IEEE }, isbn = { 978-1-5386-3894-1 }, author = { Medina and Bautista and Morocho }, address = { New York }, } |
2017 | Book | F Ponzio, E Macii, E Ficarra, S Di Cataldo (2017). A Multi-modal Brain Image Registration Framework for US-guided Neuronavigation Systems Integrating MR and US for Minimally Invasive Neuroimaging, Scitepress, 2017. (link) (bib) x @book{Ponzio2017, year = { 2017 }, url = { {\%}3CGo to }, type = { Book }, title = { A Multi-modal Brain Image Registration Framework for US-guided Neuronavigation Systems Integrating MR and US for Minimally Invasive Neuroimaging }, series = { Proceedings of the 10th International Joint Conference on Biomedical Engineering Systems and Technologies, Vol 2: Bioimaging }, publisher = { Scitepress }, pages = { 114--121 }, isbn = { 978-989-758-215-8 }, doi = { 10.5220/0006239201140121 }, author = { Ponzio and Macii and Ficarra and {Di Cataldo} }, address = { Setubal }, } |
2017 | Book | Ibrahim T. Ozbolat (2017). Design for Bioprinting, Academic Press Ltd-Elsevier Science Ltd, 2017. (link) (bib) x @book{Ozbolat2017, year = { 2017 }, url = { {\%}3CGo to }, type = { Book }, title = { Design for Bioprinting }, series = { 3d Bioprinting: Fundamentals, Principles and Applications }, publisher = { Academic Press Ltd-Elsevier Science Ltd }, pages = { 13--39 }, isbn = { 978-0-12-803030-1; 978-0-12-803010-3 }, doi = { 10.1016/b978-0-12-803010-3.00002-0 }, booktitle = { 3D Bioprinting }, author = { Ozbolat }, address = { London }, abstract = { To bioprint tissue and organ constructs, patient-specific anatomical models need to be obtained; however, these models mainly provide external surface information only. The internal architecture of tissue constructs plays a crucial role as it provides a porous environment for media exchange, vascularization, tissue growth, and engraftment. This chapter discusses currently available medical imaging techniques used in acquisition of anatomical models, including magnetic resonance imaging, computed tomography, and ultrasound, and compares their strengths and limitations. Then, consideration for design architecture is discussed, and various approaches in blueprint modeling of tissue constructs are presented for creation of porous architectures. Next, existing toolpath planning approaches for bioprinting of tissues and organs are presented. Design limitations for bioprinting are discussed, and future perspectives are provided to the reader. }, } |
2017 | Book chapter | H. Tran, J. Grimm, B. Wang, M. A. Smith, A. Gogola, S. Nelson, E. Tyler-Kabara, J. Schuman, G. Wollstein, I. A. Sigal (2017). NA in Mapping in-vivo optic nerve head strains caused by intraocular and intracranial pressures, Edited by K V Larin, D D Sampson, Spie-Int Soc Optical Engineering, pp. 100670B, Proceedings of SPIE, Vol. 10067, ISBN: 0277-786X. (link) (bib) x @inbook{Tran2017, year = { 2017 }, volume = { 10067 }, url = { {\%}3CGo to }, type = { Book Section }, title = { Mapping in-vivo optic nerve head strains caused by intraocular and intracranial pressures }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 100670B }, issn = { 0277-786X }, isbn = { 9781510605756 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2257360 }, booktitle = { Optical Elastography and Tissue Biomechanics IV }, author = { Tran and Grimm and Wang and Smith and Gogola and Nelson and Tyler-Kabara and Schuman and Wollstein and Sigal }, address = { Bellingham }, abstract = { {\textcopyright} 2017 SPIE. Although it is well documented that abnormal levels of either intraocular (IOP) or intracranial pressure (ICP) can lead to potentially blinding conditions, such as glaucoma and papilledema, little is known about how the pressures actually affect the eye. Even less is known about potential interplay between their effects, namely how the level of one pressure might alter the effects of the other. Our goal was to measure in-vivo the pressure-induced stretch and compression of the lamina cribrosa due to acute changes of IOP and ICP. The lamina cribrosa is a structure within the optic nerve head, in the back of the eye. It is important because it is in the lamina cribrosa that the pressure-induced deformations are believed to initiate damage to neural tissues leading to blindness. 
An eye of a rhesus macaque monkey was imaged in-vivo with optical coherence tomography while IOP and ICP were controlled through cannulas in the anterior chamber and lateral ventricle, respectively. The image volumes were analyzed with a newly developed digital image correlation technique. The effects of both pressures were highly localized, nonlinear and non-monotonic, with strong interactions. Pressure variations from the baseline normal levels caused substantial stretch and compression of the neural tissues in the posterior pole, sometimes exceeding 20{\%}. Chronic exposure to such high levels of biomechanical insult would likely lead to neural tissue damage and loss of vision. Our results demonstrate the power of digital image correlation technique based on non-invasive imaging technologies to help understand how pressures induce biomechanical insults and lead to vision problems. }, } |
2017 | Book chapter | K. D. Joshi, T. E. Marchant, C. J. Moore (2017). NA in Shading correction algorithm for cone-beam CT in radiotherapy: extensive clinical validation of image quality improvement, Edited by T G Flohr, J Y Lo, T G Schmidt, Spie-Int Soc Optical Engineering, pp. 101322A, Proceedings of SPIE, Vol. 10132, ISBN: 16057422. (link) (bib) x @inbook{Joshi2017, year = { 2017 }, volume = { 10132 }, url = { {\%}3CGo to }, type = { Book Section }, title = { Shading correction algorithm for cone-beam CT in radiotherapy: extensive clinical validation of image quality improvement }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 101322A }, issn = { 16057422 }, isbn = { 9781510607095 }, editor = { Flohr, T. G. and Lo, J. Y. and Schmidt, T. G. }, doi = { 10.1117/12.2254035 }, booktitle = { Medical Imaging 2017: Physics of Medical Imaging }, author = { Joshi and Marchant and Moore }, address = { Bellingham }, } |
2017 | Book chapter | Stefan Hoehme, Adrian Friebel, Seddik Hammad, Dirk Drasdo, Jan G. Hengstler (2017). NA in Creation of three-dimensional liver tissue models from experimental images for systems medicine, Edited by P Stock, B Christ, Humana Press Inc, pp. 319–362, Methods in Molecular Biology, Vol. 1506, ISBN: 10643745. (link) (bib) x @inbook{Hoehme2017, year = { 2017 }, volume = { 1506 }, url = { {\%}3CGo to }, type = { Book Section }, title = { Creation of three-dimensional liver tissue models from experimental images for systems medicine }, series = { Methods in Molecular Biology }, publisher = { Humana Press Inc }, pages = { 319--362 }, keywords = { 2D/3D microscopy,Confocal scanning microscopy,Hepatocyte transplantation,Liver architecture,Liver tissue model,Spatiotemporal model,Systems biology,Systems medicine,TiQuant software }, issn = { 10643745 }, isbn = { 978-1-4939-6506-9; 978-1-4939-6504-5 }, editor = { Stock, P. and Christ, B. }, doi = { 10.1007/978-1-4939-6506-9_22 }, booktitle = { Methods in Molecular Biology }, author = { Hoehme and Friebel and Hammad and Drasdo and Hengstler }, address = { Totowa }, abstract = { In this chapter, we illustrate how three-dimensional liver tissue models can be created from experimental image modalities by utilizing a well-established processing chain of experiments, microscopic imaging, image processing, image analysis and model construction. We describe how key features of liver tissue architecture are quantified and translated into model parameterizations, and show how a systematic iteration of experiments and model simulations often leads to a better understanding of biological phenomena in systems biology and systems medicine. }, } |
2017 | Book chapter | Derek J. Gillies, Lori Gardi, Ren Zhao, Aaron Fenster (2017). NA in Optimization of real-time rigid registration motion compensation for prostate biopsies using 2D/3D ultrasound, Edited by R J Webster, B Fei, Spie-Int Soc Optical Engineering, pp. 101351F, Proceedings of SPIE, Vol. 10135, ISBN: 16057422. (link) (bib) x @inbook{Gillies2017a, year = { 2017 }, volume = { 10135 }, url = { {\%}3CGo to }, type = { Book Section }, title = { Optimization of real-time rigid registration motion compensation for prostate biopsies using 2D/3D ultrasound }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 101351F }, issn = { 16057422 }, isbn = { 9781510607156 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2255006 }, booktitle = { Medical Imaging 2017: Image-Guided Procedures, Robotic Interventions, and Modeling }, author = { Gillies and Gardi and Zhao and Fenster }, address = { Bellingham }, abstract = { {\textcopyright} 2017 SPIE. During image-guided prostate biopsy, needles are targeted at suspicious tissues to obtain specimens that are later examined histologically for cancer. Patient motion causes inaccuracies when using MR-transrectal ultrasound (TRUS) image fusion approaches used to augment the conventional biopsy procedure. Motion compensation using a single, user initiated correction can be performed to temporarily compensate for prostate motion, but a real-time continuous registration offers an improvement to clinical workflow by reducing user interaction and procedure time. An automatic motion compensation method, approaching the frame rate of a TRUS-guided system, has been developed for use during fusion-based prostate biopsy to improve image guidance. 2D and 3D TRUS images of a prostate phantom were registered using an intensitybased algorithm utilizing normalized cross-correlation and Powell's method for optimization with user initiated and continuous registration techniques. 
The user initiated correction performed with observed computation times of 78 ± 35 ms, 74 ± 28 ms, and 113 ± 49 ms for in-plane, out-of-plane, and roll motions, respectively, corresponding to errors of 0.5 ± 0.5 mm, 1.5 ± 1.4 mm, and 1.5 ± 1.6°. The continuous correction performed significantly faster (p {\textless} 0.05) than the user initiated method, with observed computation times of 31 ± 4 ms, 32 ± 4 ms, and 31 ± 6 ms for in-plane, out-of-plane, and roll motions, respectively, corresponding to errors of 0.2 ± 0.2 mm, 0.6 ± 0.5 mm, and 0.8 ± 0.4°. }, } |
2017 | Book chapter | Margarita Gamarra, Eduardo Zurek, Wilson Nieto, Miguel Jimeno, Deibys Sierra (2017). NA in A service-oriented architecture for bioinformatics: An application in cell image analysis, Edited by A Rocha, A M Correia, H Adeli, L P Reis, S Costanzo, Springer-Verlag Berlin, pp. 724–734, Advances in Intelligent Systems and Computing, Vol. 569, ISBN: 21945357. (link) (bib) x @inbook{Gamarra2017, year = { 2017 }, volume = { 569 }, url = { {\%}3CGo to }, type = { Book Section }, title = { A service-oriented architecture for bioinformatics: An application in cell image analysis }, series = { Advances in Intelligent Systems and Computing }, publisher = { Springer-Verlag Berlin }, pages = { 724--734 }, keywords = { Bioinformatic,Cloud computing,Image processing,SOA }, issn = { 21945357 }, isbn = { 9783319565347 }, editor = { Rocha, A. and Correia, A. M. and Adeli, H. and Reis, L. P. and Costanzo, S. }, doi = { 10.1007/978-3-319-56535-4_71 }, booktitle = { Advances in Intelligent Systems and Computing }, author = { Gamarra and Zurek and Nieto and Jimeno and Sierra }, address = { Berlin }, abstract = { The advance technology in microscopy and computing has allowed the development of cell image analysis. Cloud Computing offers services, software and computing infrastructure to manage cell images' big data. However the usability of these platforms is adequate to expert users only. Many software tools are oriented to expert users in image processing, likewise the use of bioinformatics require a basic knowledge in programming. In this paper we present a framework to develop a software solution with a Service-Oriented Architecture (SOA) applied to the analysis of cell images using cloud computing. }, } |
2017 | Book chapter | L. Li, D. V. LaBarbera (2017). NA in 3D high-content screening of organoids for drug discovery, Elsevier Inc., pp. 388–415, Vol. 2-8. (link) (bib) x @inbook{RN902, year = { 2017 }, volume = { 2-8 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85046151116{\&}doi=10.1016{\%}2FB978-0-12-409547-2.12329-7{\&}partnerID=40{\&}md5=e152840c738581970058285c2f8412a3 }, type = { Book Section }, title = { 3D high-content screening of organoids for drug discovery }, publisher = { Elsevier Inc. }, pages = { 388--415 }, keywords = { 3D imaging technology,3D tissue culture,Drug discovery,Extracellular matrix,High-content screening,High-throughput imaging,High-throughput screening,Image analysis software,Microenvironment,Multicellular tumor spheroids,Organoids,Organotypic,Pluripotent stem cells,Precision medicine,Spheroids }, isbn = { 9780128032008 }, doi = { 10.1016/B978-0-12-409547-2.12329-7 }, booktitle = { Comprehensive Medicinal Chemistry III }, author = { Li and LaBarbera }, abstract = { We are entering a new era of biomedical research that is driven by the demand for more effective therapeutics to prevent and treat human disease. Organoids, cultured ex vivo, are the future of this new era of biomedical research and are poised to replace preclinical 2D cell models, and in some cases animal models of human disease. Therefore, the drug discovery and development pipeline is retooling high-throughput technologies to accommodate organoids as the model of choice. In particular, the marriage of high-content screening (HCS) with organoid models for drug discovery will be a critical component in this new era of drug development. This book chapter is focused on the state-of-the-art HCS technology and how this technology is being retooled for drug discovery and development with human organoids. }, } |
2017 | Journal | Joseph J. Shaffer, Ali Ghayoor, Jeffrey D. Long, Regina Eun Young Kim, Spencer Lourens, Lauren J. O'Donnell, Carl Fredrik Westin, Yogesh Rathi, Vincent Magnotta, Jane S. Paulsen, Hans J. Johnson (2017). Longitudinal diffusion changes in prodromal and early HD: Evidence of white-matter tract deterioration. Human Brain Mapping, 38(3), pp. 1460–1477. (link) (bib) x @article{shaffer2017longitudinal, year = { 2017 }, volume = { 38 }, url = { http://www.ncbi.nlm.nih.gov/pubmed/28045213{\%}0Ahttp://doi.wiley.com/10.1002/hbm.23465 }, title = { Longitudinal diffusion changes in prodromal and early HD: Evidence of white-matter tract deterioration }, pmid = { 28045213 }, pages = { 1460--1477 }, number = { 3 }, keywords = { Huntington disease,computer-assisted,diffusion magnetic resonance imaging,diffusion tractography,disease progression,image processing,multicenter study,prodromal,white matter }, journal = { Human Brain Mapping }, issn = { 10970193 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Shaffer et al/Human Brain Mapping/Shaffer et al. - 2017 - Longitudinal diffusion changes in prodromal and early HD Evidence of white-matter tract deterioration.pdf:pdf }, doi = { 10.1002/hbm.23465 }, author = { Shaffer and Ghayoor and Long and Kim and Lourens and O'Donnell and Westin and Rathi and Magnotta and Paulsen and Johnson }, annote = { From Duplicate 2 (Longitudinal diffusion changes in prodromal and early HD: Evidence of white-matter tract deterioration - Shaffer, Joseph J.; Ghayoor, Ali; Long, Jeffrey D.; Kim, Regina EY Y; Lourens, Spencer; O'Donnell, Lauren J.; Westin, Carl-Fredrik; Rathi, Yogesh; Magnotta, Vincent; Paulsen, Jane S.; Johnson, Hans J.) 
From Duplicate 1 (Longitudinal diffusion changes in prodromal and early HD: Evidence of white-matter tract deterioration - Shaffer, Joseph J.; Ghayoor, Ali; Long, Jeffrey D.; Kim, Regina EY; Lourens, Spencer; O'Donnell, Lauren J.; Westin, Carl-Fredrik; Rathi, Yogesh; Magnotta, Vincent; Paulsen, Jane S.; Johnson, Hans J.) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (Longitudinal diffusion changes in prodromal and early HD: Evidence of white-matter tract deterioration - Shaffer, Joseph J; Ghayoor, Ali; Long, Jeffrey D; Kim, Regina E Y; Lourens, Spencer; O'Donnell, Lauren J; Westin, Carl-Fredrik; Rathi, Yogesh; Magnotta, Vincent; Paulsen, Jane S; Johnson, Hans J) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} }, abstract = { Introduction: Huntington's disease (HD) is a genetic neurodegenerative disorder that primarily affects striatal neurons. Striatal volume loss is present years before clinical diagnosis; however, white matter degradation may also occur prior to diagnosis. 
Diffusion-weighted imaging (DWI) can measure microstructural changes associated with degeneration that precede macrostructural changes. DWI derived measures enhance understanding of degeneration in prodromal HD (pre-HD). Methods: As part of the PREDICT-HD study, N = 191 pre-HD individuals and 70 healthy controls underwent two or more (baseline and 1–5 year follow-up) DWI, with n = 649 total sessions. Images were processed using cutting-edge DWI analysis methods for large multicenter studies. Diffusion tensor imaging (DTI) metrics were computed in selected tracts connecting the primary motor, primary somato-sensory, and premotor areas of the cortex with the subcortical caudate and putamen. Pre-HD participants were divided into three CAG-Age Product (CAP) score groups reflecting clinical diagnosis probability (low, medium, or high probabilities). Baseline and longitudinal group differences were examined using linear mixed models. Results: Cross-sectional and longitudinal differences in DTI measures were present in all three CAP groups compared with controls. The high CAP group was most affected. Conclusions: This is the largest longitudinal DWI study of pre-HD to date. Findings showed DTI differences, consistent with white matter degeneration, were present up to a decade before predicted HD diagnosis. Our findings indicate a unique role for disrupted connectivity between the premotor area and the putamen, which may be closely tied to the onset of motor symptoms in HD. Hum Brain Mapp 38:1460–1477, 2017. {\textcopyright} 2017 Wiley Periodicals, Inc. }, } |
2017 | Journal | Wiebke Neumann, Florian Lietzmann, Lothar R. Schad, Frank G. Zöllner (2017). Design eines multimodalen (1H/23Na MR/CT) anthropomorphen Thorax-Phantoms. Zeitschrift fur Medizinische Physik, 27(2), pp. 124–131. (link) (bib) x @article{Neumann2017, year = { 2017 }, volume = { 27 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Design eines multimodalen (1H/23Na MR/CT) anthropomorphen Thorax-Phantoms }, pages = { 124--131 }, number = { 2 }, keywords = { Anthropomorphic phantom,multi-nuclear MRI,multimodal imaging,quantification,thorax phantom }, journal = { Zeitschrift fur Medizinische Physik }, issn = { 18764436 }, doi = { 10.1016/j.zemedi.2016.07.004 }, author = { Neumann and Lietzmann and Schad and Z{\"{o}}llner }, abstract = { Objectives This work proposes a modular, anthropomorphic MR and CT thorax phantom that enables the comparison of experimental studies for quantitative evaluation of deformable, multimodal image registration algorithms and realistic multi-nuclear MR imaging techniques. Methods A human thorax phantom was developed with insertable modules representing lung, liver, ribs and additional tracking spheres. The quality of human tissue mimicking characteristics was evaluated for 1H and 23Na MR as well as CT imaging. The position of landmarks in the lung lobes was tracked during CT image acquisition at several positions during breathing cycles. 1H MR measurements of the liver were repeated after seven months to determine long term stability. Results The modules possess HU, T1 and T2 values comparable to human tissues (lung module: −756 ± 148 HU, artificial ribs: 218 ± 56 HU (low CaCO3 concentration) and 339 ± 121 (high CaCO3 concentration), liver module: T1 = 790 ± 28 ms, T2 = 65 ± 1 ms). Motion analysis showed that the landmarks in the lung lobes follow a 3D trajectory similar to human breathing motion. The tracking spheres are well detectable in both CT and MRI. 
The parameters of the tracking spheres can be adjusted in the following ranges to result in a distinct signal: HU values from 150 to 900 HU, T1 relaxation time from 550 ms to 2000 ms, T2 relaxation time from 40 ms to 200 ms. Conclusion The presented anthropomorphic multimodal thorax phantom fulfills the demands of a simple, inexpensive system with interchangeable components. In future, the modular design allows for complementing the present set up with additional modules focusing on specific research targets such as perfusion studies, 23Na MR quantification experiments and an increasing level of complexity for motion studies. }, } |
2017 | Journal | Jens Wolfelschneider, Matteo Seregni, Aurora Fassi, Marc Ziegler, Guido Baroni, Rainer Fietkau, Marco Riboldi, Christoph Bert (2017). Examination of a deformable motion model for respiratory movements and 4D dose calculations using different driving surrogates. Medical Physics, 44(6), pp. 2066–2076. (link) (bib) x @article{Wolfelschneider2017, year = { 2017 }, volume = { 44 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Examination of a deformable motion model for respiratory movements and 4D dose calculations using different driving surrogates }, pages = { 2066--2076 }, number = { 6 }, keywords = { 4D dose reconstruction,4DCT,Respiratory motion model,Scale invariant feature transform,Surrogates }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1002/mp.12243 }, author = { Wolfelschneider and Seregni and Fassi and Ziegler and Baroni and Fietkau and Riboldi and Bert }, abstract = { Purpose: The aim of this study was to evaluate a surrogate-driven motion model based on fourdimensional computed tomography that is able to predict CT volumes corresponding to arbitrary respiratory phases. Furthermore, the comparison of three different driving surrogates is examined and the feasibility of using the model for 4D dose re-calculation will be discussed. Methods: The study is based on repeated 4DCTs of twenty patients treated for bronchial carcinoma and metastasis. The motion model was estimated from the planning 4DCT through deformable image registration. To predict a certain phase of a follow-up 4DCT, the model considers inter-fractional variations (baseline correction) and intra-fractional respiratory parameters (amplitude and phase) derived from surrogates. The estimated volumes resulting from the model were compared to ground-truth clinical 4DCTs using absolute HU differences in the lung region and landmarks localized using the Scale Invariant Feature Transform. 
Finally, the c-index was used to evaluate the dosimetric effects of the intensity differences measured between the estimated and the ground-truth CTvolumes. Results: The results show absolute HU differences between estimated and ground-truth images with median value (± standard deviation) of (61.3 ± 16.7) HU. Median 3D distances, measured on about 400 matching landmarks in each volume, were (2.9 ± 3.0) mm. 3D errors up to 28.2 mm were found for CT images with artifacts or reduced quality. Pass rates for all surrogate approaches were above 98.9{\%} with a c-criterion of 2{\%}/2 mm. Conclusion: The results depend mainly on the image quality of the initial 4DCT and the deformable image registration. All investigated surrogates can be used to estimate follow-up 4DCT phases, however, uncertainties decrease for volumetric approaches. Application of the model for 4D dose calculations is feasible. }, } |
2017 | Journal | Andrew Townsend, Luca Pagani, Liam Blunt, Paul J. Scott, Xiangqian Jiang (2017). Factors affecting the accuracy of areal surface texture data extraction from X-ray CT. CIRP Annals - Manufacturing Technology, 66(1), pp. 547–550. (link) (bib) x @article{Townsend2017, year = { 2017 }, volume = { 66 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Factors affecting the accuracy of areal surface texture data extraction from X-ray CT }, pages = { 547--550 }, number = { 1 }, keywords = { Additive manufacturing,Metrology,X-ray }, journal = { CIRP Annals - Manufacturing Technology }, issn = { 17260604 }, doi = { 10.1016/j.cirp.2017.04.074 }, author = { Townsend and Pagani and Blunt and Scott and Jiang }, abstract = { The ability to perform non-destructive areal surface analysis of the internal surfaces of additively manufactured (AM) components would be advantageous during product development, process control and product acceptance. Currently industrial X-ray computed tomography (XCT) is the only practical method for imaging the internal surfaces of AM components. A viable method of extracting useable areal surface texture data from XCT scans has now been developed and this paper reports on three measurement and data processing factors affecting the value of areal parameters per ISO 25178-2 generated from XCT volume data using this novel technique. }, } |
2017 | Journal | Sébastien Tourbier, Clemente Velasco-Annis, Vahid Taimouri, Patric Hagmann, Reto Meuli, Simon K. Warfield, Meritxell Bach Cuadra, Ali Gholipour (2017). Automated template-based brain localization and extraction for fetal brain MRI reconstruction. NeuroImage, 155, pp. 460–472. (link) (bib) x @article{Tourbier2017, year = { 2017 }, volume = { 155 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Automated template-based brain localization and extraction for fetal brain MRI reconstruction }, pages = { 460--472 }, keywords = { B-Spline deformation,Block matching,Brain localization,Fetal brain MRI,Slice-by-slice brain extraction,Slice-to-template registration,Super-resolution reconstruction }, journal = { NeuroImage }, issn = { 10959572 }, doi = { 10.1016/j.neuroimage.2017.04.004 }, author = { Tourbier and Velasco-Annis and Taimouri and Hagmann and Meuli and Warfield and {Bach Cuadra} and Gholipour }, abstract = { Most fetal brain MRI reconstruction algorithms rely only on brain tissue-relevant voxels of low-resolution (LR) images to enhance the quality of inter-slice motion correction and image reconstruction. Consequently the fetal brain needs to be localized and extracted as a first step, which is usually a laborious and time consuming manual or semi-automatic task. We have proposed in this work to use age-matched template images as prior knowledge to automatize brain localization and extraction. This has been achieved through a novel automatic brain localization and extraction method based on robust template-to-slice block matching and deformable slice-to-template registration. Our template-based approach has also enabled the reconstruction of fetal brain images in standard radiological anatomical planes in a common coordinate space. We have integrated this approach into our new reconstruction pipeline that involves intensity normalization, inter-slice motion correction, and super-resolution (SR) reconstruction. 
To this end we have adopted a novel approach based on projection of every slice of the LR brain masks into the template space using a fusion strategy. This has enabled the refinement of brain masks in the LR images at each motion correction iteration. The overall brain localization and extraction algorithm has shown to produce brain masks that are very close to manually drawn brain masks, showing an average Dice overlap measure of 94.5{\%}. We have also demonstrated that adopting a slice-to-template registration and propagation of the brain mask slice-by-slice leads to a significant improvement in brain extraction performance compared to global rigid brain extraction and consequently in the quality of the final reconstructed images. Ratings performed by two expert observers show that the proposed pipeline can achieve similar reconstruction quality to reference reconstruction based on manual slice-by-slice brain extraction. The proposed brain mask refinement and reconstruction method has shown to provide promising results in automatic fetal brain MRI segmentation and volumetry in 26 fetuses with gestational age range of 23 to 38 weeks. }, } |
2017 | Journal | E. Soudah, J. Casacuberta, P. J. Gamez-Montero, J. S. Pérez, M. Rodr\'iguez-Cancio, G. Raush, C. H. Li, F. Carreras, R. Castilla (2017). Estimation of wall shear stress using 4D flow cardiovascular MRI and computational fluid dynamics. Journal of Mechanics in Medicine and Biology, 17(3), pp. 16. (link) (bib) x @article{Soudah2017, year = { 2017 }, volume = { 17 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Estimation of wall shear stress using 4D flow cardiovascular MRI and computational fluid dynamics }, pages = { 16 }, number = { 3 }, keywords = { Phase-contrast MRI,blood flow patterns,computational fluid dynamics,velocity mapping,wall shear stress }, journal = { Journal of Mechanics in Medicine and Biology }, issn = { 02195194 }, doi = { 10.1142/S0219519417500464 }, author = { Soudah and Casacuberta and Gamez-Montero and P{\'{e}}rez and Rodr{\'{i}}guez-Cancio and Raush and Li and Carreras and Castilla }, abstract = { In the last few years, wall shear stress (WSS) has arisen as a new diagnostic indicator in patients with arterial disease. There is a substantial evidence that the WSS plays a significant role, together with hemodynamic indicators, in initiation and progression of the vascular diseases. Estimation of WSS values, therefore, may be of clinical significance and the methods employed for its measurement are crucial for clinical community. Recently, four-dimensional (4D) flow cardiovascular magnetic resonance (CMR) has been widely used in a number of applications for visualization and quantification of blood flow, and although the sensitivity to blood flow measurement has increased, it is not yet able to provide an accurate three-dimensional (3D) WSS distribution. The aim of this work is to evaluate the aortic blood flow features and the associated WSS by the combination of 4D flow cardiovascular magnetic resonance (4D CMR) and computational fluid dynamics technique. 
In particular, in this work, we used the 4D CMR to obtain the spatial domain and the boundary conditions needed to estimate the WSS within the entire thoracic aorta using computational fluid dynamics. Similar WSS distributions were found for cases simulated. A sensitivity analysis was done to check the accuracy of the method. 4D CMR begins to be a reliable tool to estimate the WSS within the entire thoracic aorta using computational fluid dynamics. The combination of both techniques may provide the ideal tool to help tackle these and other problems related to wall shear estimation. }, } |
2017 | Journal | Alexander Schmitz, Sabine C. Fischer, Christian Mattheyer, Francesco Pampaloni, Ernst H.K. Stelzer (2017). Multiscale image analysis reveals structural heterogeneity of the cell microenvironment in homotypic spheroids. Scientific Reports, 7, pp. 13. (link) (bib) x @article{Schmitz2017, year = { 2017 }, volume = { 7 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Multiscale image analysis reveals structural heterogeneity of the cell microenvironment in homotypic spheroids }, pages = { 13 }, journal = { Scientific Reports }, issn = { 20452322 }, doi = { 10.1038/srep43693 }, author = { Schmitz and Fischer and Mattheyer and Pampaloni and Stelzer }, abstract = { Three-dimensional multicellular aggregates such as spheroids provide reliable in vitro substitutes for tissues. Quantitative characterization of spheroids at the cellular level is fundamental. We present the first pipeline that provides three-dimensional, high-quality images of intact spheroids at cellular resolution and a comprehensive image analysis that completes traditional image segmentation by algorithms from other fields. The pipeline combines light sheet-based fluorescence microscopy of optically cleared spheroids with automated nuclei segmentation (F score: 0.88) and concepts from graph analysis and computational topology. Incorporating cell graphs and alpha shapes provided more than 30 features of individual nuclei, the cellular neighborhood and the spheroid morphology. The application of our pipeline to a set of breast carcinoma spheroids revealed two concentric layers of different cell density for more than 30,000 cells. The thickness of the outer cell layer depends on a spheroid's size and varies between 50{\%} and 75{\%} of its radius. In differently-sized spheroids, we detected patches of different cell densities ranging from 5 × 10 5 to 1 × 10 6 cells/mm 3. 
Since cell density affects cell behavior in tissues, structural heterogeneities need to be incorporated into existing models. Our image analysis pipeline provides a multiscale approach to obtain the relevant data for a system-level understanding of tissue architecture. }, } |
2017 | Journal | Curtis T. Rueden, Johannes Schindelin, Mark C. Hiner, Barry E. DeZonia, Alison E. Walter, Ellen T. Arena, Kevin W. Eliceiri (2017). ImageJ2: ImageJ for the next generation of scientific image data. BMC Bioinformatics, 18(1), pp. 26. (link) (bib) x @article{Rueden2017, year = { 2017 }, volume = { 18 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { ImageJ2: ImageJ for the next generation of scientific image data }, pages = { 26 }, number = { 1 }, keywords = { Extensibility,Image processing,ImageJ,ImageJ2,Interoperability,N-dimensional,Open development,Open source,Reproducibility }, journal = { BMC Bioinformatics }, issn = { 14712105 }, eprint = { 1701.05940 }, doi = { 10.1186/s12859-017-1934-z }, author = { Rueden and Schindelin and Hiner and DeZonia and Walter and Arena and Eliceiri }, arxivid = { 1701.05940 }, archiveprefix = { arXiv }, abstract = { Background: ImageJ is an image analysis program extensively used in the biological sciences and beyond. Due to its ease of use, recordable macro language, and extensible plug-in architecture, ImageJ enjoys contributions from non-programmers, amateur programmers, and professional developers alike. Enabling such a diversity of contributors has resulted in a large community that spans the biological and physical sciences. However, a rapidly growing user base, diverging plugin suites, and technical limitations have revealed a clear need for a concerted software engineering effort to support emerging imaging paradigms, to ensure the software's ability to handle the requirements of modern science. Results: We rewrote the entire ImageJ codebase, engineering a redesigned plugin mechanism intended to facilitate extensibility at every level, with the goal of creating a more powerful tool that continues to serve the existing community while addressing a wider range of scientific requirements. 
This next-generation ImageJ, called "ImageJ2" in places where the distinction matters, provides a host of new functionality. It separates concerns, fully decoupling the data model from the user interface. It emphasizes integration with external applications to maximize interoperability. Its robust new plugin framework allows everything from image formats, to scripting languages, to visualization to be extended by the community. The redesigned data model supports arbitrarily large, N-dimensional datasets, which are increasingly common in modern image acquisition. Despite the scope of these changes, backwards compatibility is maintained such that this new functionality can be seamlessly integrated with the classic ImageJ interface, allowing users and developers to migrate to these new methods at their own pace. Conclusions: Scientific imaging benefits from open-source programs that advance new method development and deployment to a diverse audience. ImageJ has continuously evolved with this idea in mind; however, new and emerging scientific requirements have posed corresponding challenges for ImageJ's development. The described improvements provide a framework engineered for flexibility, intended to support these requirements as well as accommodate future needs. Future efforts will focus on implementing new algorithms in this framework and expanding collaborations with other popular scientific software suites. }, } |
2017 | Journal | Marko Rak, Tim König, Klaus D. Tönnies, Mathias Walke, Jens Ricke, Christian Wybranski (2017). Joint deformable liver registration and bias field correction for MR-guided HDR brachytherapy. International Journal of Computer Assisted Radiology and Surgery, 12(12), pp. 2169–2180. (link) (bib) x @article{Rak2017, year = { 2017 }, volume = { 12 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Joint deformable liver registration and bias field correction for MR-guided HDR brachytherapy }, pages = { 2169--2180 }, number = { 12 }, keywords = { Bias field correction,Deformable registration,High-dose rate brachytherapy,Liver intervention,Magnetic resonance imaging }, journal = { International Journal of Computer Assisted Radiology and Surgery }, issn = { 18616429 }, doi = { 10.1007/s11548-017-1633-2 }, author = { Rak and K{\"{o}}nig and T{\"{o}}nnies and Walke and Ricke and Wybranski }, abstract = { Purpose: In interstitial high-dose rate brachytherapy, liver cancer is treated by internal radiation, requiring percutaneous placement of applicators within or close to the tumor. To maximize utility, the optimal applicator configuration is pre-planned on magnetic resonance images. The pre-planned configuration is then implemented via a magnetic resonance-guided intervention. Mapping the pre-planning information onto interventional data would reduce the radiologist's cognitive load during the intervention and could possibly minimize discrepancies between optimally pre-planned and actually placed applicators. Methods: We propose a fast and robust two-step registration framework suitable for interventional settings: first, we utilize a multi-resolution rigid registration to correct for differences in patient positioning (rotation and translation). 
Second, we employ a novel iterative approach alternating between bias field correction and Markov random field deformable registration in a multi-resolution framework to compensate for non-rigid movements of the liver, the tumors and the organs at risk. In contrast to existing pre-correction methods, our multi-resolution scheme can recover bias field artifacts of different extents at marginal computational costs. Results: We compared our approach to deformable registration via B-splines, demons and the SyN method on 22 registration tasks from eleven patients. Results showed that our approach is more accurate than the contenders for liver as well as for tumor tissues. We yield average liver volume overlaps of 94.0 ± 2.7{\%} and average surface-to-surface distances of 2.02 ± 0.87 mm and 3.55 ± 2.19 mm for liver and tumor tissue, respectively. The reported distances are close to (or even below) the slice spacing (2.5 – 3.0 mm) of our data. Our approach is also the fastest, taking 35.8 ± 12.8 s per task. Conclusion: The presented approach is sufficiently accurate to map information available from brachytherapy pre-planning onto interventional data. It is also reasonably fast, providing a starting point for computer-aidance during intervention. }, } |
2017 | Journal | S. Primpke, C. Lorenz, R. Rascher-Friesenhausen, G. Gerdts (2017). An automated approach for microplastics analysis using focal plane array (FPA) FTIR microscopy and image analysis. Analytical Methods, 9(9), pp. 1499–1511. (link) (bib) x @article{Primpke2017, year = { 2017 }, volume = { 9 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { An automated approach for microplastics analysis using focal plane array (FPA) FTIR microscopy and image analysis }, pages = { 1499--1511 }, number = { 9 }, journal = { Analytical Methods }, issn = { 17599679 }, doi = { 10.1039/c6ay02476a }, author = { Primpke and Lorenz and Rascher-Friesenhausen and Gerdts }, abstract = { The analysis of imaging data derived from micro-Fourier transform infrared ($\mu$FTIR) microscopy is a powerful tool allowing the analysis of microplastics enriched on membrane filters. In this study we present an automated approach to reduce the time demand currently needed for data analyses. We developed a novel analysis pipeline, based on the OPUS{\textcopyright} Software by Bruker, followed by image analysis with Python and Simple ITK image processing modules. By using this newly developed pipeline it was possible to analyse datasets from focal plane array (FPA) $\mu$FTIR mapping of samples containing up to 1.8 million single spectra. All spectra were compared against a database of different synthetic and natural polymers by various routines followed by benchmark tests with focus on accuracy and quality. The spectral correlation was optimized for high quality data generation, which allowed image analysis. Based on these results an image analysis approach was developed, providing information on particle numbers and sizes for each polymer detected. It was possible to collect all data with relative ease even for complex sample matrices. 
This approach significantly decreases the time demand for the interpretation of complex FTIR-imaging data and significantly increases the data quality. }, } |
2017 | Journal | Ryan G. Price, Robert A. Knight, Ken Pin Hwang, Ersin Bayram, Siamak P. Nejad-Davarani, Carri K. Glide-Hurst (2017). Optimization of a novel large field of view distortion phantom for MR-only treatment planning. Journal of Applied Clinical Medical Physics, 18(4), pp. 51–61. (link) (bib) x @article{Price2017, year = { 2017 }, volume = { 18 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Optimization of a novel large field of view distortion phantom for MR-only treatment planning }, pages = { 51--61 }, number = { 4 }, keywords = { Distortion,Gradient nonlinearity,MRI,Phantom,Spatial accuracy }, journal = { Journal of Applied Clinical Medical Physics }, issn = { 15269914 }, doi = { 10.1002/acm2.12090 }, author = { Price and Knight and Hwang and Bayram and Nejad-Davarani and Glide-Hurst }, abstract = { Purpose: MR-only treatment planning requires images of high geometric fidelity, particularly for large fields of view (FOV). However, the availability of large FOV distortion phantoms with analysis software is currently limited. This work sought to optimize a modular distortion phantom to accommodate multiple bore configurations and implement distortion characterization in a widely implementable solution. Method and Materials: To determine candidate materials, 1.0 T MR and CT images were acquired of twelve urethane foam samples of various densities and strengths. Samples were precision-machined to accommodate 6 mm diameter paintballs used as landmarks. Final material candidates were selected by balancing strength, machinability, weight, and cost. Bore sizes and minimum aperture width resulting from couch position were tabulated from the literature (14 systems, 5 vendors). Bore geometry and couch position were simulated using MATLAB to generate machine-specific models to optimize the phantom build. 
Previously developed software for distortion characterization was modified for several magnet geometries (1.0 T, 1.5 T, 3.0 T), compared against previously published 1.0 T results, and integrated into the 3D Slicer application platform. Results: All foam samples provided sufficient MR image contrast with paintball landmarks. Urethane foam (compressive strength {\~{}}1000 psi, density {\~{}}20 lb/ft3) was selected for its accurate machinability and weight characteristics. For smaller bores, a phantom version with the following parameters was used: 15 foam plates, 55 {\texttimes} 55 {\texttimes} 37.5 cm3 (L {\texttimes} W {\texttimes} H), 5,082 landmarks, and weight {\~{}}30 kg. To accommodate {\textgreater} 70 cm wide bores, an extended build used 20 plates spanning 55 {\texttimes} 55 {\texttimes} 50 cm3 with 7,497 landmarks and weight {\~{}}44 kg. Distortion characterization software was implemented as an external module into 3DSlicer's plugin framework and results agreed with the literature. Conclusion: The design and implementation of a modular, extendable distortion phantom was optimized for several bore configurations. The phantom and analysis software will be available for multi-institutional collaborations and cross-validation trials to support MR-only planning. }, } |
2017 | Journal | Stephen Parsons, C. Seth Parker, W. Brent Seales (2017). The St. Chad Gospels: Diachronic manuscript registration and visualization. Manuscript Studies, 2(2), pp. 483–498. (link) (bib) x @article{Parsons2017, year = { 2017 }, volume = { 2 }, url = { https://doi.org/10.1353/mns.2017.0022 }, type = { Journal Article }, title = { The {St. Chad Gospels}: Diachronic manuscript registration and visualization }, pages = { 483--498 }, number = { 2 }, journal = { Manuscript Studies }, issn = { 23801190 }, doi = { 10.1353/mns.2017.0022 }, author = { Parsons, Stephen and Parker, C. Seth and Seales, W. Brent }, } |
2017 | Journal | Samuel Byeongjun Park, Jung Gun Kim, Ki Woong Lim, Chae Hyun Yoon, Dong Jun Kim, Han Sung Kang, Yung Ho Jo (2017). A magnetic resonance image-guided breast needle intervention robot system: overview and design considerations. International Journal of Computer Assisted Radiology and Surgery, 12(8), pp. 1319–1331. (link) (bib) x @article{Park2017, year = { 2017 }, volume = { 12 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A magnetic resonance image-guided breast needle intervention robot system: overview and design considerations }, pages = { 1319--1331 }, number = { 8 }, keywords = { Bendable needle,Breast cancer,Magnetic resonance imaging,Medical robotics,Needle intervention }, journal = { International Journal of Computer Assisted Radiology and Surgery }, issn = { 18616429 }, doi = { 10.1007/s11548-017-1528-2 }, author = { Park and Kim and Lim and Yoon and Kim and Kang and Jo }, abstract = { Purpose: We developed an image-guided intervention robot system that can be operated in a magnetic resonance (MR) imaging gantry. The system incorporates a bendable needle intervention robot for breast cancer patients that overcomes the space limitations of the MR gantry. Methods: Most breast coil designs for breast MR imaging have side openings to allow manual localization. However, for many intervention procedures, the patient must be removed from the gantry. A robotic manipulation system with integrated image guidance software was developed. Our robotic manipulator was designed to be slim, so as to fit between the patient's side and the MR gantry wall. Only non-magnetic materials were used, and an electromagnetic shield was employed for cables and circuits. The image guidance software was built using open source libraries. In situ feasibility tests were performed in a 3-T MR system. One target point in the breast phantom was chosen by the clinician for each experiment, and our robot moved the needle close to the target point. 
Results: Without image-guided feedback control, the needle end could not hit the target point (distance = 5 mm) in the first experiment. Using our robotic system, the needle hits the target lesion of the breast phantom at a distance of 2.3 mm from the same target point using image-guided feedback. The second experiment was performed using other target points, and the distance between the final needle end point and the target point was 0.8 mm. Conclusions: We successfully developed an MR-guided needle intervention robot for breast cancer patients. Further research will allow the expansion of these interventions. }, } |
2017 | Journal | Thomas A. Nketia, Heba Sailem, Gustavo Rohde, Raghu Machiraju, Jens Rittscher (2017). Analysis of live cell images: Methods, tools and opportunities. Methods, 115, pp. 65–79. (link) (bib) x @article{Nketia2017, year = { 2017 }, volume = { 115 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Analysis of live cell images: Methods, tools and opportunities }, pages = { 65--79 }, keywords = { Biological image analysis,Cell segmentation,Cell tracking,Live cell imaging,Machine learning,Quantitative biological imaging }, journal = { Methods }, issn = { 10959130 }, doi = { 10.1016/j.ymeth.2017.02.007 }, author = { Nketia and Sailem and Rohde and Machiraju and Rittscher }, abstract = { Advances in optical microscopy, biosensors and cell culturing technologies have transformed live cell imaging. Thanks to these advances live cell imaging plays an increasingly important role in basic biology research as well as at all stages of drug development. Image analysis methods are needed to extract quantitative information from these vast and complex data sets. The aim of this review is to provide an overview of available image analysis methods for live cell imaging, in particular required preprocessing image segmentation, cell tracking and data visualisation methods. The potential opportunities recent advances in machine learning, especially deep learning, and computer vision provide are being discussed. This review includes overview of the different available software packages and toolkits. }, } |
2017 | Journal | Massimo Narizzano, Gabriele Arnulfo, Serena Ricci, Benedetta Toselli, Martin Tisdall, Andrea Canessa, Marco Massimo Fato, Francesco Cardinale (2017). SEEG assistant: A 3DSlicer extension to support epilepsy surgery. BMC Bioinformatics, 18(1), pp. 13. (link) (bib) x @article{Narizzano2017, year = { 2017 }, volume = { 18 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { SEEG assistant: A 3DSlicer extension to support epilepsy surgery }, pages = { 13 }, number = { 1 }, keywords = { Automatic segmentation,Epilepsy,Epileptic zone detections,GMPI,Medical imaging,SEEG }, journal = { BMC Bioinformatics }, issn = { 14712105 }, doi = { 10.1186/s12859-017-1545-8 }, author = { Narizzano and Arnulfo and Ricci and Toselli and Tisdall and Canessa and Fato and Cardinale }, abstract = { Background: In the evaluation of Stereo-Electroencephalography (SEEG) signals, the physicist's workflow involves several operations, including determining the position of individual electrode contacts in terms of both relationship to grey or white matter and location in specific brain regions. These operations are (i) generally carried out manually by experts with limited computer support, (ii) hugely time consuming, and (iii) often inaccurate, incomplete, and prone to errors. Results: In this paper we present SEEG Assistant, a set of tools integrated in a single 3DSlicer extension, which aims to assist neurosurgeons in the analysis of post-implant structural data and hence aid the neurophysiologist in the interpretation of SEEG data. SEEG Assistant consists of (i) a module to localize the electrode contact positions using imaging data from a thresholded post-implant CT, (ii) a module to determine the most probable cerebral location of the recorded activity, and (iii) a module to compute the Grey Matter Proximity Index, i.e. the distance of each contact from the cerebral cortex, in order to discriminate between white and grey matter location of contacts. 
Finally, exploiting 3DSlicer capabilities, SEEG Assistant offers a Graphical User Interface that simplifies the interaction between the user and the tools. SEEG Assistant has been tested on 40 patients segmenting 555 electrodes, and it has been used to identify the neuroanatomical loci and to compute the distance to the nearest cerebral cortex for 9626 contacts. We also performed manual segmentation and compared the results between the proposed tool and gold-standard clinical practice. As a result, the use of SEEG Assistant decreases the post implant processing time by more than 2 orders of magnitude, improves the quality of results and decreases, if not eliminates, errors in post implant processing. Conclusions: The SEEG Assistant Framework for the first time supports physicists by providing a set of open-source tools for post-implant processing of SEEG data. Furthermore, SEEG Assistant has been integrated into 3D Slicer, a software platform for the analysis and visualization of medical images, overcoming limitations of command-line tools. }, } |
2017 | Journal | Stephen M. Moore, Robert L. McIntosh, Steve Iskra, Alireza Lajevardipour, Andrew W. Wood (2017). Effect of adverse environmental conditions and protective clothing on temperature rise in a human body exposed to radiofrequency electromagnetic fields. Bioelectromagnetics, 38(5), pp. 356–363. (link) (bib) x @article{Moore2017, year = { 2017 }, volume = { 38 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85016441570{\&}doi=10.1002{\%}2Fbem.22048{\&}partnerID=40{\&}md5=9f2b71c5da61855e410d615f4c57dfd3 }, type = { Journal Article }, title = { Effect of adverse environmental conditions and protective clothing on temperature rise in a human body exposed to radiofrequency electromagnetic fields }, pages = { 356--363 }, number = { 5 }, keywords = { RF-EMF worker,occupational safety,protective clothing,radiofrequency safety standards,temperature rise }, journal = { Bioelectromagnetics }, issn = { 1521186X }, doi = { 10.1002/bem.22048 }, author = { Moore and McIntosh and Iskra and Lajevardipour and Wood }, abstract = { This study considers the computationally determined thermal profile of a finely discretized, heterogeneous human body model, simulating a radiofrequency electromagnetic field (RF-EMF) worker wearing protective clothing subject to RF-EMF exposure, and subject to various environmental conditions including high ambient temperature and high humidity, with full thermoregulatory mechanisms in place. How the human body responds in various scenarios was investigated, and the information was used to consider safety limits in current international RF-EMF safety guidelines and standards. 
It was found that different environmental conditions had minimal impact on the magnitude of the thermal response due to RF-EMF exposure, and that the current safety factor of 10 applied in international RF-EMF safety guidelines and standards for RF-EMF workers is generally conservative, though it is only narrowly so when workers are subjected to the most adverse environmental conditions. Bioelectromagnetics. 38:356–363, 2017. {\textcopyright} 2017 Wiley Periodicals, Inc. }, } |
2017 | Journal | Mike Meyer, Nick Polys, Humza Yaqoob, Linda Hinnov, Shuhai Xiao (2017). Beyond the stony veil: Reconstructing the Earth's earliest large animal traces via computed tomography X-ray imaging. Precambrian Research, 298, pp. 341–350. (link) (bib) x @article{Meyer2017, year = { 2017 }, volume = { 298 }, url = { https://doi.org/10.1016/j.precamres.2017.05.010 }, type = { Journal Article }, title = { Beyond the stony veil: Reconstructing the Earth's earliest large animal traces via computed tomography X-ray imaging }, pages = { 341--350 }, keywords = { Bioturbation,Ediacaran,Lamonte trevallis,South China,Trace fossil }, journal = { Precambrian Research }, issn = { 03019268 }, doi = { 10.1016/j.precamres.2017.05.010 }, author = { Meyer, Mike and Polys, Nick and Yaqoob, Humza and Hinnov, Linda and Xiao, Shuhai }, abstract = { Trace fossils are superb lines of evidence for examining the ancient biologic world because they offer an opportunity to infer behavioral ecology of organisms. However, traces can be difficult to parse from their matrix, which leads to the loss of important morphological and behavioral data. This is especially true for the earliest marine animal traces from the Ediacaran Period (635–541 Ma), which are usually small ({\textless}5 mm in diameter) and simple (mostly small horizontal trails and burrows), and are sometimes difficult to be distinguished from co-existing tubular body fossils. There is also evidence that the prevalence of microbial substrates in Ediacaran oceans may have influenced emerging trace makers in non-actualistic ways from a late Phanerozoic perspective (e.g., microbial mats may have facilitated a strong geochemical gradient across the sediment-water interface). Therefore, the discovery of the relatively large traces of Lamonte trevallis from the Ediacaran Shibantan Member of the Dengying Formation (∼551–541 Ma) in the Yangtze Gorges area of South China provides a unique opportunity to study early bioturbators. 
These trace fossils are large enough and have sufficient compositional contrast (relative to the matrix) for in situ analysis via X-ray computed tomography (CT) and microcomputed tomography (microCT). Each analytical method has its own advantages and disadvantages. CT scans can image larger specimens, but cannot adequately resolve small features of interest. MicroCT scans can achieve higher resolution, but can only be used with small samples and may involve more post-processing than CT scans. As demonstrated in this study, X-ray CT and microCT in combination with other 3D imaging techniques and resources have the potential to resolve the 3D morphology of Ediacaran trace fossils. A new Volumetric Bioturbation Intensity (VBI) is also proposed, which quantifies whole rock bioturbation using 3D analysis of subsurface traces. Combined with the ability to examine trace fossils in situ, the VBI can enhance our view of ancient ecologies and life's enduring relationship with sediments. }, } |
2017 | Journal | Midas Meijs, Ajay Patel, Sil C. Van De Leemput, Mathias Prokop, Ewoud J. Van Dijk, Frank Erik De Leeuw, Frederick J.A. Meijer, Bram Van Ginneken, Rashindra Manniesing (2017). Robust Segmentation of the Full Cerebral Vasculature in 4D CT of Suspected Stroke Patients. Scientific Reports, 7(1), pp. NA (link) (bib) x @article{Meijs2017, year = { 2017 }, volume = { 7 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85034432920{\&}doi=10.1038{\%}2Fs41598-017-15617-w{\&}partnerID=40{\&}md5=d67c87465788c1f4c70cc759dd0a0625 }, type = { Journal Article }, title = { Robust Segmentation of the Full Cerebral Vasculature in 4D CT of Suspected Stroke Patients }, number = { 1 }, journal = { Scientific Reports }, issn = { 20452322 }, doi = { 10.1038/s41598-017-15617-w }, author = { Meijs and Patel and {Van De Leemput} and Prokop and {Van Dijk} and {De Leeuw} and Meijer and {Van Ginneken} and Manniesing }, abstract = { A robust method is presented for the segmentation of the full cerebral vasculature in 4-dimensional (4D) computed tomography (CT). The method consists of candidate vessel selection, feature extraction, random forest classification and postprocessing. Image features include among others the weighted temporal variance image and parameters, including entropy, of an intensity histogram in a local region at different scales. These histogram parameters revealed to be a strong feature in the detection of vessels regardless of shape and size. The method was trained and tested on a large database of 264 patients with suspicion of acute ischemia who underwent 4D CT in our hospital in the period January 2014 to December 2015. Five subvolumes representing different regions of the cerebral vasculature were annotated in each image in the training set by medical assistants. The evaluation was done on 242 patients. A total of 16 ({\textless}8{\%}) patients showed severe under or over segmentation and were reported as failures. 
One out of five subvolumes was randomly annotated in 159 patients and was used for quantitative evaluation. Quantitative evaluation showed a Dice coefficient of 0.91 ± 0.07 and a modified Hausdorff distance of 0.23 ± 0.22 mm. Therefore, robust vessel segmentation in 4D CT is feasible with good accuracy. }, } |
2017 | Journal | A. Murat Maga, Nicholas J. Tustison, Brian B. Avants (2017). A population level atlas of Mus musculus craniofacial skeleton and automated image-based shape analysis. Journal of Anatomy, 231(3), pp. 433–443. (link) (bib) x @article{Maga2017, year = { 2017 }, volume = { 231 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A population level atlas of Mus musculus craniofacial skeleton and automated image-based shape analysis }, pages = { 433--443 }, number = { 3 }, keywords = { geometrics morphometrics,image processing,image-based shape analysis,landmarking,microCT,segmentation }, journal = { Journal of Anatomy }, issn = { 14697580 }, doi = { 10.1111/joa.12645 }, author = { Maga and Tustison and Avants }, abstract = { Laboratory mice are staples for evo/devo and genetics studies. Inbred strains provide a uniform genetic background to manipulate and understand gene–environment interactions, while their crosses have been instrumental in studies of genetic architecture, integration and modularity, and mapping of complex biological traits. Recently, there have been multiple large-scale studies of laboratory mice to further our understanding of the developmental basis, evolution, and genetic control of shape variation in the craniofacial skeleton (i.e. skull and mandible). These experiments typically use micro-computed tomography (micro-CT) to capture the craniofacial phenotype in 3D and rely on manually annotated anatomical landmarks to conduct statistical shape analysis. Although the common choice for imaging modality and phenotyping provides the potential for collaborative research for even larger studies with more statistical power, the investigator (or lab-specific) nature of the data collection hampers these efforts. Investigators are rightly concerned that subtle differences in how anatomical landmarks were recorded will create systematic bias between studies that will eventually influence scientific findings. 
Even if researchers are willing to repeat landmark annotation on a combined dataset, different lab practices and software choices may create obstacles for standardization beyond the underlying imaging data. Here, we propose a freely available analysis system that could assist in the standardization of micro-CT studies in the mouse. Our proposal uses best practices developed in biomedical imaging and takes advantage of existing open-source software and imaging formats. Our first contribution is the creation of a synthetic template for the adult mouse craniofacial skeleton from 25 inbred strains and five F1 crosses that are widely used in biological research. The template contains a fully segmented cranium, left and right hemi-mandibles, endocranial space, and the first few cervical vertebrae. We have been using this template in our lab to segment and isolate cranial structures in an automated fashion from a mixed population of mice, including craniofacial mutants, aged 4–12.5 weeks. As a secondary contribution, we demonstrate an application of nearly automated shape analysis, using symmetric diffeomorphic image registration. This approach, which we call diGPA, closely approximates the popular generalized Procrustes analysis (GPA) but negates the collection of anatomical landmarks. We achieve our goals by using the open-source advanced normalization tools (ANT) image quantification library, as well as its associated R library (ANTsR) for statistical image analysis. Finally, we make a plea to investigators to commit to using open imaging standards and software in their labs to the extent possible to increase the potential for data exchange and improve the reproducibility of findings. Future work will incorporate more anatomical detail (such as individual cranial bones, turbinals, dentition, middle ear ossicles) and more diversity into the template. }, } |
2017 | Journal | Ka Hei Lok, Lin Shi, Xianlun Zhu, Defeng Wang (2017). Fast and robust brain tumor segmentation using level set method with multiple image information. Journal of X-ray science and technology, 25(2), pp. 301–312. (link) (bib) x @article{Lok2017, year = { 2017 }, volume = { 25 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Fast and robust brain tumor segmentation using level set method with multiple image information }, pages = { 301--312 }, number = { 2 }, keywords = { Brain tumor segmentation,Selective Binary Gaussian Filtering Regularizing L,Singed Pressure Force (SPF),evaluation of tumor segmentation accuracy }, journal = { Journal of X-ray science and technology }, issn = { 10959114 }, doi = { 10.3233/XST-17261 }, author = { Lok and Shi and Zhu and Wang }, abstract = { BACKGROUND: Brain tumor segmentation is a challenging task for its variation in intensity. The phenomenon is caused by the inhomogeneous content of tumor tissue and the choice of imaging modality. In 2010 Zhang developed the Selective Binary Gaussian Filtering Regularizing Level Set (SBGFRLS) model that combined the merits of edge-based and region-based segmentation. OBJECTIVE: To improve the SBGFRLS method by modifying the singed pressure force (SPF) term with multiple image information and demonstrate effectiveness of proposed method on clinical images. METHODS: In original SBGFRLS model, the contour evolution direction mainly depends on the SPF. By introducing a directional term in SPF, the metric could control the evolution direction. The SPF is altered by statistic values enclosed by the contour. This concept can be extended to jointly incorporate multiple image information. The new SPF term is expected to bring a solution for blur edge problem in brain tumor segmentation. The proposed method is validated with clinical images including pre- and post-contrast magnetic resonance images. 
The accuracy and robustness are compared with sensitivity, specificity, DICE similarity coefficient and Jaccard similarity index. RESULTS: Experimental results show improvement, in particular the increase of sensitivity at the same specificity, in segmenting all types of tumors except for the diffused tumor. CONCLUSION: The novel brain tumor segmentation method is clinical-oriented with fast, robust and accurate implementation and a minimal user interaction. The method effectively segmented homogeneously enhanced, non-enhanced, heterogeneously-enhanced, and ring-enhanced tumor under MR imaging. Though the method is limited by identifying edema and diffuse tumor, several possible solutions are suggested to turn the curve evolution into a fully functional clinical diagnosis tool. }, } |
2017 | Journal | Muhan Liu, Hongbo Guo, Hongbo Liu, Zeyu Zhang, Chongwei Chi, Hui Hui, Di Dong, Zhenhua Hu, Jie Tian (2017). In vivo pentamodal tomographic imaging for small animals. Biomedical Optics Express, 8(3), pp. 1356. (link) (bib) x @article{Liu2017, year = { 2017 }, volume = { 8 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { In vivo pentamodal tomographic imaging for small animals }, pages = { 1356 }, number = { 3 }, journal = { Biomedical Optics Express }, issn = { 2156-7085 }, doi = { 10.1364/boe.8.001356 }, author = { Liu and Guo and Liu and Zhang and Chi and Hui and Dong and Hu and Tian }, abstract = { {\textcopyright} 2017 Optical Society of America. Multimodality molecular imaging emerges as a powerful strategy for correlating multimodal information. We developed a pentamodal imaging system which can perform positron emission tomography, bioluminescence tomography, fluorescence molecular tomography, Cerenkov luminescence tomography and X-ray computed tomography successively. Performance of sub-systems corresponding to different modalities were characterized. In vivo multimodal imaging of an orthotopic hepatocellular carcinoma xenograft mouse model was performed, and acquired multimodal images were fused. The feasibility of pentamodal tomographic imaging system was successfully validated with the imaging application on the mouse model. The ability of integrating anatomical, metabolic, and pharmacokinetic information promises applications of multimodality molecular imaging in precise medicine. }, } |
2017 | Journal | J. S. Lin, D. T. Fuentes, A. Chandler, S. S. Prabhu, J. S. Weinberg, V. Baladandayuthapani, J. D. Hazle, D. Schellingerhout (2017). Performance assessment for brain MR imaging registration methods. American Journal of Neuroradiology, 38(5), pp. 973–980. (link) (bib) x @article{Lin2017, year = { 2017 }, volume = { 38 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Performance assessment for brain MR imaging registration methods }, pages = { 973--980 }, number = { 5 }, journal = { American Journal of Neuroradiology }, issn = { 1936959X }, doi = { 10.3174/ajnr.A5122 }, author = { Lin and Fuentes and Chandler and Prabhu and Weinberg and Baladandayuthapani and Hazle and Schellingerhout }, abstract = { BACKGROUND AND PURPOSE: Clinical brain MR imaging registration algorithms are often made available by commercial vendors without figures of merit. The purpose of this study was to suggest a rational performance comparison methodology for these products. MATERIALS AND METHODS: Twenty patients were imaged on clinical 3T scanners by using 4 sequences: T2-weighted, FLAIR, susceptibility-weighted angiography, and T1 postcontrast. Fiducial landmark sites (n = 1175) were specified throughout these image volumes to define identical anatomic locations across sequences. Multiple registration algorithms were applied by using the T2 sequence as a fixed reference. Euclidean error was calculated before and after each registration and compared with a criterion standard landmark registration. The Euclidean effectiveness ratio is the fraction of Euclidean error remaining after registration, and the statistical effectiveness ratio is similar, but accounts for dispersion and noise. RESULTS: Before registration, error values for FLAIR susceptibility-weighted angiography, and T1 postcontrast were 2.07±0.55 mm, 2.63±0.62 mm, and 3.65±2.00 mm, respectively. 
Postregistration, the best error values for FLAIR, susceptibility-weighted angiography, and T1 postcontrast were 1.55±0.46 mm, 1.34±0.23 mm, and 1.06±0.16 mm, with Euclidean effectiveness ratio values of 0.493, 0.181, and 0.096 and statistical effectiveness ratio values of 0.573, 0.352, and 0.929 for rigid mutual information, affine mutual information, and a commercial GE registration, respectively. CONCLUSIONS: We demonstrate a method for comparing the performance of registration algorithms and suggest the Euclidean error, Euclidean effectiveness ratio, and statistical effectiveness ratio as performance metrics for clinical registration algorithms. These figures of merit allow registration algorithms to be rationally compared. }, } |
2017 | Journal | Chany Lee, Young Jin Jung, Sang Jun Lee, Chang Hwan Im (2017). COMETS2: An advanced MATLAB toolbox for the numerical analysis of electric fields generated by transcranial direct current stimulation. Journal of Neuroscience Methods, 277, pp. 56–62. (link) (bib) x @article{Lee2017, year = { 2017 }, volume = { 277 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { COMETS2: An advanced MATLAB toolbox for the numerical analysis of electric fields generated by transcranial direct current stimulation }, pages = { 56--62 }, keywords = { Electrostatic field,Finite element method (FEM),MATLAB toolbox,Neuromodulation,Transcranial direct current stimulation (tDCS) }, journal = { Journal of Neuroscience Methods }, issn = { 1872678X }, doi = { 10.1016/j.jneumeth.2016.12.008 }, author = { Lee and Jung and Lee and Im }, abstract = { Background Since there is no way to measure electric current generated by transcranial direct current stimulation (tDCS) inside the human head through in vivo experiments, numerical analysis based on the finite element method has been widely used to estimate the electric field inside the head. In 2013, we released a MATLAB toolbox named COMETS, which has been used by a number of groups and has helped researchers to gain insight into the electric field distribution during stimulation. The aim of this study was to develop an advanced MATLAB toolbox, named COMETS2, for the numerical analysis of the electric field generated by tDCS. New method COMETS2 can generate any sizes of rectangular pad electrodes on any positions on the scalp surface. To reduce the large computational burden when repeatedly testing multiple electrode locations and sizes, a new technique to decompose the global stiffness matrix was proposed. Results As examples of potential applications, we observed the effects of sizes and displacements of electrodes on the results of electric field analysis. 
The proposed mesh decomposition method significantly enhanced the overall computational efficiency. Comparison with existing methods We implemented an automatic electrode modeler for the first time, and proposed a new technique to enhance the computational efficiency. Conclusions In this paper, an efficient toolbox for tDCS analysis is introduced (freely available at http://www.cometstool.com). It is expected that COMETS2 will be a useful toolbox for researchers who want to benefit from the numerical analysis of electric fields generated by tDCS. }, } |
2017 | Journal | Yulia Lakhman, Harini Veeraraghavan, Joshua Chaim, Diana Feier, Debra A. Goldman, Chaya S. Moskowitz, Stephanie Nougaret, Ramon E. Sosa, Hebert Alberto Vargas, Robert A. Soslow, Nadeem R. Abu-Rustum, Hedvig Hricak, Evis Sala (2017). Differentiation of Uterine Leiomyosarcoma from Atypical Leiomyoma: Diagnostic Accuracy of Qualitative MR Imaging Features and Feasibility of Texture Analysis. European Radiology, 27(7), pp. 2903–2915. (link) (bib) x @article{Lakhman2017, year = { 2017 }, volume = { 27 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Differentiation of Uterine Leiomyosarcoma from Atypical Leiomyoma: Diagnostic Accuracy of Qualitative MR Imaging Features and Feasibility of Texture Analysis }, pages = { 2903--2915 }, number = { 7 }, keywords = { Atypical Uterine Leiomyoma,Magnetic Resonance Imaging,Texture Analysis,Uterine Leiomyoma,Uterine Leiomyosarcoma }, journal = { European Radiology }, issn = { 14321084 }, doi = { 10.1007/s00330-016-4623-9 }, author = { Lakhman and Veeraraghavan and Chaim and Feier and Goldman and Moskowitz and Nougaret and Sosa and Vargas and Soslow and Abu-Rustum and Hricak and Sala }, abstract = { Purpose: To investigate whether qualitative magnetic resonance (MR) features can distinguish leiomyosarcoma (LMS) from atypical leiomyoma (ALM) and assess the feasibility of texture analysis (TA). Methods: This retrospective study included 41 women (ALM = 22, LMS = 19) imaged with MRI prior to surgery. Two readers (R1, R2) evaluated each lesion for qualitative MR features. Associations between MR features and LMS were evaluated with Fisher's exact test. Accuracy measures were calculated for the four most significant features. TA was performed for 24 patients (ALM = 14, LMS = 10) with uniform imaging following lesion segmentation on axial T2-weighted images. 
Texture features were pre-selected using Wilcoxon signed-rank test with Bonferroni correction and analyzed with unsupervised clustering to separate LMS from ALM. Results: Four qualitative MR features most strongly associated with LMS were nodular borders, haemorrhage, “T2 dark” area(s), and central unenhanced area(s) (p ≤ 0.0001 each feature/reader). The highest sensitivity [1.00 (95{\%}CI:0.82-1.00)/0.95 (95{\%}CI: 0.74-1.00)] and specificity [0.95 (95{\%}CI:0.77-1.00)/1.00 (95{\%}CI:0.85-1.00)] were achieved for R1/R2, respectively, when a lesion had ≥3 of these four features. Sixteen texture features differed significantly between LMS and ALM (p-values: {\textless}0.001-0.036). Unsupervised clustering achieved accuracy of 0.75 (sensitivity: 0.70; specificity: 0.79). Conclusions: Combination of ≥3 qualitative MR features accurately distinguished LMS from ALM. TA was feasible. Key Points: • Four qualitative MR features demonstrated the strongest statistical association with LMS. • Combination of ≥3 these features could accurately differentiate LMS from ALM. • Texture analysis was a feasible semi-automated approach for lesion categorization. }, } |
2017 | Journal | Annamária Kiss, Typhaine Moreau, Vincent Mirabet, Cerasela Iliana Calugaru, Arezki Boudaoud, Pradeep Das (2017). Segmentation of 3D images of plant tissues at multiple scales using the level set method. Plant Methods, 13(1), pp. 11. (link) (bib) x @article{Kiss2017, year = { 2017 }, volume = { 13 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Segmentation of 3D images of plant tissues at multiple scales using the level set method }, pages = { 11 }, number = { 1 }, keywords = { 3D,Cell,Cellwall,Confocal image,L1,Level set method,Nucleus,Segmentation,Watershed }, journal = { Plant Methods }, issn = { 17464811 }, doi = { 10.1186/s13007-017-0264-5 }, author = { Kiss and Moreau and Mirabet and Calugaru and Boudaoud and Das }, abstract = { Background: Developmental biology has made great strides in recent years towards the quantification of cellular properties during development. This requires tissues to be imaged and segmented to generate computerised versions that can be easily analysed. In this context, one of the principal technical challenges remains the faithful detection of cellular contours, principally due to variations in image intensity throughout the tissue. Watershed segmentation methods are especially vulnerable to these variations, generating multiple errors due notably to the incorrect detection of the outer surface of the tissue. Results: We use the level set method (LSM) to improve the accuracy of the watershed segmentation in different ways. First, we detect the outer surface of the tissue, reducing the impact of low and variable contrast at the surface during imaging. Second, we demonstrate a new edge function for a level set, based on second order derivatives of the image, to segment individual cells. Finally, we also show that the LSM can be used to segment nuclei within the tissue. 
Conclusion: The watershed segmentation of the outer cell layer is demonstrably improved when coupled with the LSM-based surface detection step. The tool can also be used to improve watershed segmentation at cell-scale, as well as to segment nuclei within a tissue. The improved segmentation increases the quality of analysis, and the surface detected by our algorithm may be used to calculate local curvature or adapted for other uses, such as mathematical simulations. }, } |
2017 | Journal | Sanghoon Jun, Namkug Kim, Joon Beom Seo, Young Kyung Lee, David A. Lynch (2017). An Ensemble Method for Classifying Regional Disease Patterns of Diffuse Interstitial Lung Disease Using HRCT Images from Different Vendors. Journal of Digital Imaging, 30(6), pp. 761–771. (link) (bib) x @article{Jun2017, year = { 2017 }, volume = { 30 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { An Ensemble Method for Classifying Regional Disease Patterns of Diffuse Interstitial Lung Disease Using HRCT Images from Different Vendors }, pages = { 761--771 }, number = { 6 }, keywords = { Ensemble learning,Inter-scanner variation,Interstitial lung disease (ILD),Multi-center trial,Support vector machine (SVM) }, journal = { Journal of Digital Imaging }, issn = { 1618727X }, doi = { 10.1007/s10278-017-9957-6 }, author = { Jun and Kim and Seo and Lee and Lynch }, abstract = { We propose the use of ensemble classifiers to overcome inter-scanner variations in the differentiation of regional disease patterns in high-resolution computed tomography (HRCT) images of diffuse interstitial lung disease patients obtained from different scanners. A total of 600 rectangular 20 × 20-pixel regions of interest (ROIs) on HRCT images obtained from two different scanners (GE and Siemens) and the whole lung area of 92 HRCT images were classified as one of six regional pulmonary disease patterns by two expert radiologists. Textual and shape features were extracted from each ROI and the whole lung parenchyma. For automatic classification, individual and ensemble classifiers were trained and tested with the ROI dataset. We designed the following three experimental sets: an intra-scanner study in which the training and test sets were from the same scanner, an integrated scanner study in which the data from the two scanners were merged, and an inter-scanner study in which the training and test sets were acquired from different scanners. 
In the ROI-based classification, the ensemble classifiers showed better (p {\textless} 0.001) accuracy (89.73{\%}, SD = 0.43) than the individual classifiers (88.38{\%}, SD = 0.31) in the integrated scanner test. The ensemble classifiers also showed partial improvements in the intra- and inter-scanner tests. In the whole lung classification experiment, the quantification accuracies of the ensemble classifiers with integrated training (49.57{\%}) were higher (p {\textless} 0.001) than the individual classifiers (48.19{\%}). Furthermore, the ensemble classifiers also showed better performance in both the intra- and inter-scanner experiments. We concluded that the ensemble classifiers provide better performance when using integrated scanner images. }, } |
2017 | Journal | Roman Jakubicek, Jiri Chmelik, Jiri Jan (2017). Vertebrae Segmentation in 3D CT Data: A Review of Methods and Evaluation Approaches. Current Medical Imaging Reviews, 14(6), pp. 853–866. (link) (bib) x @article{Jakubicek2018, year = { 2017 }, volume = { 14 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Vertebrae Segmentation in 3D CT Data: A Review of Methods and Evaluation Approaches }, pages = { 853--866 }, number = { 6 }, journal = { Current Medical Imaging Reviews }, issn = { 15734056 }, doi = { 10.2174/1573405613666170622120228 }, author = { Jakubicek and Chmelik and Jan }, abstract = { {\textcopyright} 2018 Bentham Science Publishers. Background: Robust and accurate segmentation of the spine subdivided into individual vertebrae is necessary for subsequent diagnosis of illnesses related to the spine, particularly those requiring detection and classification of bone lesions. Based on correct vertebra segmentation, the current status of a disease under treatment-as well as its progress-can be determined and followed. Discussion: The problem is complicated by frequent heavy deformations of both the spine axis and individual vertebrae due to illness, so that some vertebrae may differ substantially from expected shapes or even be missing. This overview summarises and discusses so far published methods for spine and vertebrae segmentation in 3D CT thoracic data. Conclusion: It suggests a classification of these algorithms based on the used approaches, complexity of algorithms, as well as on achieved efficiencies. }, } |
2017 | Journal | Olubisi Ige, Stephanie Barnett, John Chiverton, Ayman Nassif, John Williams (2017). Effects of steel fibre-aggregate interaction on mechanical behaviour of steel fibre reinforced concrete. Advances in Applied Ceramics, 116(4), pp. 193–198. (link) (bib) x @article{Ige2017, year = { 2017 }, volume = { 116 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Effects of steel fibre-aggregate interaction on mechanical behaviour of steel fibre reinforced concrete }, pages = { 193--198 }, number = { 4 }, keywords = { Concrete,X-ray CT,flexural properties,steel fibres }, journal = { Advances in Applied Ceramics }, issn = { 17436761 }, doi = { 10.1080/17436753.2017.1284389 }, author = { Ige and Barnett and Chiverton and Nassif and Williams }, abstract = { This work investigated the effects of fibre type, dosage and maximum aggregate size on the mechanical behaviour of concrete reinforced with steel fibres. Hooked-end steel fibres with 50 and 60 mm length and aspect ratios (length/diameter) of 45, 65 and 80 were used with maximum sizes of coarse aggregate of 10 and 20 mm. The same mix proportions of concrete were used throughout the investigation. Flexural testing of 600 mm square panels was performed. Subsequently, cores were taken from these panels and X-ray computed tomography was used to analyse the positioning of fibres in hardened concrete. The experimental results show that the performance of steel fibre-reinforced concrete improved drastically when compared to plain concrete without fibres. Longer, thinner fibres and smaller aggregates were noted to give the best results. }, } |
2017 | Journal | Ludovic Humbert, Yves Martelli, Roger Fonolla, Martin Steghofer, Silvana DI Gregorio, Jorge Malouf, Jordi Romera, Luis Miguel Del Rio Barquero (2017). 3D-DXA: Assessing the Femoral Shape, the Trabecular Macrostructure and the Cortex in 3D from DXA images. IEEE Transactions on Medical Imaging, 36(1), pp. 27–39. (link) (bib) x @article{Humbert2017, year = { 2017 }, volume = { 36 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { 3D-DXA: Assessing the Femoral Shape, the Trabecular Macrostructure and the Cortex in 3D from DXA images }, pages = { 27--39 }, number = { 1 }, keywords = { Bone mineral density,DXA,cortical thickness,image registration,osteoporosis,proximal femur }, journal = { IEEE Transactions on Medical Imaging }, issn = { 1558254X }, doi = { 10.1109/TMI.2016.2593346 }, author = { Humbert and Martelli and Fonolla and Steghofer and {DI Gregorio} and Malouf and Romera and Barquero }, abstract = { The 3D distribution of the cortical and trabecular bone mass in the proximal femur is a critical component in determining fracture resistance that is not taken into account in clinical routine Dual-energy X-ray Absorptiometry (DXA) examination. In this paper, a statistical shape and appearance model together with a 3D-2D registration approach are used to model the femoral shape and bone density distribution in 3D from an anteroposterior DXA projection. A model-based algorithm is subsequently used to segment the cortex and build a 3D map of the cortical thickness and density. Measurements characterising the geometry and density distribution were computed for various regions of interest in both cortical and trabecular compartments. Models and measurements provided by the '3D-DXA' software algorithm were evaluated using a database of 157 study subjects, by comparing 3D-DXA analyses (using DXA scanners from three manufacturers) with measurements performed by Quantitative Computed Tomography (QCT). 
The mean point-to-surface distance between 3D-DXA and QCT femoral shapes was 0.93 mm. The mean absolute error between cortical thickness and density estimates measured by 3D-DXA and QCT was 0.33 mm and 72 mg/cm3. Correlation coefficients (R) between the 3D-DXA and QCT measurements were 0.86, 0.93, and 0.95 for the volumetric bone mineral density at the trabecular, cortical, and integral compartments respectively, and 0.91 for the mean cortical thickness. 3D-DXA provides a detailed analysis of the proximal femur, including a separate assessment of the cortical layer and trabecular macrostructure, which could potentially improve osteoporosis management while maintaining DXA as the standard routine modality. }, } |
2017 | Journal | Thomas Huber, Marina Herwerth, Esther Alberts, Jan S. Kirschke, Claus Zimmer, Ruediger Ilg (2017). Automated segmentation reveals silent radiographic progression in adult-onset vanishing white-matter disease. Neuroradiology Journal, 30(1), pp. 5–9. (link) (bib) x @article{Huber2017, year = { 2017 }, volume = { 30 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Automated segmentation reveals silent radiographic progression in adult-onset vanishing white-matter disease }, pages = { 5--9 }, number = { 1 }, keywords = { Extreme value distribution,automated segmentation,vanishing white-matter disease,white-matter hyperintensities }, journal = { Neuroradiology Journal }, issn = { 19714009 }, doi = { 10.1177/1971400916678222 }, author = { Huber and Herwerth and Alberts and Kirschke and Zimmer and Ilg }, abstract = { Adult-onset vanishing white-matter disease (VWM) is a rare autosomal recessive disease with neurological symptoms such as ataxia and paraparesis, showing extensive white-matter hyperintensities (WMH) on magnetic resonance (MR) imaging. Besides symptom-specific scores like the International Cooperative Ataxia Rating Scale (ICARS), there is no established tool to monitor disease progression. Because of extensive WMH, visual comparison of MR images is challenging. Here, we report the results of an automated method of segmentation to detect alterations in T2-weighted fluid-attenuated-inversion-recovery (FLAIR) sequences in a one-year follow-up study of a clinically stable patient with genetically diagnosed VWM. Signal alterations in MR imaging were quantified with a recently published WMH segmentation method by means of extreme value distribution (EVD). Our analysis revealed progressive FLAIR alterations of 5.84{\%} in the course of one year, whereas no significant WMH change could be detected in a stable multiple sclerosis (MS) control group. 
This result demonstrates that automated EVD-based segmentation allows a precise and rapid quantification of extensive FLAIR alterations such as those in VWM and might be a powerful tool for the clinical and scientific monitoring of degenerative white-matter diseases and potential therapeutic interventions. }, } |
2017 | Journal | Daniel Christopher Hoinkiss, David Andrew Porter (2017). Prospective motion correction in 2D multishot MRI using EPI navigators and multislice-to-volume image registration. Magnetic Resonance in Medicine, 78(6), pp. 2127–2135. (link) (bib) x @article{Hoinkiss2017, year = { 2017 }, volume = { 78 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Prospective motion correction in 2D multishot MRI using EPI navigators and multislice-to-volume image registration }, pages = { 2127--2135 }, number = { 6 }, keywords = { EPI navigator,RARE,motion artifacts,mutual information,prospective acquisition correction,real-time feedback }, journal = { Magnetic Resonance in Medicine }, issn = { 15222594 }, doi = { 10.1002/mrm.26951 }, author = { Hoinkiss and Porter }, abstract = { Purpose: Prospective motion correction reduces artifacts in MRI by correcting for subject motion in real time, but techniques are limited for multishot 2-dimensional (2D) sequences. This study addresses this limitation by using 2D echo-planar imaging (EPI) slice navigator acquisitions together with a multislice-to-volume image registration. Methods: The 2D-EPI navigators were integrated into 2D imaging sequences to allow a rapid, real-time motion correction based on the registration of three navigator slices to a reference volume. A dedicated slice-iteration scheme was used to limit mutual spin-saturation effects between navigator and image data. The method was evaluated using T2-weighted spin echo and multishot rapid acquisition with relaxation enhancement (RARE) sequences, and its motion-correction capabilities were compared with those of periodically rotated overlapping parallel lines with enhanced reconstruction (PROPELLER). Validation was performed in vivo using a well-defined motion protocol. 
Results: Data acquired during subject motion showed residual motion parameters within ±0.5 mm and ±0.5°, and demonstrated a substantial improvement in image quality compared with uncorrected scans. In a comparison to PROPELLER, the proposed technique preserved a higher level of anatomical detail in the presence of subject motion. Conclusions: EPI-navigator-based prospective motion correction using multislice-to-volume image registration can substantially reduce image artifacts, while minimizing spin-saturation effects. The method can be adapted for use in other 2D MRI sequences and promises to improve image quality in routine clinical examinations. Magn Reson Med 78:2127–2135, 2017. {\textcopyright} 2017 International Society for Magnetic Resonance in Medicine. }, } |
2017 | Journal | Matthias Peter Hilty, Jacqueline Pichler, Bulent Ergin, Urs Hefti, Tobias Michael Merz, Can Ince, Marco Maggiorini (2017). Assessment of endothelial cell function and physiological microcirculatory reserve by video microscopy using a topical acetylcholine and nitroglycerin challenge. Intensive Care Medicine Experimental, 5(1), pp. 13. (link) (bib) x @article{Hilty2017, year = { 2017 }, volume = { 5 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Assessment of endothelial cell function and physiological microcirculatory reserve by video microscopy using a topical acetylcholine and nitroglycerin challenge }, pages = { 13 }, number = { 1 }, journal = { Intensive Care Medicine Experimental }, issn = { 2197-425X }, doi = { 10.1186/s40635-017-0139-0 }, author = { Hilty and Pichler and Ergin and Hefti and Merz and Ince and Maggiorini }, abstract = { Assessment of the microcirculation is a promising target for the hemodynamic management of critically ill patients. However, just as the sole reliance on macrocirculatory parameters, single static parameters of the microcirculation may not represent a sufficient guide. Our hypothesis was that by serial topical application of acetylcholine (ACH) and nitroglycerin (NG), the sublingual microcirculation can be challenged to determine its endothelial cell-dependent and smooth muscle-dependent physiological reserve capacity. In 41 healthy subjects, sublingual capillary microscopy was performed before and after topical application of ACH and NG. Total vessel density (TVD) was assessed in parallel using manual computer-assisted image analysis as well as a fully automated analysis pathway utilizing a newly developed computer algorithm. Flow velocity was assessed using space-time diagrams of the venules as well as the algorithm-based calculation of an average perfused speed indicator (APSI). No change in all measured parameters was detected after sublingual topical application of ACH. 
Sublingual topical application of NG, however, led to an increase in TVD, space-time diagram-derived venular flow velocity and APSI. No difference was detected in heart rate, blood pressure, and cardiac output as measured by echocardiography, nor in plasma nitric oxide metabolite content before and after the topical application of ACH and NG. In healthy subjects, the sublingual microcirculatory physiological reserve can be assessed non-invasively by topical application of nitroglycerin without affecting systemic circulation. }, } |
2017 | Journal | C. Gustafsson, F. Nordström, E. Persson, J. Brynolfsson, L. E. Olsson (2017). Assessment of dosimetric impact of system specific geometric distortion in an MRI only based radiotherapy workflow for prostate. Physics in Medicine and Biology, 62(8), pp. 2976–2989. (link) (bib) x @article{Gustafsson2017, year = { 2017 }, volume = { 62 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Assessment of dosimetric impact of system specific geometric distortion in an MRI only based radiotherapy workflow for prostate }, pages = { 2976--2989 }, number = { 8 }, keywords = { MRI only,MRI radiotherapy,MRI treatment planning,QA,distortion,synthetic CT }, journal = { Physics in Medicine and Biology }, issn = { 13616560 }, doi = { 10.1088/1361-6560/aa5fa2 }, author = { Gustafsson and Nordstr{\"{o}}m and Persson and Brynolfsson and Olsson }, abstract = { Dosimetric errors in a magnetic resonance imaging (MRI) only radiotherapy workflow may be caused by system specific geometric distortion from MRI. The aim of this study was to evaluate the impact on planned dose distribution and delineated structures for prostate patients, originating from this distortion. A method was developed, in which computer tomography (CT) images were distorted using the MRI distortion field. The displacement map for an optimized MRI treatment planning sequence was measured using a dedicated phantom in a 3 T MRI system. To simulate the distortion aspects of a synthetic CT (electron density derived from MR images), the displacement map was applied to CT images, referred to as distorted CT images. A volumetric modulated arc prostate treatment plan was applied to the original CT and the distorted CT, creating a reference and a distorted CT dose distribution. By applying the inverse of the displacement map to the distorted CT dose distribution, a dose distribution in the same geometry as the original CT images was created. 
For 10 prostate cancer patients, the dose difference between the reference dose distribution and inverse distorted CT dose distribution was analyzed in isodose level bins. The mean magnitude of the geometric distortion was 1.97 mm for the radial distance of 200-250 mm from isocenter. The mean percentage dose differences for all isodose level bins were 0.02{\%} and the radiotherapy structure mean volume deviations were {\textless}0.2{\%}. The method developed can quantify the dosimetric effects of MRI system specific distortion in a prostate MRI only radiotherapy workflow, separated from dosimetric effects originating from synthetic CT generation. No clinically relevant dose difference or structure deformation was found when 3D distortion correction and high acquisition bandwidth was used. The method could be used for any MRI sequence together with any anatomy of interest. }, } |
2017 | Journal | Rebekah H. Griesenauer, Jared A. Weis, Lori R. Arlinghaus, Ingrid M. Meszoely, Michael I. Miga (2017). Breast tissue stiffness estimation for surgical guidance using gravity-induced excitation. Physics in Medicine and Biology, 62(12), pp. 4756–4776. (link) (bib) x @article{Griesenauer2017, year = { 2017 }, volume = { 62 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Breast tissue stiffness estimation for surgical guidance using gravity-induced excitation }, pages = { 4756--4776 }, number = { 12 }, keywords = { Mechanical properties,biomechanical model,breast cancer,elastography,lumpectomy,magnetic resonance imaging,registration }, journal = { Physics in Medicine and Biology }, issn = { 13616560 }, doi = { 10.1088/1361-6560/aa700a }, author = { Griesenauer and Weis and Arlinghaus and Meszoely and Miga }, abstract = { Tissue stiffness interrogation is fundamental in breast cancer diagnosis and treatment. Furthermore, biomechanical models for predicting breast deformations have been created for several breast cancer applications. Within these applications, constitutive mechanical properties must be defined and the accuracy of this estimation directly impacts the overall performance of the model. In this study, we present an image-derived computational framework to obtain quantitative, patient specific stiffness properties for application in image-guided breast cancer surgery and interventions. The method uses two MR acquisitions of the breast in different supine gravity-loaded configurations to fit mechanical properties to a biomechanical breast model. A reproducibility assessment of the method was performed in a test-retest study using healthy volunteers and was further characterized in simulation. In five human data sets, the within subject coefficient of variation ranged from 10.7{\%} to 27{\%} and the intraclass correlation coefficient ranged from 0.91-0.944 for assessment of fibroglandular and adipose tissue stiffness. 
In simulation, fibroglandular content and deformation magnitude were shown to have significant effects on the shape and convexity of the objective function defined by image similarity. These observations provide an important step forward in characterizing the use of nonrigid image registration methodologies in conjunction with biomechanical models to estimate tissue stiffness. In addition, the results suggest that stiffness estimation methods using gravity-induced excitation can reliably and feasibly be implemented in breast cancer surgery/intervention workflows. }, } |
2017 | Journal | Derek J. Gillies, Lori Gardi, Tharindu De Silva, Shuang Ren Zhao, Aaron Fenster (2017). Real-time registration of 3D to 2D ultrasound images for image-guided prostate biopsy:. Medical Physics, 44(9), pp. 4708–4723. (link) (bib) x @article{Gillies2017, year = { 2017 }, volume = { 44 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Real-time registration of 3D to 2D ultrasound images for image-guided prostate biopsy: }, pages = { 4708--4723 }, number = { 9 }, keywords = { 2D-3D transrectal ultrasound-guided prostate biops,prostate cancer,prostate motion compensation,real-time image registration }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1002/mp.12441 }, author = { Gillies and Gardi and {De Silva} and Zhao and Fenster }, abstract = { Purpose: During image-guided prostate biopsy, needles are targeted at tissues that are suspicious of cancer to obtain specimen for histological examination. Unfortunately, patient motion causes targeting errors when using an MR-transrectal ultrasound (TRUS) fusion approach to augment the conventional biopsy procedure. This study aims to develop an automatic motion correction algorithm approaching the frame rate of an ultrasound system to be used in fusion-based prostate biopsy systems. Two modes of operation have been investigated for the clinical implementation of the algorithm: motion compensation using a single user initiated correction performed prior to biopsy, and real-time continuous motion compensation performed automatically as a background process. Methods: Retrospective 2D and 3D TRUS patient images acquired prior to biopsy gun firing were registered using an intensity-based algorithm utilizing normalized cross-correlation and Powell's method for optimization. 2D and 3D images were downsampled and cropped to estimate the optimal amount of image information that would perform registrations quickly and accurately. 
The optimal search order during optimization was also analyzed to avoid local optima in the search space. Error in the algorithm was computed using target registration errors (TREs) from manually identified homologous fiducials in a clinical patient dataset. The algorithm was evaluated for real-time performance using the two different modes of clinical implementations by way of user initiated and continuous motion compensation methods on a tissue mimicking prostate phantom. Results: After implementation in a TRUS-guided system with an image downsampling factor of 4, the proposed approach resulted in a mean ± std TRE and computation time of 1.6 ± 0.6 mm and 57 ± 20 ms respectively. The user initiated mode performed registrations with in-plane, out-of-plane, and roll motions computation times of 108 ± 38 ms, 60 ± 23 ms, and 89 ± 27 ms, respectively, and corresponding registration errors of 0.4 ± 0.3 mm, 0.2 ± 0.4 mm, and 0.8 ± 0.5. The continuous method performed registration significantly faster (P {\textless} 0.05) than the user initiated method, with observed computation times of 35 ± 8 ms, 43 ± 16 ms, and 27 ± 5 ms for in-plane, out-of-plane, and roll motions, respectively, and corresponding registration errors of 0.2 ± 0.3 mm, 0.7 ± 0.4 mm, and 0.8 ± 1.0. Conclusions: The presented method encourages real-time implementation of motion compensation algorithms in prostate biopsy with clinically acceptable registration errors. Continuous motion compensation demonstrated registration accuracy with submillimeter and subdegree error, while performing {\textless} 50 ms computation times. Image registration technique approaching the frame rate of an ultrasound system offers a key advantage to be smoothly integrated to the clinical workflow. In addition, this technique could be used further for a variety of image-guided interventional procedures to treat and diagnose patients by improving targeting accuracy. }, } |
2017 | Journal | César Domínguez, Jónathan Heras, Vico Pascual (2017). IJ-OpenCV: Combining ImageJ and OpenCV for processing images in biomedicine. Computers in Biology and Medicine, 84, pp. 189–194. (link) (bib) x @article{Dominguez2017, year = { 2017 }, volume = { 84 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { IJ-OpenCV: Combining ImageJ and OpenCV for processing images in biomedicine }, pages = { 189--194 }, keywords = { Biomedicine,Computer vision,Image processing,ImageJ,Interoperability,Machine learning,OpenCV }, journal = { Computers in Biology and Medicine }, issn = { 18790534 }, doi = { 10.1016/j.compbiomed.2017.03.027 }, author = { Dom{\'{i}}nguez and Heras and Pascual }, abstract = { Background and Objective. The effective processing of biomedical images usually requires the interoperability of diverse software tools that have different aims but are complementary. The goal of this work is to develop a bridge to connect two of those tools: ImageJ, a program for image analysis in life sciences, and OpenCV, a computer vision and machine learning library. Methods. Based on a thorough analysis of ImageJ and OpenCV, we detected the features of these systems that could be enhanced, and developed a library to combine both tools, taking advantage of the strengths of each system. The library was implemented on top of the SciJava converter framework. We also provide a methodology to use this library. Results. We have developed the publicly available library IJ-OpenCV that can be employed to create applications combining features from both ImageJ and OpenCV. From the perspective of ImageJ developers, they can use IJ-OpenCV to easily create plugins that use any functionality provided by the OpenCV library and explore different alternatives. From the perspective of OpenCV developers, this library provides a link to the ImageJ graphical user interface and all its features to handle regions of interest. Conclusions. 
The IJ-OpenCV library bridges the gap between ImageJ and OpenCV, allowing the connection and the cooperation of these two systems. }, } |
2017 | Journal | Benjamin De Leener, Simon Lévy, Sara M. Dupont, Vladimir S. Fonov, Nikola Stikov, D. Louis Collins, Virginie Callot, Julien Cohen-Adad (2017). SCT: Spinal Cord Toolbox, an open-source software for processing spinal cord MRI data. NeuroImage, 145, pp. 24–43. (link) (bib) x @article{DeLeener2017, year = { 2017 }, volume = { 145 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { SCT: Spinal Cord Toolbox, an open-source software for processing spinal cord MRI data }, pages = { 24--43 }, keywords = { Atlas,MRI,Open-source,Software,Spinal cord,Template }, journal = { NeuroImage }, issn = { 10959572 }, doi = { 10.1016/j.neuroimage.2016.10.009 }, author = { {De Leener} and L{\'{e}}vy and Dupont and Fonov and Stikov and {Louis Collins} and Callot and Cohen-Adad }, abstract = { For the past 25 years, the field of neuroimaging has witnessed the development of several software packages for processing multi-parametric magnetic resonance imaging (mpMRI) to study the brain. These software packages are now routinely used by researchers and clinicians, and have contributed to important breakthroughs for the understanding of brain anatomy and function. However, no software package exists to process mpMRI data of the spinal cord. Despite the numerous clinical needs for such advanced mpMRI protocols (multiple sclerosis, spinal cord injury, cervical spondylotic myelopathy, etc.), researchers have been developing specific tools that, while necessary, do not provide an integrative framework that is compatible with most usages and that is capable of reaching the community at large. This hinders cross-validation and the possibility to perform multi-center studies. In this study we introduce the Spinal Cord Toolbox (SCT), a comprehensive software dedicated to the processing of spinal cord MRI data. 
SCT builds on previously-validated methods and includes state-of-the-art MRI templates and atlases of the spinal cord, algorithms to segment and register new data to the templates, and motion correction methods for diffusion and functional time series. SCT is tailored towards standardization and automation of the processing pipeline, versatility, modularity, and it follows guidelines of software development and distribution. Preliminary applications of SCT cover a variety of studies, from cross-sectional area measures in large databases of patients, to the precise quantification of mpMRI metrics in specific spinal pathways. We anticipate that SCT will bring together the spinal cord neuroimaging community by establishing standard templates and analysis procedures. }, } |
2017 | Journal | Julie Constanzo, Lise Wei, Huan Hsin Tseng, Issam El Naqa (2017). Radiomics in precision medicine for lung cancer. Translational Lung Cancer Research, 6(6), pp. 635–647. (link) (bib) x @article{Constanzo2017, year = { 2017 }, volume = { 6 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Radiomics in precision medicine for lung cancer }, pages = { 635--647 }, number = { 6 }, keywords = { Biomarkers,Lung cancer,Quantitative imaging,Radiomics }, journal = { Translational Lung Cancer Research }, issn = { 22264477 }, doi = { 10.21037/tlcr.2017.09.07 }, author = { Constanzo and Wei and Tseng and {El Naqa} }, abstract = { With the improvement of external radiotherapy delivery accuracy, such as intensity-modulated and stereotactic body radiation therapy, radiation oncology has recently entered in the era of precision medicine. Despite these precise irradiation modalities, lung cancers remain one of the most aggressive human cancers worldwide, possibly because of diverse genotypic alterations that drive and maintain lung tumorigenesis. It has been long recognized that imaging could aid in the diagnosis, tumor delineation, and monitoring of lung cancer. Moreover, accumulating evidence suggests that imaging information could be further used to tailor treatment type and intensity, as well as predict treatment outcomes in radiotherapy. However, these imaging tasks have been carried out either qualitatively or using simplistic metrics that doesn't take advantage of the full scale of imaging knowledge. Radiomics, which is a recent field of research that aims to provide a more quantitative representation of imaging information relating tumor phenotypes to clinical and genotypic endpoints by embedding extracted image features into predictive mathematical models. These predictive models can be a key component in the clinician decision making and treatment personalization. 
This review provides an overview of the radiomics application and its methodology for radiation oncology studies in lung cancer. }, } |
2017 | Journal | Delia Ciardo, Marianna Alessandra Gerardi, Sabrina Vigorito, Anna Morra, Veronica Dell'acqua, Federico Javier Diaz, Federica Cattani, Paolo Zaffino, Rosalinda Ricotti, Maria Francesca Spadea, Marco Riboldi, Roberto Orecchia, Guido Baroni, Maria Cristina Leonardi, Barbara Alicja Jereczek-Fossa (2017). Atlas-based segmentation in breast cancer radiotherapy: Evaluation of specific and generic-purpose atlases. Breast, 32, pp. 44–52. (link) (bib) x @article{Ciardo2017, year = { 2017 }, volume = { 32 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Atlas-based segmentation in breast cancer radiotherapy: Evaluation of specific and generic-purpose atlases }, pages = { 44--52 }, keywords = { Atlas-based segmentation,Automatic contouring,Breast cancer radiotherapy,STAPLE contours }, journal = { Breast }, issn = { 15323080 }, doi = { 10.1016/j.breast.2016.12.010 }, author = { Ciardo and Gerardi and Vigorito and Morra and Dell'acqua and Diaz and Cattani and Zaffino and Ricotti and Spadea and Riboldi and Orecchia and Baroni and Leonardi and Jereczek-Fossa }, abstract = { Objectives Atlas-based automatic segmentation (ABAS) addresses the challenges of accuracy and reliability in manual segmentation. We aim to evaluate the contribution of specific-purpose in ABAS of breast cancer (BC) patients with respect to generic-purpose libraries. Materials and methods One generic-purpose and 9 specific-purpose libraries, stratified according to type of surgery and size of thorax circumference, were obtained from the computed tomography of 200 BC patients. Keywords about contralateral breast volume and presence of breast expander/prostheses were recorded. ABAS was validated on 47 independent patients, considering manual segmentation from scratch as reference. Five ABAS datasets were obtained, testing single-ABAS and multi-ABAS with simultaneous truth and performance level estimation (STAPLE). 
Center of mass distance (CMD), average Hausdorff distance (AHD) and Dice similarity coefficient (DSC) between corresponding ABAS and manual structures were evaluated and statistically significant differences between different surgeries, structures and ABAS strategies were investigated. Results Statistically significant differences between patients who underwent different surgery were found, with superior results for conservative-surgery group, and between different structures were observed: ABAS of heart, lungs, kidneys and liver was satisfactory (median values: CMD{\textless}2 mm, DSC≥0.80, AHD{\textless}1.5 mm), whereas chest wall, breast and spinal cord obtained moderate performance (median values: 2 mm ≤ CMD{\textless}5 mm, 0.60 ≤ DSC{\textless}0.80, 1.5 mm ≤ AHD{\textless}4 mm) and esophagus, stomach, brachial plexus and supraclavicular nodes obtained poor performance (median CMD≥5 mm, DSC{\textless}0.60, AHD≥4 mm). The application of STAPLE algorithm generally yields higher performance and the use of keywords improves results for breast ABAS. Conclusion The homogeneity in the selection of atlases based on multiple anatomical and clinical features and the use of specific-purpose libraries can improve ABAS performance with respect to generic-purpose libraries. }, } |
2017 | Journal | Colleen Bailey, Bernard Siow, Eleftheria Panagiotaki, John H. Hipwell, Thomy Mertzanidou, Julie Owen, Patrycja Gazinska, Sarah E. Pinder, Daniel C. Alexander, David J. Hawkes (2017). Microstructural models for diffusion MRI in breast cancer and surrounding stroma: an ex vivo study. NMR in Biomedicine, 30(2), pp. 13. (link) (bib) x @article{Bailey2017, year = { 2017 }, volume = { 30 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Microstructural models for diffusion MRI in breast cancer and surrounding stroma: an ex vivo study }, pages = { 13 }, number = { 2 }, keywords = { DTI,MRI,anisotropy,breast cancer,diffusion,ex vivo,restriction }, journal = { NMR in Biomedicine }, issn = { 10991492 }, doi = { 10.1002/nbm.3679 }, author = { Bailey and Siow and Panagiotaki and Hipwell and Mertzanidou and Owen and Gazinska and Pinder and Alexander and Hawkes }, abstract = { The diffusion signal in breast tissue has primarily been modelled using apparent diffusion coefficient (ADC), intravoxel incoherent motion (IVIM) and diffusion tensor (DT) models, which may be too simplistic to describe the underlying tissue microstructure. Formalin-fixed breast cancer samples were scanned using a wide range of gradient strengths, durations, separations and orientations. A variety of one- and two-compartment models were tested to determine which best described the data. Models with restricted diffusion components and anisotropy were selected in most cancerous regions and there were no regions in which conventional ADC or DT models were selected. Maps of ADC generally related to cellularity on histology, but maps of parameters from more complex models suggest that both overall cell volume fraction and individual cell size can contribute to the diffusion signal, affecting the specificity of ADC to the tissue microstructure. 
The areas of coherence in diffusion anisotropy images were small, approximately 1 mm, but the orientation corresponded to stromal orientation patterns on histology. }, } |
2017 | Journal | Oscar Acosta, Eugenia Mylona, Mathieu Le Dain, Camille Voisin, Thibaut Lizee, Bastien Rigaud, Carolina Lafond, Khemara Gnep, Renaud de Crevoisier (2017). Multi-atlas-based segmentation of prostatic urethra from planning CT imaging to quantify dose distribution in prostate cancer radiotherapy. Radiotherapy and Oncology, 125(3), pp. 492–499. (link) (bib) x @article{Acosta2017, year = { 2017 }, volume = { 125 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Multi-atlas-based segmentation of prostatic urethra from planning CT imaging to quantify dose distribution in prostate cancer radiotherapy }, pages = { 492--499 }, number = { 3 }, keywords = { Atlas-based segmentation,Dose computation,Prostate cancer radiotherapy,Urethra segmentation,Urinary toxicity }, journal = { Radiotherapy and Oncology }, issn = { 18790887 }, doi = { 10.1016/j.radonc.2017.09.015 }, author = { Acosta and Mylona and {Le Dain} and Voisin and Lizee and Rigaud and Lafond and Gnep and Crevoisier }, abstract = { Background and purpose Segmentation of intra-prostatic urethra for dose assessment from planning CT may help explaining urinary toxicity in prostate cancer radiotherapy. This work sought to: i) propose an automatic method for urethra segmentation in CT, ii) compare it with previously proposed surrogate models and iii) quantify the dose received by the urethra in patients treated with IMRT. Materials and methods A weighted multi-atlas-based urethra segmentation method was devised from a training data set of 55 CT scans of patients receiving brachytherapy with visible urinary catheters. Leave-one-out cross validation was performed to quantify the error between the urethra segmentation and the catheter ground truth with two scores: the centerlines distance (CLD) and the percentage of centerline within a certain distance from the catheter (PWR). 
The segmentation method was then applied to a second test data set of 95 prostate cancer patients having received 78 Gy IMRT to quantify dose to the urethra. Results Mean CLD was 3.25 ± 1.2 mm for the whole urethra and 3.7 ± 1.7 mm, 2.52 ± 1.5 mm, and 3.01 ± 1.7 mm for the top, middle, and bottom thirds, respectively. On average, 53{\%} of the segmented centerlines were within a radius {\textless} 3.5 mm from the centerline ground truth and 83{\%} in a radius {\textless} 5 mm. The proposed method outperformed existing surrogate models. In IMRT, urethra DVH was significantly higher than prostate DVH from V74 Gy to V79 Gy. Conclusion A multi-atlas-based segmentation method was proposed enabling assessment of the dose within the prostatic urethra. }, } |
2017 | Journal | Solmaz Abbasi, Farshad Tajeripour (2017). Detection of brain tumor in 3D MRI images using local binary patterns and histogram orientation gradient. Neurocomputing, 219, pp. 526–535. (link) (bib) x @article{Abbasi2017, year = { 2017 }, volume = { 219 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Detection of brain tumor in 3D MRI images using local binary patterns and histogram orientation gradient }, pages = { 526--535 }, keywords = { Histogram orientation gradient,Local binary patterns,MRI images,Medical image processing,Tumor detection }, journal = { Neurocomputing }, issn = { 18728286 }, doi = { 10.1016/j.neucom.2016.09.051 }, author = { Abbasi and Tajeripour }, abstract = { Brain tumor pathology is one of the most common mortality issues considered as an essential priority for health care societies. Accurate diagnosis of the type of disorder is crucial to make a plan for remedy that can minimize the deadly results. The main purpose of segmentation and detection is to make distinction between different regions of the brain. Besides accuracy, these techniques should be implemented quickly. In this paper an automatic method for brain tumor detection in 3D images has been proposed. In the first step, the bias field correction and histogram matching are used for pre-processing of the images. In the next step, the region of interest is identified and separated from the background of the Flair image. Local binary pattern in three orthogonal planes (LBP-TOP) and histogram of orientation gradients (HOG-TOP) are used as the learning features. Since 3D images are used in this research we use the idea of in local binary pattern in three orthogonal planes in order to extend histogram orientation gradients for 3D images. The random forest is then used to segment tumorous regions. We evaluate the performance of our algorithm on glioma images from BRATS 2013. 
Our experimental results and analyses indicate that our proposed framework is superior in detecting brain tumors in comparison with other techniques. }, } |
2017 | Journal | Roman Grothausmann, Lars Knudsen, Matthias Ochs, Christian Mühlfeld (2017). Digital 3D reconstructions using histological serial sections of lung tissue including the alveolar capillary network. American Journal of Physiology - Lung Cellular and Molecular Physiology, 312(2), pp. L243–L257. (link) (bib) x @article{Grothausmann2017, year = { 2017 }, volume = { 312 }, url = { https://www.physiology.org/doi/10.1152/ajplung.00326.2016 }, title = { Digital 3D reconstructions using histological serial sections of lung tissue including the alveolar capillary network }, pages = { L243--L257 }, number = { 2 }, month = { feb }, keywords = { 3D reconstruction,Alveolar capillary network (ACN),Stacked histological slices,Virtual endoscopy }, journal = { American Journal of Physiology - Lung Cellular and Molecular Physiology }, issn = { 15221504 }, doi = { 10.1152/ajplung.00326.2016 }, author = { Grothausmann and Knudsen and Ochs and M{\"{u}}hlfeld }, abstract = { The alveolar capillary network (ACN) provides an enormously large surface area that is necessary for pulmonary gas exchange. Changes of the ACN during normal or pathological development or in pulmonary diseases are of great functional impact and warrant further analysis. Due to the complexity of the three-dimensional (3D) architecture of the ACN, 2D approaches are limited in providing a comprehensive impression of the characteristics of the normal ACN or the nature of its alterations. Stereological methods offer a quantitative way to assess the ACN in 3D in terms of capillary volume, surface area, or number but lack a 3D visualization to interpret the data. Hence, the necessity to visualize the ACN in 3D and to correlate this with data from the same set of data arises. Such an approach requires a large sample volume combined with a high resolution. 
Here, we present a technically simple and cost-efficient approach to create 3D representations of lung tissue ranging from bronchioles over alveolar ducts and alveoli up to the ACN from more than 1 mm sample extent to a resolution of less than 1 $\mu$m. The method is based on automated image acquisition of serially sectioned epoxy resin-embedded lung tissue fixed by vascular perfusion and subsequent automated digital reconstruction and analysis of the 3D data. This efficient method may help to better understand mechanisms of vascular development and pathology of the lung. }, } |
2017 | Journal | Martin Klemm, Thomas Kirchner, Janek Gröhl, Dominique Cheray, Marco Nolden, Alexander Seitel, Harald Hoppe, Lena Maier-Hein, Alfred M. Franz (2017). MITK-OpenIGTLink for combining open-source toolkits in real-time computer-assisted interventions. International Journal of Computer Assisted Radiology and Surgery, 12(3), pp. 351–361. (bib) x @article{Klemm2017, year = { 2017 }, volume = { 12 }, title = { MITK-OpenIGTLink for combining open-source toolkits in real-time computer-assisted interventions }, publisher = { Springer Verlag }, pages = { 351--361 }, number = { 3 }, month = { mar }, keywords = { Computer-assisted interventions,Image-guided therapy,Interoperability,MITK,OpenIGTLink,Ultrasound }, journal = { International Journal of Computer Assisted Radiology and Surgery }, issn = { 18616429 }, doi = { 10.1007/s11548-016-1488-y }, author = { Klemm and Kirchner and Gr{\"{o}}hl and Cheray and Nolden and Seitel and Hoppe and Maier-Hein and Franz }, abstract = { Purpose: Due to rapid developments in the research areas of medical imaging, medical image processing and robotics, computer-assisted interventions (CAI) are becoming an integral part of modern patient care. From a software engineering point of view, these systems are highly complex and research can benefit greatly from reusing software components. This is supported by a number of open-source toolkits for medical imaging and CAI such as the medical imaging interaction toolkit (MITK), the public software library for ultrasound imaging research (PLUS) and 3D Slicer. An independent inter-toolkit communication such as the open image-guided therapy link (OpenIGTLink) can be used to combine the advantages of these toolkits and enable an easier realization of a clinical CAI workflow. Methods: MITK-OpenIGTLink is presented as a network interface within MITK that allows easy to use, asynchronous two-way messaging between MITK and clinical devices or other toolkits. 
Performance and interoperability tests with MITK-OpenIGTLink were carried out considering the whole CAI workflow from data acquisition over processing to visualization. Results: We present how MITK-OpenIGTLink can be applied in different usage scenarios. In performance tests, tracking data were transmitted with a frame rate of up to 1000 Hz and a latency of 2.81 ms. Transmission of images with typical ultrasound (US) and greyscale high-definition (HD) resolutions of 640 × 480 and 1920 × 1080 is possible at up to 512 and 128 Hz, respectively. Conclusion: With the integration of OpenIGTLink into MITK, this protocol is now supported by all established open-source toolkits in the field. This eases interoperability between MITK and toolkits such as PLUS or 3D Slicer and facilitates cross-toolkit research collaborations. MITK and its submodule MITK-OpenIGTLink are provided open source under a BSD-style licence (http://mitk.org). }, } |
2017 | Journal | Shibin Wu, Shaode Yu, Ling Zhuang, Xinhua Wei, Mark Sak, Neb Duric, Jiani Hu, Yaoqin Xie (2017). Automatic Segmentation of Ultrasound Tomography Image. BioMed Research International, 2017, pp. NA (link) (bib) x @article{RN909, year = { 2017 }, volume = { 2017 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85030642114{\&}doi=10.1155{\%}2F2017{\%}2F2059036{\&}partnerID=40{\&}md5=c0598599370d8f954d410519826406e2 }, type = { Journal Article }, title = { Automatic Segmentation of Ultrasound Tomography Image }, journal = { BioMed Research International }, issn = { 23146141 }, doi = { 10.1155/2017/2059036 }, author = { Wu and Yu and Zhuang and Wei and Sak and Duric and Hu and Xie }, abstract = { Ultrasound tomography (UST) image segmentation is fundamental in breast density estimation, medicine response analysis, and anatomical change quantification. Existing methods are time consuming and require massive manual interaction. To address these issues, an automatic algorithm based on GrabCut (AUGC) is proposed in this paper. The presented method designs automated GrabCut initialization for incomplete labeling and is sped up with multicore parallel programming. To verify performance, AUGC is applied to segment thirty-two in vivo UST volumetric images. The performance of AUGC is validated with breast overlapping metrics (Dice coefficient (D), Jaccard (J), and False positive (FP)) and time cost (TC). Furthermore, AUGC is compared to other methods, including Confidence Connected Region Growing (CCRG), watershed, and Active Contour based Curve Delineation (ACCD). Experimental results indicate that AUGC achieves the highest accuracy (D=0.9275 and J=0.8660 and FP=0.0077) and takes on average about 4 seconds to process a volumetric image. It was said that AUGC benefits large-scale studies by using UST images for breast cancer screening and pathological quantification. }, } |
2017 | Journal | Thomas Wollmann, Holger Erfle, Roland Eils, Karl Rohr, Manuel Gunkel (2017). Workflows for microscopy image analysis and cellular phenotyping. Journal of Biotechnology, 261, pp. 70–75. (link) (bib) x @article{RN796, year = { 2017 }, volume = { 261 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Workflows for microscopy image analysis and cellular phenotyping }, pages = { 70--75 }, keywords = { Galaxy,Image analysis,KNIME,Microscopy,Pipeline,Workflow,de.NBI }, journal = { Journal of Biotechnology }, issn = { 18734863 }, doi = { 10.1016/j.jbiotec.2017.07.019 }, author = { Wollmann and Erfle and Eils and Rohr and Gunkel }, abstract = { In large scale biological experiments, like high-throughput or high-content cellular screening, the amount and the complexity of images to be analyzed are steadily increasing. To handle and process these images, well defined image processing and analysis steps need to be performed by applying dedicated workflows. Multiple software tools have emerged with the aim to facilitate creation of such workflows by integrating existing methods, tools, and routines, and by adapting them to different applications and questions, as well as making them reusable and interchangeable. In this review, we describe workflow systems for the integration of microscopy image analysis techniques with focus on KNIME and Galaxy. }, } |
2017 | Journal | Joost J.M. Van Griethuysen, Andriy Fedorov, Chintan Parmar, Ahmed Hosny, Nicole Aucoin, Vivek Narayan, Regina G.H. Beets-Tan, Jean Christophe Fillion-Robin, Steve Pieper, Hugo J.W.L. Aerts (2017). Computational radiomics system to decode the radiographic phenotype. Cancer Research, 77(21), pp. e104–e107. (link) (bib) x @article{RN895, year = { 2017 }, volume = { 77 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85035021353{\&}doi=10.1158{\%}2F0008-5472.CAN-17-0339{\&}partnerID=40{\&}md5=e2bbcfe2a2e6a8ec00b02cf00ef7f2fa }, type = { Journal Article }, title = { Computational radiomics system to decode the radiographic phenotype }, pmid = { 29092951 }, pages = { e104--e107 }, number = { 21 }, journal = { Cancer Research }, issn = { 15387445 }, doi = { 10.1158/0008-5472.CAN-17-0339 }, author = { {Van Griethuysen} and Fedorov and Parmar and Hosny and Aucoin and Narayan and Beets-Tan and Fillion-Robin and Pieper and Aerts }, abstract = { Radiomics aims to quantify phenotypic characteristics on medical imaging through the use of automated algorithms. Radiomic artificial intelligence (AI) technology, either based on engineered hard-coded algorithms or deep learning methods, can be used to develop noninvasive imaging-based biomarkers. However, lack of standardized algorithm definitions and image processing severely hampers reproducibility and comparability of results. To address this issue, we developed PyRadiomics, a flexible open-source platform capable of extracting a large panel of engineered features from medical images. PyRadiomics is implemented in Python and can be used standalone or using 3D Slicer. Here, we discuss the workflow and architecture of PyRadiomics and demonstrate its application in characterizing lung lesions. Source code, documentation, and examples are publicly available at www. radiomics.io. 
With this platform, we aim to establish a reference standard for radiomic analyses, provide a tested and maintained resource, and to grow the community of radiomic developers addressing critical needs in cancer research. Cancer Res; 77(21); e104-7. }, } |
2017 | Journal | David Tilly, Agustinus J.A.J. Van De Schoot, Erik Grusell, Arjan Bel, Anders Ahnesjö (2017). Dose coverage calculation using a statistical shape model - Applied to cervical cancer radiotherapy. Physics in Medicine and Biology, 62(10), pp. 4140–4159. (link) (bib) x @article{RN799, year = { 2017 }, volume = { 62 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Dose coverage calculation using a statistical shape model - Applied to cervical cancer radiotherapy }, pages = { 4140--4159 }, number = { 10 }, keywords = { cervix,deformable image registration,probabilistic planning,radiotherapy,statistical shape model }, journal = { Physics in Medicine and Biology }, issn = { 13616560 }, doi = { 10.1088/1361-6560/aa64ef }, author = { Tilly and {Van De Schoot} and Grusell and Bel and Ahnesj{\"{o}} }, abstract = { A comprehensive methodology for treatment simulation and evaluation of dose coverage probabilities is presented where a population based statistical shape model (SSM) provide samples of fraction specific patient geometry deformations. The learning data consists of vector fields from deformable image registration of repeated imaging giving intra-patient deformations which are mapped to an average patient serving as a common frame of reference. The SSM is created by extracting the most dominating eigenmodes through principal component analysis of the deformations from all patients. The sampling of a deformation is thus reduced to sampling weights for enough of the most dominating eigenmodes that describe the deformations. For the cervical cancer patient datasets in this work, we found seven eigenmodes to be sufficient to capture 90{\%} of the variance in the deformations of the, and only three eigenmodes for stability in the simulated dose coverage probabilities. The normality assumption of the eigenmode weights was tested and found relevant for the 20 most dominating eigenmodes except for the first. 
Individualization of the SSM is demonstrated to be improved using two deformation samples from a new patient. The probabilistic evaluation provided additional information about the trade-offs compared to the conventional single dataset treatment planning. }, } |
2017 | Journal | Rosalia Tatano, Benjamin Berkels, Thomas M. Deserno (2017). Mesh-to-raster region-of-interest-based nonrigid registration of multimodal images. Journal of Medical Imaging, 4(04), pp. 1. (link) (bib) x @article{RN899, year = { 2017 }, volume = { 4 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85032914246{\&}doi=10.1117{\%}2F1.JMI.4.4.044002{\&}partnerID=40{\&}md5=c806ed79840aa4e089be7a24062199ec }, type = { Journal Article }, title = { Mesh-to-raster region-of-interest-based nonrigid registration of multimodal images }, pages = { 1 }, number = { 04 }, journal = { Journal of Medical Imaging }, issn = { 2329-4310 }, eprint = { 1703.01972 }, doi = { 10.1117/1.jmi.4.4.044002 }, author = { Tatano and Berkels and Deserno }, arxivid = { 1703.01972 }, archiveprefix = { arXiv }, abstract = { Region of interest (ROI) alignment in medical images plays a crucial role in diagnostics, procedure planning, treatment, and follow-up. Frequently, a model is represented as triangulated mesh while the patient data is provided from CAT scanners as pixel or voxel data. Previously, we presented a 2D method for curve-to-pixel registration. This paper contributes (i) a general mesh-to-raster (M2R) framework to register ROIs in multi-modal images; (ii) a 3D surface-to-voxel application, and (iii) a comprehensive quantitative evaluation in 2D using ground truth provided by the simultaneous truth and performance level estimation (STAPLE) method. The registration is formulated as a minimization problem where the objective consists of a data term, which involves the signed distance function of the ROI from the reference image, and a higher order elastic regularizer for the deformation. The evaluation is based on quantitative light-induced fluoroscopy (QLF) and digital photography (DP) of decalcified teeth. STAPLE is computed on 150 image pairs from 32 subjects, each showing one corresponding tooth in both modalities. 
The ROI in each image is manually marked by three experts (900 curves in total). In the QLF-DP setting, our approach significantly outperforms the mutual information-based registration algorithm implemented with the Insight Segmentation and Registration Toolkit (ITK) and Elastix. }, } |
2017 | Journal | M. Staskuniene, A. Kaceniauskas, V. Starikovicius, A. Maknickas, E. Stupak, R. Pacevic (2017). Parallel simulation of the aortic valve flows on the openstack cloud. Civil-Comp Proceedings, 111, pp. NA (link) (bib) x @article{RN910, year = { 2017 }, volume = { 111 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85020416209{\&}partnerID=40{\&}md5=c98d54cb21401e2192b0bf6051b51636 }, type = { Journal Article }, title = { Parallel simulation of the aortic valve flows on the openstack cloud }, keywords = { ANSYS Fluent,Cloud computing,Docker,OpenStack,Patient-specific aortic valve simulation,Performance analysis }, journal = { Civil-Comp Proceedings }, issn = { 17593433 }, doi = { 10.4203/ccp.111.16 }, author = { Staskuniene and Kaceniauskas and Starikovicius and Maknickas and Stupak and Pacevic }, abstract = { The paper presents efficient parallel computations of the patient-specific aortic valve on virtualized resources of the OpenStack cloud infrastructure. The main focus is on parallel performance analysis of the developed software service based on ANSYS Fluent platform, which runs on Docker containers of the private university cloud. The patient-specific aortic valve simulation, described by incompressible Navier-Stokes equations, is considered as a pilot application of the hosted cloud infrastructure. The parallel performance of the developed software service is assessed measuring parallel speedup of computations carried out on virtualized resources. The results obtained on Docker containers are compared with the performance measured by using the native hardware. }, } |
2017 | Journal | Chun Chien Shieh, Vincent Caillet, Michelle Dunbar, Paul J. Keall, Jeremy T. Booth, Nicholas Hardcastle, Carol Haddad, Thomas Eade, Ilana Feain (2017). A Bayesian approach for three-dimensional markerless tumor tracking using kV imaging during lung radiotherapy. Physics in Medicine and Biology, 62(8), pp. 3065–3080. (link) (bib) x @article{RN800, year = { 2017 }, volume = { 62 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A Bayesian approach for three-dimensional markerless tumor tracking using kV imaging during lung radiotherapy }, pages = { 3065--3080 }, number = { 8 }, keywords = { Bayesian,intrafraction imaging,lung cancer,markerless,tumor tracking }, journal = { Physics in Medicine and Biology }, issn = { 13616560 }, doi = { 10.1088/1361-6560/aa6393 }, author = { Shieh and Caillet and Dunbar and Keall and Booth and Hardcastle and Haddad and Eade and Feain }, abstract = { The ability to monitor tumor motion without implanted markers can potentially enable broad access to more accurate and precise lung radiotherapy. A major challenge is that kilovoltage (kV) imaging based methods are rarely able to continuously track the tumor due to the inferior tumor visibility on 2D kV images. Another challenge is the estimation of 3D tumor position based on only 2D imaging information. The aim of this work is to address both challenges by proposing a Bayesian approach for markerless tumor tracking for the first time. The proposed approach adopts the framework of the extended Kalman filter, which combines a prediction and measurement steps to make the optimal tumor position update. For each imaging frame, the tumor position is first predicted by a respiratory-correlated model. The 2D tumor position on the kV image is then measured by template matching. Finally, the prediction and 2D measurement are combined based on the 3D distribution of tumor positions in the past 10 s and the estimated uncertainty of template matching. 
To investigate the clinical feasibility of the proposed method, a total of 13 lung cancer patient datasets were used for retrospective validation, including 11 cone-beam CT scan pairs and two stereotactic ablative body radiotherapy cases. The ground truths for tumor motion were generated from the 3D trajectories of implanted markers or beacons. The mean, standard deviation, and 95th percentile of the 3D tracking error were found to range from 1.6-2.9 mm, 0.6-1.5 mm, and 2.6-5.8 mm, respectively. Markerless tumor tracking always resulted in smaller errors compared to the standard of care. The improvement was the most pronounced in the superior-inferior (SI) direction, with up to 9.5 mm reduction in the 95th-percentile SI error for patients with {\textgreater}10 mm 5th-to-95th percentile SI tumor motion. The percentage of errors with 3D magnitude {\textless}5 mm was 96.5{\%} for markerless tumor tracking and 84.1{\%} for the standard of care. The feasibility of 3D markerless tumor tracking has been demonstrated on realistic clinical scenarios for the first time. The clinical implementation of the proposed method will enable more accurate and precise lung radiotherapy using existing hardware and workflow. Future work is focused on the clinical and real-time implementation of this method. }, } |
2017 | Journal | Michael Schwenke, Joachim Georgii, Tobias Preusser (2017). Fast numerical simulation of focused ultrasound treatments during respiratory motion with discontinuous motion boundaries. IEEE Transactions on Biomedical Engineering, 64(7), pp. 1455–1468. (link) (bib) x @article{RN901, year = { 2017 }, volume = { 64 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85026725476{\&}doi=10.1109{\%}2FTBME.2016.2619741{\&}partnerID=40{\&}md5=b0cf2a924d4dd6d672f984b75f9bf20c }, type = { Journal Article }, title = { Fast numerical simulation of focused ultrasound treatments during respiratory motion with discontinuous motion boundaries }, pages = { 1455--1468 }, number = { 7 }, keywords = { Abdominal organs treatment,Biomedical computing,High-intensity focused ultrasound,Medical simulation,Medical treatment,Numerical models,Numerical simulation,Respiratory motion,Simulation during motion,Therapeutic ultrasound,Thermal Ablation,Thermal analysis }, journal = { IEEE Transactions on Biomedical Engineering }, issn = { 15582531 }, doi = { 10.1109/TBME.2016.2619741 }, author = { Schwenke and Georgii and Preusser }, abstract = { Objective: Focused ultrasound (FUS) is rapidly gaining clinical acceptance for several target tissues in the human body. Yet, treating liver targets is not clinically applied due to a high complexity of the procedure (noninvasiveness, target motion, complex anatomy, blood cooling effects, shielding by ribs, and limited image-based monitoring). To reduce the complexity, numerical FUS simulations can be utilized for both treatment planning and execution. These use-cases demand highly accurate and computationally efficient simulations. Methods: We propose a numerical method for the simulation of abdominal FUS treatments during respiratory motion of the organs and target. 
Especially, a novel approach is proposed to simulate the heating during motion by solving Pennes' bioheat equation in a computational reference space, i.e., the equation is mathematically transformed to the reference. The approach allows for motion discontinuities, e.g., the sliding of the liver along the abdominal wall. Results: Implementing the solver completely on the graphics processing unit and combining it with an atlas-based ultrasound simulation approach yields a simulation performance faster than real time (less than 50-s computing time for 100 s of treatment time) on a modern off-the-shelf laptop. The simulation method is incorporated into a treatment planning demonstration application that allows to simulate real patient cases including respiratory motion. Conclusion: The high performance of the presented simulation method opens the door to clinical applications. Significance: The methods bear the potential to enable the application of FUS for moving organs. }, } |
2017 | Journal | David Sarrut, Adrien Halty, Jean Noel Badel, Ludovic Ferrer, Manuel Bardi\`es (2017). Voxel-based multimodel fitting method for modeling time activity curves in SPECT images:. Medical Physics, 44(12), pp. 6280–6288. (link) (bib) x @article{RN892, year = { 2017 }, volume = { 44 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85037856250{\&}doi=10.1002{\%}2Fmp.12586{\&}partnerID=40{\&}md5=e15c5e0e2cbe8975290ae3d911a44b52 }, type = { Journal Article }, title = { Voxel-based multimodel fitting method for modeling time activity curves in SPECT images: }, pages = { 6280--6288 }, number = { 12 }, keywords = { SPECT,dosimetry,targeted radionuclide therapy,time activity curve,voxel-based }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1002/mp.12586 }, author = { Sarrut and Halty and Badel and Ferrer and Bardi{\`{e}}s }, abstract = { Purpose: Estimating the biodistribution and the pharmacokinetics from time-sequence SPECT images on a per-voxel basis is useful for studying activity nonuniformity or computing absorbed dose distributions by convolution of voxel kernels or Monte-Carlo radiation transport. Current approaches are either region-based, thus assuming uniform activity within the region, or voxel-based but using the same fitting model for all voxels. Methods: We propose a voxel-based multimodel fitting method (VoMM) that estimates a fitting function for each voxel by automatically selecting the most appropriate model among a predetermined set with Akaike criteria. This approach can be used to compute the time integrated activity (TIA) for all voxels in the image. To control fitting optimization that may fail due to excessive image noise, an approximated version based on trapezoid integration, named restricted method, is also studied. From this comparison, the number of failed fittings within images was estimated and analyzed. 
Numerical experiments were used to quantify uncertainties and feasibility was demonstrated with real patient data. Results: Regarding numerical experiments, root mean square errors of TIA obtained with VoMM were similar to those obtained with bi-exponential fitting functions, and were lower ({\textless} 5{\%} vs. {\textgreater} 10{\%}) than with single model approaches that consider the same fitting function for all voxels. Failure rates were lower with VoMM and restricted approaches than with single-model methods. On real clinical data, VoMM was able to fit 90{\%} of the voxels and led to less failed fits than single-model approaches. On regions of interest (ROI) analysis, the difference between ROI-based and voxel-based TIA estimations was low, less than 4{\%}. However, the computation of the mean residence time exhibited larger differences, up to 25{\%}. Conclusions: The proposed voxel-based multimodel fitting method, VoMM, is feasible on patient data. VoMM leads to organ-based TIA estimations similar to conventional ROI-based method. However, for pharmacokinetics analysis, studies of spatial heterogeneity or voxel-based absorbed dose assessment, VoMM could be used preferentially as it prevents model overfitting. }, } |
2017 | Journal | Bahram Marami, Seyed Sadegh Mohseni Salehi, Onur Afacan, Benoit Scherrer, Caitlin K. Rollins, Edward Yang, Judy A. Estroff, Simon K. Warfield, Ali Gholipour (2017). Temporal slice registration and robust diffusion-tensor reconstruction for improved fetal brain structural connectivity analysis. NeuroImage, 156, pp. 475–488. (link) (bib) x @article{RN900, year = { 2017 }, volume = { 156 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85018689711{\&}doi=10.1016{\%}2Fj.neuroimage.2017.04.033{\&}partnerID=40{\&}md5=2a9b47eff0a2637f2e0b729996723477 }, type = { Journal Article }, title = { Temporal slice registration and robust diffusion-tensor reconstruction for improved fetal brain structural connectivity analysis }, pmid = { 28433624 }, pages = { 475--488 }, keywords = { Connectome,Diffusion-weighted MRI,Fetal brain,Motion Correction,Registration }, journal = { NeuroImage }, issn = { 10959572 }, doi = { 10.1016/j.neuroimage.2017.04.033 }, author = { Marami and {Mohseni Salehi} and Afacan and Scherrer and Rollins and Yang and Estroff and Warfield and Gholipour }, abstract = { Diffusion weighted magnetic resonance imaging, or DWI, is one of the most promising tools for the analysis of neural microstructure and the structural connectome of the human brain. The application of DWI to map early development of the human connectome in-utero, however, is challenged by intermittent fetal and maternal motion that disrupts the spatial correspondence of data acquired in the relatively long DWI acquisitions. Fetuses move continuously during DWI scans. Reliable and accurate analysis of the fetal brain structural connectome requires careful compensation of motion effects and robust reconstruction to avoid introducing bias based on the degree of fetal motion. In this paper we introduce a novel robust algorithm to reconstruct in-vivo diffusion-tensor MRI (DTI) of the moving fetal brain and show its effect on structural connectivity analysis. 
The proposed algorithm involves multiple steps of image registration incorporating a dynamic registration-based motion tracking algorithm to restore the spatial correspondence of DWI data at the slice level and reconstruct DTI of the fetal brain in the standard (atlas) coordinate space. A weighted linear least squares approach is adapted to remove the effect of intra-slice motion and reconstruct DTI from motion-corrected data. The proposed algorithm was tested on data obtained from 21 healthy fetuses scanned in-utero at 22–38 weeks gestation. Significantly higher fractional anisotropy values in fiber-rich regions, and the analysis of whole-brain tractography and group structural connectivity, showed the efficacy of the proposed method compared to the analyses based on original data and previously proposed methods. The results of this study show that slice-level motion correction and robust reconstruction is necessary for reliable in-vivo structural connectivity analysis of the fetal brain. Connectivity analysis based on graph theoretic measures show high degree of modularity and clustering, and short average characteristic path lengths indicative of small-worldness property of the fetal brain network. These findings comply with previous findings in newborns and a recent study on fetuses. The proposed algorithm can provide valuable information from DWI of the fetal brain not available in the assessment of the original 2D slices and may be used to more reliably study the developing fetal brain connectome. }, } |
2017 | Journal | Yangming Li, Randall A. Bly, R. Alex Harbison, Ian M. Humphreys, Mark E. Whipple, Blake Hannaford, Kris S. Moe (2017). Anatomical Region Segmentation for Objective Surgical Skill Assessment with Operating Room Motion Data. Journal of Neurological Surgery, Part B: Skull Base, 78(6), pp. 490–496. (link) (bib) x @article{RN893, year = { 2017 }, volume = { 78 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85026850018{\&}doi=10.1055{\%}2Fs-0037-1604406{\&}partnerID=40{\&}md5=e63320b0921b5580303ada6248b2e769 }, type = { Journal Article }, title = { Anatomical Region Segmentation for Objective Surgical Skill Assessment with Operating Room Motion Data }, pages = { 490--496 }, number = { 6 }, keywords = { anatomical region,atlas-based segmentation,motion analysis,objective skill assessment,operating room data,sinus surgery,skull base }, journal = { Journal of Neurological Surgery, Part B: Skull Base }, issn = { 21936331 }, doi = { 10.1055/s-0037-1604406 }, author = { Li and Bly and Harbison and Humphreys and Whipple and Hannaford and Moe }, abstract = { Background Most existing objective surgical motion analysis schemes are limited to structured surgical tasks or recognition of motion patterns for certain categories of surgeries. Analyzing instrument motion data with respect to anatomical structures can break the limit, and an anatomical region segmentation algorithm is required for the analysis. Methods An atlas was generated by manually segmenting the skull base into nine regions, including left/right anterior/posterior ethmoid sinuses, frontal sinus, left and right maxillary sinuses, nasal airway, and sphenoid sinus. These regions were selected based on anatomical and surgical significance in skull base and sinus surgery. Six features, including left and right eye center, nasofrontal beak, anterior tip of nasal spine, posterior edge of hard palate at midline, and clival body at foramen magnum, were used for alignment. 
The B-spline deformable registration was adapted to fine tune the registration, and bony boundaries were automatically extracted for final precision improvement. The resultant deformation field was applied to the atlas, and the motion data were clustered according to the deformed atlas. Results Eight maxillofacial computed tomography scans were used in experiments. One was manually segmented as the atlas. The others were segmented by the proposed method. Motion data were clustered into nine groups for every dataset and outliers were filtered. Conclusions The proposed algorithm improved the efficiency of motion data clustering and requires limited human interaction in the process. The anatomical region segmentations effectively filtered out the portion of motion data that are out of surgery sites and grouped them according to anatomical similarities. }, } |
2017 | Journal | Bonnie Lawlor (2017). An overview of the NFAIS 2017 annual conference: The big pivot: Re-engineering scholarly communication. Information Services and Use, 37(3), pp. 283–306. (link) (bib) x @article{RN898, year = { 2017 }, volume = { 37 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85032578212{\&}doi=10.3233{\%}2FISU-170854{\&}partnerID=40{\&}md5=b8ce9fb87b2e39ac050ef613e4bf6b5a }, type = { Journal Article }, title = { An overview of the NFAIS 2017 annual conference: The big pivot: Re-engineering scholarly communication }, pages = { 283--306 }, number = { 3 }, keywords = { Data management,Library cyberinfrastructure,Open access,Open data,Scholarly communication }, journal = { Information Services and Use }, issn = { 01675265 }, doi = { 10.3233/ISU-170854 }, author = { Lawlor }, abstract = { This paper provides an overview of the highlights of the 2017 NFAIS Annual Conference, The Big Pivot: Re- Engineering Scholarly Communication, that was held in Alexandria, VA from February 26-28, 2017. The goal of the conference was to examine the scholarly record and its current evolution in a digital world - both in how it functions and how it serves the information and scholarly research communities. The program stressed how in today's environment, new and innovative advances in information technology are drafting a blueprint that will optimize the ways in which users create, access, and use data and information. New government mandates and policies continue to be implemented on a global basis to facilitate open access to research outputs while in parallel alternative methods for peer review and measuring impact are being utilized.Within the context of these changes, the conference attempted to look at where this blueprint may lead the information community over the next few years. }, } |
2017 | Journal | Bishesh Khanal, Nicholas Ayache, Xavier Pennec (2017). Simulating longitudinal brain MRIs with known volume changes and realistic variations in image intensity. Frontiers in Neuroscience, 11(MAR), pp. NA (link) (bib) x @article{RN827, year = { 2017 }, volume = { 11 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Simulating longitudinal brain MRIs with known volume changes and realistic variations in image intensity }, number = { MAR }, keywords = { Biomechanical simulation,Biophysical modeling,Neurodegeneration,Simulated database,Synthetic images,Synthetic longitudinal MRIs }, journal = { Frontiers in Neuroscience }, issn = { 1662453X }, doi = { 10.3389/fnins.2017.00132 }, author = { Khanal and Ayache and Pennec }, abstract = { This paper presents a simulator tool that can simulate large databases of visually realistic longitudinal MRIs with known volume changes. The simulator is based on a previously proposed biophysical model of brain deformation due to atrophy in AD. In this work, we propose a novel way of reproducing realistic intensity variation in longitudinal brain MRIs, which is inspired by an approach used for the generation of synthetic cardiac sequence images. This approach combines a deformation field obtained from the biophysical model with a deformation field obtained by a non-rigid registration of two images. The combined deformation field is then used to simulate a new image with specified atrophy from the first image, but with the intensity characteristics of the second image. This allows to generate the realistic variations present in real longitudinal time-series of images, such as the independence of noise between two acquisitions and the potential presence of variable acquisition artifacts. Various options available in the simulator software are briefly explained in this paper. In addition, the software is released as an open-source repository. 
The availability of the software allows researchers to produce tailored databases of images with ground truth volume changes; we believe this will help develop more robust brain morphometry tools. Additionally, we believe that the scientific community can also use the software to further experiment with the proposed model, and add more complex models of brain deformation and atrophy generation. }, } |
2017 | Journal | Julien Jomier (2017). Open science - towards reproducible Research. Information Services and Use, 37(3), pp. 361–367. (link) (bib) x @article{RN897, year = { 2017 }, volume = { 37 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85032568749{\&}doi=10.3233{\%}2FISU-170846{\&}partnerID=40{\&}md5=e551028edd91b74209c5cb3d27e210c2 }, type = { Journal Article }, title = { Open science - towards reproducible Research }, pages = { 361--367 }, number = { 3 }, keywords = { Open access,Open data,Open science,Open source,Reproducibility }, journal = { Information Services and Use }, issn = { 01675265 }, doi = { 10.3233/ISU-170846 }, author = { Jomier }, abstract = { This paper presents an overview of several efforts towards reproducible research in the field of medical imaging and visualization. In the first section, the components of Open Science are presented: open access, open data and open source. In the second section, the challenges of open-science are described and potential solutions are mentioned. Finally, a discussion on the potential future of open science and reproducible research is introduced. }, } |
2017 | Journal | Ali Hasan, Ebrahim M. Kolahdouz, Andinet Enquobahrie, Thomas G. Caranasos, John P. Vavalle, Boyce E. Griffith (2017). Image-based immersed boundary model of the aortic root. Medical Engineering and Physics, 47, pp. 72–84. (link) (bib) x @article{RN798, year = { 2017 }, volume = { 47 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Image-based immersed boundary model of the aortic root }, pages = { 72--84 }, keywords = { Aortic valve,Finite element method,Fluid–structure interaction,Immersed boundary method,Nonlinear elasticity }, journal = { Medical Engineering and Physics }, issn = { 18734030 }, eprint = { 1705.04279 }, doi = { 10.1016/j.medengphy.2017.05.007 }, author = { Hasan and Kolahdouz and Enquobahrie and Caranasos and Vavalle and Griffith }, arxivid = { 1705.04279 }, archiveprefix = { arXiv }, abstract = { Each year, approximately 300,000 heart valve repair or replacement procedures are performed worldwide, including approximately 70,000 aortic valve replacement surgeries in the United States alone. Computational platforms for simulating cardiovascular devices such as prosthetic heart valves promise to improve device design and assist in treatment planning, including patient-specific device selection. This paper describes progress in constructing anatomically and physiologically realistic immersed boundary (IB) models of the dynamics of the aortic root and ascending aorta. This work builds on earlier IB models of fluid–structure interaction (FSI) in the aortic root, which previously achieved realistic hemodynamics over multiple cardiac cycles, but which also were limited to simplified aortic geometries and idealized descriptions of the biomechanics of the aortic valve cusps. 
By contrast, the model described herein uses an anatomical geometry reconstructed from patient-specific computed tomography angiography (CTA) data, and employs a description of the elasticity of the aortic valve leaflets based on a fiber-reinforced constitutive model fit to experimental tensile test data. The resulting model generates physiological pressures in both systole and diastole, and yields realistic cardiac output and stroke volume at physiological Reynolds numbers. Contact between the valve leaflets during diastole is handled automatically by the IB method, yielding a fully competent valve model that supports a physiological diastolic pressure load without regurgitation. Numerical tests show that the model is able to resolve the leaflet biomechanics in diastole and early systole at practical grid spacings. The model is also used to examine differences in the mechanics and fluid dynamics yielded by fresh valve leaflets and glutaraldehyde-fixed leaflets similar to those used in bioprosthetic heart valves. Although there are large differences in the leaflet deformations during diastole, the differences in the open configurations of the valve models are relatively small, and nearly identical hemodynamics are obtained in all cases considered. }, } |
2017 | Journal | Valentina Giannini, Simone Mazzetti, Agnese Marmo, Filippo Montemurro, Daniele Regge, Laura Martincich (2017). A computer-aided diagnosis (CAD) scheme for pretreatment prediction of pathological response to neoadjuvant therapy using dynamic contrast-enhanced MRI texture features. British Journal of Radiology, 90(1077), pp. NA (link) (bib) x @article{RN911, year = { 2017 }, volume = { 90 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028072747{\&}doi=10.1259{\%}2Fbjr.20170269{\&}partnerID=40{\&}md5=5c537a3d7909e31a3bd3d481046791ce }, type = { Journal Article }, title = { A computer-aided diagnosis (CAD) scheme for pretreatment prediction of pathological response to neoadjuvant therapy using dynamic contrast-enhanced MRI texture features }, number = { 1077 }, journal = { British Journal of Radiology }, issn = { 00071285 }, doi = { 10.1259/bjr.20170269 }, author = { Giannini and Mazzetti and Marmo and Montemurro and Regge and Martincich }, abstract = { Objective: To assess whether a computer-aided, diagnosis (CAD) system can predict pathological Complete Response (pCR) to neoadjuvant chemotherapy (NAC) prior to treatment using texture features. Methods: Response to treatment of 44 patients was defined according to the histopatology of resected tumour and extracted axillary nodes in two ways: (a) pCR+ (Smith's Grade = 5) vs pCR- (Smith's Grade {\textless} 5); (b) pCRN+ (pCR+ and absence of residual lymph node metastases) vs pCRN-. A CAD system was developed to: (i) segment the breasts; (ii) register the DCE-MRI sequence; (iii) detect the lesion and (iv) extract 27 3D texture features. The role of individual texture features, multiparametric models and Bayesian classifiers in predicting patients' response to NAC were evaluated. Results: A cross-validated Bayesian classifier fed with 6 features was able to predict pCR with a specificity of 72{\%} and a sensitivity of 67{\%}. 
Conversely, 2 features were used by the Bayesian classifier to predict pCRN, obtaining a sensitivity of 69{\%} and a specificity of 61{\%}. Conclusion: A CAD scheme, that extracts texture features from an automatically segmented 3D mask of the tumour, could predict pathological response to NAC. Additional research should be performed to validate these promising results on a larger cohort of patients and using different classification strategies. Advances in knowledge: This is the first study assessing the role of an automatic CAD system in predicting the pathological response to NAC before treatment. Fully automatic methods represent the backbone of standardized analysis and may help in timely managing patients candidate to NAC. }, } |
2017 | Journal | Valentina Giannini, Veronica Bianchi, Silvia Carabalona, Simone Mazzetti, Furio Maggiorotto, Franziska Kubatzki, Daniele Regge, Riccardo Ponzone, Laura Martincich (2017). MRI to predict nipple-areola complex (NAC) involvement: An automatic method to compute the 3D distance between the NAC and tumor. Journal of Surgical Oncology, 116(8), pp. 1069–1078. (link) (bib) x @article{RN891, year = { 2017 }, volume = { 116 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85040316780{\&}doi=10.1002{\%}2Fjso.24788{\&}partnerID=40{\&}md5=759b8bc9fcaf202c7bea3274c812f4c2 }, type = { Journal Article }, title = { MRI to predict nipple-areola complex (NAC) involvement: An automatic method to compute the 3D distance between the NAC and tumor }, pages = { 1069--1078 }, number = { 8 }, keywords = { 3D automatic distance,breast cancer,magnetic resonance imaging,mastectomy,nipple-areola sparing,tumor segmentation }, journal = { Journal of Surgical Oncology }, issn = { 10969098 }, doi = { 10.1002/jso.24788 }, author = { Giannini and Bianchi and Carabalona and Mazzetti and Maggiorotto and Kubatzki and Regge and Ponzone and Martincich }, abstract = { Objectives: To assess the role in predicting nipple-areola complex (NAC) involvement of a newly developed automatic method which computes the 3D tumor-NAC distance. Patients and Methods: Ninety-nine patients scheduled to nipple sparing mastectomy (NSM) underwent magnetic resonance (MR) examination at 1.5 T, including sagittal T2w and dynamic contrast enhanced (DCE)-MR imaging. An automatic method was developed to segment the NAC and the tumor and to compute the 3D distance between them. The automatic measurement was compared with manual axial and sagittal 2D measurements. NAC involvement was defined by the presence of invasive ductal or lobular carcinoma and/or ductal carcinoma in situ or ductal intraepithelial neoplasia (DIN1c − DIN3). 
Results: Tumor-NAC distance was computed on 95/99 patients (25 NAC+), as three tumors were not correctly segmented (sensitivity = 97{\%}), and 1 NAC was not detected (sensitivity = 99{\%}). The automatic 3D distance reached the highest area under the receiver operating characteristic (ROC) curve (0.830) with respect to the manual axial (0.676), sagittal (0.664), and minimum distances (0.664). At the best cut-off point of 21 mm, the 3D distance obtained sensitivity = 72{\%}, specificity = 80{\%}, positive predictive value = 56{\%}, and negative predictive value = 89{\%}. Conclusions: This method could provide a reproducible biomarker to preoperatively select breast cancer patients candidates to NSM, thus helping surgical planning and intraoperative management of patients. }, } |
2017 | Journal | Fahmi Fahmi, Tigor H. Nasution, Anggreiny (2017). Smart cloud system with image processing server in diagnosing brain diseases dedicated for hospitals with limited resources. Technology and Health Care, 25(3), pp. 607–610. (link) (bib) x @article{RN905, year = { 2017 }, volume = { 25 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85020190896{\&}doi=10.3233{\%}2FTHC-171298{\&}partnerID=40{\&}md5=43aa37ad080ba10e71273ea68294d0c5 }, type = { Journal Article }, title = { Smart cloud system with image processing server in diagnosing brain diseases dedicated for hospitals with limited resources }, pages = { 607--610 }, number = { 3 }, keywords = { Cloud system,ITK,brain diseases,medical image,web services }, journal = { Technology and Health Care }, issn = { 09287329 }, doi = { 10.3233/THC-171298 }, author = { Fahmi and Nasution and Anggreiny }, abstract = { The use of medical imaging in diagnosing brain disease is growing. The challenges are related to the big size of data and complexity of the image processing. High standard of hardware and software are demanded, which can only be provided in big hospitals. Our purpose was to provide a smart cloud system to help diagnosing brain diseases for hospital with limited infrastructure. The expertise of neurologists was first implanted in cloud server to conduct an automatic diagnosis in real time using image processing technique developed based on ITK library and web service. Users upload images through website and the result, in this case the size of tumor was sent back immediately. A specific image compression technique was developed for this purpose. The smart cloud system was able to measure the area and location of tumors, with average size of 19.91 ± 2.38 cm2 and an average response time 7.0 ± 0.3 s. The capability of the server decreased when multiple clients accessed the system simultaneously: 14 ± 0 s (5 parallel clients) and 27 ± 0.2 s (10 parallel clients). 
The cloud system was successfully developed to process and analyze medical images for diagnosing brain diseases in this case for tumor. }, } |
2017 | Journal | Chantal M.J. de Bakker, Allison R. Altman-Singles, Yihan Li, Wei Ju Tseng, Connie Li, X. Sherry Liu (2017). Adaptations in the Microarchitecture and Load Distribution of Maternal Cortical and Trabecular Bone in Response to Multiple Reproductive Cycles in Rats. Journal of Bone and Mineral Research, 32(5), pp. 1014–1026. (link) (bib) x @article{RN903, year = { 2017 }, volume = { 32 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85013042443{\&}doi=10.1002{\%}2Fjbmr.3084{\&}partnerID=40{\&}md5=18906d1bf5b2d3d0e94d19b1af8384bf }, type = { Journal Article }, title = { Adaptations in the Microarchitecture and Load Distribution of Maternal Cortical and Trabecular Bone in Response to Multiple Reproductive Cycles in Rats }, pages = { 1014--1026 }, number = { 5 }, keywords = { BONE FORMATION,BONE MICROARCHITECTURE,LACTATION,PREGNANCY,STIFFNESS,WEANING }, journal = { Journal of Bone and Mineral Research }, issn = { 15234681 }, doi = { 10.1002/jbmr.3084 }, author = { Bakker and Altman-Singles and Li and Tseng and Li and Liu }, abstract = { Pregnancy, lactation, and weaning result in dramatic changes in maternal calcium metabolism. In particular, the increased calcium demand during lactation causes a substantial degree of maternal bone loss. This reproductive bone loss has been suggested to be largely reversible, as multiple clinical studies have found that parity and lactation history have no adverse effect on postmenopausal fracture risk. However, the precise effects of pregnancy, lactation, and post-weaning recovery on maternal bone structure are not well understood. Our study aimed to address this question by longitudinally tracking changes in trabecular and cortical bone microarchitecture at the proximal tibia in rats throughout three cycles of pregnancy, lactation, and post-weaning using in vivo $\mu$CT. 
We found that the trabecular thickness underwent a reversible deterioration during pregnancy and lactation, which was fully recovered after weaning, whereas other parameters of trabecular microarchitecture (including trabecular number, spacing, connectivity density, and structure model index) underwent a more permanent deterioration, which recovered minimally. Thus, pregnancy and lactation resulted in both transient and long-lasting alterations in trabecular microstructure. In the meantime, multiple reproductive cycles appeared to improve the robustness of cortical bone (resulting in an elevated cortical area and polar moment of inertia), as well as increase the proportion of the total load carried by the cortical bone at the proximal tibia. Taken together, changes in the cortical and trabecular compartments suggest that whereas rat tibial trabecular bone appears to be highly involved in maintaining calcium homeostasis during female reproduction, cortical bone adapts to increase its load-bearing capacity, allowing the overall mechanical function of the tibia to be maintained. {\textcopyright} 2017 American Society for Bone and Mineral Research. }, } |
2017 | Journal | C. Boydev, B. Demol, D. Pasquier, H. Saint-Jalmes, G. Delpon, N. Reynaert (2017). Zero echo time MRI-only treatment planning for radiation therapy of brain tumors after resection. Physica Medica, 42, pp. 332–338. (link) (bib) x @article{RN797, year = { 2017 }, volume = { 42 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Zero echo time MRI-only treatment planning for radiation therapy of brain tumors after resection }, pages = { 332--338 }, keywords = { Atlas-based method,Head cancer,MRI-only treatment planning,Pseudo-CT,ZTE sequence }, journal = { Physica Medica }, issn = { 1724191X }, doi = { 10.1016/j.ejmp.2017.04.028 }, author = { Boydev and Demol and Pasquier and Saint-Jalmes and Delpon and Reynaert }, abstract = { Using magnetic resonance imaging (MRI) as the sole imaging modality for patient modeling in radiation therapy (RT) is a challenging task due to the need to derive electron density information from MRI and construct a so-called pseudo-computed tomography (pCT) image. We have previously published a new method to derive pCT images from head T1-weighted (T1-w) MR images using a single-atlas propagation scheme followed by a post hoc correction of the mapped CT numbers using local intensity information. The purpose of this study was to investigate the performance of our method with head zero echo time (ZTE) MR images. To evaluate results, the mean absolute error in bins of 20 HU was calculated with respect to the true planning CT scan of the patient. We demonstrated that applying our method using ZTE MR images instead of T1-w improved the correctness of the pCT in case of bone resection surgery prior to RT (that is, an example of large anatomical difference between the atlas and the patient). }, } |
2017 | Journal | Hans. Johnson, Luis. Ibáñez, Matthew. Mccormick, Insight Software Consortium. (2017). The ITK Software Guide Book 2: Design and Functionality. Itk, NA pp. NA (link) (bib) x @article{johnson2015itk, year = { 2017 }, url = { https://itk.org/ITKSoftwareGuide/html/Book2/ITKSoftwareGuide-Book2.html }, title = { The ITK Software Guide Book 2: Design and Functionality }, publisher = { Kitware, Inc. }, keywords = { Guide,Registration,Segmentation }, journal = { Itk }, isbn = { 978-1-930934-28-3 }, doi = { 1-930934-15-7 }, author = { Johnson and Ib{\'{a}}{\~{n}}ez and Mccormick and {Insight Software Consortium.} }, } |
2017 | In Collection | Kwame S. Kutten, Nicolas Charon, Michael I. Miller, J. Tilak Ratnanather, Jordan Matelsky, Alexander D. Baden, Kunal Lillaney, Karl Deisseroth, Li Ye, Joshua T. Vogelstein (2017). A large deformation diffeomorphic approach to registration of CLARITY images via mutual information. In M Descoteaux, S Duchesne, A Franz, P Jannin, D L Collins, L Maier-Hein, editor, Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 275–282. (link) (bib) x @incollection{RN907, year = { 2017 }, volume = { 10433 LNCS }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85029360827{\&}doi=10.1007{\%}2F978-3-319-66182-7{\_}32{\&}partnerID=40{\&}md5=3534498089ac268864d5dd2937d2983d }, type = { Serial }, title = { A large deformation diffeomorphic approach to registration of CLARITY images via mutual information }, publisher = { Springer Verlag }, pages = { 275--282 }, issn = { 16113349 }, isbn = { 9783319661810 }, eprint = { 1612.00356 }, editor = { Descoteaux and Duchesne and Franz and Jannin and Collins and Maier-Hein }, doi = { 10.1007/978-3-319-66182-7_32 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Kutten and Charon and Miller and Ratnanather and Matelsky and Baden and Lillaney and Deisseroth and Ye and Vogelstein }, arxivid = { 1612.00356 }, archiveprefix = { arXiv }, abstract = { CLARITY is a method for converting biological tissues into translucent and porous hydrogel-tissue hybrids. This facilitates interrogation with light sheet microscopy and penetration of molecular probes while avoiding physical slicing. In this work, we develop a pipeline for registering CLARIfied mouse brains to an annotated brain atlas. 
Due to the novelty of this microscopy technique it is impractical to use absolute intensity values to align these images to existing standard atlases. Thus we adopt a large deformation diffeomorphic approach for registering images via mutual information matching. Furthermore we show how a cascaded multi-resolution approach can improve registration quality while reducing algorithm run time. As acquired image volumes were over a terabyte in size, they were far too large for work on personal computers. Therefore the NeuroData computational infrastructure was deployed for multi-resolution storage and visualization of these images and aligned annotations on the web. }, } |
2017 | In Collection | Marek Kulbacki, Jakub Segen, Artur Bak (2017). Analysis, recognition, and classification of biological membrane images. In Advances in Anatomy Embryology and Cell Biology, pp. 119–140. (link) (bib) x @incollection{RN956, year = { 2017 }, volume = { 227 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85030665154{\&}doi=10.1007{\%}2F978-3-319-56895-9{\_}8{\&}partnerID=40{\&}md5=ee50a7c74dbc3a51f07be9c4038bb7b0 }, type = { Serial }, title = { Analysis, recognition, and classification of biological membrane images }, publisher = { Springer Verlag }, pages = { 119--140 }, issn = { 03015556 }, isbn = { 03015556 (ISSN) }, doi = { 10.1007/978-3-319-56895-9_8 }, booktitle = { Advances in Anatomy Embryology and Cell Biology }, author = { Kulbacki and Segen and Bak }, abstract = { Biological membrane images contain a variety of objects and patterns, which convey information about the underlying biological structures and mechanisms. The field of image analysis includes methods of computation which convert features and objects identified in images into quantitative information about biological structures represented in these images. Microscopy images are complex, noisy, and full of artifacts and consequently require multiple image processing steps for the extraction of meaningful quantitative information. This review is focused on methods of analysis of images of cells and biological membranes such as detection, segmentation, classification and machine learning, registration, tracking, and visualization. These methods could make possible, for example, to automatically identify defects in the cell membrane which affect physiological processes. Detailed analysis of membrane images could facilitate understanding of the underlying physiological structures or help in the interpretation of biological experiments. }, } |
2017 | In Conf. Proceedings | Francesco Ponzio, Enrico Macii, Elisa Ficarra, Santa Di Cataldo (2017). A multi-modal brain image registration framework for US-guided neuronavigation systems integrating MR and US for minimally invasive neuroimaging. In BIOIMAGING 2017 - 4th International Conference on Bioimaging, Proceedings; Part of 10th International Joint Conference on Biomedical Engineering Systems and Technologies, BIOSTEC 2017, pp. 114–121, Setubal. (link) (bib) x @inproceedings{Ponzio2017, year = { 2017 }, volume = { 2017-Janua }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85049240676{\&}partnerID=40{\&}md5=0189c956d91dec04b477ed5846ecb930 }, type = { Book }, title = { A multi-modal brain image registration framework for US-guided neuronavigation systems integrating MR and US for minimally invasive neuroimaging }, series = { Proceedings of the 10th International Joint Conference on Biomedical Engineering Systems and Technologies, Vol 2: Bioimaging }, publisher = { Scitepress }, pages = { 114--121 }, keywords = { Image Processing,MR-US Image Integration,Multi-modal Image Registration,Neuroimaging,US-based Neuronavigation }, isbn = { 9789897582158 }, doi = { 10.5220/0006239201140121 }, booktitle = { BIOIMAGING 2017 - 4th International Conference on Bioimaging, Proceedings; Part of 10th International Joint Conference on Biomedical Engineering Systems and Technologies, BIOSTEC 2017 }, author = { Ponzio and Macii and Ficarra and {Di Cataldo} }, address = { Setubal }, abstract = { US-guided neuronavigation exploits the simplicity of use and minimal invasiveness of Ultrasound (US) imaging and the high tissue resolution and signal-to-noise ratio of Magnetic Resonance Imaging (MRI) to guide brain surgeries. More specifically, the intra-operative 3D US images are combined with pre-operative MR images to accurately localise the course of instruments in the operative field with minimal invasiveness. 
Multi-modal image registration of 3D US and MR images is an essential part of such system. In this paper, we present a complete software framework that enables the registration US and MR brain scans based on a multi resolution deformable transform, tackling elastic deformations (i.e. brain shifts) possibly occurring during the surgical procedure. The framework supports also simpler and faster registration techniques, based on rigid or affine transforms, and enables the interactive visualisation and rendering of the overlaid US and MRI volumes. The registration was experimentally validated on a public dataset of realistic brain phantom images, at different levels of artificially induced deformations. }, } |
2017 | In Conf. Proceedings | Arthur W. Wetzel, Jennifer Bakal, Markus Dittrich, David G.C. Hildebrand, Josh L. Morgan, Jeff W. Lichtman (2017). Registering large volume serial-section electron microscopy image sets for neural circuit reconstruction using FFT signal whitening. In Proceedings - Applied Imagery Pattern Recognition Workshop, pp. NA (link) (bib) x @inproceedings{Wetzel, year = { 2017 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028755924{\&}doi=10.1109{\%}2FAIPR.2016.8010595{\&}partnerID=40{\&}md5=688016e1bcc150f30e8230da73803097 }, type = { Conference Proceedings }, title = { Registering large volume serial-section electron microscopy image sets for neural circuit reconstruction using FFT signal whitening }, keywords = { connectomics,electron microscopy,image registration,neural circuit reconstruction,signal whitening }, issn = { 21642516 }, isbn = { 9781509032846 }, eprint = { 1612.04787 }, doi = { 10.1109/AIPR.2016.8010595 }, booktitle = { Proceedings - Applied Imagery Pattern Recognition Workshop }, author = { Wetzel and Bakal and Dittrich and Hildebrand and Morgan and Lichtman }, arxivid = { 1612.04787 }, archiveprefix = { arXiv }, abstract = { The detailed reconstruction of neural anatomy for connectomics studies requires a combination of resolution and large three-dimensional data capture provided by serial section electron microscopy (ssEM). The convergence of high throughput ssEM imaging and improved tissue preparation methods now allows ssEM capture of complete specimen volumes up to cubic millimeter scale. The resulting multi-terabyte image sets span thousands of serial sections and must be precisely registered into coherent volumetric forms in which neural circuits can be traced and segmented. 
This paper introduces a Signal Whitening Fourier Transform Image Registration approach (SWiFT-IR) under development at the Pittsburgh Supercomputing Center and its use to align mouse and zebrafish brain datasets acquired using the wafer mapper ssEM imaging technology recently developed at Harvard University. Unlike other methods now used for ssEM registration, SWiFT-IR modifies its spatial frequency response during image matching to maximize a signal-to-noise measure used as its primary indicator of alignment quality. This alignment signal is more robust to rapid variations in biological content and unavoidable data distortions than either phase-only or standard Pearson correlation, thus allowing more precise alignment and statistical confidence. These improvements in turn enable an iterative registration procedure based on projections through multiple sections rather than more typical adjacent-pair matching methods. This projection approach, when coupled with known anatomical constraints and iteratively applied in a multi-resolution pyramid fashion, drives the alignment into a smooth form that properly represents complex and widely varying anatomical content such as the full crosssection zebrafish data. }, } |
2017 | In Conf. Proceedings | Marilia Yatabe, Antonio Ruellas, Liliane Gomes, Lucie Macron, Julia Lopinto, Beatriz Paniagua, Francois Budin, Lucia Cevidanes (2017). Comparative Study of Three Methods to Compute 3D Craniofacial Angular Measurements. In 2017 IADR/AADR/CADR General Session (San Francisco, California) , pp. NA (link) (bib) x @inproceedings{Yatabe2017, year = { 2017 }, url = { https://iadr.abstractarchives.com/abstract/17iags-2636290/comparative-study-of-three-methods-to-compute-3d-craniofacial-angular-measurements }, title = { Comparative Study of Three Methods to Compute 3D Craniofacial Angular Measurements }, booktitle = { 2017 IADR/AADR/CADR General Session (San Francisco, California) }, author = { Yatabe and Ruellas and Gomes and Macron and Lopinto and Paniagua and Budin and Cevidanes }, } |
2017 | In Conf. Proceedings | Jiatao Wu, Yong Li, Yun Peng, Chunxiao Fan (2017). A fast and accurate segmentation method for medical images. In IS and T International Symposium on Electronic Imaging Science and Technology, pp. 38–43. (link) (bib) x @inproceedings{RN976, year = { 2017 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85041527605{\&}doi=10.2352{\%}2FISSN.2470-1173.2017.2.VIPC-404{\&}partnerID=40{\&}md5=611236c5db623b23e4984bfc71e20bb2 }, type = { Conference Proceedings }, title = { A fast and accurate segmentation method for medical images }, publisher = { Society for Imaging Science and Technology }, pages = { 38--43 }, issn = { 24701173 }, isbn = { 24701173 (ISSN) }, editor = { [object Object],[object Object] }, doi = { 10.2352/ISSN.2470-1173.2017.2.VIPC-404 }, booktitle = { IS and T International Symposium on Electronic Imaging Science and Technology }, author = { Wu and Li and Peng and Fan }, abstract = { Selecting regions of interest (ROI) of the medical images is an important task in medical image processing. Manual selection of ROIs serves as the main method for single images and it has a high accuracy. However, it will become infeasible to manually segment ROIs on a large number of images. Observing this problem, this paper proposes a fast and accurate segmentation method to obtain ROIs on a batch of medical images. Firstly, we segment the standard brain image St which has not been injected with tracer. Secondly, we use a B-Spline elastic registration method to get the inverse-registration parameters. Thirdly, we get the template image Te with the registration parameters. Finally, we search the target region by template matching. Experimental results show that the proposed method performs well on medical image segmentation. }, } |
2017 | In Conf. Proceedings | Barbara Trimborn, Ivo Wolf, Denis Abu-Sammour, Thomas Henzler, Lothar R. Schad, Frank G. Zöllner (2017). Investigation of 3D histograms of oriented gradients for image-based registration of CT with interventional CBCT. In Medical Imaging 2017: Image-Guided Procedures, Robotic Interventions, and Modeling, pp. 101350C. (link) (bib) x @inproceedings{RN906, year = { 2017 }, volume = { 10135 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85020450535{\&}doi=10.1117{\%}2F12.2255601{\&}partnerID=40{\&}md5=d433d81da054c950f6c4f2534eda48af }, type = { Conference Proceedings }, title = { Investigation of 3D histograms of oriented gradients for image-based registration of CT with interventional CBCT }, publisher = { SPIE }, pages = { 101350C }, issn = { 16057422 }, isbn = { 9781510607156 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2255601 }, booktitle = { Medical Imaging 2017: Image-Guided Procedures, Robotic Interventions, and Modeling }, author = { Trimborn and Wolf and Abu-Sammour and Henzler and Schad and Z{\"{o}}llner }, abstract = { {\textcopyright} 2017 SPIE. Image registration of preprocedural contrast-enhanced CTs to intraprocedual cone-beam computed tomography (CBCT) can provide additional information for interventional liver oncology procedures such as transcatheter arterial chemoembolisation (TACE). In this paper, a novel similarity metric for gradient-based image registration is proposed. The metric relies on the patch-based computation of histograms of oriented gradients (HOG) building the basis for a feature descriptor. The metric was implemented in a framework for rigid 3D-3D-registration of pre-interventional CT with intra-interventional CBCT data obtained during the workflow of a TACE. 
To evaluate the performance of the new metric, the capture range was estimated based on the calculation of the mean target registration error and compared to the results obtained with a normalized cross correlation metric. The results show that 3D HOG feature descriptors are suitable as image-similarity metric and that the novel metric can compete with established methods in terms of registration accuracy. }, } |
2017 | In Conf. Proceedings | Nirvedh H. Meshram, Tomy Varghese (2017). Fast multilevel Lagrangian carotid strain imaging with GPU computing. In IEEE International Ultrasonics Symposium, IUS, pp. NA (link) (bib) x @inproceedings{RN896, year = { 2017 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85039446737{\&}doi=10.1109{\%}2FULTSYM.2017.8092085{\&}partnerID=40{\&}md5=6bb0c6a43db0421599b9f25e0926d036 }, type = { Conference Proceedings }, title = { Fast multilevel Lagrangian carotid strain imaging with GPU computing }, publisher = { IEEE Computer Society }, keywords = { CUDA,Carotid Plaque,GPU,Lagrangian,Strain Imaging,Ultrasound }, issn = { 19485727 }, isbn = { 9781538633830 }, doi = { 10.1109/ULTSYM.2017.8092085 }, booktitle = { IEEE International Ultrasonics Symposium, IUS }, author = { Meshram and Varghese }, abstract = { Lagrangian carotid strain imaging (LCSI) involves estimation of deformation in the carotid artery due to blood pressure variations under cardiac pulsation. Local strain over a cardiac cycle is tracked, which is computationally intensive. We incur long offline processing times for LCSI which becomes a limiting factor for clinical adoption. We report on the computational speedup obtained for a parallelized implementation of LCSI using CUDA programming for fast computation. LCSI is currently performed using a multi-level block matching algorithm written in C++ using the Insight Toolkit (ITK) system. We have implemented this code on a NVIDIA k40 GPU for running CUDA kernels called from the ITK C++ code. The multi-level algorithm consists of three processing stages; stage 1 performs block matching at the coarsest level while level 3 performs block-matching at the finest scale on radiofrequency signals. The regularization step which incurred the largest computational time was implemented on the GPU. Cross-correlation was then implemented with the regularization step thereby avoiding a CPU to GPU data transfer. 
Shared memory was used in the regularization step to further reduce processing time. The computation time per frame pairs for LCSI with our initial implementation was about 316.41 secs for an in vivo human carotid data set, thereby taking 131 minutes for an entire loop over a cardiac cycle with 25 frames. GPU implementation of regularization provided per frame results in 99.92 secs, a speedup of 3.16X. Further optimization with implementation of cross correlation on the GPU and use of shared memory improved the computation time to 23 secs per frame, a speed up of 13.75X, reducing processing time to 9.5 minutes over a cardiac cycle. }, } |
2017 | In Conf. Proceedings | Nadia Moqbel Hassan (2017). Analysis and Implementation of Constructor in Class Hierarchy. In 2017 2nd Al-Sadiq International Conference on Multidisciplinary in IT and Communication Science and Applications, AIC-MITCSA 2017, pp. 7–12. (link) (bib) x @inproceedings{RN958, year = { 2017 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85067129598{\&}doi=10.1109{\%}2FAIC-MITCSA.2017.8722987{\&}partnerID=40{\&}md5=2c3f65035b60b33f66bb21bb6842bc6a }, type = { Conference Proceedings }, title = { Analysis and Implementation of Constructor in Class Hierarchy }, publisher = { Institute of Electrical and Electronics Engineers Inc. }, pages = { 7--12 }, keywords = { Constructors in derived class,Inheritance,Metrics measurement model,Object Oriented programming,Object oriented design metrics,Software Metrics }, isbn = { 9781538642412 }, doi = { 10.1109/AIC-MITCSA.2017.8722987 }, booktitle = { 2017 2nd Al-Sadiq International Conference on Multidisciplinary in IT and Communication Science and Applications, AIC-MITCSA 2017 }, author = { Hassan }, abstract = { Programs organization and advancement that endeavor to kill a portion of the pitfalls of traditional programming strategies by data organized in a hierarchy of classes with several powerful new concepts. This paper displays another strategy for examining and reengineering Constructor in class hierarchies. In object oriented Software (OOS), the accentuation is on data instead of procedure. Class is a way that ties the data and procedure together. Constructor is a special member procedure (SMP) class and destructor restores the memory delivers back to the program. The advancement comprises of exercises, for example, analyze, design, code, test, implement and maintenance. 
This paper gives the information about measuring the estimations of Object Oriented programming (OOP) advancement and proposed a Metric Measurement Model to guarantee that measurements of value properties are imperative in Object Oriented programming improvement. }, } |
2017 | In Conf. Proceedings | Soheil Ghafurian, Ilker Hacihaliloglu, Dimitris N. Metaxas, Virak Tan, Kang Li (2017). 3D/2D image registration method for joint motion analysis using low-quality images from mini C-arm machines. In Medical Imaging 2017: Image-Guided Procedures, Robotic Interventions, and Modeling, pp. 101350B. (link) (bib) x @inproceedings{RN963, year = { 2017 }, volume = { 10135 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85020389194{\&}doi=10.1117{\%}2F12.2254678{\&}partnerID=40{\&}md5=6fd25bdbdca508f33b2b3846f0f0d7fc }, type = { Conference Proceedings }, title = { 3D/2D image registration method for joint motion analysis using low-quality images from mini C-arm machines }, publisher = { SPIE }, pages = { 101350B }, issn = { 16057422 }, isbn = { 9781510607156 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2254678 }, booktitle = { Medical Imaging 2017: Image-Guided Procedures, Robotic Interventions, and Modeling }, author = { Ghafurian and Hacihaliloglu and Metaxas and Tan and Li }, abstract = { {\textcopyright} 2017 SPIE. A 3D kinematic measurement of joint movement is crucial for orthopedic surgery assessment and diagnosis. This is usually obtained through a frame-by-frame registration of the 3D bone volume to a uoroscopy video of the joint movement. The high cost of a high-quality uoroscopy imaging system has hindered the access of many labs to this application. This is while the more affordable and low-dosage version, the mini C-Arm, is not commonly used for this application due to low image quality. In this paper, we introduce a novel method for kinematic analysis of joint movement using the mini C-Arm. In this method the bone of interest is recovered and isolated from the rest of the image using a non-rigid registration of an atlas to each frame. The 3D/2D registration is then performed using the weighted histogram of image gradients as an image feature. 
In our experiments, the registration error was 0.89 mm and 2.36° for human C2 vertebra. While the precision is still lacking behind a high quality fluoroscopy machine, it is a good starting point facilitating the use of mini C-Arms for motion analysis making this application available to lower-budget environments. Moreover, the registration was highly resistant to the initial distance from the true registration, converging to the answer from anywhere within ±90° of it. }, } |
2017 | In Conf. Proceedings | Gopichandh Danala, Yunzhi Wang, Theresa Thai, Camille C. Gunderson, Katherine M. Moxley, Kathleen Moore, Robert S. Mannel, Samuel Cheng, Hong Liu, Bin Zheng, Yuchen Qiu (2017). Improving efficacy of metastatic tumor segmentation to facilitate early prediction of ovarian cancer patients' response to chemotherapy. In Biophotonics and Immune Responses XII, pp. 100650J. (link) (bib) x @inproceedings{RN908, year = { 2017 }, volume = { 10065 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019246953{\&}doi=10.1117{\%}2F12.2250978{\&}partnerID=40{\&}md5=d74ac0c887f79b14d956f34ba1490c94 }, type = { Conference Proceedings }, title = { Improving efficacy of metastatic tumor segmentation to facilitate early prediction of ovarian cancer patients' response to chemotherapy }, publisher = { SPIE }, pages = { 100650J }, issn = { 16057422 }, isbn = { 9781510605718 }, editor = { [object Object] }, doi = { 10.1117/12.2250978 }, booktitle = { Biophotonics and Immune Responses XII }, author = { Danala and Wang and Thai and Gunderson and Moxley and Moore and Mannel and Cheng and Liu and Zheng and Qiu }, abstract = { {\textcopyright} 2017 SPIE. Accurate tumor segmentation is a critical step in the development of the computer-aided detection (CAD) based quantitative image analysis scheme for early stage prognostic evaluation of ovarian cancer patients. The purpose of this investigation is to assess the efficacy of several different methods to segment the metastatic tumors occurred in different organs of ovarian cancer patients. In this study, we developed a segmentation scheme consisting of eight different algorithms, which can be divided into three groups: 1) Region growth based methods; 2) Canny operator based methods; and 3) Partial differential equation (PDE) based methods. A number of 138 tumors acquired from 30 ovarian cancer patients were used to test the performance of these eight segmentation algorithms. 
The results demonstrate each of the tested tumors can be successfully segmented by at least one of the eight algorithms without the manual boundary correction. Furthermore, modified region growth, classical Canny detector, and fast marching, and threshold level set algorithms are suggested in the future development of the ovarian cancer related CAD schemes. This study may provide meaningful reference for developing novel quantitative image feature analysis scheme to more accurately predict the response of ovarian cancer patients to the chemotherapy at early stage. }, } |
2016 | Book | Desmond J. Higham, Nicholas J. Higham (2016). MATLAB Guide, Third Edition, NA 2016. (bib) x @book{Higham2016, year = { 2016 }, title = { MATLAB Guide, Third Edition }, doi = { 10.1137/1.9781611974669 }, booktitle = { MATLAB Guide, Third Edition }, author = { Higham and Higham }, abstract = { Third edition. "MATLAB is an interactive system for numerical computation that is widely used for teaching and research in industry and academia. It provides a modern programming language and problem solving environment, with powerful data structures, customizable graphics, and easy-to-use editing and debugging tools. This third edition of MATLAB Guide completely revises and updates the best-selling second edition and is more than 25 percent longer. The book remains a lively, concise introduction to the most popular and important features of MATLAB and the Symbolic Math Toolbox. Key features are a tutorial in Chapter 1 that gives a hands-on overview of MATLAB, a thorough treatment of MATLAB mathematics, including the linear algebra and numerical analysis functions and the differential equation solvers, and a web page that provides a link to example program files, updates, and links to MATLAB resources. The new edition contains color figures throughout, includes pithy discussions of related topics in new "Asides" boxes that augment the text, has new chapters on the Parallel Computing Toolbox, object-oriented programming, graphs, and large data sets, covers important new MATLAB data types such as categorical arrays, string arrays, tall arrays, tables, and timetables, contains more on MATLAB workflow, including the Live Editor and unit tests, and fully reflects major updates to the MATLAB graphics system." 
Chapter 1: A Brief Tutorial -- Chapter 2: Basics -- Chapter 3: Distinctive Features of MATLAB -- Chapter 4: Arithmetic -- Chapter 5: Matrices -- Chapter 6: Operators and Flow Control -- Chapter 7: Program Files -- Chapter 8: Graphics -- Chapter 9: Linear Algebra -- Chapter 10: More on Functions -- Chapter 11: Numerical Methods: Part I -- Chapter 12: Numerical Methods: Part II -- Chapter 13: Input and Output -- Chapter 14: Troubleshooting -- Chapter 15: Sparse Matrices -- Chapter 16: More on Coding -- Chapter 17: Advanced Graphics -- Chapter 18: Other Data Types and Multidimensional Arrays -- Chapter 19: Object-Oriented Programming -- Chapter 20: The Symbolic Math Toolbox -- Chapter 21: Graphs -- Chapter 22: Large Data Sets -- Chapter 23: Optimizing Codes -- Chapter 24: Tricks and Tips -- Chapter 25: The Parallel Computing Toolbox -- Chapter 26: Case Studies. }, } |
2016 | Book | I T Ozbolat (2016). 3D Bioprinting: Fundamentals, Principles and Applications, NA 2016. (link) (bib) x @book{Ozbolat2016a, year = { 2016 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85014448877{\&}partnerID=40{\&}md5=e0d277bc12f4ddba45e2cdccfbae6cbc }, type = { Book }, title = { 3D Bioprinting: Fundamentals, Principles and Applications }, series = { 3D Bioprinting: Fundamentals, Principles and Applications }, pages = { 1--342 }, author = { Ozbolat, I. T. }, } |
2016 | Book chapter | Issam El Naqa (2016). NA in Image processing and analysis of PET and hybrid PET imaging, NA pp. 285–301. (link) (bib) x @inbook{ElNaqa2016, year = { 2016 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85018895187{\&}doi=10.1007{\%}2F978-3-319-40070-9{\_}12{\&}partnerID=40{\&}md5=a2572f421a96f250c396c7c0538f189d }, type = { Book Section }, title = { Image processing and analysis of PET and hybrid PET imaging }, pages = { 285--301 }, keywords = { Hybrid imaging,Image processing,Quantitative PET,Radiomics }, isbn = { 9783319400709 }, doi = { 10.1007/978-3-319-40070-9_12 }, booktitle = { Basic Science of PET Imaging }, author = { {El Naqa} }, abstract = { PET imaging is a main diagnostic modality of different diseases including cancer. In the particular case of cancer, PET is widely used for staging of disease progression, identification of the treatment gross tumor volume, monitoring of disease, as well as prediction of outcomes and personalization of treatment regimens. Among the arsenal of different functional imaging modalities, PET has benefited from early adoption of quantitative image analysis starting from simple standard uptake value (SUV) normalization to more advanced extraction of complex imaging uptake patterns, thanks chiefly to the application of sophisticated image processing algorithms. In this chapter, we discuss the application of image processing techniques to PET imaging with special focus on the oncological radiotherapy domain starting from basic feature extraction to application in target definition using image segmentation/registration and more recent image-based outcome modeling in the radiomics field. We further extend the discussion into hybrid anatomical functional combinations of PET/CT and PET/MR multimodalities. }, } |
2016 | Book chapter | Elena Baglaeva, Sergey Tsapko, Irina Tsapko, Aleksey Ershov (2016). NA in Modelling of Cellular Structures Obtained By X-Ray Phase Contrast Imaging, Edited by O Berestneva, A Tikhomirov, A Trufanov, Atlantis Press, pp. 354–358, ACSR-Advances in Computer Science Research, Vol. 51. (link) (bib) x @inbook{Baglaeva2016, year = { 2016 }, volume = { 51 }, type = { Book Section }, title = { Modelling of Cellular Structures Obtained By X-Ray Phase Contrast Imaging }, series = { ACSR-Advances in Computer Science Research }, publisher = { Atlantis Press }, pages = { 354--358 }, isbn = { 978-94-6252-196-4 }, editor = { Berestneva, O. and Tikhomirov, A. and Trufanov, A. }, doi = { 10.2991/itsmssm-16.2016.95 }, booktitle = { Proceedings of the 2016 Conference on Information Technologies in Science, Management, Social Sphere and Medicine }, author = { Baglaeva, Elena and Tsapko, Sergey and Tsapko, Irina and Ershov, Aleksey }, address = { Paris }, } |
2016 | Journal | Jessica L Forbes, Regina E Y Kim, Jane S Paulsen, Hans J Johnson (2016). An Open-Source Label Atlas Correction Tool and Preliminary Results on Huntingtons Disease Whole-Brain MRI Atlases.. Frontiers in neuroinformatics, 10, pp. 1–11. (link) (bib) x @article{forbes2016open, year = { 2016 }, volume = { 10 }, url = { http://journal.frontiersin.org/Article/10.3389/fninf.2016.00029/abstract{\%}5Cnhttp://www.ncbi.nlm.nih.gov/pubmed/27536233{\%}5Cnhttp://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=PMC4971025 }, title = { An Open-Source Label Atlas Correction Tool and Preliminary Results on Huntingtons Disease Whole-Brain MRI Atlases. }, publisher = { Frontiers Media SA }, pmid = { 27536233 }, pages = { 1--11 }, month = { aug }, keywords = { Huntingtons Disease,ITK,brain MRI,label atlas,multi-atlas,multi-modal,open-source }, journal = { Frontiers in neuroinformatics }, issn = { 1662-5196 }, doi = { 10.3389/fninf.2016.00029 }, author = { Forbes and Kim and Paulsen and Johnson }, annote = { \#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\# }, abstract = { The creation of high-quality medical imaging reference atlas datasets with consistent dense anatomical region labels is a challenging task. Reference atlases have many uses in medical image applications and are essential components of atlas-based segmentation tools commonly used for producing personalized anatomical measurements for individual subjects. 
The process of manual identification of anatomical regions by experts is regarded as a so-called gold standard; however, it is usually impractical because of the labor-intensive costs. Further, as the number of regions of interest increases, these manually created atlases often contain many small inconsistently labeled or disconnected regions that need to be identified and corrected. This project proposes an efficient process to drastically reduce the time necessary for manual revision in order to improve atlas label quality. We introduce the LabelAtlasEditor tool, a SimpleITK-based open-source label atlas correction tool distributed within the image visualization software 3D Slicer. LabelAtlasEditor incorporates several 3D Slicer widgets into one consistent interface and provides label-specific correction tools, allowing for rapid identification, navigation, and modification of the small, disconnected erroneous labels within an atlas. The technical details for the implementation and performance of LabelAtlasEditor are demonstrated using an application of improving a set of 20 Huntingtons Disease-specific multi-modal brain atlases. Additionally, we present the advantages and limitations of automatic atlas correction. After the correction of atlas inconsistencies and small, disconnected regions, the number of unidentified voxels for each dataset was reduced on average by 68.48{\%}. }, } |
2016 | Journal | Jessica L. Forbes, Regina E.Y. Kim, Jane S. Paulsen, Hans J. Johnson (2016). An open-source label atlas correction tool and preliminary results on huntingtons disease whole-brain MRI atlases. Frontiers in Neuroinformatics, 10(AUG), pp. 1–11. (link) (bib) x @article{forbes2016openb, year = { 2016 }, volume = { 10 }, url = { http://journal.frontiersin.org/Article/10.3389/fninf.2016.00029/abstract{\%}5Cnhttp://www.ncbi.nlm.nih.gov/pubmed/27536233{\%}5Cnhttp://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=PMC4971025 }, title = { An open-source label atlas correction tool and preliminary results on huntingtons disease whole-brain MRI atlases }, publisher = { Frontiers Media SA }, pmid = { 27536233 }, pages = { 1--11 }, number = { AUG }, month = { aug }, keywords = { Brain MRI,Huntingtons Disease,ITK,Label atlas,Multi-atlas,Multi-modal,Open-source }, journal = { Frontiers in Neuroinformatics }, issn = { 16625196 }, file = { :Users/johnsonhj/Library/Application Support/Mendeley Desktop/Downloaded/Forbes et al. - 2016 - An Open-Source Label Atlas Correction Tool and Preliminary Results on Huntingtons Disease Whole-Brain MRI Atlases.pdf:pdf }, doi = { 10.3389/fninf.2016.00029 }, author = { Forbes and Kim and Paulsen and Johnson }, annote = { From Duplicate 1 (An Open-Source Label Atlas Correction Tool and Preliminary Results on Huntingtons Disease Whole-Brain MRI Atlases. - Forbes, Jessica L; Kim, Regina E Y; Paulsen, Jane S; Johnson, Hans J) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. 
:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (An Open-Source Label Atlas Correction Tool and Preliminary Results on Huntingtons Disease Whole-Brain MRI Atlases. - Forbes, Jessica L; Kim, Regina EY Y; Paulsen, Jane S.; Johnson, Hans J.) From Duplicate 1 (An Open-Source Label Atlas Correction Tool and Preliminary Results on Huntingtons Disease Whole-Brain MRI Atlases. - Forbes, Jessica L; Kim, Regina EY Y; Paulsen, Jane S.; Johnson, Hans J.) From Duplicate 1 (An Open-Source Label Atlas Correction Tool and Preliminary Results on Huntingtons Disease Whole-Brain MRI Atlases. - Forbes, Jessica L; Kim, Regina E Y; Paulsen, Jane S; Johnson, Hans J) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (An Open-Source Label Atlas Correction Tool and Preliminary Results on Huntingtons Disease Whole-Brain MRI Atlases. - Forbes, Jessica L; Kim, Regina EY; Paulsen, Jane S.; Johnson, Hans J.) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. 
:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (An Open-Source Label Atlas Correction Tool and Preliminary Results on Huntingtons Disease Whole-Brain MRI Atlases. - Forbes, Jessica L; Kim, Regina EY Y; Paulsen, Jane S.; Johnson, Hans J.) From Duplicate 1 (An Open-Source Label Atlas Correction Tool and Preliminary Results on Huntingtons Disease Whole-Brain MRI Atlases. - Forbes, Jessica L; Kim, Regina EY Y; Paulsen, Jane S.; Johnson, Hans J.) From Duplicate 1 (An Open-Source Label Atlas Correction Tool and Preliminary Results on Huntingtons Disease Whole-Brain MRI Atlases. - Forbes, Jessica L; Kim, Regina E Y; Paulsen, Jane S; Johnson, Hans J) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (An Open-Source Label Atlas Correction Tool and Preliminary Results on Huntingtons Disease Whole-Brain MRI Atlases. - Forbes, Jessica L; Kim, Regina EY; Paulsen, Jane S.; Johnson, Hans J.) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. 
:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (An Open-Source Label Atlas Correction Tool and Preliminary Results on Huntingtons Disease Whole-Brain MRI Atlases. - Forbes, Jessica L; Kim, Regina E Y; Paulsen, Jane S; Johnson, Hans J) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} }, abstract = { The creation of high-quality medical imaging reference atlas datasets with consistent dense anatomical region labels is a challenging task. Reference atlases have many uses in medical image applications and are essential components of atlas-based segmentation tools commonly used for producing personalized anatomical measurements for individual subjects. The process of manual identification of anatomical regions by experts is regarded as a so-called gold standard; however, it is usually impractical because of the labor-intensive costs. Further, as the number of regions of interest increases, these manually created atlases often contain many small inconsistently labeled or disconnected regions that need to be identified and corrected. This project proposes an efficient process to drastically reduce the time necessary for manual revision in order to improve atlas label quality. We introduce the LabelAtlasEditor tool, a SimpleITK-based open-source label atlas correction tool distributed within the image visualization software 3D Slicer. 
LabelAtlasEditor incorporates several 3D Slicer widgets into one consistent interface and provides label-specific correction tools, allowing for rapid identification, navigation, and modification of the small, disconnected erroneous labels within an atlas. The technical details for the implementation and performance of LabelAtlasEditor are demonstrated using an application of improving a set of 20 Huntingtons Disease-specific multi-modal brain atlases. Additionally, we present the advantages and limitations of automatic atlas correction. After the correction of atlas inconsistencies and small, disconnected regions, the number of unidentified voxels for each dataset was reduced on average by 68.48{\%}. }, } |
2016 | Journal | Paolo Zaffino, Patrik Raudaschl, Karl Fritscher, Gregory C. Sharp, Maria Francesca Spadea (2016). Technical Note: Plastimatch mabs, an open source tool for automatic image segmentation. Medical Physics, 43(9), pp. 5155–5160. (link) (bib) x @article{Zaffino2016, year = { 2016 }, volume = { 43 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Technical Note: Plastimatch mabs, an open source tool for automatic image segmentation }, pages = { 5155--5160 }, number = { 9 }, keywords = { CT,MRI,automatic segmentation,multiatlas based segmentation,open source }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4961121 }, author = { Zaffino and Raudaschl and Fritscher and Sharp and Spadea }, abstract = { Purpose: Multiatlas based segmentation is largely used in many clinical and research applications. Due to its good performances, it has recently been included in some commercial platforms for radiotherapy planning and surgery guidance. Anyway, to date, a software with no restrictions about the anatomical district and image modality is still missing. In this paper we introduce PLASTIMATCH MABS, an open source software that can be used with any image modality for automatic segmentation. Methods: PLASTIMATCH MABS workflow consists of two main parts: (1) an offline phase, where optimal registration and voting parameters are tuned and (2) an online phase, where a new patient is labeled from scratch by using the same parameters as identified in the former phase. Several registration strategies, as well as different voting criteria can be selected. A flexible atlas selection scheme is also available. To prove the effectiveness of the proposed software across anatomical districts and image modalities, it was tested on two very different scenarios: head and neck (H{\&}N) CT segmentation for radiotherapy application, and magnetic resonance image brain labeling for neuroscience investigation. 
Results: For the neurological study, minimum dice was equal to 0.76 (investigated structures: left and right caudate, putamen, thalamus, and hippocampus). For head and neck case, minimum dice was 0.42 for the most challenging structures (optic nerves and submandibular glands) and 0.62 for the other ones (mandible, brainstem, and parotid glands). Time required to obtain the labels was compatible with a real clinical workflow (35 and 120 min). Conclusions: The proposed software fills a gap in the multiatlas based segmentation field, since all currently available tools (both for commercial and for research purposes) are restricted to a well specified application. Furthermore, it can be adopted as a platform for exploring MABS parameters and as a reference implementation for comparing against other segmentation algorithms. }, } |
2016 | Journal | Anna Wunderling, Mehdi Ben Targem, Pierre Barbier De Reuille, Laura Ragni, Simon Turner (2016). Novel tools for quantifying secondary growth. Journal of Experimental Botany, 68(1), pp. 89–95. (link) (bib) x @article{Wunderling2017, year = { 2016 }, volume = { 68 }, type = { Journal Article }, title = { Novel tools for quantifying secondary growth }, pages = { 89--95 }, number = { 1 }, keywords = { Arabidopsis,Automated cellular phenotyping,Machine learning,Quantitative histology,Secondary growth }, journal = { Journal of Experimental Botany }, issn = { 14602431 }, doi = { 10.1093/jxb/erw450 }, author = { Wunderling, Anna and {Ben Targem}, Mehdi and {Barbier De Reuille}, Pierre and Ragni, Laura and Turner, Simon }, abstract = { Secondary growth occurs in dicotyledons and gymnosperms, and results in an increased girth of plant organs. It is driven primarily by the vascular cambium, which produces thousands of cells throughout the life of several plant species. For instance, even in the small herbaceous model plant Arabidopsis, manual quantification of this massive process is impractical. Here, we provide a comprehensive overview of current methods used to measure radial growth. We discuss the issues and problematics related to its quantification. We highlight recent advances and tools developed for automated cellular phenotyping and its future applications. }, } |
2016 | Journal | Yuehong Tong, Tal Ben Ami, Sungmin Hong, Rainer Heintzmann, Guido Gerig, Zsolt Ablonczy, Christine A. Curcio, Thomas Ach, R. Theodore Smith (2016). Hyperspectral autofluorescence imaging of drusen and retinal pigment epithelium in donor eyes with age-related macular degeneration. Retina, 36(12), pp. S127–S136. (link) (bib) x @article{Tong2016, year = { 2016 }, volume = { 36 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Hyperspectral autofluorescence imaging of drusen and retinal pigment epithelium in donor eyes with age-related macular degeneration }, pages = { S127--S136 }, number = { 12 }, keywords = { Age-related macular degeneration,Autofluorescence,Bruch membrane,Drusen,Fluorophores,Hyperspectral imaging,Lipofuscin,Nonnegative tensor factorization,Retinal pigment epithelium,Sub-retinal pigment epithelium deposits }, journal = { Retina }, issn = { 15392864 }, doi = { 10.1097/IAE.0000000000001325 }, author = { Tong and Ami and Hong and Heintzmann and Gerig and Ablonczy and Curcio and Ach and Smith }, abstract = { Purpose: To elucidate the molecular pathogenesis of age-related macular degeneration (AMD) and interpretation of fundus autofluorescence imaging, the authors identified spectral autofluorescence characteristics of drusen and retinal pigment epithelium (RPE) in donor eyes with AMD. Methods: Macular RPE/Bruch membrane flat mounts were prepared from 5 donor eyes with AMD. In 12 locations (1-3 per eye), hyperspectral autofluorescence images in 10-nmwavelength steps were acquired at 2 excitation wavelengths (lex 436, 480 nm). A nonnegative tensor factorization algorithm was used to recover 5 abundant emission spectra and their corresponding spatial localizations. Results: At lex 436 nm, the authors consistently localized a novel spectrum (SDr) with a peak emission near 510 nm in drusen and sub-RPE deposits. 
Abundant emission spectra seen previously (S0 in Bruch membrane and S1, S2, and S3 in RPE lipofuscin/melanolipofuscin, respectively) also appeared in AMD eyes, with the same shapes and peak wavelengths as in normal tissue. Lipofuscin/melanolipofuscin spectra localizations in AMD eyes varied widely in their overlap with drusen, ranging from none to complete. Conclusion: An emission spectrum peaking at ∼510 nm (λex 436 nm) appears to be sensitive and specific for drusen and sub-RPE deposits. One or more abundant spectra from RPE organelles exhibit characteristic relationships with drusen. }, } |
2016 | Journal | Benjamin A. Thomas, Vesna Cuplov, Alexandre Bousse, Adriana Mendes, Kris Thielemans, Brian F. Hutton, Kjell Erlandsson (2016). PETPVC: A toolbox for performing partial volume correction techniques in positron emission tomography. Physics in Medicine and Biology, 61(22), pp. 7975–7993. (link) (bib) x @article{Thomas2016, year = { 2016 }, volume = { 61 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { PETPVC: A toolbox for performing partial volume correction techniques in positron emission tomography }, pages = { 7975--7993 }, number = { 22 }, keywords = { PET/MR,partial volume correction,partial volume effects,positron emission tomography }, journal = { Physics in Medicine and Biology }, issn = { 13616560 }, doi = { 10.1088/0031-9155/61/22/7975 }, author = { Thomas and Cuplov and Bousse and Mendes and Thielemans and Hutton and Erlandsson }, abstract = { Positron emission tomography (PET) images are degraded by a phenomenon known as the partial volume effect (PVE). Approaches have been developed to reduce PVEs, typically through the utilisation of structural information provided by other imaging modalities such as MRI or CT. These methods, known as partial volume correction (PVC) techniques, reduce PVEs by compensating for the effects of the scanner resolution, thereby improving the quantitative accuracy. The PETPVC toolbox described in this paper comprises a suite of methods, both classic and more recent approaches, for the purposes of applying PVC to PET data. Eight core PVC techniques are available. These core methods can be combined to create a total of 22 different PVC techniques. Simulated brain PET data are used to demonstrate the utility of toolbox in idealised conditions, the effects of applying PVC with mismatched point-spread function (PSF) estimates and the potential of novel hybrid PVC methods to improve the quantification of lesions. 
All anatomy-based PVC techniques achieve complete recovery of the PET signal in cortical grey matter (GM) when performed in idealised conditions. Applying deconvolution-based approaches results in incomplete recovery due to premature termination of the iterative process. PVC techniques are sensitive to PSF mismatch, causing a bias of up to 16.7{\%} in GM recovery when over-estimating the PSF by 3 mm. The recovery of both GM and a simulated lesion was improved by combining two PVC techniques together. The PETPVC toolbox has been written in C++, supports Windows, Mac and Linux operating systems, is open-source and publicly available. }, } |
2016 | Journal | Radka Stoyanova, Mandeep Takhar, Yohann Tschudi, John C. Ford, Gabriel Sol\'orzano, Nicholas Erho, Yoganand Balagurunathan, Sanoj Punnen, Elai Davicioni, Robert J. Gillies, Alan Pollack (2016). Prostate cancer radiomics and the promise of radiogenomics. Translational Cancer Research, 5(4), pp. 432–447. (link) (bib) x @article{Stoyanova2016, year = { 2016 }, volume = { 5 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Prostate cancer radiomics and the promise of radiogenomics }, pmid = { 29188191 }, pages = { 432--447 }, number = { 4 }, keywords = { Gene expression,MRI-targeted biopsies,Multiparametric MRI,Prostate cancer,Radiogenomics,Radiomics }, journal = { Translational Cancer Research }, issn = { 22196803 }, doi = { 10.21037/tcr.2016.06.20 }, author = { Stoyanova and Takhar and Tschudi and Ford and Sol{\'{o}}rzano and Erho and Balagurunathan and Punnen and Davicioni and Gillies and Pollack }, abstract = { Prostate cancer exhibits intra-tumoral heterogeneity that we hypothesize to be the leading confounding factor contributing to the underperformance of the current pre-treatment clinical-pathological and genomic assessment. These limitations impose an urgent need to develop better computational tools to identify men with low risk of prostate cancer versus others that may be at risk for developing metastatic cancer. The patient stratification will directly translate to patient treatments, wherein decisions regarding active surveillance or intensified therapy are made. Multiparametric MRI (mpMRI) provides the platform to investigate tumor heterogeneity by mapping the individual tumor habitats. We hypothesize that quantitative assessment (radiomics) of these habitats results in distinct combinations of descriptors that reveal regions with different physiologies and phenotypes. 
Radiogenomics, a discipline connecting tumor morphology described by radiomic and its genome described by the genomic data, has the potential to derive "radio phenotypes" that both correlate to and complement existing validated genomic risk stratification biomarkers. In this article we first describe the radiomic pipeline, tailored for analysis of prostate mpMRI, and in the process we introduce our particular implementations of radiomics modules. We also summarize the efforts in the radiomics field related to prostate cancer diagnosis and assessment of aggressiveness. Finally, we describe our results from radiogenomic analysis, based on mpMRI-Ultrasound (MRI-US) biopsies and discuss the potential of future applications of this technique. The mpMRI radiomics data indicate that the platform would significantly improve the biopsy targeting of prostate habitats through better recognition of indolent versus aggressive disease, thereby facilitating a more personalized approach to prostate cancer management. The expectation to non-invasively identify habitats with high probability of housing aggressive cancers would result in directed biopsies that are more informative and actionable. Conversely, providing evidence for lack of disease would reduce the incidence of non-informative biopsies. In radiotherapy of prostate cancer, dose escalation has been shown to reduce biochemical failure. Dose escalation only to determinate prostate habitats has the potential to improve tumor control with less toxicity than when the entire prostate is dose escalated. }, } |
2016 | Journal | D. Punzo, J. M. van der Hulst, J. B.T.M. Roerdink (2016). Finding faint H I structure in and around galaxies: Scraping the barrel. Astronomy and Computing, 17, pp. 163–176. (link) (bib) x @article{Punzo2016, year = { 2016 }, volume = { 17 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Finding faint H I structure in and around galaxies: Scraping the barrel }, pages = { 163--176 }, keywords = { Radio lines: galaxies,Scientific visualization,Techniques: image processing }, journal = { Astronomy and Computing }, issn = { 22131337 }, eprint = { 1609.03782 }, doi = { 10.1016/j.ascom.2016.09.002 }, author = { Punzo and Hulst and Roerdink }, arxivid = { 1609.03782 }, archiveprefix = { arXiv }, abstract = { Soon to be operational H I survey instruments such as APERTIF and ASKAP will produce large datasets. These surveys will provide information about the H I in and around hundreds of galaxies with a typical signal-to-noise ratio of ∼10 in the inner regions and ∼1 in the outer regions. In addition, such surveys will make it possible to probe faint H I structures, typically located in the vicinity of galaxies, such as extra-planar-gas, tails and filaments. These structures are crucial for understanding galaxy evolution, particularly when they are studied in relation to the local environment. Our aim is to find optimized kernels for the discovery of faint and morphologically complex H I structures. Therefore, using H I data from a variety of galaxies, we explore state-of-the-art filtering algorithms. We show that the intensity-driven gradient filter, due to its adaptive characteristics, is the optimal choice. In fact, this filter requires only minimal tuning of the input parameters to enhance the signal-to-noise ratio of faint components. In addition, it does not degrade the resolution of the high signal-to-noise component of a source. 
The filtering process must be fast and be embedded in an interactive visualization tool in order to support fast inspection of a large number of sources. To achieve such interactive exploration, we implemented a multi-core CPU (OpenMP) and a GPU (OpenGL) version of this filter in a 3D visualization environment (SlicerAstro). }, } |
2016 | Journal | Ibrahim Ozbolat, Hemanth Gudapati (2016). A review on design for bioprinting. Bioprinting, 3, pp. 1–14. (link) (bib) x @article{Ozbolat2016, year = { 2016 }, volume = { 3 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85016144668{\&}doi=10.1016{\%}2Fj.bprint.2016.11.001{\&}partnerID=40{\&}md5=be8ae863546f5861c3ed0df09aaee711 }, type = { Journal Article }, title = { A review on design for bioprinting }, pages = { 1--14 }, keywords = { Blueprint modeling,Design requirements for bioprinting,Medical imaging,Toolpath planning }, journal = { Bioprinting }, issn = { 24058866 }, doi = { 10.1016/j.bprint.2016.11.001 }, author = { Ozbolat and Gudapati }, abstract = { In order to bioprint living tissue and organ constructs, patient-specific anatomical models need to be acquired; however, these models mainly provide external surface information only. The internal architecture of tissue constructs plays a crucial role as it provides a porous environment for media exchange, vascularization, tissue growth and engraftment. This review presents design requirements for bioprinting and discusses currently available medical imaging techniques used in acquisition of anatomical models including magnetic resonance imaging (MRI) and computed tomography (CT), and compares their strengths and limitations. Then, consideration for design architecture is discussed and various approaches in blueprint modeling of tissue constructs are presented for creation of porous architectures. Next, existing toolpath planning approaches for bioprinting of tissues and organs are presented. Design limitations for bioprinting are discussed and future perspectives are provided to the reader. }, } |
2016 | Journal | Rafael Namías, Juan Pablo D'Amato, Mariana del Fresno, Marcelo Vénere, Nicola Pirró, Marc Emmanuel Bellemare (2016). Multi-object segmentation framework using deformable models for medical imaging analysis. Medical and Biological Engineering and Computing, 54(8), pp. 1181–1192. (link) (bib) x @article{Namias2016, year = { 2016 }, volume = { 54 }, url = { https://doi.org/10.1007/s11517-015-1387-3 }, type = { Journal Article }, title = { Multi-object segmentation framework using deformable models for medical imaging analysis }, pages = { 1181--1192 }, number = { 8 }, keywords = { Collision control,Complex segmentation,Deformable models,Multi-object segmentation,Segmentation framework }, journal = { Medical and Biological Engineering and Computing }, issn = { 17410444 }, doi = { 10.1007/s11517-015-1387-3 }, author = { Nam{\'{i}}as and D'Amato and Fresno and V{\'{e}}nere and Pirr{\'{o}} and Bellemare }, abstract = { Segmenting structures of interest in medical images is an important step in different tasks such as visualization, quantitative analysis, simulation, and image-guided surgery, among several other clinical applications. Numerous segmentation methods have been developed in the past three decades for extraction of anatomical or functional structures on medical imaging. Deformable models, which include the active contour models or snakes, are among the most popular methods for image segmentation combining several desirable features such as inherent connectivity and smoothness. Even though different approaches have been proposed and significant work has been dedicated to the improvement of such algorithms, there are still challenging research directions as the simultaneous extraction of multiple objects and the integration of individual techniques. This paper presents a novel open-source framework called deformable model array (DMA) for the segmentation of multiple and complex structures of interest in different imaging modalities. 
While most active contour algorithms can extract one region at a time, DMA allows integrating several deformable models to deal with multiple segmentation scenarios. Moreover, it is possible to consider any existing explicit deformable model formulation and even to incorporate new active contour methods, allowing to select a suitable combination in different conditions. The framework also introduces a control module that coordinates the cooperative evolution of the snakes and is able to solve interaction issues toward the segmentation goal. Thus, DMA can implement complex object and multi-object segmentations in both 2D and 3D using the contextual information derived from the model interaction. These are important features for several medical image analysis tasks in which different but related objects need to be simultaneously extracted. Experimental results on both computed tomography and magnetic resonance imaging show that the proposed framework has a wide range of applications especially in the presence of adjacent structures of interest or under intra-structure inhomogeneities giving excellent quantitative results. }, } |
2016 | Journal | J. M. Mukherjee, C. Lindsay, A. Mukherjee, P. Olivier, L. Shao, M. A. King, R. Licho (2016). Improved frame-based estimation of head motion in PET brain imaging. Medical Physics, 43(5), pp. 2443–2454. (link) (bib) x @article{Mukherjee2016, year = { 2016 }, volume = { 43 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Improved frame-based estimation of head motion in PET brain imaging }, pages = { 2443--2454 }, number = { 5 }, keywords = { PET,motion compensation,motion tracking,reconstruction,registration }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4946814 }, author = { Mukherjee and Lindsay and Mukherjee and Olivier and Shao and King and Licho }, abstract = { Purpose: Head motion during PET brain imaging can cause significant degradation of image quality. Several authors have proposed ways to compensate for PET brain motion to restore image quality and improve quantitation. Head restraints can reduce movement but are unreliable; thus the need for alternative strategies such as data-driven motion estimation or external motion tracking. Herein, the authors present a data-driven motion estimation method using a preprocessing technique that allows the usage of very short duration frames, thus reducing the intraframe motion problem commonly observed in the multiple frame acquisition method. Methods: The list mode data for PET acquisition is uniformly divided into 5-s frames and images are reconstructed without attenuation correction. Interframe motion is estimated using a 3D multiresolution registration algorithm and subsequently compensated for. For this study, the authors used 8 PET brain studies that used F-18 FDG as the tracer and contained minor or no initial motion. After reconstruction and prior to motion estimation, known motion was introduced to each frame to simulate head motion during a PET acquisition. 
To investigate the trade-off in motion estimation and compensation with respect to frames of different length, the authors summed 5-s frames accordingly to produce 10 and 60 s frames. Summed images generated from the motion-compensated reconstructed frames were then compared to the original PET image reconstruction without motion compensation. Results: The authors found that our method is able to compensate for both gradual and step-like motions using frame times as short as 5 s with a spatial accuracy of 0.2 mm on average. Complex volunteer motion involving all six degrees of freedom was estimated with lower accuracy (0.3 mm on average) than the other types investigated. Preprocessing of 5-s images was necessary for successful image registration. Since their method utilizes nonattenuation corrected frames, it is not susceptible to motion introduced between CT and PET acquisitions. Conclusions: The authors have shown that they can estimate motion for frames with time intervals as short as 5 s using nonattenuation corrected reconstructed FDG PET brain images. Intraframe motion in 60-s frames causes degradation of accuracy to about 2 mm based on the motion type. }, } |
2016 | Journal | Greta S.P. Mok, Cobie Y.T. Ho, Bang Hung Yang, Tung Hsin Wu (2016). Interpolated average CT for cardiac PET/CT attenuation correction. Journal of Nuclear Cardiology, 23(5), pp. 1072–1079. (link) (bib) x @article{Mok2016, year = { 2016 }, volume = { 23 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Interpolated average CT for cardiac PET/CT attenuation correction }, pages = { 1072--1079 }, number = { 5 }, keywords = { PET/CT,attenuation correction,cardiac imaging,respiratory artifacts }, journal = { Journal of Nuclear Cardiology }, issn = { 15326551 }, doi = { 10.1007/s12350-015-0140-5 }, author = { Mok and Ho and Yang and Wu }, abstract = { Background: Previously, we proposed interpolated averaged CT (IACT) for improved attenuation correction (AC) in thoracic PET/CT. This study aims to evaluate its feasibility and effectiveness on cardiac PET/CT. Methods: We simulated 18F-FDG distribution using the XCAT phantom with normal and abnormal cardiac uptake. Average activity and attenuation maps represented static PET and respiration average CT (ACT), respectively, while the attenuation maps of end-inspiration/expiration represented 2 helical CTs (HCT). IACT was obtained by averaging the 2 extreme phases and the interpolated phases generated between them. Later, we recruited 4 patients who were scanned 1 hr post 315-428 MBq 18F-FDG injection. Simulated and clinical PET sinograms were reconstructed with AC using (1) HCT, (2) IACT, and (3) ACT. Polar plots and the 17-segment plots were analyzed. Two regions-of-interest were drawn on lesion and background area to obtain the intensity ratio (IR). Results: Polar plots of PETIACT-AC were more similar to PETACT-AC in both simulation and clinical data. Artifacts were observed in various segments in PETHCT-AC. IR differences of HCT as compared to the phantom were up to {\~{}}20{\%}. Conclusions: IACT-AC reduced respiratory artifacts and improved PET/CT matching similarly to ACT-AC. 
It is a promising low-dose alternate of ACT for cardiac PET/CT. }, } |
2016 | Journal | Fujun Lan, Jean Jeudy, Suresh Senan, J. R. Van Sornsen De Koste, Warren D'Souza, Huan Hsin Tseng, Jinghao Zhou, Hao Zhang (2016). Should regional ventilation function be considered during radiation treatment planning to prevent radiation-induced complications?. Medical Physics, 43(9), pp. 5072–5079. (link) (bib) x @article{Lan2016, year = { 2016 }, volume = { 43 }, url = { https://doi.org/10.1118/1.4960367 }, type = { Journal Article }, title = { Should regional ventilation function be considered during radiation treatment planning to prevent radiation-induced complications? }, pages = { 5072--5079 }, number = { 9 }, keywords = { non-small cell lung cancer,radiation fibrosis,regional ventilation function }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4960367 }, author = { Lan and Jeudy and Senan and {Van Sornsen De Koste} and D'Souza and Tseng and Zhou and Zhang }, abstract = { Purpose: To investigate the incorporation of pretherapy regional ventilation function in predicting radiation fibrosis (RF) in stage III nonsmall cell lung cancer (NSCLC) patients treated with concurrent thoracic chemoradiotherapy. Methods: Thirty-seven patients with stage III NSCLC were retrospectively studied. Patients received one cycle of cisplatin–gemcitabine, followed by two to three cycles of cisplatin–etoposide concurrently with involved-field thoracic radiotherapy (46–66 Gy; 2 Gy/fraction). Pretherapy regional ventilation images of the lung were derived from 4D computed tomography via a density change-based algorithm with mass correction. In addition to the conventional dose–volume metrics (V20, V30, V40, and mean lung dose), dose–function metrics (fV20, fV30, fV40, and functional mean lung dose) were generated by combining regional ventilation and radiation dose. 
A new class of metrics was derived and referred to as dose–subvolume metrics (sV20, sV30, sV40, and subvolume mean lung dose); these were defined as the conventional dose–volume metrics computed on the functional lung. Area under the receiver operating characteristic curve (AUC) values and logistic regression analyses were used to evaluate these metrics in predicting hallmark characteristics of RF (lung consolidation, volume loss, and airway dilation). Results: AUC values for the dose–volume metrics in predicting lung consolidation, volume loss, and airway dilation were 0.65–0.69, 0.57–0.70, and 0.69–0.76, respectively. The respective ranges for dose–function metrics were 0.63–0.66, 0.61–0.71, and 0.72–0.80 and for dose–subvolume metrics were 0.50–0.65, 0.65–0.75, and 0.73–0.85. Using an AUC value = 0.70 as cutoff value suggested that at least one of each type of metrics (dose–volume, dose–function, dose–subvolume) was predictive for volume loss and airway dilation, whereas lung consolidation cannot be accurately predicted by any of the metrics. Logistic regression analyses showed that dose–function and dose–subvolume metrics were significant (P values ≤ 0.02) in predicting volume airway dilation. Likelihood ratio test showed that when combining dose–function and/or dose–subvolume metrics with dose–volume metrics, the achieved improvements of prediction accuracy on volume loss and airway dilation were significant (P values ≤ 0.04). Conclusions: The authors' results demonstrated that the inclusion of regional ventilation function improved accuracy in predicting RF. In particular, dose–subvolume metrics provided a promising method for preventing radiation-induced pulmonary complications. }, } |
2016 | Journal | Panagiotis Korfiatis, Timothy L. Kline, Lucie Coufalova, Daniel H. Lachance, Ian F. Parney, Rickey E. Carter, Jan C. Buckner, Bradley J. Erickson (2016). MRI texture features as biomarkers to predict MGMT methylation status in glioblastomas. Medical Physics, 43(6), pp. 2835–2844. (link) (bib) x @article{Korfiatis2016, year = { 2016 }, volume = { 43 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { MRI texture features as biomarkers to predict MGMT methylation status in glioblastomas }, pages = { 2835--2844 }, number = { 6 }, keywords = { MGMT,MRI,glioblastoma multiforme,imaging biomarkers,random forest,support vector machines }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4948668 }, author = { Korfiatis and Kline and Coufalova and Lachance and Parney and Carter and Buckner and Erickson }, abstract = { Purpose: Imaging biomarker research focuses on discovering relationships between radiological features and histological findings. In glioblastoma patients, methylation of the O6-methylguanine methyltransferase (MGMT) gene promoter is positively correlated with an increased effectiveness of current standard of care. In this paper, the authors investigate texture features as potential imaging biomarkers for capturing the MGMT methylation status of glioblastoma multiforme (GBM) tumors when combined with supervised classification schemes. Methods: A retrospective study of 155 GBM patients with known MGMT methylation status was conducted. Co-occurrence and run length texture features were calculated, and both support vector machines (SVMs) and random forest classifiers were used to predict MGMT methylation status. 
Results: The best classification system (an SVM-based classifier) had a maximum area under the receiver-operating characteristic (ROC) curve of 0.85 (95{\%} CI: 0.78–0.91) using four texture features (correlation, energy, entropy, and local intensity) originating from the T2-weighted images, yielding at the optimal threshold of the ROC curve, a sensitivity of 0.803 and a specificity of 0.813. Conclusions: Results show that supervised machine learning of MRI texture features can predict MGMT methylation status in preoperative GBM tumors, thus providing a new noninvasive imaging biomarker. }, } |
2016 | Journal | Ludovic Humbert, Javad Hazrati Marangalou, Luis Miguel Del Río Barquero, G. Harry Van Lenthe, Bert Van Rietbergen (2016). Technical Note: Cortical thickness and density estimation from clinical CT using a prior thickness-density relationship. Medical Physics, 43(4), pp. 1945–1954. (link) (bib) x @article{Humbert2016, year = { 2016 }, volume = { 43 }, url = { https://doi.org/10.1118/1.4944501 }, type = { Journal Article }, title = { Technical Note: Cortical thickness and density estimation from clinical CT using a prior thickness-density relationship }, pages = { 1945--1954 }, number = { 4 }, keywords = { bone mineral density,computed tomography,cortical thickness,hip fracture,osteoporosis }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4944501 }, author = { Humbert and {Hazrati Marangalou} and {Del R{\'{i}}o Barquero} and {Van Lenthe} and {Van Rietbergen} }, abstract = { Purpose: Cortical thickness and density are critical components in determining the strength of bony structures. Computed tomography (CT) is one possible modality for analyzing the cortex in 3D. In this paper, a model-based approach for measuring the cortical bone thickness and density from clinical CT images is proposed. Methods: Density variations across the cortex were modeled as a function of the cortical thickness and density, location of the cortex, density of surrounding tissues, and imaging blur. High resolution micro-CT data of cadaver proximal femurs were analyzed to determine a relationship between cortical thickness and density. This thickness-density relationship was used as prior information to be incorporated in the model to obtain accurate measurements of cortical thickness and density from clinical CT volumes. The method was validated using micro-CT scans of 23 cadaver proximal femurs. Simulated clinical CT images with different voxel sizes were generated from the micro-CT data. 
Cortical thickness and density were estimated from the simulated images using the proposed method and compared with measurements obtained using the micro-CT images to evaluate the effect of voxel size on the accuracy of the method. Then, 19 of the 23 specimens were imaged using a clinical CT scanner. Cortical thickness and density were estimated from the clinical CT images using the proposed method and compared with the micro-CT measurements. Finally, a case-control study including 20 patients with osteoporosis and 20 age-matched controls with normal bone density was performed to evaluate the proposed method in a clinical context. Results: Cortical thickness (density) estimation errors were 0.07 ± 0.19 mm (-18 ± 92 mg/cm3) using the simulated clinical CT volumes with the smallest voxel size (0.33 × 0.33 × 0.5 mm3), and 0.10 ± 0.24 mm (-10 ± 115 mg/cm3) using the volumes with the largest voxel size (1.0 × 1.0 × 3.0 mm3). A trend for the cortical thickness and density estimation errors to increase with voxel size was observed and was more pronounced for thin cortices. Using clinical CT data for 19 of the 23 samples, mean errors of 0.18 ± 0.24 mm for the cortical thickness and 15 ± 106 mg/cm3 for the density were found. The case-control study showed that osteoporotic patients had a thinner cortex and a lower cortical density, with average differences of -0.8 mm and -58.6 mg/cm3 at the proximal femur in comparison with age-matched controls (p-value {\textless} 0.001). Conclusions: This method might be a promising approach for the quantification of cortical bone thickness and density using clinical routine imaging techniques. Future work will concentrate on investigating how this approach can improve the estimation of mechanical strength of bony structures, the prevention of fracture, and the management of osteoporosis. }, } |
2016 | Journal | Cobie Y.T. Ho, Tung Hsin Wu, Greta S.P. Mok (2016). Interpolated average CT for PET attenuation correction in different lesion characteristics. Nuclear Medicine Communications, 37(3), pp. 297–306. (link) (bib) x @article{Ho2016, year = { 2016 }, volume = { 37 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Interpolated average CT for PET attenuation correction in different lesion characteristics }, pages = { 297--306 }, number = { 3 }, keywords = { PET/CT,attenuation correction,respiratory artifacts,simulations,thoracic lesions }, journal = { Nuclear Medicine Communications }, issn = { 14735628 }, doi = { 10.1097/MNM.0000000000000435 }, author = { Ho and Wu and Mok }, abstract = { Objective Previously we proposed using an interpolated average computed tomography (IACT) method as a low-dose alternate of cine average computed tomography (CACT) for PET attenuation correction (AC). This study aims to evaluate its performance for thoracic lesions with different characteristics in simulations and clinical patients. Materials and methods We used the XCAT phantom to simulate noisy fluorine-18Fluorodeoxyglucose (18F-FDG) distribution with respiratory motion amplitudes of 2 and 3 cm. Average activity and attenuation maps represented static PET and CACT, respectively. IACT was generated by the end-inspiration and end-expiration phases of the attenuation maps (HCT-in and HCT-ex) using a deformable registration method. Spherical lesions with diameters of 10 and 20 mm with four target-to-background ratios (TBRs) were simulated at four different locations individually, including the lower left lung, lower right lung, middle right lung, and upper right lung. Five patients with a total of six thoracic lesions were recruited. They were scanned 1 h after 315-480 MBq 18F-FDG injection. Simulated and clinical PET sinograms were reconstructed with AC using (i) CACT, (ii) IACT, and (iii) helical computed tomography (HCTs). 
The TBRs and mean standardized uptake value were analyzed. Results Significant artifacts were observed in PET HCTs from visual assessment. For both simulation and clinical study, PET IACT was more similar to PET CACT in terms of TBRs and mean standardized uptake value. The differences between CACT/IACT and HCTs were more significant for lesions located at the lower lungs. Conclusion The IACT is a robust and low-dose AC method for improved thoracic lesion localization and quantitation for a wide range of lesion characteristics. }, } |
2016 | Journal | Heiko Herrmann, Emiliano Pastorelli, Aki Kallonen, Jussi Petteri Suuronen (2016). Methods for fibre orientation analysis of X-ray tomography images of steel fibre reinforced concrete (SFRC). Journal of Materials Science, 51(8), pp. 3772–3783. (link) (bib) x @article{Herrmann2016, year = { 2016 }, volume = { 51 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Methods for fibre orientation analysis of X-ray tomography images of steel fibre reinforced concrete (SFRC) }, pages = { 3772--3783 }, number = { 8 }, journal = { Journal of Materials Science }, issn = { 15734803 }, doi = { 10.1007/s10853-015-9695-4 }, author = { Herrmann and Pastorelli and Kallonen and Suuronen }, abstract = { One of the most important factors to determine the mechanical properties of a fibre composite material is the orientation of the fibres in the matrix. This paper presents Hessian matrix-based algorithms to retrieve the orientation of individual fibres out of steel fibre reinforced cementitious composites samples scanned with an X-ray computed tomography scanner. The software implemented with the algorithms includes a massive data filtering component to remove noise from the data-sets and prepare them correctly for the analysis. Due to its short computational times and limited need for user intervention, the software is able to process and analyse large batches of data in short periods and provide results in a variety of visual and numerical formats. The application and comparison of these algorithms lead to further insight into the material behaviour. In contrast to the usual assumption that the fibres act only along their main axis, it is shown that the contribution of hooked-end fibres in other directions may be noticeable. This means that fibres, depending on their shape, should act as orthotropic inclusions. 
The methods can be used by research laboratories and companies on an everyday basis to obtain fibre orientations from samples, which in turn can be used in research, to study stress–strain behaviour, as input to constitutive models or for quality assurance. }, } |
2016 | Journal | B. Fröhler, T. Möller, C. Heinzl (2016). GEMSe: Visualization-Guided Exploration of Multi-channel Segmentation Algorithms. Computer Graphics Forum, 35(3), pp. 191–200. (link) (bib) x @article{Frohler2016, year = { 2016 }, volume = { 35 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { GEMSe: Visualization-Guided Exploration of Multi-channel Segmentation Algorithms }, pages = { 191--200 }, number = { 3 }, keywords = { Categories and Subject Descriptors (according to A,I.3.8 [Computer Graphics]: Applications—,I.4.6 [Image Processing and Computer Vision]: Segm }, journal = { Computer Graphics Forum }, issn = { 14678659 }, doi = { 10.1111/cgf.12895 }, author = { Fr{\"{o}}hler and M{\"{o}}ller and Heinzl }, abstract = { We present GEMSe, an interactive tool for exploring and analyzing the parameter space of multi-channel segmentation algorithms. Our targeted user group are domain experts who are not necessarily segmentation specialists. GEMSe allows the exploration of the space of possible parameter combinations for a segmentation framework and its ensemble of results. Users start with sampling the parameter space and computing the corresponding segmentations. A hierarchically clustered image tree provides an overview of variations in the resulting space of label images. Details are provided through exemplary images from the selected cluster and histograms visualizing the parameters and the derived output in the selected cluster. The correlation between parameters and derived output as well as the effect of parameter changes can be explored through interactive filtering and scatter plots. 
We evaluate the usefulness of GEMSe through expert reviews and case studies based on three different kinds of datasets: A synthetic dataset emulating the combination of 3D X-ray computed tomography with data from K-Edge spectroscopy, a three-channel scan of a rock crystal acquired by a Talbot-Lau grating interferometer X-ray computed tomography device, as well as a hyperspectral image. }, } |
2016 | Journal | Christine Fennema-Notestine, Linda K. McEvoy, Randy Notestine, Matthew S. Panizzon, Wai Ying Wendy Yau, Carol E. Franz, Michael J. Lyons, Lisa T. Eyler, Michael C. Neale, Hong Xian, Ruth E. McKenzie, William S. Kremen (2016). White matter disease in midlife is heritable, related to hypertension, and shares some genetic influence with systolic blood pressure. NeuroImage: Clinical, 12, pp. 737–745. (link) (bib) x @article{FennemaNotestine2016, year = { 2016 }, volume = { 12 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { White matter disease in midlife is heritable, related to hypertension, and shares some genetic influence with systolic blood pressure }, pages = { 737--745 }, keywords = { Blood pressure,Brain,Heritability,Hypertension,MRI,White matter }, journal = { NeuroImage: Clinical }, issn = { 22131582 }, doi = { 10.1016/j.nicl.2016.10.001 }, author = { Fennema-Notestine and McEvoy and Notestine and Panizzon and Yau and Franz and Lyons and Eyler and Neale and Xian and McKenzie and Kremen }, abstract = { White matter disease in the brain increases with age and cardiovascular disease, emerging in midlife, and these associations may be influenced by both genetic and environmental factors. We examined the frequency, distribution, and heritability of abnormal white matter and its association with hypertension in 395 middle-aged male twins (61.9 ± 2.6 years) from the Vietnam Era Twin Study of Aging, 67{\%} of whom were hypertensive. A multi-channel segmentation approach estimated abnormal regions within the white matter. Using multivariable regression models, we characterized the frequency distribution of abnormal white matter in midlife and investigated associations with hypertension and Apolipoprotein E-$\epsilon$4 status and the impact of duration and control of hypertension. Then, using the classical twin design, we estimated abnormal white matter heritability and the extent of shared genetic overlap with blood pressure. 
Abnormal white matter was predominantly located in periventricular and deep parietal and frontal regions; associated with age (t = 1.9, p = 0.05) and hypertension (t = 2.9, p = 0.004), but not Apolipoprotein $\epsilon$4 status; and was greater in those with uncontrolled hypertension relative to controlled (t = 3.0, p = 0.003) and normotensive (t = 4.0, p = 0.0001) groups, suggesting that abnormal white matter may reflect currently active cerebrovascular effects. Abnormal white matter was highly heritable (a2 = 0.81) and shared some genetic influences with systolic blood pressure (rA = 0.26), although there was evidence for distinct genetic contributions and unique environmental influences. Future longitudinal research will shed light on factors impacting white matter disease presentation, progression, and potential recovery. }, } |
2016 | Journal | Andrea Dziubek, Giovanna Guidoboni, Alon Harris, Anil N. Hirani, Edmond Rusjan, William Thistleton (2016). Effect of ocular shape and vascular geometry on retinal hemodynamics: a computational model. Biomechanics and Modeling in Mechanobiology, 15(4), pp. 893–907. (link) (bib) x @article{Dziubek2016, year = { 2016 }, volume = { 15 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Effect of ocular shape and vascular geometry on retinal hemodynamics: a computational model }, pages = { 893--907 }, number = { 4 }, keywords = { Finite element exterior calculus,Hierarchical porous medium,Mathematical modeling,Ocular curvature,Retinal hemodynamics,Vascular network }, journal = { Biomechanics and Modeling in Mechanobiology }, issn = { 16177940 }, doi = { 10.1007/s10237-015-0731-8 }, author = { Dziubek and Guidoboni and Harris and Hirani and Rusjan and Thistleton }, abstract = { A computational model for retinal hemodynamics accounting for ocular curvature is presented. The model combines (i) a hierarchical Darcy model for the flow through small arterioles, capillaries and small venules in the retinal tissue, where blood vessels of different size are comprised in different hierarchical levels of a porous medium; and (ii) a one-dimensional network model for the blood flow through retinal arterioles and venules of larger size. The non-planar ocular shape is included by (i) defining the hierarchical Darcy flow model on a two-dimensional curved surface embedded in the three-dimensional space; and (ii) mapping the simplified one-dimensional network model onto the curved surface. The model is solved numerically using a finite element method in which spatial domain and hierarchical levels are discretized separately. For the finite element method, we use an exterior calculus-based implementation which permits an easier treatment of non-planar domains. Numerical solutions are verified against suitably constructed analytical solutions. 
Numerical experiments are performed to investigate how retinal hemodynamics is influenced by the ocular shape (sphere, oblate spheroid, prolate spheroid and barrel are compared) and vascular architecture (four vascular arcs and a branching vascular tree are compared). The model predictions show that changes in ocular shape induce non-uniform alterations of blood pressure and velocity in the retina. In particular, we found that (i) the temporal region is affected the least by changes in ocular shape, and (ii) the barrel shape departs the most from the hemispherical reference geometry in terms of associated pressure and velocity distributions in the retinal microvasculature. These results support the clinical hypothesis that alterations in ocular shape, such as those occurring in myopic eyes, might be associated with pathological alterations in retinal hemodynamics. }, } |
2016 | Journal | J. Dolz, H. A. Kirişli, T. Fechter, S. Karnitzki, O. Oehlke, U. Nestle, M. Vermandel, L. Massoptier (2016). Interactive contour delineation of organs at risk in radiotherapy: Clinical evaluation on NSCLC patients. Medical Physics, 43(5), pp. 2569–2580. (link) (bib) x @article{Dolz2016, year = { 2016 }, volume = { 43 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Interactive contour delineation of organs at risk in radiotherapy: Clinical evaluation on NSCLC patients }, pages = { 2569--2580 }, number = { 5 }, keywords = { autocontouring,lung cancer,organs at risk segmentation,radiotherapy,thoracic oncology,treatment planning }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4947484 }, author = { Dolz and Kirişli and Fechter and Karnitzki and Oehlke and Nestle and Vermandel and Massoptier }, abstract = { Purpose: Accurate delineation of organs at risk (OARs) on computed tomography (CT) image is required for radiation treatment planning (RTP). Manual delineation of OARs being time consuming and prone to high interobserver variability, many (semi-) automatic methods have been proposed. However, most of them are specific to a particular OAR. Here, an interactive computer-assisted system able to segment various OARs required for thoracic radiation therapy is introduced. Methods: Segmentation information (foreground and background seeds) is interactively added by the user in any of the three main orthogonal views of the CT volume and is subsequently propagated within the whole volume. The proposed method is based on the combination of watershed transformation and graph-cuts algorithm, which is used as a powerful optimization technique to minimize the energy function. The OARs considered for thoracic radiation therapy are the lungs, spinal cord, trachea, proximal bronchus tree, heart, and esophagus. The method was evaluated on multivendor CT datasets of 30 patients. 
Two radiation oncologists participated in the study and manual delineations from the original RTP were used as ground truth for evaluation. Results: Delineation of the OARs obtained with the minimally interactive approach was approved to be usable for RTP in nearly 90{\%} of the cases, excluding the esophagus, which segmentation was mostly rejected, thus leading to a gain of time ranging from 50{\%} to 80{\%} in RTP. Considering exclusively accepted cases, overall OARs, a Dice similarity coefficient higher than 0.7 and a Hausdorff distance below 10 mm with respect to the ground truth were achieved. In addition, the interobserver analysis did not highlight any statistically significant difference, at the exception of the segmentation of the heart, in terms of Hausdorff distance and volume difference. Conclusions: An interactive, accurate, fast, and easy-to-use computer-assisted system able to segment various OARs required for thoracic radiation therapy has been presented and clinically evaluated. The introduction of the proposed system in clinical routine may offer valuable new option to radiation oncologists in performing RTP. }, } |
2016 | Journal | Minghui Deng, Renping Yu, Li Wang, Feng Shi, Pew Thian Yap, Dinggang Shen (2016). Learning-based 3T brain MRI segmentation with guidance from 7T MRI labeling. Medical Physics, 43(12), pp. 6588–6597. (link) (bib) x @article{Deng2016, year = { 2016 }, volume = { 43 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Learning-based 3T brain MRI segmentation with guidance from 7T MRI labeling }, pages = { 6588--6597 }, number = { 12 }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4967487 }, author = { Deng and Yu and Wang and Shi and Yap and Shen }, abstract = { Purpose: Segmentation of brain magnetic resonance (MR) images into white matter (WM), gray matter (GM), and cerebrospinal fluid (CSF) is crucial for brain structural measurement and disease diagnosis. Learning-based segmentation methods depend largely on the availability of good training ground truth. However, the commonly used 3T MR images are of insufficient image quality and often exhibit poor intensity contrast between WM, GM, and CSF. Therefore, they are not ideal for providing good ground truth label data for training learning-based methods. Recent advances in ultrahigh field 7T imaging make it possible to acquire images with excellent intensity contrast and signal-to-noise ratio. Methods: In this paper, the authors propose an algorithm based on random forest for segmenting 3T MR images by training a series of classifiers based on reliable labels obtained semiautomatically from 7T MR images. The proposed algorithm iteratively refines the probability maps of WM, GM, and CSF via a cascade of random forest classifiers for improved tissue segmentation. Results: The proposed method was validated on two datasets, i.e., 10 subjects collected at their institution and 797 3T MR images from the Alzheimer's Disease Neuroimaging Initiative (ADNI) dataset. 
Specifically, for the mean Dice ratio of all 10 subjects, the proposed method achieved 94.52{\%} ± 0.9{\%}, 89.49{\%} ± 1.83{\%}, and 79.97{\%} ± 4.32{\%} for WM, GM, and CSF, respectively, which are significantly better than the state-of-the-art methods (p-values {\textless} 0.021). For the ADNI dataset, the group difference comparisons indicate that the proposed algorithm outperforms state-of-the-art segmentation methods. Conclusions: The authors have developed and validated a novel fully automated method for 3T brain MR image segmentation. }, } |
2016 | Journal | Remi Cresson, Gabriel Hautreux (2016). A generic framework for the development of geospatial processing pipelines on clusters. IEEE Geoscience and Remote Sensing Letters, 13(11), pp. 1706–1710. (link) (bib) x @article{Cresson2016, year = { 2016 }, volume = { 13 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84988653888{\&}doi=10.1109{\%}2FLGRS.2016.2605138{\&}partnerID=40{\&}md5=c94a89e4590d599fb89db287d43a4c88 }, type = { Journal Article }, title = { A generic framework for the development of geospatial processing pipelines on clusters }, pages = { 1706--1710 }, number = { 11 }, keywords = { Clusters,Message Passing Interface (MPI),Orfeo ToolBox (OTB),high-performance computing (HPC),parallel computing,remote sensing (RS) image processing }, journal = { IEEE Geoscience and Remote Sensing Letters }, issn = { 1545598X }, eprint = { 1609.08893 }, doi = { 10.1109/LGRS.2016.2605138 }, author = { Cresson and Hautreux }, arxivid = { 1609.08893 }, archiveprefix = { arXiv }, abstract = { The amount of remote sensing (RS) data available for applications is constantly growing due to the rise of very high resolution sensors and short-repeat-cycle satellites. Consequently, tackling the computational complexity in Earth observation information extraction is rising as a major challenge. Resorting to high-performance computing (HPC) is becoming a common practice, since this provides environments and programming facilities that are able to speed up processes. In particular, clusters are flexible cost-effective systems that are able to perform data-intensive tasks ideally fulfilling any computational requirement. However, their use typically implies a significant coding effort to build proper implementations of specific processing pipelines. This letter presents a generic framework for the development of RS images processing applications targeting cluster computing. 
It is based on common open-source libraries and leverages the parallelization of a wide variety of image processing pipelines in a transparent way. Performances on typical RS tasks implemented using the proposed framework demonstrate a great potential for the effective and timely processing of large amounts of data. }, } |
2016 | Journal | J S Cordova, S S Gurbani, J J Olson, Z X Liang, L A D Cooper, H K G Shu, E Schreibmann, S G Neill, C G Hadjipanayis, C A Holder, H Shim (2016). A Systematic Pipeline for the Objective Comparison of Whole-Brain Spectroscopic MRI with Histology in Biopsy Specimens from Grade 3 Glioma. Tomography, 2(2), pp. 106–116. (link) (bib) x @article{Cordova2016, year = { 2016 }, volume = { 2 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A Systematic Pipeline for the Objective Comparison of Whole-Brain Spectroscopic MRI with Histology in Biopsy Specimens from Grade 3 Glioma }, pages = { 106--116 }, number = { 2 }, journal = { Tomography }, issn = { 2379-1381 }, doi = { 10.18383/j.tom.2016.00136 }, author = { Cordova and Gurbani and Olson and Liang and Cooper and Shu and Schreibmann and Neill and Hadjipanayis and Holder and Shim }, abstract = { The diagnosis, prognosis, and management of patients with gliomas are largely dictated by the pathological analysis of tissue biopsied from a selected region within the lesion. However, due to the heterogeneous and infiltrative nature of gliomas, identifying the optimal region for biopsy with conventional magnetic resonance imaging (MRI) can be quite difficult. This is especially true for low grade gliomas, which often are non-enhancing tumors. To improve the management of patients with these tumors, the field of neuro-oncology requires an imaging modality that can specifically identify a tumor's most anaplastic/aggressive region(s) for biopsy targeting. The addition of metabolic mapping using spectroscopic MRI (sMRI) to supplement conventional MRI could improve biopsy targeting and, ultimately, diagnostic accuracy. Here, we describe a pipeline for the integration of state-of-the-art, high-resolution whole-brain 3D sMRI maps into a stereotactic neuronavigation system for guiding biopsies in gliomas with nonenhancing components. 
We also outline a machine-learning method for automated histology analysis that generates normalized, quantitative metrics describing tumor infiltration in immunohistochemically-stained tissue specimens. As a proof of concept, we describe the combination of these two techniques in a small cohort of grade III glioma patients. In this work, we aim to set forth a systematic pipeline to stimulate histopathology-image validation of advanced MRI techniques, such as sMRI. }, } |
2016 | Journal | Matthew D. Blackledge, David J. Collins, Dow Mu Koh, Martin O. Leach (2016). Rapid development of image analysis research tools: Bridging the gap between researcher and clinician with pyOsiriX. Computers in Biology and Medicine, 69, pp. 203–212. (link) (bib) x @article{Blackledge2016, year = { 2016 }, volume = { 69 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Rapid development of image analysis research tools: Bridging the gap between researcher and clinician with pyOsiriX }, pages = { 203--212 }, keywords = { Computed tomography,Dicom management,Dicom visualisation,Medical imaging,OsiriX,Python,Radiology }, journal = { Computers in Biology and Medicine }, issn = { 18790534 }, doi = { 10.1016/j.compbiomed.2015.12.002 }, author = { Blackledge and Collins and Koh and Leach }, abstract = { We present pyOsiriX, a plugin built for the already popular dicom viewer OsiriX that provides users the ability to extend the functionality of OsiriX through simple Python scripts. This approach allows users to integrate the many cutting-edge scientific/image-processing libraries created for Python into a powerful DICOM visualisation package that is intuitive to use and already familiar to many clinical researchers. Using pyOsiriX we hope to bridge the apparent gap between basic imaging scientists and clinical practice in a research setting and thus accelerate the development of advanced clinical image processing. We provide arguments for the use of Python as a robust scripting language for incorporation into larger software solutions, outline the structure of pyOsiriX and how it may be used to extend the functionality of OsiriX, and we provide three case studies that exemplify its utility.For our first case study we use pyOsiriX to provide a tool for smooth histogram display of voxel values within a user-defined region of interest (ROI) in OsiriX. 
We used a kernel density estimation (KDE) method available in Python using the scikit-learn library, where the total number of lines of Python code required to generate this tool was 22. Our second example presents a scheme for segmentation of the skeleton from CT datasets. We have demonstrated that good segmentation can be achieved for two example CT studies by using a combination of Python libraries including scikit-learn, scikit-image, SimpleITK and matplotlib. Furthermore, this segmentation method was incorporated into an automatic analysis of quantitative PET-CT in a patient with bone metastases from primary prostate cancer. This enabled repeatable statistical evaluation of PET uptake values for each lesion, before and after treatment, providing estimates of maximum and median standardised uptake values (SUVmax and SUVmed respectively). Following treatment we observed a reduction in lesion volume, SUVmax and SUVmed for all lesions, in agreement with a reduction in concurrent measures of serum prostate-specific antigen (PSA). }, } |
2016 | Journal | Matthias Becker, Niels Nijdam, Nadia Magnenat-Thalmann (2016). Coupling strategies for multi-resolution deformable meshes: expanding the pyramid approach beyond its one-way nature. International Journal of Computer Assisted Radiology and Surgery, 11(5), pp. 695–705. (link) (bib) x @article{Becker2016, year = { 2016 }, volume = { 11 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Coupling strategies for multi-resolution deformable meshes: expanding the pyramid approach beyond its one-way nature }, pages = { 695--705 }, number = { 5 }, keywords = { Deformable models,Mesh coupling,Multi-resolution,Synchronization }, journal = { International Journal of Computer Assisted Radiology and Surgery }, issn = { 18616429 }, doi = { 10.1007/s11548-015-1241-y }, author = { Becker and Nijdam and Magnenat-Thalmann }, abstract = { Purpose: With higher resolutions, medical image processing operations like segmentation take more time to calculate per step. The pyramid technique is a common approach to solving this problem. Starting with a low resolution, a stepwise refinement is applied until the original resolution is reached. Methods: Our work proposes a method for deformable model segmentation that generally utilizes the common pyramid technique with our improvement, to calculate and keep synchronized all mesh resolution levels in parallel. The models are coupled to propagate their changes. It presents coupling techniques and shows approaches for synchronization. The interaction with the models is realized using springs and volcanoes, and it is evaluated for the semantics of the operation to share them across the different levels. Results: The locking overhead has been evaluated for different synchronization techniques with meshes of individual resolutions. The partial update strategy has been found to have the least locking overhead. Conclusion: Running multiple models with individual resolutions in parallel is feasible. 
The synchronization approach has to be chosen carefully, so that an interactive modification of the segmentation remains possible. The proposed technique is aimed at making medical image segmentation more usable while delivering high performance. }, } |
2016 | Journal | Richard J. Beare, Jian Chen, Claire E. Kelly, Dimitrios Alexopoulos, Christopher D. Smyser, Cynthia E. Rogers, Wai Y. Loh, Lillian G. Matthews, Jeanie L.Y. Cheong, Alicia J. Spittle, Peter J. Anderson, Lex W. Doyle, Terrie E. Inder, Marc L. Seal, Deanne K. Thompson (2016). Neonatal brain tissue classification with morphological adaptation and unified segmentation. Frontiers in Neuroinformatics, 10(MAR), pp. 17. (link) (bib) x @article{Beare2016, year = { 2016 }, volume = { 10 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Neonatal brain tissue classification with morphological adaptation and unified segmentation }, pages = { 17 }, number = { MAR }, keywords = { Magnetic resonance imaging,Neonate,Preterm birth,Statistical parametric mapping,Tissue classification }, journal = { Frontiers in Neuroinformatics }, issn = { 16625196 }, doi = { 10.3389/fninf.2016.00012 }, author = { Beare and Chen and Kelly and Alexopoulos and Smyser and Rogers and Loh and Matthews and Cheong and Spittle and Anderson and Doyle and Inder and Seal and Thompson }, abstract = { Measuring the distribution of brain tissue types (tissue classification) in neonates is necessary for studying typical and atypical brain development, such as that associated with preterm birth, and may provide biomarkers for neurodevelopmental outcomes. Compared with magnetic resonance images of adults, neonatal images present specific challenges that require the development of specialized, population-specific methods. This paper introduces MANTiS (Morphologically Adaptive Neonatal Tissue Segmentation), which extends the unified segmentation approach to tissue classification implemented in Statistical Parametric Mapping (SPM) software to neonates. 
MANTiS utilizes a combination of unified segmentation, template adaptation via morphological segmentation tools and topological filtering, to segment the neonatal brain into eight tissue classes: cortical gray matter, white matter, deep nuclear gray matter, cerebellum, brainstem, cerebrospinal fluid (CSF), hippocampus and amygdala. We evaluated the performance of MANTiS using two independent datasets. The first dataset, provided by the NeoBrainS12 challenge, consisted of coronal T2-weighted images of preterm infants (born ≤30 weeks' gestation) acquired at 30 weeks' corrected gestational age (n = 5), coronal T2-weighted images of preterm infants acquired at 40 weeks' corrected gestational age (n = 5) and axial T2-weighted images of preterm infants acquired at 40 weeks' corrected gestational age (n = 5). The second dataset, provided by the Washington University NeuroDevelopmental Research (WUNDeR) group, consisted of T2-weighted images of preterm infants (born {\textless}30 weeks' gestation) acquired shortly after birth (n = 12), preterm infants acquired at term-equivalent age (n = 12), and healthy term-born infants (born ≥38 weeks' gestation) acquired within the first 9 days of life (n = 12). For the NeoBrainS12 dataset, mean Dice scores comparing MANTiS with manual segmentations were all above 0.7, except for the cortical gray matter for coronal images acquired at 30 weeks. This demonstrates that MANTiS' performance is competitive with existing techniques. For the WUNDeR dataset, mean Dice scores comparing MANTiS with manually edited segmentations demonstrated good agreement, where all scores were above 0.75, except for the hippocampus and amygdala. The results show that MANTiS is able to segment neonatal brain tissues well, even in images that have brain abnormalities common in preterm infants. MANTiS is available for download as an SPM toolbox from http://developmentalimagingmcri.github.io/mantis }, } |
2016 | Journal | Hossein Arabi, Habib Zaidi (2016). Magnetic resonance imaging-guided attenuation correction in whole-body PET/MRI using a sorted atlas approach. Medical Image Analysis, 31, pp. 1–15. (link) (bib) x @article{Arabi2016, year = { 2016 }, volume = { 31 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Magnetic resonance imaging-guided attenuation correction in whole-body PET/MRI using a sorted atlas approach }, pages = { 1--15 }, keywords = { Atlas,Attenuation correction,PET/MRI,Pseudo-CT generation,Quantification }, journal = { Medical Image Analysis }, issn = { 13618423 }, doi = { 10.1016/j.media.2016.02.002 }, author = { Arabi and Zaidi }, abstract = { Quantitative whole-body PET/MR imaging is challenged by the lack of accurate and robust strategies for attenuation correction. In this work, a new pseudo-CT generation approach, referred to as sorted atlas pseudo-CT (SAP), is proposed for accurate extraction of bones and estimation of lung attenuation properties. This approach improves the Gaussian process regression (GPR) kernel proposed by Hofmann et al. which relies on the information provided by a co-registered atlas (CT and MRI) using a GPR kernel to predict the distribution of attenuation coefficients. Our approach uses two separate GPR kernels for lung and non-lung tissues. For non-lung tissues, the co-registered atlas dataset was sorted on the basis of local normalized cross-correlation similarity to the target MR image to select the most similar image in the atlas for each voxel. For lung tissue, the lung volume was incorporated in the GPR kernel taking advantage of the correlation between lung volume and corresponding attenuation properties to predict the attenuation coefficients of the lung. In the presence of pathological tissues in the lungs, the lesions are segmented on PET images corrected for attenuation using MRI-derived three-class attenuation map followed by assignment of soft-tissue attenuation coefficient. 
The proposed algorithm was compared to other techniques reported in the literature including Hofmann's approach and the three-class attenuation correction technique implemented on the Philips Ingenuity TF PET/MR where CT-based attenuation correction served as reference. Fourteen patients with head and neck cancer undergoing PET/CT and PET/MR examinations were used for quantitative analysis. SUV measurements were performed on 12 normal uptake regions as well as high uptake malignant regions. Moreover, a number of similarity measures were used to evaluate the accuracy of extracted bones. The Dice similarity metric revealed that the extracted bone improved from 0.58±0.09 to 0.65±0.07 when using the SAP technique compared to Hofmann's approach. This made it possible to reduce the SUV mean bias in bony structures for the SAP approach to -1.7±4.8{\%} as compared to -7.3±6.0{\%} and -27.4±10.1{\%} when using Hofmann's approach and the three-class attenuation map, respectively. Likewise, the three-class attenuation map produces a relative absolute error of 21.7±11.8{\%} in the lungs. This was reduced on average to 15.8±8.6{\%} and 8.0±3.8{\%} when using Hofmann's and SAP techniques, respectively. The SAP technique resulted in better overall PET quantification accuracy than both Hofmann's and the three-class approaches owing to the more accurate extraction of bones and better prediction of lung attenuation coefficients. Further improvement of the technique and reduction of the computational time are still required. }, } |
2016 | Journal | Daniil P. Aksenov, Michael J. Miller, Limin Li, Alice M. Wyrwicz (2016). Eyeblink classical conditioning and BOLD fMRI of anesthesia-induced changes in the developing brain. Physiology and Behavior, 167, pp. 10–15. (link) (bib) x @article{Aksenov2016a, year = { 2016 }, volume = { 167 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Eyeblink classical conditioning and BOLD fMRI of anesthesia-induced changes in the developing brain }, pages = { 10--15 }, keywords = { Anesthesia,Infant,Learning }, journal = { Physiology and Behavior }, issn = { 1873507X }, doi = { 10.1016/j.physbeh.2016.08.030 }, author = { Aksenov and Miller and Li and Wyrwicz }, abstract = { Millions of children undergo general anesthesia each year in the USA alone, and a growing body of literature from animals and humans suggests that exposure to anesthesia at an early age can impact neuronal development, leading to learning and memory impairments later in childhood. Although a number of studies have reported behavioral and structural effects of anesthesia exposure during infancy, the functional manifestation of these changes has not been previous examined. In this study we used BOLD fMRI to measure the functional response to stimulation in the whisker barrel cortex of awake rabbits before and after learning a trace eyeblink classical conditioning paradigm. The functional changes, in terms of activated volume and time course, in rabbits exposed to isoflurane anesthesia during infancy was compared to unanesthetized controls when both groups reached young adulthood. Our findings show that whereas both groups exhibited decreased BOLD response duration after learning, the anesthesia-exposed group also showed a decrease in BOLD response volume in the whisker barrel cortex, particularly in the deeper infragranular layer. These results suggest that anesthesia exposure during infancy may affect the intracortical processes that mediate learning-related plasticity. }, } |
2016 | Journal | Daniil P. Aksenov, Limin Li, Michael J. Miller, Alice M. Wyrwicz (2016). Blood oxygenation level dependent signal and neuronal adaptation to optogenetic and sensory stimulation in somatosensory cortex in awake animals. European Journal of Neuroscience, 44(9), pp. 2722–2729. (link) (bib) x @article{Aksenov2016, year = { 2016 }, volume = { 44 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Blood oxygenation level dependent signal and neuronal adaptation to optogenetic and sensory stimulation in somatosensory cortex in awake animals }, pages = { 2722--2729 }, number = { 9 }, keywords = { functional magnetic resonance imaging,neural activity,rabbit,whisker barrel cortex }, journal = { European Journal of Neuroscience }, issn = { 14609568 }, doi = { 10.1111/ejn.13384 }, author = { Aksenov and Li and Miller and Wyrwicz }, abstract = { The adaptation of neuronal responses to stimulation, in which a peak transient response is followed by a sustained plateau, has been well-studied. The blood oxygenation level dependent (BOLD) functional magnetic resonance imaging (fMRI) signal has also been shown to exhibit adaptation on a longer time scale. However, some regions such as the visual and auditory cortices exhibit significant BOLD adaptation, whereas other such as the whisker barrel cortex may not adapt. In the sensory cortex a combination of thalamic inputs and intracortical activity drives hemodynamic changes, although the relative contributions of these components are not entirely understood. The aim of this study is to assess the role of thalamic inputs vs. intracortical processing in shaping BOLD adaptation during stimulation in the somatosensory cortex. Using simultaneous fMRI and electrophysiology in awake rabbits, we measured BOLD, local field potentials (LFPs), single- and multi-unit activity in the cortex during whisker and optogenetic stimulation. 
This design allowed us to compare BOLD and haemodynamic responses during activation of the normal thalamocortical sensory pathway (i.e., both inputs and intracortical activity) vs. the direct optical activation of intracortical circuitry alone. Our findings show that whereas LFP and multi-unit (MUA) responses adapted, neither optogenetic nor sensory stimulation produced significant BOLD adaptation. We observed for both paradigms a variety of excitatory and inhibitory single unit responses. We conclude that sensory feed-forward thalamic inputs are not primarily responsible for shaping BOLD adaptation to stimuli; but the single-unit results point to a role in this behaviour for specific excitatory and inhibitory neuronal sub-populations, which may not correlate with aggregate neuronal activity. }, } |
2016 | Journal | Hakim C. Achterberg, Marcel Koek, Wiro J. Niessen (2016). Fastr: A Workflow engine for advanced data flows in medical image analysis. Frontiers in ICT, 3(AUG), pp. NA (link) (bib) x @article{Achterberg2016, year = { 2016 }, volume = { 3 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85048206888{\&}doi=10.3389{\%}2Ffict.2016.00015{\&}partnerID=40{\&}md5=453da670107b4919c49586ea9b393a99 }, type = { Journal Article }, title = { Fastr: A Workflow engine for advanced data flows in medical image analysis }, number = { AUG }, keywords = { Data flow,Data processing,Distributed computing,Pipeline,Provenance,Python,Reproducible research,Workflow }, journal = { Frontiers in ICT }, issn = { 2297198X }, doi = { 10.3389/fict.2016.00015 }, author = { Achterberg and Koek and Niessen }, abstract = { With the increasing number of datasets encountered in imaging studies, the increasing complexity of processing workflows, and a growing awareness for data stewardship, there is a need for managed, automated workflows. In this paper, we introduce Fastr, an automated workflow engine with support for advanced data flows. Fastr has built-in data provenance for recording processing trails and ensuring reproducible results. The extensible plugin-based design allows the system to interface with virtually any image archive and processing infrastructure. This workflow engine is designed to consolidate quantitative imaging biomarker pipelines in order to enable easy application to new data. }, } |
2016 | Journal | Mehdi Hadj-Hamou, Marco Lorenzi, Nicholas Ayache, Xavier Pennec (2016). Longitudinal analysis of image time series with diffeomorphic deformations: A computational framework based on stationary velocity fields. Frontiers in Neuroscience, 10(JUN), pp. NA (bib) x @article{HadjHamou2016, year = { 2016 }, volume = { 10 }, title = { Longitudinal analysis of image time series with diffeomorphic deformations: A computational framework based on stationary velocity fields }, publisher = { Frontiers Research Foundation }, number = { JUN }, month = { jun }, keywords = { Deformation-based morphometry,Diffeomorphism parametrized by stationary velocity,Longitudinal study,Non-linear registration,Reproducible research,Statistical analysis }, journal = { Frontiers in Neuroscience }, issn = { 1662453X }, doi = { 10.3389/fnins.2016.00236 }, author = { Hadj-Hamou and Lorenzi and Ayache and Pennec }, abstract = { We propose and detail a deformation-based morphometry computational framework, called Longitudinal Log-Demons Framework (LLDF), to estimate the longitudinal brain deformations from image data series, transport them in a common space and perform statistical group-wise analyses. It is based on freely available software and tools, and consists of three main steps: (i) Pre-processing, (ii) Position correction, and (iii) Non-linear deformation analysis. It is based on the LCC log-Demons non-linear symmetric diffeomorphic registration algorithm with an additional modulation of the similarity term using a confidence mask to increase the robustness with respect to brain boundary intensity artifacts. The pipeline is exemplified on the longitudinal Open Access Series of Imaging Studies (OASIS) database and all the parameters values are given so that the study can be reproduced. 
We investigate the group-wise differences between the patients with Alzheimer's disease and the healthy control group, and show that the proposed pipeline increases the sensitivity with no decrease in the specificity of the statistical study done on the longitudinal deformations. }, } |
2016 | Journal | U. Wolfram, J. J. Schwiedrzik, M. J. Mirzaali, A. Bürki, P. Varga, C. Olivier, F. Peyrin, P. K. Zysset (2016). Characterizing microcrack orientation distribution functions in osteonal bone samples. Journal of Microscopy, 264(3), pp. 268–281. (link) (bib) x @article{RN913, year = { 2016 }, volume = { 264 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978744015{\&}doi=10.1111{\%}2Fjmi.12440{\&}partnerID=40{\&}md5=eaacef5f6889af31fbaf82da8a2c4344 }, type = { Journal Article }, title = { Characterizing microcrack orientation distribution functions in osteonal bone samples }, pages = { 268--281 }, number = { 3 }, keywords = { Cortical bone,X-ray phase micro-tomography,microcrack segmentation,microdamage,orientation distribution function,synchrotron radiation }, journal = { Journal of Microscopy }, issn = { 13652818 }, doi = { 10.1111/jmi.12440 }, author = { Wolfram and Schwiedrzik and Mirzaali and B{\"u}rki and Varga and Olivier and Peyrin and Zysset }, abstract = { Prefailure microdamage in bone tissue is considered to be the most detrimental factor in defining its strength and toughness with respect to age and disease. To understand the influence of microcracks on bone mechanics it is necessary to assess their morphology and three-dimensional distribution. This requirement reaches beyond classic histology and stereology, and methods to obtain such information are currently missing. Therefore, the aim of the study was to develop a methodology that allows to characterize three-dimensional microcrack distributions in bulk bone samples. Four dumbbell-shaped specimens of human cortical bone of a 77-year-old female donor were loaded beyond yield in either tension, compression or torsion (one control). Subsequently, synchrotron radiation micro-computed tomography (SR$\mu$CT) was used to obtain phase-contrast images of the damaged samples. 
A microcrack segmentation algorithm was developed and used to segment microcrack families for which microcrack orientation distribution functions were determined. Distinct microcrack families were observed for each load case that resulted in distinct orientation distribution functions. Microcracks had median areas of approximately 4.7 $\mu$m2, 33.3 $\mu$m2 and 64.0 $\mu$m2 for tension, compression and torsion. Verifying the segmentation algorithm against a manually segmented ground truth showed good results when comparing the microcrack orientation distribution functions. A size dependence was noted when investigating the orientation distribution functions with respect to the size of the volume of interest used for their determination. Furthermore, a scale separation between tensile, compressive and torsional microcracks was noticeable. Visual comparison to classic histology indicated that microcrack families were successfully distinguished. We propose a methodology to analyse three-dimensional microcrack distributions in overloaded cortical bone. Such information could improve our understanding of bone microdamage and its impact on bone failure in relation to tissue age and disease. }, } |
2016 | Journal | F. Werner, C. Jung, M. Hofmann, R. Werner, J. Salamon, D. Säring, M. G. Kaul, K. Them, O. M. Weber, T. Mummert, G. Adam, H. Ittrich, T. Knopp (2016). Geometry planning and image registration in magnetic particle imaging using bimodal fiducial markers. Medical Physics, 43(6), pp. 2884–2893. (link) (bib) x @article{RN920, year = { 2016 }, volume = { 43 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84969765405{\&}doi=10.1118{\%}2F1.4948998{\&}partnerID=40{\&}md5=6ef63d0a878e48dd0de13440e94b2c70 }, type = { Journal Article }, title = { Geometry planning and image registration in magnetic particle imaging using bimodal fiducial markers }, pages = { 2884--2893 }, number = { 6 }, keywords = { MPI,MRI,fiducialmarker,geometry planning,registration }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4948998 }, author = { Werner and Jung and Hofmann and Werner and Salamon and S{\"{a}}ring and Kaul and Them and Weber and Mummert and Adam and Ittrich and Knopp }, abstract = { Purpose: Magnetic particle imaging (MPI) is a quantitative imaging modality that allows the distribution of superparamagnetic nanoparticles to be visualized. Compared to other imaging techniques like x-ray radiography, computed tomography (CT), and magnetic resonance imaging (MRI), MPI only provides a signal from the administered tracer, but no additional morphological information, which complicates geometry planning and the interpretation of MP images. The purpose of the authors study was to develop bimodal fiducial markers that can be visualized by MPI and MRI in order to create MP-MR fusion images. Methods: A certain arrangement of three bimodal fiducial markers was developed and used in a combined MRI/MPI phantom and also during in vivo experiments in order to investigate its suitability for geometry planning and image fusion. An algorithm for automated marker extraction in both MR and MP images and rigid registration was established. 
Results: The developed bimodal fiducial markers can be visualized by MRI and MPI and allow for geometry planning as well as automated registration and fusion of MRMP images. Conclusions: To date, exact positioning of the object to be imaged within the field of view (FOV) and the assignment of reconstructed MPI signals to corresponding morphological regions has been difficult. The developed bimodal fiducial markers and the automated image registration algorithm help to overcome these difficulties. }, } |
2016 | Journal | Jay Gage Tarolli, Anna Bloom, Nicholas Winograd (2016). Multimodal image fusion with SIMS: Preprocessing with image registration. Biointerphases, 11(2), pp. 02A311. (bib) x @article{RN933, year = { 2016 }, volume = { 11 }, type = { Journal Article }, title = { Multimodal image fusion with SIMS: Preprocessing with image registration }, pages = { 02A311 }, number = { 2 }, journal = { Biointerphases }, issn = { 1934-8630 }, doi = { 10.1116/1.4939892 }, author = { Tarolli and Bloom and Winograd }, abstract = { {\textcopyright} 2016 American Vacuum Society. In order to utilize complementary imaging techniques to supply higher resolution data for fusion with secondary ion mass spectrometry (SIMS) chemical images, there are a number of aspects that, if not given proper consideration, could produce results which are easy to misinterpret. One of the most critical aspects is that the two input images must be of the same exact analysis area. With the desire to explore new higher resolution data sources that exists outside of the mass spectrometer, this requirement becomes even more important. To ensure that two input images are of the same region, an implementation of the insight segmentation and registration toolkit (ITK) was developed to act as a preprocessing step before performing image fusion. This implementation of ITK allows for several degrees of movement between two input images to be accounted for, including translation, rotation, and scale transforms. First, the implementation was confirmed to accurately register two multimodal images by supplying a known transform. Once validated, two model systems, a copper mesh grid and a group of RAW 264.7 cells, were used to demonstrate the use of the ITK implementation to register a SIMS image with a microscopy image for the purpose of performing image fusion. }, } |
2016 | Journal | Maxine Tan, Zheng Li, Yuchen Qiu, Scott D. McMeekin, Theresa C. Thai, Kai Ding, Kathleen N. Moore, Hong Liu, Bin Zheng (2016). A new approach to evaluate drug treatment response of ovarian cancer patients based on deformable image registration. IEEE Transactions on Medical Imaging, 35(1), pp. 316–325. (link) (bib) x @article{RN932, year = { 2016 }, volume = { 35 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84959364855{\&}doi=10.1109{\%}2FTMI.2015.2473823{\&}partnerID=40{\&}md5=f4cd96ce6d440ec0f4576be7ed0f2e7c }, type = { Journal Article }, title = { A new approach to evaluate drug treatment response of ovarian cancer patients based on deformable image registration }, pages = { 316--325 }, number = { 1 }, keywords = { Computed tomography (CT),Computer-aided diagnosis (CAD),Deformable image registration,Metastatic tumors,Ovarian cancer,Response evaluation criteria in solid tumors (RECI,Tumor volume and necrosis tracking }, journal = { IEEE Transactions on Medical Imaging }, issn = { 1558254X }, doi = { 10.1109/TMI.2015.2473823 }, author = { Tan and Li and Qiu and McMeekin and Thai and Ding and Moore and Liu and Zheng }, abstract = { Although Response Evaluation Criteria in Solid Tumors (RECIST) is the current clinical guideline to assess size change of solid tumors after therapeutic treatment, it has a relatively lower association to the clinical outcome of progression free survival (PFS) of the patients. In this paper, we presented a new approach to assess responses of ovarian cancer patients to new chemotherapy drugs in clinical trials. We first developed and applied a multi-resolution B-spline based deformable image registration method to register two sets of computed tomography (CT) image data acquired pre- and post-treatment. 
The B-spline difference maps generated from the co-registered CT images highlight the regions related to the volumetric growth or shrinkage of the metastatic tumors, and density changes related to variation of necrosis inside the solid tumors. Using a testing dataset involving 19 ovarian cancer patients, we compared patients' response to the treatment using the new image registration method and RECIST guideline. The results demonstrated that using the image registration method yielded higher association with the six-month PFS outcomes of the patients than using RECIST. The image registration results also provided a solid foundation of developing new computerized quantitative image feature analysis schemes in the future studies. }, } |
2016 | Journal | Jakub Smołka, Maria Skublewska-Paszkowska, Edyta Łukasik (2016). Algorytm doboru optymalnych parametrów analizy skupień zastosowanej do redukcji nadsegmentacji. Przegląd Elektrotechniczny, 92(9), pp. 250–256. (link) (bib) x @article{RN934, year = { 2016 }, volume = { 92 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84986000629{\&}doi=10.15199{\%}2F48.2016.09.61{\&}partnerID=40{\&}md5=311e10c8007ae785c0cef57929b23170 }, type = { Journal Article }, title = { Algorytm doboru optymalnych parametr{\'{o}}w analizy skupie{\'{n}} zastosowanej do redukcji nadsegmentacji }, pages = { 250--256 }, number = { 9 }, keywords = { Cluster analysis,Over-segmentation reduction,Watershed transformation }, journal = { Przegl{\k{a}}d Elektrotechniczny }, issn = { 00332097 }, doi = { 10.15199/48.2016.09.61 }, author = { Smo{\l}ka and Skublewska-Paszkowska and {\L}ukasik }, abstract = { The authors proposed a solution to the over-segmentation of color images processed by watershed segmentation algorithm. The solution utilizes hierarchical cluster analysis and treats watersheds as objects characterized by a number of attributes. This paper briefly discusses the solution (clustering methods, their parameters, selected watershed attributes) and presents an algorithm used for selecting optimal parameters for cluster analysis. Detailed results obtained for a set of test images are presented and discussed. }, } |
2016 | Journal | O. Rodrigues, F. F.C. Morais, N. A.O.S. Morais, L. S. Conci, L. V. Neto, A. Conci (2016). A novel approach for the automated segmentation and volume quantification of cardiac fats on computed tomography. Computer Methods and Programs in Biomedicine, 123, pp. 109–128. (link) (bib) x @article{RN935, year = { 2016 }, volume = { 123 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84951273755{\&}doi=10.1016{\%}2Fj.cmpb.2015.09.017{\&}partnerID=40{\&}md5=03ff84a656a2d758e69a4052ed2af8ee }, type = { Journal Article }, title = { A novel approach for the automated segmentation and volume quantification of cardiac fats on computed tomography }, pages = { 109--128 }, keywords = { Atlas,Classification,Computed tomography,Image registration,Machine learning,Segmentation }, journal = { Computer Methods and Programs in Biomedicine }, issn = { 18727565 }, doi = { 10.1016/j.cmpb.2015.09.017 }, author = { Rodrigues and Morais and Morais and Conci and Neto and Conci }, abstract = { The deposits of fat on the surroundings of the heart are correlated to several health risk factors such as atherosclerosis, carotid stiffness, coronary artery calcification, atrial fibrillation and many others. These deposits vary unrelated to obesity, which reinforces its direct segmentation for further quantification. However, manual segmentation of these fats has not been widely deployed in clinical practice due to the required human workload and consequential high cost of physicians and technicians. In this work, we propose a unified method for an autonomous segmentation and quantification of two types of cardiac fats. The segmented fats are termed epicardial and mediastinal, and stand apart from each other by the pericardium. Much effort was devoted to achieve minimal user intervention. The proposed methodology mainly comprises registration and classification algorithms to perform the desired segmentation. 
We compare the performance of several classification algorithms on this task, including neural networks, probabilistic models and decision tree algorithms. Experimental results of the proposed methodology have shown that the mean accuracy regarding both epicardial and mediastinal fats is 98.5{\%} (99.5{\%} if the features are normalized), with a mean true positive rate of 98.0{\%}. In average, the Dice similarity index was equal to 97.6{\%}. }, } |
2016 | Journal | N. Reynaert, B. Demol, M. Charoy, S. Bouchoucha, F. Crop, A. Wagner, T. Lacornerie, F. Dubus, E. Rault, P. Comte, R. Cayez, C. Boydev, D. Pasquier, X. Mirabel, E. Lartigau, T. Sarrazin (2016). Clinical implementation of a Monte Carlo based treatment plan QA platform for validation of Cyberknife and Tomotherapy treatments. Physica Medica, 32(10), pp. 1225–1237. (link) (bib) x @article{RN917, year = { 2016 }, volume = { 32 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84992718263{\&}doi=10.1016{\%}2Fj.ejmp.2016.09.009{\&}partnerID=40{\&}md5=3de38bf62efd4bb22fcce5398852ce95 }, type = { Journal Article }, title = { Clinical implementation of a Monte Carlo based treatment plan QA platform for validation of Cyberknife and Tomotherapy treatments }, pages = { 1225--1237 }, number = { 10 }, keywords = { Delivered dose,Monte Carlo,QA,Treatment planning }, journal = { Physica Medica }, issn = { 1724191X }, doi = { 10.1016/j.ejmp.2016.09.009 }, author = { Reynaert and Demol and Charoy and Bouchoucha and Crop and Wagner and Lacornerie and Dubus and Rault and Comte and Cayez and Boydev and Pasquier and Mirabel and Lartigau and Sarrazin }, abstract = { Purpose The main focus of the current paper is the clinical implementation of a Monte Carlo based platform for treatment plan validation for Tomotherapy and Cyberknife, without adding additional tasks to the dosimetry department. Methods The Monte Carlo platform consists of C++ classes for the actual functionality and a web based GUI that allows accessing the system using a web browser. Calculations are based on BEAMnrc/DOSXYZnrc and/or GATE and are performed automatically after exporting the dicom data from the treatment planning system. For Cyberknife treatments of moving targets, the log files saved during the treatment (position of robot, internal fiducials and external markers) can be used in combination with the 4D planning CT to reconstruct the actually delivered dose. 
The Monte Carlo platform is also used for calculation on MRI images, using pseudo-CT conversion. Results For Tomotherapy treatments we obtain an excellent agreement (within 2{\%}) for almost all cases. However, we have been able to detect a problem regarding the CT Hounsfield units definition of the Toshiba Large Bore CT when using a large reconstruction diameter. For Cyberknife treatments we obtain an excellent agreement with the Monte Carlo algorithm of the treatment planning system. For some extreme cases, when treating small lung lesions in low density lung tissue, small differences are obtained due to the different cut-off energy of the secondary electrons. Conclusions A Monte Carlo based treatment plan validation tool has successfully been implemented in clinical routine and is used to systematically validate all Cyberknife and Tomotherapy plans. }, } |
2016 | Journal | Bahram Marami, Benoit Scherrer, Onur Afacan, Burak Erem, Simon K. Warfield, Ali Gholipour (2016). Motion-Robust Diffusion-Weighted Brain MRI Reconstruction Through Slice-Level Registration-Based Motion Tracking. IEEE Transactions on Medical Imaging, 35(10), pp. 2258–2269. (link) (bib) x @article{RN918, year = { 2016 }, volume = { 35 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84991454091{\&}doi=10.1109{\%}2FTMI.2016.2555244{\&}partnerID=40{\&}md5=377eb413c86d8cfebc71fb33434e9bf4 }, type = { Journal Article }, title = { Motion-Robust Diffusion-Weighted Brain MRI Reconstruction Through Slice-Level Registration-Based Motion Tracking }, pmid = { 27834639 }, pages = { 2258--2269 }, number = { 10 }, keywords = { Diffusion-weighted MRI,motion tracking,motion-robust MRI,outlier-robust kalman filter,slice registration }, journal = { IEEE Transactions on Medical Imaging }, issn = { 1558254X }, doi = { 10.1109/TMI.2016.2555244 }, author = { Marami and Scherrer and Afacan and Erem and Warfield and Gholipour }, abstract = { This work proposes a novel approach for motion-robust diffusion-weighted (DW) brain MRI reconstruction through tracking temporal head motion using slice-to-volume registration. The slice-level motion is estimated through a filtering approach that allows tracking the head motion during the scan and correcting for out-of-plane inconsistency in the acquired images. Diffusion-sensitized image slices are registered to a base volume sequentially over time in the acquisition order where an outlier-robust Kalman filter, coupled with slice-to-volume registration, estimates head motion parameters. Diffusion gradient directions are corrected for the aligned DWI slices based on the computed rotation parameters and the diffusion tensors are directly estimated from the corrected data at each voxel using weighted linear least squares. 
The method was evaluated in DWI scans of adult volunteers who deliberately moved during scans as well as clinical DWI of 28 neonates and children with different types of motion. Experimental results showed marked improvements in DWI reconstruction using the proposed method compared to the state-of-the-art DWI analysis based on volume-to-volume registration. This approach can be readily used to retrieve information from motion-corrupted DW imaging data. }, } |
2016 | Journal | Piotr Majka, Daniel K. Wójcik (2016). Possum—A Framework for Three-Dimensional Reconstruction of Brain Images from Serial Sections. Neuroinformatics, 14(3), pp. 265–278. (bib) x @article{RN828, year = { 2016 }, volume = { 14 }, type = { Journal Article }, title = { Possum—A Framework for Three-Dimensional Reconstruction of Brain Images from Serial Sections }, pages = { 265--278 }, number = { 3 }, keywords = { 3D reconstruction,Brain atlas,Histology,Image analysis,Image registration,Light microscopy }, journal = { Neuroinformatics }, issn = { 15392791 }, doi = { 10.1007/s12021-015-9286-1 }, author = { Majka and W{\'{o}}jcik }, abstract = { Techniques based on imaging serial sections of brain tissue provide insight into brain structure and function. However, to compare or combine them with results from three dimensional imaging methods, reconstruction into a volumetric form is required. Currently, there are no tools for performing such a task in a streamlined way. Here we propose the Possum volumetric reconstruction framework which provides a selection of 2D to 3D image reconstruction routines allowing one to build workflows tailored to one's specific requirements. The main components include routines for reconstruction with or without using external reference and solutions for typical issues encountered during the reconstruction process, such as propagation of the registration errors due to distorted sections. We validate the implementation using synthetic datasets and actual experimental imaging data derived from publicly available resources. We also evaluate efficiency of a subset of the algorithms implemented. The Possum framework is distributed under MIT license and it provides researchers with a possibility of building reconstruction workflows from existing components, without the need for low-level implementation. 
As a consequence, it also facilitates sharing and data exchange between researchers and laboratories. }, } |
2016 | Journal | Jan Hering, Ivo Wolf, Klaus H. Maier-Hein (2016). Multi-Objective Memetic Search for Robust Motion and Distortion Correction in Diffusion MRI. IEEE Transactions on Medical Imaging, 35(10), pp. 2280–2291. (link) (bib) x @article{RN919, year = { 2016 }, volume = { 35 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84991444277{\&}doi=10.1109{\%}2FTMI.2016.2557580{\&}partnerID=40{\&}md5=c42388a9bec72c65c2504113fc52a566 }, type = { Journal Article }, title = { Multi-Objective Memetic Search for Robust Motion and Distortion Correction in Diffusion MRI }, pages = { 2280--2291 }, number = { 10 }, keywords = { Artifact correction,diffusion-weighted images,motion correction,particle swarm optimization,registration }, journal = { IEEE Transactions on Medical Imaging }, issn = { 1558254X }, doi = { 10.1109/TMI.2016.2557580 }, author = { Hering and Wolf and Maier-Hein }, abstract = { Effective image-based artifact correction is an essential step in the analysis of diffusion MR images. Many current approaches are based on retrospective registration, which becomes challenging in the realm of high b-values and low signal-to-noise ratio, rendering the corresponding correction schemes more and more ineffective. We propose a novel registration scheme based on memetic search optimization that allows for simultaneous exploitation of different signal intensity relationships between the images, leading to more robust registration results. We demonstrate the increased robustness and efficacy of our method on simulated as well as in vivo datasets. In contrast to the state-of-art methods, the median target registration error (TRE) stayed below the voxel size even for high b-values (3000 s {\textperiodcentered} mm-2 and higher) and low SNR conditions. 
We also demonstrate the increased precision in diffusion-derived quantities by evaluating Neurite Orientation Dispersion and Density Imaging (NODDI) derived measures on a in vivo dataset with severe motion artifacts. These promising results will potentially inspire further studies on metaheuristic optimization in diffusion MRI artifact correction and image registration in general. }, } |
2016 | Journal | Xiaogang Du, Jianwu Dang, Yangping Wang, Song Wang, Tao Lei (2016). A Parallel Nonrigid Registration Algorithm Based on B-Spline for Medical Images. Computational and Mathematical Methods in Medicine, 2016, pp. NA (link) (bib) x @article{RN931, year = { 2016 }, volume = { 2016 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85008957755{\&}doi=10.1155{\%}2F2016{\%}2F7419307{\&}partnerID=40{\&}md5=94c05725af9454145efeff48e39a8b1e }, type = { Journal Article }, title = { A Parallel Nonrigid Registration Algorithm Based on B-Spline for Medical Images }, journal = { Computational and Mathematical Methods in Medicine }, issn = { 17486718 }, doi = { 10.1155/2016/7419307 }, author = { Du and Dang and Wang and Wang and Lei }, abstract = { The nonrigid registration algorithm based on B-spline Free-Form Deformation (FFD) plays a key role and is widely applied in medical image processing due to the good flexibility and robustness. However, it requires a tremendous amount of computing time to obtain more accurate registration results especially for a large amount of medical image data. To address the issue, a parallel nonrigid registration algorithm based on B-spline is proposed in this paper. First, the Logarithm Squared Difference (LSD) is considered as the similarity metric in the B-spline registration algorithm to improve registration precision. After that, we create a parallel computing strategy and lookup tables (LUTs) to reduce the complexity of the B-spline registration algorithm. As a result, the computing time of three time-consuming steps including B-splines interpolation, LSD computation, and the analytic gradient computation of LSD, is efficiently reduced, for the B-spline registration algorithm employs the Nonlinear Conjugate Gradient (NCG) optimization method. 
Experimental results of registration quality and execution efficiency on the large amount of medical images show that our algorithm achieves a better registration accuracy in terms of the differences between the best deformation fields and ground truth and a speedup of 17 times over the single-threaded CPU implementation due to the powerful parallel computing ability of Graphics Processing Unit (GPU). }, } |
2016 | Journal | Alexander Danilov, Roman Pryamonosov, Alexandra Yurova (2016). Image segmentation for cardiovascular biomedical applications at different scales. Computation, 4(3), pp. NA (link) (bib) x @article{RN966, year = { 2016 }, volume = { 4 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85050390538{\&}doi=10.3390{\%}2Fcomputation4030035{\&}partnerID=40{\&}md5=ec04fcf1cbad06c8db55480c5b77767f }, type = { Journal Article }, title = { Image segmentation for cardiovascular biomedical applications at different scales }, number = { 3 }, keywords = { Abdominal tissues,Cardiovascular applications,Cerebral arteries,Coronary arteries,Electron microscopy,Image segmentation }, journal = { Computation }, issn = { 20793197 }, doi = { 10.3390/computation4030035 }, author = { Danilov and Pryamonosov and Yurova }, abstract = { In this study, we present several image segmentation techniques for various image scales and modalities. We consider cellular-, organ-, and whole organism-levels of biological structures in cardiovascular applications. Several automatic segmentation techniques are presented and discussed in this work. The overall pipeline for reconstruction of biological structures consists of the following steps: image pre-processing, feature detection, initial mask generation, mask processing, and segmentation post-processing. Several examples of image segmentation are presented, including patient-specific abdominal tissues segmentation, vascular network identification and myocyte lipid droplet micro-structure reconstruction. }, } |
2016 | Journal | Wookjin Choi, Ming Xue, Barton F. Lane, Min Kyu Kang, Kruti Patel, William F. Regine, Paul Klahr, Jiahui Wang, Shifeng Chen, Warren D'Souza, Wei Lu (2016). Individually optimized contrast-enhanced 4D-CT for radiotherapy simulation in pancreatic ductal adenocarcinoma. Medical Physics, 43(10), pp. 5659–5666. (link) (bib) x @article{RN916, year = { 2016 }, volume = { 43 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84988808490{\&}doi=10.1118{\%}2F1.4963213{\&}partnerID=40{\&}md5=d555238a514a92d5a318ee19a0427dba }, type = { Journal Article }, title = { Individually optimized contrast-enhanced 4D-CT for radiotherapy simulation in pancreatic ductal adenocarcinoma }, pages = { 5659--5666 }, number = { 10 }, keywords = { 4D-CT,contrast enhancement,pancreatic ductal adenocarcinoma,radiotherapy simulation }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4963213 }, author = { Choi and Xue and Lane and Kang and Patel and Regine and Klahr and Wang and Chen and D'Souza and Lu }, abstract = { Purpose: To develop an individually optimized contrast-enhanced (CE) 4D-computed tomography (CT) for radiotherapy simulation in pancreatic ductal adenocarcinomas (PDA). Methods: Ten PDA patients were enrolled. Each underwent three CT scans: a 4D-CT immediately following a CE 3D-CT and an individually optimized CE 4D-CT using test injection. Three physicians contoured the tumor and pancreatic tissues. Image quality scores, tumor volume, motion, tumor-to-pancreas contrast, and contrast-to-noise ratio (CNR) were compared in the three CTs. Interobserver variations were also evaluated in contouring the tumor using simultaneous truth and performance level estimation. Results: Average image quality scores for CE 3D-CT and CE 4D-CT were comparable (4.0 and 3.8, respectively; P = 0.082), and both were significantly better than that for 4D-CT (2.6, P {\textless} 0.001). 
Tumor-to-pancreas contrast results were comparable in CE 3D-CT and CE 4D-CT (15.5 and 16.7 Hounsfield units (HU), respectively; P = 0.21), and the latter was significantly higher than in 4D-CT (9.2 HU, P = 0.001). Image noise in CE 3D-CT (12.5 HU) was significantly lower than in CE 4D-CT (22.1 HU, P = 0.013) and 4D-CT (19.4 HU, P = 0.009). CNRs were comparable in CE 3D-CT and CE 4D-CT (1.4 and 0.8, respectively; P = 0.42), and both were significantly better in 4D-CT (0.6, P = 0.008 and 0.014). Mean tumor volumes were significantly smaller in CE 3D-CT (29.8 cm3, P = 0.03) and CE 4D-CT (22.8 cm3, P = 0.01) than in 4D-CT (42.0 cm3). Mean tumor motion was comparable in 4D-CT and CE 4D-CT (7.2 and 6.2 mm, P = 0.17). Interobserver variations were comparable in CE 3D-CT and CE 4D-CT (Jaccard index 66.0{\%} and 61.9{\%}, respectively) and were worse for 4D-CT (55.6{\%}) than CE 3D-CT. Conclusions: CE 4D-CT demonstrated characteristics comparable to CE 3D-CT, with high potential for simultaneously delineating the tumor and quantifying tumor motion with a single scan. }, } |
2016 | Journal | Julián Betancur, Antoine Simon, Edgar Halbert, François Tavard, François Carré, Alfredo Hernández, Erwan Donal, Frédéric Schnell, Mireille Garreau (2016). Registration of dynamic multiview 2D ultrasound and late gadolinium enhanced images of the heart: Application to hypertrophic cardiomyopathy characterization. Medical Image Analysis, 28, pp. 13–21. (link) (bib) x @article{RN921, year = { 2016 }, volume = { 28 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84948142529{\&}doi=10.1016{\%}2Fj.media.2015.10.010{\&}partnerID=40{\&}md5=7d93ff8e0d80ef9b1730c036b8529e1d }, type = { Journal Article }, title = { Registration of dynamic multiview 2D ultrasound and late gadolinium enhanced images of the heart: Application to hypertrophic cardiomyopathy characterization }, pages = { 13--21 }, keywords = { Cardiac ultrasound,Hypertrophic cardiomyopathy,Late gadolinium-enhanced magnetic resonance,Multimodal image registration,Speckle tracking echocardiography }, journal = { Medical Image Analysis }, issn = { 13618423 }, doi = { 10.1016/j.media.2015.10.010 }, author = { Betancur and Simon and Halbert and Tavard and Carr{\'{e}} and Hern{\'{a}}ndez and Donal and Schnell and Garreau }, abstract = { Describing and analyzing heart multiphysics requires the acquisition and fusion of multisensor cardiac images. Multisensor image fusion enables a combined analysis of these heterogeneous modalities. We propose to register intra-patient multiview 2D+t ultrasound (US) images with multiview late gadolinium-enhanced (LGE) images acquired during cardiac magnetic resonance imaging (MRI), in order to fuse mechanical and tissue state information. The proposed procedure registers both US and LGE to cine MRI. The correction of slice misalignment and the rigid registration of multiview LGE and cine MRI are studied, to select the most appropriate similarity measure. 
It showed that mutual information performs the best for LGE slice misalignment correction and for LGE and cine registration. Concerning US registration, dynamic endocardial contours resulting from speckle tracking echocardiography were exploited in a geometry-based dynamic registration. We propose the use of an adapted dynamic time warping procedure to synchronize cardiac dynamics in multiview US and cine MRI. The registration of US and LGE MRI was evaluated on a dataset of patients with hypertrophic cardiomyopathy. A visual assessment of 330 left ventricular regions from US images of 28 patients resulted in 92.7{\%} of regions successfully aligned with cardiac structures in LGE. Successfully-aligned regions were then used to evaluate the abilities of strain indicators to predict the presence of fibrosis. Longitudinal peak-strain and peak-delay of aligned left ventricular regions were computed from corresponding regional strain curves from US. The Mann-Whitney test proved that the expected values of these indicators change between the populations of regions with and without fibrosis (p {\textless} 0.01). ROC curves otherwise proved that the presence of fibrosis is one factor amongst others which modifies longitudinal peak-strain and peak-delay. }, } |
2016 | Journal | Fakhre Alam, Sami Ur Rahman (2016). Intrinsic registration techniques for medical images: A state-of-the-art review. Journal of Postgraduate Medical Institute, 30(2), pp. 119–132. (link) (bib) x @article{RN971, year = { 2016 }, volume = { 30 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84964707329{\&}partnerID=40{\&}md5=77f7b0fdb11032a6d00501e3da414f03 }, type = { Journal Article }, title = { Intrinsic registration techniques for medical images: A state-of-the-art review }, pages = { 119--132 }, number = { 2 }, keywords = { Image guided surgery,Intrinsic registration,Medical image registration,Medical imaging modalities }, journal = { Journal of Postgraduate Medical Institute }, issn = { 18119387 }, author = { Alam and Rahman }, abstract = { Medical image registration is the process of mapping two or more medical images into a single more informative image for the purpose of receiving precise and complementary information. The precise mapping of medical images obtained in different time-frames and by the same or different modalities is now possible due to the availability of large number of registration techniques. The purpose of this paper is to present and analyse intrinsic registration techniques for medical imaging in a comprehensive manner. Our approach of analysis is unique from already published work because we have performed detailed investigation on each registration techniques, and analyse similarity measures and assessments according to various parameters. The knowledge on the work that has been developed in the area is presented in a compact form. This work is expected to provide a useful platform for the researchers in the field of medical image registration in general and in intrinsic registration in particular. }, } |
2016 | In Collection | Cory W. Quammen, Russell M. Taylor, Pavel Krajcevski, Sorin Mitran, Andinet Enquobahrie, Richard Superfine, Brad Davis, Stephanie Davis, Carlton Zdanski (2016). The virtual pediatric airways workbench. In L Fellander-Tsai, K G Vosburgh, J D Westwood, S Senger, S W Westwood, C M Fidopiastis, A Liu, editor, Studies in Health Technology and Informatics, pp. 295–300. (link) (bib) x @incollection{RN964, year = { 2016 }, volume = { 220 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84965026283{\&}doi=10.3233{\%}2F978-1-61499-625-5-295{\&}partnerID=40{\&}md5=a0854888010669dc0a00ab47368c672c }, type = { Serial }, title = { The virtual pediatric airways workbench }, publisher = { IOS Press }, pages = { 295--300 }, keywords = { 3D modeling,Surgery planning,Virtual reality }, issn = { 18798365 }, isbn = { 9781614996248 }, editor = { Fellander-Tsai, L. and Vosburgh, K. G. and Westwood, J. D. and Senger, S. and Westwood, S. W. and Fidopiastis, C. M. and Liu, A. }, doi = { 10.3233/978-1-61499-625-5-295 }, booktitle = { Studies in Health Technology and Informatics }, author = { Quammen and Taylor and Krajcevski and Mitran and Enquobahrie and Superfine and Davis and Davis and Zdanski }, abstract = { The Virtual Pediatric Airways Workbench (VPAW) is a patient-centered surgical planning software system targeted to pediatric patients with airway obstruction. VPAW provides an intuitive surgical planning interface for clinicians and supports quantitative analysis regarding prospective surgeries to aid clinicians deciding on potential surgical intervention. VPAW enables a full surgical planning pipeline, including importing DICOM images, segmenting the airway, interactive 3D editing of airway geometries to express potential surgical treatment planning options, and creating input files for offline geometric analysis and computational fluid dynamics simulations for evaluation of surgical outcomes. 
In this paper, we describe the VPAW system and its use in one case study with a clinician to successfully describe an intended surgery outcome. }, } |
2016 | In Collection | A. Nikonorov, A. Kolsanov, M. Petrov, Y. Yuzifovich, E. Prilepin, S. Chaplygin, P. Zelter, K. Bychenkov (2016). Vessel segmentation for noisy CT data with quality measure based on single-point contrast-to-noise ratio. In M S Obaidat, P Lorenz, editor, Communications in Computer and Information Science, pp. 490–507. (link) (bib) x @incollection{RN930, year = { 2016 }, volume = { 585 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84960396227{\&}doi=10.1007{\%}2F978-3-319-30222-5{\_}23{\&}partnerID=40{\&}md5=00b082a6118fc326c7a012f9772959cf }, type = { Serial }, title = { Vessel segmentation for noisy CT data with quality measure based on single-point contrast-to-noise ratio }, publisher = { Springer Verlag }, pages = { 490--507 }, keywords = { CUDA,Contrast to noise ratio,Fast marching,GPGPU,Geodesic active contours,Liver,Proximal algorithms,Total variance de-noising,Vessels segmentation,Xeon phi }, issn = { 18650929 }, isbn = { 9783319302218 }, editor = { Obaidat, M. S. and Lorenz, P. }, doi = { 10.1007/978-3-319-30222-5_23 }, booktitle = { Communications in Computer and Information Science }, author = { Nikonorov and Kolsanov and Petrov and Yuzifovich and Prilepin and Chaplygin and Zelter and Bychenkov }, abstract = { This paper describes a comprehensive multi-step algorithm for vascular structure segmentation in CT scan data, from raw slice images to a 3D object, with an emphasis on improving segmentation quality and assessing computational complexity. To estimate initial image quality and to evaluate denoising in the absence of the noise-free image, we propose a semi-global contrast-to-noise quality metric. We show that total variation-based filtering in the L1 metric results in the best denoising when compared to widely used nonlocal means or anisotropic diffusion denoising. 
To address higher computational complexity of our denoising algorithm, we created two high performance implementations, using Intel MIC and NVIDIA CUDA and compared results. In combination with proposed nearly real-time incremental segmentation technique, it provides a fast framework with controlled quality. }, } |
2016 | In Collection | Javier Garcia Blas, Manuel F. Dolz, J. Daniel Garcia, Jesus Carretero, Alessandro Daducci, Yasser Aleman, Erick Jorge Canales-Rodriguez (2016). Porting Matlab applications to high-performance C++ codes: CPU/GPU-accelerated spherical deconvolution of diffusion MRI data. In J Carretero, K Nakano, R K L Ko, P Mueller, J Garcia-Blas, editor, Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 630–643. (link) (bib) x @incollection{RN968, year = { 2016 }, volume = { 10048 LNCS }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85007236124{\&}doi=10.1007{\%}2F978-3-319-49583-5{\_}49{\&}partnerID=40{\&}md5=aa237e3a3fce5dcf7978db850e1b2615 }, type = { Serial }, title = { Porting Matlab applications to high-performance C++ codes: CPU/GPU-accelerated spherical deconvolution of diffusion MRI data }, publisher = { Springer Verlag }, pages = { 630--643 }, keywords = { Linear algebra,Magnetic resonance imaging,Matlab }, issn = { 16113349 }, isbn = { 9783319495828 }, editor = { Carretero, J. and Nakano, K. and Ko, R. K. L. and Mueller, P. and Garcia-Blas, J. }, doi = { 10.1007/978-3-319-49583-5_49 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Blas and Dolz and {Daniel Garcia} and Carretero and Daducci and Aleman and Canales-Rodriguez }, abstract = { In many scientific research fields, Matlab has been established as de facto tool for application design. This approach offers multiple advantages such as rapid deployment prototyping and the use of high performance linear algebra, among others. However, the applications developed are highly dependent of the Matlab runtime, limiting the deployment in heterogeneous platforms. 
In this paper we present the migration of a Matlab-implemented application to the C++ programming language, allowing the parallelization in GPUs. In particular, we have chosen RUMBA-SD, a spherical deconvolution algorithm, which estimates the intravoxel white-matter fiber orientations from diffusion MRI data. We describe the methodology used along with the tools and libraries leveraged during the translation task of such application. To demonstrate the benefits of the migration process, we perform a series of experiments using different high performance computing heterogeneous platforms and linear algebra libraries. This work aims to be a guide for future developments that are implemented out of Matlab. The results show that the C++ version attains, on average, a speedup of 8× over the Matlab one. }, } |
2016 | In Conf. Proceedings | Mohammad Saleh Miri, Ali Ghayoor, Hans J. Johnson, Milan Sonka (2016). Comparative study of multimodal intra-subject image registration methods on a publicly available database. In Medical Imaging 2016: Biomedical Applications in Molecular, Structural, and Functional Imaging, pp. 97881Z. (link) (bib) x @inproceedings{Miri2016, year = { 2016 }, volume = { 9788 }, url = { http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.2214209 }, title = { Comparative study of multimodal intra-subject image registration methods on a publicly available database }, pages = { 97881Z }, month = { mar }, keywords = { Iterative closest point,Multimo,[Intensity-based,intensity-based,iterative closest point,multimodal registration,mutual information,point-based }, issn = { 16057422 }, isbn = { 9781510600232 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Miri et al/Medical Imaging 2016 Biomedical Applications in Molecular, Structural, and Functional Imaging/Miri et al. - 2016 - Comparative study of multimodal intra-subject image registration methods on a publicly available database.pdf:pdf }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2214209 }, booktitle = { Medical Imaging 2016: Biomedical Applications in Molecular, Structural, and Functional Imaging }, author = { Miri and Ghayoor and Johnson and Sonka }, annote = { From Duplicate 2 (Comparative study of multimodal intra-subject image registration methods on a publicly available database - Miri, M.S.; Ghayoor, Ali; Johnson, Hans J.; Sonka, M.) {\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I had the primary contributions to the interpretation of validation results for this work. 
I contributed to revising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} }, abstract = { {\textcopyright} 2016 SPIE. This work reports on a comparative study between five manual and automated methods for intra-subject pair-wise registration of images from different modalities. The study includes a variety of inter-modal image registrations (MR-CT, PET-CT, PET-MR) utilizing different methods including two manual point-based techniques using rigid and similarity transformations, one automated point-based approach based on Iterative Closest Point (ICP) algorithm, and two automated intensity-based methods using mutual information (MI) and normalized mutual information (NMI). These techniques were employed for inter-modal registration of brain images of 9 subjects from a publicly available dataset, and the results were evaluated qualitatively via checkerboard images and quantitatively using root mean square error and MI criteria. In addition, for each inter-modal registration, a paired t-test was performed on the quantitative results in order to find any significant difference between the results of the studied registration techniques. }, } |
2016 | In Conf. Proceedings | Wei Shao, Gary E. Christensen, Hans J. Johnson, Joo Hyun Song, Oguz C Durumeric, Casey P. Johnson, Joseph J. Shaffer, Vincent A. Magnotta, Jess G. Fiedorowicz, John A Wemmie (2016). Population Shape Collapse in Large Deformation Registration of MR Brain Images. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 109–117. (bib) x @inproceedings{shao2016population, year = { 2016 }, title = { Population Shape Collapse in Large Deformation Registration of MR Brain Images }, pages = { 109--117 }, isbn = { 9781509014378 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Shao et al/Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops/Shao et al. - 2016 - Population Shape Collapse in Large Deformation Registration of MR Brain Images.pdf:pdf }, doi = { 10.1109/CVPRW.2016.75 }, booktitle = { Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops }, author = { Shao and Christensen and Johnson and {Hyun Song} and Durumeric and Johnson and Shaffer and Magnotta and Fiedorowicz and Wemmie }, annote = { From Duplicate 1 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E.; Johnson, Hans J.; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P.; Shaffer, Joseph J.; Magnotta, Vincent A.; Fiedorowicz, Jess G.; Wemmie, John A) From Duplicate 1 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E; Johnson, Hans J; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P; Shaffer, Joseph J; Magnotta, Vincent A; Fiedorowicz, Jess G; Wemmie, John A) From Duplicate 1 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E; Johnson, Hans J; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P; Shaffer, Joseph J; Magnotta, Vincent A; Fiedorowicz, Jess G; Wemmie, John A) 
{\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I had the primary contributions to the interpretation of validation results for this work. I contributed torevising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E.; Johnson, Hans J.; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P.; Shaffer, Joseph J.; Magnotta, Vincent A.; Fiedorowicz, Jess G.; Wemmie, John A) {\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I had the primary contributions to the interpretation of validation results for this work. I contributed torevising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E.; Johnson, Hans J.; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P.; Shaffer, Joseph J.; Magnotta, Vincent A.; Fiedorowicz, Jess G.; Wemmie, John A) {\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I had the primary contributions to the interpretation of validation results for this work. I contributed torevising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} }, } |
2016 | In Conf. Proceedings | Sarah E Gerard, Hans J. Johnson, John E Bayouth, Gary E. Christensen, Kaifang Du, Junfeng Guo, Joseph M Reinhardt (2016). Alpha Shapes for Lung Segmentation in the Presence of Large Tumors. In PIA: Pulmonary Image Analysis, pp. 1–9. (bib) x @inproceedings{Gerard2016, year = { 2016 }, title = { Alpha Shapes for Lung Segmentation in the Presence of Large Tumors }, pages = { 1--9 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Gerard et al/PIA Pulmonary Image Analysis/Gerard et al. - 2016 - Alpha Shapes for Lung Segmentation in the Presence of Large Tumors.pdf:pdf }, booktitle = { PIA: Pulmonary Image Analysis }, author = { Gerard and Johnson and Bayouth and Christensen and Du and Guo and Reinhardt }, annote = { {\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I was responsible for the data processing for one of the evaluated algorithms. I had the primary contributions to the interpretation of validation results for this work. I contributed to revising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} }, abstract = { Lung segmentation is a critical initial step in planning radiation therapy interventions for lung cancer patients. Achieving robust automatic segmentation of lungs with large tumors is challenging due to large variations in lung morphology, tumor location, and tumor shape between subjects. We present an automatic method to segment lungs with large tumors in CT images using an initial intensity based segmentation followed by alpha shape construction and graph search. We evaluated our method by comparing automated segmentations to manual segmentations on twelve subjects. Computed metrics for segmentation quality include average surface distance of 0.727 mm and average DICE coefficient of 0.970. 
These results demonstrate that the proposed method accurately segments the entire lung regions both free of and in the presence of large tumors. }, } |
2016 | In Conf. Proceedings | Regina E.Y. Kim, Peg Nopoulos, Jane Paulsen, Hans Johnson (2016). Efficient and extensible workflow: Reliable whole brain segmentation for large-scale, multi-center longitudinal human MRI analysis using high performance/throughput computing resources. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 54–61. (link) (bib) x @inproceedings{kim2015efficient, year = { 2016 }, volume = { 9401 }, url = { http://link.springer.com/10.1007/978-3-319-31808-0{\_}7 }, title = { Efficient and extensible workflow: Reliable whole brain segmentation for large-scale, multi-center longitudinal human MRI analysis using high performance/throughput computing resources }, pages = { 54--61 }, organization = { Springer, Cham }, keywords = { Brain,HPC/HTC,Large-scale,Longitudinal data,MRI,Pipeline }, issn = { 16113349 }, isbn = { 9783319318073 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Kim et al/Workshop on Clinical Image-Based Procedures/Kim et al. - 2015 - Efficient and extensible workflow Reliable whole brain segmentation for large-scale, multi-center longitudinal human.pdf:pdf }, doi = { 10.1007/978-3-319-31808-0_7 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Kim and Nopoulos and Paulsen and Johnson }, annote = { From Duplicate 2 (Efficient and extensible workflow: Reliable whole brain segmentation for large-scale, multi-center longitudinal human MRI analysis using high performance/throughput computing resources - Kim, Regina EY Y; Nopoulos, Peggy C.; Paulsen, Jane S.; Johnson, Hans J.) 
From Duplicate 1 (Efficient and Extensible Workflow: Reliable Whole Brain Segmentation for Large-Scale, Multi-center Longitudinal Human MRI Analysis Using High Performance/Throughput Computing Resources - Kim, Regina EY Y; Nopoulos, Peggy C.; Paulsen, Jane S.; Johnson, Hans J.) From Duplicate 1 (Efficient and extensible workflow: Reliable whole brain segmentation for large-scale, multi-center longitudinal human MRI analysis using high performance/throughput computing resources - Kim, Regina EY Y; Nopoulos, Peggy C.; Paulsen, Jane S.; Johnson, Hans J.) From Duplicate 1 (Efficient and extensible workflow: Reliable whole brain segmentation for large-scale, multi-center longitudinal human MRI analysis using high performance/throughput computing resources - Kim, Regina EY; Nopoulos, Peggy C.; Paulsen, Jane S.; Johnson, Hans J.) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} }, abstract = { Advances in medical image applications have led to mounting expectations in regard to their impact on neuroscience studies. In light of this fact, a comprehensive application is needed to move neuroimaging data into clinical research discoveries in a way that maximizes collected data utilization and minimizes the development costs.We introduce BRAINS AutoWorkup, a Nipype based open source MRI analysis application distributed with BRAINSTools suite (http://brainsia.github. io/BRAINSTools/). 
This work describes the use of efficient and extensible automated brain MRI analysis workflow for large-scale multi-center longitudinal studies. We first explain benefits of our extensible workflow development using Nipype, including fast integration and validation of recently introduced tools with heterogeneous software infrastructures. Based on this workflow development, we also discuss our recent advancements to the workflow for reliable and accurate analysis of multi-center longitudinal data. In addition to Nipype providing a unified workflow, its support for High Performance Computing (HPC) resources leads to a further increased time efficiency of our workflow. We show our success on a few selected large-scale studies, and discuss future direction of this translation research in medical imaging applications. }, } |
2016 | In Conf. Proceedings | Ali Ghayoor, Jane S. Paulsen, Regina E. Y. Kim, Hans J. Johnson (2016). Tissue classification of large-scale multi-site MR data using fuzzy k-nearest neighbor method. In Medical Imaging 2016: Image Processing, pp. 97841V. (link) (bib) x @inproceedings{ghayoor2016tissue, year = { 2016 }, volume = { 9784 }, url = { http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.2216625 }, title = { Tissue classification of large-scale multi-site MR data using fuzzy k-nearest neighbor method }, pages = { 97841V }, organization = { International Society for Optics and Photonics }, month = { mar }, keywords = { expectation maximization,fuzzy k-nearest neighborhood method,multi-site studies,neurodegenerative diseases,segmentation,tissue classification }, issn = { 16057422 }, isbn = { 9781510600195 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Ghayoor et al/Medical Imaging 2016 Image Processing/Ghayoor et al. - 2016 - Tissue classification of large-scale multi-site MR data using fuzzy k-nearest neighbor method.pdf:pdf }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2216625 }, booktitle = { Medical Imaging 2016: Image Processing }, author = { Ghayoor and Paulsen and Kim and Johnson }, annote = { From Duplicate 1 (Tissue classification of large-scale multi-site MR data using fuzzy k-nearest neighbor method - Ghayoor, Ali; Paulsen, Jane S.; Kim, Regina EY Y; Johnson, Hans J.) From Duplicate 1 (Tissue classification of large-scale multi-site MR data using fuzzy k-nearest neighbor method - Ghayoor, Ali; Paulsen, Jane S.; Kim, Regina EY; Johnson, Hans J.) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. 
I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} From Duplicate 2 (Tissue classification of large-scale multi-site MR data using fuzzy k-nearest neighbor method - Ghayoor, Ali; Paulsen, Jane S; Kim, Regina E Y; Johnson, Hans J) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} From Duplicate 2 (Tissue classification of large-scale multi-site MR data using fuzzy k-nearest neighbor method - Ghayoor, Ali; Paulsen, Jane S.; Kim, Regina EY Y; Johnson, Hans J.) From Duplicate 1 (Tissue classification of large-scale multi-site MR data using fuzzy k-nearest neighbor method - Ghayoor, Ali; Paulsen, Jane S; Kim, Regina E Y; Johnson, Hans J) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. 
:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} From Duplicate 2 (Tissue classification of large-scale multi-site MR data using fuzzy k-nearest neighbor method - Ghayoor, Ali; Paulsen, Jane S.; Kim, Regina EY Y; Johnson, Hans J.) From Duplicate 1 (Tissue classification of large-scale multi-site MR data using fuzzy k-nearest neighbor method - Ghayoor, Ali; Paulsen, Jane S.; Kim, Regina EY Y; Johnson, Hans J.) From Duplicate 1 (Tissue classification of large-scale multi-site MR data using fuzzy k-nearest neighbor method - Ghayoor, Ali; Paulsen, Jane S.; Kim, Regina EY; Johnson, Hans J.) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} From Duplicate 2 (Tissue classification of large-scale multi-site MR data using fuzzy k-nearest neighbor method - Ghayoor, Ali; Paulsen, Jane S; Kim, Regina E Y; Johnson, Hans J) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} From Duplicate 2 (Tissue classification of large-scale multi-site MR data using fuzzy k-nearest neighbor method - Ghayoor, Ali; Paulsen, Jane S.; Kim, Regina EY; Johnson, Hans J.) 
{\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} }, abstract = { This paper describes enhancements to automate classification of brain tissues formulti-site degenerative magnetic resonance imaging (MRI) data analysis. Processing of large collections of MR images is a key research technique to advance our understanding of the human brain. Previous studies have developed a robust multi-modal tool for automated tissue classification of large-scale data based on expectation maximization (EM) method initialized by group-wise prior probability distributions. This work aims to augment the EM-based classification using a non-parametric fuzzy k-Nearest Neighbor (k-NN) classifier that can model the unique anatomical states of each subject in the study of degenerative diseases. The presented method is applicable to multi-center heterogeneous data analysis and is quantitatively validated on a set of 18 synthetic multi-modal MR datasets having six different levels of noise and three degrees of bias-field provided with known ground truth. Dice index and average Hausdorff distance are used to compare the accuracy and robustness of the proposed method to a state-of-the-art classification method implemented based on EM algorithm. Both evaluation measurements show that presented enhancements produce superior results as compared to the EM only classification. }, } |
2016 | In Conf. Proceedings | Wei Shao, Gary E. Christensen, Hans J. Johnson, Joo H. Song, Oguz C. Durumeric, Casey P. Johnson, Joseph J. Shaffer, Vincent A. Magnotta, Jess G. Fiedorowicz, John A. Wemmie (2016). Population Shape Collapse in Large Deformation Registration of MR Brain Images. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops, pp. 549–557. (bib) x @inproceedings{shao2016population, year = { 2016 }, title = { Population Shape Collapse in Large Deformation Registration of MR Brain Images }, pages = { 549--557 }, keywords = { Diffeomorphic image registration,Shape collapse }, issn = { 21607516 }, isbn = { 9781467388504 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Shao et al/Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops/Shao et al. - 2016 - Population Shape Collapse in Large Deformation Registration of MR Brain Images.pdf:pdf }, doi = { 10.1109/CVPRW.2016.75 }, booktitle = { IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops }, author = { Shao and Christensen and Johnson and Song and Durumeric and Johnson and Shaffer and Magnotta and Fiedorowicz and Wemmie }, annote = { From Duplicate 1 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E.; Johnson, Hans J.; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P.; Shaffer, Joseph J.; Magnotta, Vincent A.; Fiedorowicz, Jess G.; Wemmie, John A) From Duplicate 1 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E.; Johnson, Hans J.; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P.; Shaffer, Joseph J.; Magnotta, Vincent A.; Fiedorowicz, Jess G.; Wemmie, John A) From Duplicate 1 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E; Johnson, Hans J; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P; 
Shaffer, Joseph J; Magnotta, Vincent A; Fiedorowicz, Jess G; Wemmie, John A) From Duplicate 1 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E; Johnson, Hans J; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P; Shaffer, Joseph J; Magnotta, Vincent A; Fiedorowicz, Jess G; Wemmie, John A) {\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I had the primary contributions to the interpretation of validation results for this work. I contributed torevising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E.; Johnson, Hans J.; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P.; Shaffer, Joseph J.; Magnotta, Vincent A.; Fiedorowicz, Jess G.; Wemmie, John A) {\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I had the primary contributions to the interpretation of validation results for this work. I contributed torevising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E.; Johnson, Hans J.; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P.; Shaffer, Joseph J.; Magnotta, Vincent A.; Fiedorowicz, Jess G.; Wemmie, John A) {\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I had the primary contributions to the interpretation of validation results for this work. 
I contributed torevising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E.; Johnson, Hans J.; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P.; Shaffer, Joseph J.; Magnotta, Vincent A.; Fiedorowicz, Jess G.; Wemmie, John A) From Duplicate 1 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E.; Johnson, Hans J.; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P.; Shaffer, Joseph J.; Magnotta, Vincent A.; Fiedorowicz, Jess G.; Wemmie, John A) {\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I had the primary contributions to the interpretation of validation results for this work. I contributed torevising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E.; Johnson, Hans J.; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P.; Shaffer, Joseph J.; Magnotta, Vincent A.; Fiedorowicz, Jess G.; Wemmie, John A) From Duplicate 1 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E.; Johnson, Hans J.; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P.; Shaffer, Joseph J.; Magnotta, Vincent A.; Fiedorowicz, Jess G.; Wemmie, John A) From Duplicate 1 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E; Johnson, Hans J; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P; Shaffer, Joseph J; Magnotta, Vincent A; Fiedorowicz, Jess G; Wemmie, John A) From Duplicate 1 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; 
Christensen, Gary E; Johnson, Hans J; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P; Shaffer, Joseph J; Magnotta, Vincent A; Fiedorowicz, Jess G; Wemmie, John A) {\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I had the primary contributions to the interpretation of validation results for this work. I contributed torevising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E.; Johnson, Hans J.; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P.; Shaffer, Joseph J.; Magnotta, Vincent A.; Fiedorowicz, Jess G.; Wemmie, John A) {\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I had the primary contributions to the interpretation of validation results for this work. I contributed torevising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (Population Shape Collapse in Large Deformation Registration of MR Brain Images - Shao, Wei; Christensen, Gary E.; Johnson, Hans J.; Hyun Song, Joo; Durumeric, Oguz C; Johnson, Casey P.; Shaffer, Joseph J.; Magnotta, Vincent A.; Fiedorowicz, Jess G.; Wemmie, John A) {\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I had the primary contributions to the interpretation of validation results for this work. I contributed torevising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} }, abstract = { This paper examines the shape collapse problem that occurs when registering a pair of images or a population of images of the brain to a reference (target) image coordinate system using diffeomorphic image registration. 
Shape collapse occurs when a foreground or background structure in an image with non-zero volume is transformed into a set of zero or near zero volume as measured on a discrete voxel lattice in the target image coordinate system. Shape collapse may occur during image registration when the moving image has a structure that is either missing or does not sufficiently overlap the corresponding structure in the target image [4]. Such a problem is common in image registration algorithms with large degrees of freedom such as many diffeomorphic image registration algorithms. Shape collapse is a concern when mapping functional data. For example, loss of signal may occur when mapping functional data such as fMRI, PET, SPECT using a transformation with a shape collapse if the functional signal occurs at the collapse region. This paper proposes a novel shape collapse measurement algorithm to detect the regions of shape collapse after image registration in pairwise registration. We further compute the shape collapse for a population of pairwise transformations such as occurs when registering many images to a common atlas coordinate system. Experiments are presented using the SyN diffeomorphic image registration algorithm. We demonstrate how changing the input parameters to the SyN registration algorithm can mitigate some of the collapse image registration artifacts. }, } |
2016 | In Conf. Proceedings | Sarah E Gerard, Hans J. Johnson, John E Bayouth, Gary E. Christensen, Kaifang Du, Junfeng Guo, Joseph M Reinhardt (2016). Alpha Shapes for Lung Segmentation in the Presence of Large Tumors. In PIA: Pulmonary Image Analysis, pp. 1–9. (bib) x @inproceedings{Gerard2016, year = { 2016 }, title = { Alpha Shapes for Lung Segmentation in the Presence of Large Tumors }, pages = { 1--9 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Gerard et al/PIA Pulmonary Image Analysis/Gerard et al. - 2016 - Alpha Shapes for Lung Segmentation in the Presence of Large Tumors.pdf:pdf }, booktitle = { PIA: Pulmonary Image Analysis }, author = { Gerard and Johnson and Bayouth and Christensen and Du and Guo and Reinhardt }, annote = { From Duplicate 1 (Alpha Shapes for Lung Segmentation in the Presence of Large Tumors - Gerard, Sarah E; Johnson, Hans J.; Bayouth, John E; Christensen, Gary E.; Du, Kaifang; Guo, Junfeng; Reinhardt, Joseph M) {\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I was responsible for the data processing for one of the evaluated algorithms. I had the primary contributions to the interpretation of validation results for this work. 
I contributed to revising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} From Duplicate 2 (Alpha Shapes for Lung Segmentation in the Presence of Large Tumors - Gerard, Sarah E; Johnson, Hans J.; Bayouth, John E; Christensen, Gary E.; Du, Kaifang; Guo, Junfeng; Reinhardt, Joseph M) From Duplicate 1 (Alpha Shapes for Lung Segmentation in the Presence of Large Tumors - Gerard, Sarah E; Johnson, Hans J.; Bayouth, John E; Christensen, Gary E.; Du, Kaifang; Guo, Junfeng; Reinhardt, Joseph M) From Duplicate 1 (Alpha Shapes for Lung Segmentation in the Presence of Large Tumors - Gerard, Sarah E; Johnson, Hans J.; Bayouth, John E; Christensen, Gary E.; Du, Kaifang; Guo, Junfeng; Reinhardt, Joseph M) {\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I was responsible for the data processing for one of the evaluated algorithms. I had the primary contributions to the interpretation of validation results for this work. I contributed to revising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} From Duplicate 2 (Alpha Shapes for Lung Segmentation in the Presence of Large Tumors - Gerard, Sarah E; Johnson, Hans J; Bayouth, John E; Christensen, Gary E; Du, Kaifang; Guo, Junfeng; Reinhardt, Joseph M) {\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I was responsible for the data processing for one of the evaluated algorithms. I had the primary contributions to the interpretation of validation results for this work. 
I contributed to revising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} From Duplicate 2 (Alpha Shapes for Lung Segmentation in the Presence of Large Tumors - Gerard, Sarah E; Johnson, Hans J.; Bayouth, John E; Christensen, Gary E.; Du, Kaifang; Guo, Junfeng; Reinhardt, Joseph M) {\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I was responsible for the data processing for one of the evaluated algorithms. I had the primary contributions to the interpretation of validation results for this work. I contributed to revising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} }, abstract = { Lung segmentation is a critical initial step in planning radiation therapy interventions for lung cancer patients. Achieving robust automatic segmentation of lungs with large tumors is challenging due to large variations in lung morphology, tumor location, and tumor shape between subjects. We present an automatic method to segment lungs with large tumors in CT images using an initial intensity based segmentation followed by alpha shape construction and graph search. We evaluated our method by comparing automated segmentations to manual segmentations on twelve subjects. Computed metrics for segmentation quality include average surface distance of 0.727 mm and average DICE coefficient of 0.970. These results demonstrate that the proposed method accurately segments the entire lung regions both free of and in the presence of large tumors. }, } |
2016 | In Conf. Proceedings | Adam R. Sibley, Erica Markiewicz, Devkumar Mustafi, Xiaobing Fan, Suzanne Conzen, Greg Karczmar, Maryellen L. Giger (2016). Computerized segmentation algorithm with personalized atlases of murine MRIs in a SV40 large T-antigen mouse mammary cancer model. In Medical Imaging 2016: Biomedical Applications in Molecular, Structural, and Functional Imaging, pp. 97882M, Bellingham. (link) (bib) x @inproceedings{Sibley2016, year = { 2016 }, volume = { 9788 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978919989{\&}doi=10.1117{\%}2F12.2217425{\&}partnerID=40{\&}md5=df4d24295f5fc4323b0407742579e008 }, type = { Conference Proceedings }, title = { Computerized segmentation algorithm with personalized atlases of murine MRIs in a SV40 large T-antigen mouse mammary cancer model }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 97882M }, issn = { 16057422 }, isbn = { 9781510600232 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2217425 }, booktitle = { Medical Imaging 2016: Biomedical Applications in Molecular, Structural, and Functional Imaging }, author = { Sibley and Markiewicz and Mustafi and Fan and Conzen and Karczmar and Giger }, address = { Bellingham }, } |
2016 | In Conf. Proceedings | Bidisha Chakraborty, Brecht Heyde, Martino Alessandrini, Jan D'hooge (2016). Fast myocardial strain estimation from 3D ultrasound through elastic image registration with analytic regularization. In Medical Imaging 2016: Ultrasonic Imaging and Tomography, pp. 979006, Bellingham. (link) (bib) x @inproceedings{Chakraborty2016, year = { 2016 }, volume = { 9790 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84976497384{\&}doi=10.1117{\%}2F12.2216781{\&}partnerID=40{\&}md5=7e796a05f80c2deec426ba41b27b2284 }, type = { Conference Proceedings }, title = { Fast myocardial strain estimation from 3D ultrasound through elastic image registration with analytic regularization }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 979006 }, issn = { 16057422 }, isbn = { 9781510600256 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2216781 }, booktitle = { Medical Imaging 2016: Ultrasonic Imaging and Tomography }, author = { Chakraborty and Heyde and Alessandrini and D'hooge }, address = { Bellingham }, abstract = { {\textcopyright} 2016 SPIE. Image registration techniques using free-form deformation models have shown promising results for 3D myocardial strain estimation from ultrasound. However, the use of this technique has mostly been limited to research institutes due to the high computational demand, which is primarily due to the computational load of the regularization term ensuring spatially smooth cardiac strain estimates. Indeed, this term typically requires evaluating derivatives of the transformation field numerically in each voxel of the image during every iteration of the optimization process. 
In this paper, we replace this time-consuming step with a closed-form solution directly associated with the transformation field resulting in a speed up factor of ∼10-60,000, for a typical 3D B-mode image of $250^3$ and $500^3$ voxels, depending upon the size and the parametrization of the transformation field. The performance of the numeric and the analytic solutions was contrasted by computing tracking and strain accuracy on two realistic synthetic 3D cardiac ultrasound sequences, mimicking two ischemic motion patterns. Mean and standard deviation of the displacement errors over the cardiac cycle for the numeric and analytic solutions were 0.68±0.40 mm and 0.75±0.43 mm respectively. Correlations for the radial, longitudinal and circumferential strain components at end-systole were 0.89, 0.83 and 0.95 versus 0.90, 0.88 and 0.92 for the numeric and analytic regularization respectively. The analytic solution matched the performance of the numeric solution as no statistically significant differences (p {\textgreater} 0.05) were found when expressed in terms of bias or limits-of-agreement. }, } |
2016 | In Conf. Proceedings | Timur Gamilov, Roman Pryamonosov, Sergey Simakov (2016). Modeling of patient-specific cases of atherosclerosis in carotid arteries. In ECCOMAS Congress 2016 - Proceedings of the 7th European Congress on Computational Methods in Applied Sciences and Engineering, pp. 81–89. (link) (bib) x @inproceedings{Gamilov, year = { 2016 }, volume = { 1 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84995444960{\&}doi=10.7712{\%}2F100016.1793.8690{\&}partnerID=40{\&}md5=47f13a248cbaf7933cea6036588d227b }, type = { Conference Proceedings }, title = { Modeling of patient-specific cases of atherosclerosis in carotid arteries }, pages = { 81--89 }, keywords = { 1D haemodynamics,Carotid artery stenosis,Circle of Willis,Microcirculation,Patient-specific,Vessel segmentation }, isbn = { 9786188284401 }, doi = { 10.7712/100016.1793.8690 }, booktitle = { ECCOMAS Congress 2016 - Proceedings of the 7th European Congress on Computational Methods in Applied Sciences and Engineering }, author = { Gamilov and Pryamonosov and Simakov }, abstract = { 1D model is used to simulate blood flow in major vessels of the upper body and head. The 1D part is stated in terms of viscous incompressible fluid flow in the network of elastic tubes. Two different types of junctions are considered: junctions between major vessels and junctions between arteries and veins. Vessel network reconstruction algorithm consists of vessel segmentation, thinning-based obtaining of set of centerlines, and graph reconstruction. Input data is 3D DICOM datasets, obtained with contrast enhanced Computed Tomography (CT) Angiography. Constructed model is used to study the influence of carotid artery stenosis on the direction of blood flow in the circle of Willis. }, } |
2016 | In Conf. Proceedings | Alexander A. Danilov, Roman A. Pryamonosov, Alexandra S. Yurova (2016). Image segmentation techniques for biomedical modeling: Electrophysiology and hemodynamics. In ECCOMAS Congress 2016 - Proceedings of the 7th European Congress on Computational Methods in Applied Sciences and Engineering, pp. 454–461. (link) (bib) x @inproceedings{Danilov, year = { 2016 }, volume = { 1 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84995513594{\&}doi=10.7712{\%}2F100016.1827.10770{\&}partnerID=40{\&}md5=3954727a98c6ce96faf2f9cd25fba8f7 }, type = { Conference Proceedings }, title = { Image segmentation techniques for biomedical modeling: Electrophysiology and hemodynamics }, pages = { 454--461 }, keywords = { Electrophysiology,Hemodynamics,Image segmentation,Medical images,Mesh generation }, isbn = { 9786188284401 }, doi = { 10.7712/100016.1827.10770 }, booktitle = { ECCOMAS Congress 2016 - Proceedings of the 7th European Congress on Computational Methods in Applied Sciences and Engineering }, author = { Danilov and Pryamonosov and Yurova }, abstract = { The work addresses segmentation techniques for generation of individualized computational domains on the basis of medical imaging dataset. The computational domains will be used in 3D electrophysiology models and 3D-1D coupled hemodynamics models. Several techniques for user-guided and automated segmentation of soft tissues, segmentation of vascular and tubular structures, generation of centerlines, 1D network reconstruction, correction and local adaptation are examined. We propose two algorithms for automatic vascular network segmentation and user-guided cardiac segmentation. }, } |
2016 | In Conf. Proceedings | Colin Blackburn, Chris Allan, Sébastien Besson, Jean-Marie Burel, Mark Carroll, Richard K. Ferguson, Helen Flynn, David Gault, Kenneth Gillen, Roger Leigh, Simone Leo, Simon Li, Dominik Lindner, Melissa Linkert, Josh Moore, William J. Moore, Balaji Ramalingam, Emil Rozbicki, Gabriella Rustici, Aleksandra Tarkowska, Petr Walczysko, Eleanor Williams, Jason R. Swedlow (2016). The Open Microscopy Environment: open image informatics for the biological sciences. In Software and Cyberinfrastructure for Astronomy IV, pp. 991324. (link) (bib) x @inproceedings{Blackburn, year = { 2016 }, volume = { 9913 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85006380046{\&}doi=10.1117{\%}2F12.2232291{\&}partnerID=40{\&}md5=e050c03cd02d1a387a82a993b146e1d1 }, type = { Conference Proceedings }, title = { The Open Microscopy Environment: open image informatics for the biological sciences }, pages = { 991324 }, issn = { 1996756X }, isbn = { 9781510602052 }, doi = { 10.1117/12.2232291 }, booktitle = { Software and Cyberinfrastructure for Astronomy IV }, author = { Blackburn and Allan and Besson and Burel and Carroll and Ferguson and Flynn and Gault and Gillen and Leigh and Leo and Li and Lindner and Linkert and Moore and Moore and Ramalingam and Rozbicki and Rustici and Tarkowska and Walczysko and Williams and Swedlow }, abstract = { Despite significant advances in biological imaging and analysis, major informatics challenges remain unsolved: file formats are proprietary, storage and analysis facilities are lacking, as are standards for sharing image data and results. While the open FITS file format is ubiquitous in astronomy, astronomical imaging shares many challenges with biological imaging, including the need to share large image sets using secure, cross-platform APIs, and the need for scalable applications for processing and visualization. 
The Open Microscopy Environment (OME) is an open-source software framework developed to address these challenges. OME tools include: an open data model for multidimensional imaging (OME Data Model); an open file format (OME-TIFF) and library (Bio-Formats) enabling free access to images (5D+) written in more than 145 formats from many imaging domains, including FITS; and a data management server (OMERO). The Java-based OMERO client-server platform comprises an image metadata store, an image repository, visualization and analysis by remote access, allowing sharing and publishing of image data. OMERO provides a means to manage the data through a multi-platform API. OMERO's model-based architecture has enabled its extension into a range of imaging domains, including light and electron microscopy, high content screening, digital pathology and recently into applications using non-image data from clinical and genomic studies. This is made possible using the Bio-Formats library. The current release includes a single mechanism for accessing image data of all types, regardless of original file format, via Java, C/C++ and Python and a variety of applications and environments (e.g. ImageJ, Matlab and R). }, } |
2016 | In Conf. Proceedings | E. P. Vasil'ev, A. A. Belokamenskaja, M. M. Novozhilov, V. E. Turlapov (2016). A parallel algorithm for 3D reconstruction of internal organs according to imaging based on the active contour model. In CEUR Workshop Proceedings, pp. 482–489. (link) (bib) x @inproceedings{RN927, year = { 2016 }, volume = { 1576 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978488492{\&}partnerID=40{\&}md5=ce7b9f7668691df1ee707e7ab33ab66e }, type = { Conference Proceedings }, title = { A parallel algorithm for 3D reconstruction of internal organs according to imaging based on the active contour model }, publisher = { CEUR-WS }, pages = { 482--489 }, keywords = { 3D reconstruction,3D segmentation,Reference geometrical model,Segmentation,Tomography active contour method }, issn = { 16130073 }, isbn = { 16130073 (ISSN) }, editor = { [object Object],[object Object] }, booktitle = { CEUR Workshop Proceedings }, author = { Vasil'ev and Belokamenskaja and Novozhilov and Turlapov }, abstract = { In this paper we consider the problem of building parallel methods for three-dimensional human organs' geometrical reconstruction based on the active contour method. The idea of computing decomposition is based on opportunity to visit all layers in both directions from the starting layer and use several starting layers that have initial contours generated from the reference body geometrical model. Using ITK library we have implemented parallel kidney segmentation algorithms. The first parallel version gave acceleration of about 1.6 for 2 threads on CPU, and the second - a significant increase in efficiency with an increase in the number of points in the contour, but in the range 3.2 times for 8 threads on CPU. We are planning to transfer the algorithm on the GPU. }, } |
2016 | In Conf. Proceedings | Szabolcs Urbán, László Ruskó, Antal Nagy (2016). Semi-automatic tumor contouring method using PET and MRI medical images. In Computational Vision and Medical Image Processing V - Proceedings of 5th Eccomas Thematic Conference on Computational Vision and Medical Image Processing, VipIMAGE 2015, pp. 209–214. (link) (bib) x @inproceedings{RN925, year = { 2016 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84959298536{\&}partnerID=40{\&}md5=54e2f8c3e13fa638156bc0e79854a686 }, type = { Conference Proceedings }, title = { Semi-automatic tumor contouring method using PET and MRI medical images }, publisher = { CRC Press/Balkema }, pages = { 209--214 }, isbn = { 9781138029262 }, editor = { [object Object],[object Object] }, doi = { 10.1201/b19241-35 }, booktitle = { Computational Vision and Medical Image Processing V - Proceedings of 5th Eccomas Thematic Conference on Computational Vision and Medical Image Processing, VipIMAGE 2015 }, author = { Urb{\'{a}}n and Rusk{\'{o}} and Nagy }, abstract = { Tumor contouring is a challenging task for physicians especially when functional as well as anatomical images are incorporated. This paper presents a semi-automated technique to solve this problem. The proposed method is a variant of the level-set segmentation where the initial region is based on the functional image and the speed function combines both images. The presented approach allows the user to balance between the information of two input images. The method was evaluated on registered head and neck PET-MRI image pairs using manually defined tumor contours as reference. The algorithm was tested for various types of tumors using different weights to combine the functional and the anatomical information. The best results showed good correlation with the reference (3{\%} volume difference and 80{\%} DICE similarity in average). }, } |
2016 | In Conf. Proceedings | Maxine Tan, Zheng Li, Kathleen Moore, Theresa Thai, Kai Ding, Hong Liu, Bin Zheng (2016). A B-spline image registration based CAD scheme to evaluate drug treatment response of ovarian cancer patients. In Medical Imaging 2016: Computer-Aided Diagnosis, pp. 97853D. (link) (bib) x @inproceedings{RN928, year = { 2016 }, volume = { 9785 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84988892588{\&}doi=10.1117{\%}2F12.2216303{\&}partnerID=40{\&}md5=eab17b988f48475720c13a6e6b359caa }, type = { Conference Proceedings }, title = { A B-spline image registration based CAD scheme to evaluate drug treatment response of ovarian cancer patients }, publisher = { SPIE }, pages = { 97853D }, issn = { 16057422 }, isbn = { 9781510600201 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2216303 }, booktitle = { Medical Imaging 2016: Computer-Aided Diagnosis }, author = { Tan and Li and Moore and Thai and Ding and Liu and Zheng }, } |
2016 | In Conf. Proceedings | Mark Palmeri, Tyler Glass, Rajan Gupta, Matt McCormick, Alison Brown, Thomas Polascik, Stephen Rosenzweig, Andrew Buck, Kathy Nightingale (2016). Comparison between 3D ARFI imaging and mpMRI in detecting clinically-significant prostate cancer lesions. In IEEE International Ultrasonics Symposium, IUS, pp. NA (link) (bib) x @inproceedings{RN915, year = { 2016 }, volume = { 2016-Novem }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84996562155{\&}doi=10.1109{\%}2FULTSYM.2016.7728618{\&}partnerID=40{\&}md5=06c5d391a733c4454dbbf008fae5b2ca }, type = { Conference Proceedings }, title = { Comparison between 3D ARFI imaging and mpMRI in detecting clinically-significant prostate cancer lesions }, publisher = { IEEE Computer Society }, issn = { 19485727 }, isbn = { 9781467398978 }, doi = { 10.1109/ULTSYM.2016.7728618 }, booktitle = { IEEE International Ultrasonics Symposium, IUS }, author = { Palmeri and Glass and Gupta and McCormick and Brown and Polascik and Rosenzweig and Buck and Nightingale }, abstract = { Current prostate cancer screening methods involve non-targeted needle biopsies and detection of clinically-insignificant lesions that receive excessive treatments, exposing patients to unnecessary adverse side effects and placing a burden on our health care systems. There is a strong clinical need for improved prostate imaging methods that are sensitive and specific for clinically-significant prostate cancer lesions to guide needle biopsies, target focal treatments, and improve overall patient outcomes. In this study, we compared 3D in vivo Acoustic Radiation Force Impulse (ARFI) imaging with 3 Tesla, endorectal coil, multi-parametric magnetic resonance imaging (mpMRI) to correlate the ability for each modality to identify clinically-significant prostate cancer lesions. 
We also correlated Apparent Diffusion Coefficient (ADC) values from Diffusion Weighted Imaging (DWI) MR sequences with ARFI indices of suspicion and MR Prostate Imaging - Reporting and Data Systems (PI-RADS) scores, testing the hypothesis that increased cellular density is associated with regions suspicious for prostate cancer in ARFI images. Overall, ARFI and mpMR imaging were well-correlated in identifying clinically-significant prostate cancer lesions. There were several cases where only one of the imaging modalities was able to identify the prostate cancer lesion, highlighting the potential to further improve prostate cancer lesion detection and localization with a fused ARFI:mpMRI imaging system. ADC values were decreased in all prostate cancer lesions identified with mpMRI, but there were no obvious trends between the absolute ADC values and the ARFI image indices of suspicion. }, } |
2016 | In Conf. Proceedings | Kasper Marstal, Floris Berendsen, Marius Staring, Stefan Klein (2016). SimpleElastix: A User-Friendly, Multi-lingual Library for Medical Image Registration. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops, pp. 574–582. (link) (bib) x @inproceedings{RN977, year = { 2016 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85010216516{\&}doi=10.1109{\%}2FCVPRW.2016.78{\&}partnerID=40{\&}md5=0d777eb051999b3ff620a82e4ec05c67 }, type = { Conference Proceedings }, title = { SimpleElastix: A User-Friendly, Multi-lingual Library for Medical Image Registration }, publisher = { IEEE Computer Society }, pages = { 574--582 }, issn = { 21607516 }, isbn = { 9781467388504 }, doi = { 10.1109/CVPRW.2016.78 }, booktitle = { IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops }, author = { Marstal and Berendsen and Staring and Klein }, abstract = { In this paper we present SimpleElastix, an extension of SimpleITK designed to bring the Elastix medical image registration library to a wider audience. Elastix is a modular collection of robust C++ image registration algorithms that is widely used in the literature. However, its command-line interface introduces overhead during prototyping, experimental setup, and tuning of registration algorithms. By integrating Elastix with SimpleITK, Elastix can be used as a native library in Python, Java, R, Octave, Ruby, Lua, Tcl and C{\#} on Linux, Mac and Windows. This allows Elastix to intregrate naturally with many development environments so the user can focus more on the registration problem and less on the underlying C++ implementation. As means of demonstration, we show how to register MR images of brains and natural pictures of faces using minimal amount of code. SimpleElastix is open source, licensed under the permissive Apache License Version 2.0 and available at https://github.com/kaspermarstal/SimpleElastix. }, } |
2016 | In Conf. Proceedings | Kwame S. Kutten, Joshua T. Vogelstein, Nicolas Charon, Li Ye, Karl Deisseroth, Michael I. Miller (2016). Deformably registering and annotating whole CLARITY brains to an atlas via masked LDDMM. In Optics, Photonics and Digital Technologies for Imaging Applications IV, pp. 989616. (link) (bib) x @inproceedings{RN922, year = { 2016 }, volume = { 9896 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84991503754{\&}doi=10.1117{\%}2F12.2227444{\&}partnerID=40{\&}md5=e5bfa771d1160566e24476b33887e0d9 }, type = { Conference Proceedings }, title = { Deformably registering and annotating whole CLARITY brains to an atlas via masked LDDMM }, publisher = { SPIE }, pages = { 989616 }, issn = { 1996756X }, isbn = { 9781510601413 }, eprint = { 1605.02060 }, editor = { [object Object],[object Object],[object Object],[object Object],[object Object] }, doi = { 10.1117/12.2227444 }, booktitle = { Optics, Photonics and Digital Technologies for Imaging Applications IV }, author = { Kutten and Vogelstein and Charon and Ye and Deisseroth and Miller }, arxivid = { 1605.02060 }, archiveprefix = { arXiv }, abstract = { The CLARITY method renders brains optically transparent to enable high-resolution imaging in the structurally intact brain. Anatomically annotating CLARITY brains is necessary for discovering which regions contain signals of interest. Manually annotating whole-brain, terabyte CLARITY images is difficult, time-consuming, subjective, and error-prone. Automatically registering CLARITY images to a pre-annotated brain atlas offers a solution, but is difficult for several reasons. Removal of the brain from the skull and subsequent storage and processing cause variable non-rigid deformations, thus compounding inter-subject anatomical variability. Additionally, the signal in CLARITY images arises from various biochemical contrast agents which only sparsely label brain structures. 
This sparse labeling challenges the most commonly used registration algorithms that need to match image histogram statistics to the more densely labeled histological brain atlases. The standard method is a multiscale Mutual Information B-spline algorithm that dynamically generates an average template as an intermediate registration target. We determined that this method performs poorly when registering CLARITY brains to the Allen Institute's Mouse Reference Atlas (ARA), because the image histogram statistics are poorly matched. Therefore, we developed a method (Mask-LDDMM) for registering CLARITY images, that automatically find the brain boundary and learns the optimal deformation between the brain and atlas masks. Using Mask-LDDMM without an average template provided better results than the standard approach when registering CLARITY brains to the ARA. The LDDMM pipelines developed here provide a fast automated way to anatomically annotate CLARITY images. Our code is available as open source software at http://NeuroData.io. }, } |
2016 | In Conf. Proceedings | Katarzyna Heryan, Andrzej Skalski, Janusz Gajda, Tomasz Drewniak, Jacek Jakubowski (2016). Registration of different phases of contrast-enhanced CT for facilitation of partial nephrectomy. In IST 2016 - 2016 IEEE International Conference on Imaging Systems and Techniques, Proceedings, pp. 255–260. (link) (bib) x @inproceedings{RN914, year = { 2016 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85004072339{\&}doi=10.1109{\%}2FIST.2016.7738233{\&}partnerID=40{\&}md5=aa3e12d899e600e256992bc3fbaface3 }, type = { Conference Proceedings }, title = { Registration of different phases of contrast-enhanced CT for facilitation of partial nephrectomy }, publisher = { Institute of Electrical and Electronics Engineers Inc. }, pages = { 255--260 }, keywords = { B-spline registration,image registration,medical image processing,partial nephrectomy,segmentation,surgery planning }, isbn = { 9781509018178 }, doi = { 10.1109/IST.2016.7738233 }, booktitle = { IST 2016 - 2016 IEEE International Conference on Imaging Systems and Techniques, Proceedings }, author = { Heryan and Skalski and Gajda and Drewniak and Jakubowski }, abstract = { From the preoperative partial nephrectomy planning perspective, it is essential to expose separately different kidney structures and to analyze their mutual topological relations. Only then, the identification of possible conflicts prior to surgical intervention can be facilitated. To enable this, we propose a segmentation frameworks for renal vascular tree, kidney and pelvicalyceal system from corresponding CT phases. In order to compensate for both patient position changes and volumetric changes related to respiratory activity, registration of different CT phases is required. It is performed by combining global rigid transform with multilevel and multiresolution B-spline registration. The research material consisted of fifteen patients that underwent CT scanning preceding kidney cancer surgery. 
Presented results using checkerboards and differential images prove the effectiveness of the proposed method. In addition, visualizations of the segmented structures (renal arteries, kidney, pelvicalyceal system) from registered CT phases are provided to exemplary demonstrate individual model for preoperative planning. This kind of solution meets the expectations of urological oncology in terms of facilitating planning the optimal surgical approach in partial nephrectomy. To the best of our knowledge, such a comprehensive strategy involving both the proposed segmentation frameworks and registration has not been introduced yet. }, } |
2016 | In Conf. Proceedings | Soheil Ghafurian, Dimitris N. Metaxas, Virak Tan, Kang Li (2016). Fast generation of digitally reconstructed radiograph through an efficient preprocessing of ray attenuation values. In Medical Imaging 2016: Image-Guided Procedures, Robotic Interventions, and Modeling, pp. 97860C. (link) (bib) x @inproceedings{RN929, year = { 2016 }, volume = { 9786 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84982108538{\&}doi=10.1117{\%}2F12.2217756{\&}partnerID=40{\&}md5=dc30594f8f1ae1c399aff0783e094425 }, type = { Conference Proceedings }, title = { Fast generation of digitally reconstructed radiograph through an efficient preprocessing of ray attenuation values }, publisher = { SPIE }, pages = { 97860C }, issn = { 16057422 }, isbn = { 9781510600218 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2217756 }, booktitle = { Medical Imaging 2016: Image-Guided Procedures, Robotic Interventions, and Modeling }, author = { Ghafurian and Metaxas and Tan and Li }, abstract = { {\textcopyright} 2016 SPIE.Digitally reconstructed radiographs (DRR) are a simulation of radiographic images produced through a perspective projection of the three-dimensional (3D) image (volume) onto a two-dimensional (2D) image plane. The traditional method for the generation of DRRs, namely ray-casting, is a computationally intensive process and accounts for most of solution time in 3D/2D medical image registration frameworks, where a large number of DRRs is required. A few alternate methods for a faster DRR generation have been proposed, the most successful of which are based on the idea of pre-calculating the attenuation value of possible rays. Despite achieving good quality, these methods support a limited range of motion for the volume and entail long pre-calculation time. In this paper, we propose a new preprocessing procedure and data structure for the calculation of the ray attenuation values. 
This method supports all possible volume positions with practically small memory requirements in addition to reducing the complexity of the problem from O(n3) to O(n2). In our experiments, we generated DRRs of high quality in 63 milliseconds with a preprocessing time of 99.48 seconds and a memory size of 7.45 megabytes. }, } |
2016 | In Conf. Proceedings | Konstantin Dmitriev, Ievgeniia Gutenko, Saad Nadeem, Arie Kaufman (2016). Pancreas and cyst segmentation. In Medical Imaging 2016: Image Processing, pp. 97842C. (link) (bib) x @inproceedings{RN926, year = { 2016 }, volume = { 9784 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84981736248{\&}doi=10.1117{\%}2F12.2216537{\&}partnerID=40{\&}md5=0c6892196eefd59bd4552f6498d124be }, type = { Conference Proceedings }, title = { Pancreas and cyst segmentation }, publisher = { SPIE }, pages = { 97842C }, issn = { 16057422 }, isbn = { 9781510600195 }, doi = { 10.1117/12.2216537 }, booktitle = { Medical Imaging 2016: Image Processing }, author = { Dmitriev and Gutenko and Nadeem and Kaufman }, } |
2016 | In Conf. Proceedings | A. Bilqis, R. Widita (2016). Comparison of segmentation using fast marching and geodesic active contours methods for bone. In Journal of Physics: Conference Series, pp. NA (link) (bib) x @inproceedings{RN970, year = { 2016 }, volume = { 694 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84971658619{\&}doi=10.1088{\%}2F1742-6596{\%}2F694{\%}2F1{\%}2F012044{\&}partnerID=40{\&}md5=5864b0c4fb7c08baa7e2a09f54dd0b9e }, type = { Conference Proceedings }, title = { Comparison of segmentation using fast marching and geodesic active contours methods for bone }, publisher = { Institute of Physics Publishing }, number = { 1 }, issn = { 17426596 }, isbn = { 17426588 (ISSN) }, editor = { [object Object],[object Object],[object Object],[object Object],[object Object] }, doi = { 10.1088/1742-6596/694/1/012044 }, booktitle = { Journal of Physics: Conference Series }, author = { Bilqis and Widita }, abstract = { Image processing is important in diagnosing diseases or damages of human organs. One of the important stages of image processing is segmentation process. Segmentation is a separation process of the image into regions of certain similar characteristics. It is used to simplify the image to make an analysis easier. The case raised in this study is image segmentation of bones. Bone's image segmentation is a way to get bone dimensions, which is needed in order to make prosthesis that is used to treat broken or cracked bones. Segmentation methods chosen in this study are fast marching and geodesic active contours. This study uses ITK (Insight Segmentation and Registration Toolkit) software. The success of the segmentation was then determined by calculating its accuracy, sensitivity, and specificity. Based on the results, the Active Contours method has slightly higher accuracy and sensitivity values than the fast marching method. 
As for the value of specificity, fast marching has produced three image results that have higher specificity values compared to those of geodesic active contour's. The result also indicates that both methods have succeeded in performing bone's image segmentation. Overall, geodesic active contours method is quite better than fast marching in segmenting bone images. }, } |
2016 | In Conf. Proceedings | Floris F. Berendsen, Kasper Marstal, Stefan Klein, Marius Staring (2016). The Design of SuperElastix - A Unifying Framework for a Wide Range of Image Registration Methodologies. In IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops, pp. 498–506. (link) (bib) x @inproceedings{RN912, year = { 2016 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85010223955{\&}doi=10.1109{\%}2FCVPRW.2016.69{\&}partnerID=40{\&}md5=dc7baf79ffe3f2bf7316f023fd6aa7a1 }, type = { Conference Proceedings }, title = { The Design of SuperElastix - A Unifying Framework for a Wide Range of Image Registration Methodologies }, publisher = { IEEE Computer Society }, pages = { 498--506 }, issn = { 21607516 }, isbn = { 9781467388504 }, doi = { 10.1109/CVPRW.2016.69 }, booktitle = { IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops }, author = { Berendsen and Marstal and Klein and Staring }, abstract = { A large diversity of image registration methodologies has emerged from the research community. The scattering of methods over toolboxes impedes rigorous comparison to select the appropriate method for a given application. Toolboxes typically tailor their implementations to a mathematical registration paradigm, which makes internal functionality nonexchangeable. Subsequently, this forms a barrier for adoption of registration technology in the clinic. We therefore propose a unifying, role-based software design that can integrate a broad range of functional registration components. These components can be configured into an algorithmic network via a single highlevel user interface. A generic component handshake mechanism provides users feedback on incompatibilities. We demonstrate the viability of our design by incorporating two paradigms from different code bases. The implementation is done in C++ and is available as open source. 
The progress of embedding more paradigms can be followed via https://github.com/kaspermarstal/SuperElastix. }, } |
2016 | In Conf. Proceedings | A. R. Amanda, R. Widita (2016). Comparison of image segmentation of lungs using methods: Connected threshold, neighborhood connected, and threshold level set segmentation. In Journal of Physics: Conference Series, pp. NA (link) (bib) x @inproceedings{RN969, year = { 2016 }, volume = { 694 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84971601490{\&}doi=10.1088{\%}2F1742-6596{\%}2F694{\%}2F1{\%}2F012048{\&}partnerID=40{\&}md5=211e94ed993c83699b57654dc8bcf38d }, type = { Conference Proceedings }, title = { Comparison of image segmentation of lungs using methods: Connected threshold, neighborhood connected, and threshold level set segmentation }, publisher = { Institute of Physics Publishing }, number = { 1 }, issn = { 17426596 }, isbn = { 17426588 (ISSN) }, editor = { [object Object],[object Object],[object Object],[object Object],[object Object] }, doi = { 10.1088/1742-6596/694/1/012048 }, booktitle = { Journal of Physics: Conference Series }, author = { Amanda and Widita }, abstract = { The aim of this research is to compare some image segmentation methods for lungs based on performance evaluation parameter (Mean Square Error (MSE) and Peak Signal Noise to Ratio (PSNR)). In this study, the methods compared were connected threshold, neighborhood connected, and the threshold level set segmentation on the image of the lungs. These three methods require one important parameter, i.e the threshold. The threshold interval was obtained from the histogram of the original image. The software used to segment the image here was InsightToolkit-4.7.0 (ITK). This research used 5 lung images to be analyzed. Then, the results were compared using the performance evaluation parameter determined by using MATLAB. The segmentation method is said to have a good quality if it has the smallest MSE value and the highest PSNR. 
The results show that four sample images match the criteria of connected threshold, while one sample refers to the threshold level set segmentation. Therefore, it can be concluded that connected threshold method is better than the other two methods for these cases. }, } |
2015 | Book | Hans J. Johnson, Matthew M McCormick, Luis Ibanez (2015). The ITK Software Guide Book 1: Introduction and Development Guidelines Fourth Edition Updated for ITK version 4.7, NA 2015. (link) (bib) x @book{Johnson2015a, year = { 2015 }, url = { https://itk.org/ }, title = { The ITK Software Guide Book 1: Introduction and Development Guidelines Fourth Edition Updated for ITK version 4.7 }, isbn = { 978-1930934276 }, booktitle = { Kitware, Inc.(January 2015) }, author = { Johnson and McCormick and Ibanez }, abstract = { The Insight Toolkit (ITK) is an open-source software toolkit for performing registration and segmen- tation. Segmentation is the process of identifying and classifying data found in a digitally sampled representation. Typically the sampled representation is an image acquired fromsuchmedical instru- mentation as CT orMRI scanners. Registration is the task of aligning or developing correspondences between data. For example, in themedical environment, a CT scan may be aligned with aMRI scan in order to combine the information contained in both. }, } |
2015 | Book | Hans J. Johnson, Matthew M McCormick, Luis Ibanez (2015). The ITK Software Guide: Introduction and Development Guidelines version 4.6, NA 2015. (link) (bib) x @book{Johnson2015, year = { 2015 }, url = { https://itk.org/ }, title = { The ITK Software Guide: Introduction and Development Guidelines version 4.6 }, pages = { 248 }, keywords = { Guide,Registration,Segmentation }, isbn = { 1930934270 }, booktitle = { Kitware, Inc.(January 2015) }, author = { Johnson and McCormick and Ibanez }, abstract = { The Insight Toolkit (ITK) is an open-source software toolkit for performing registra tion and segmen- tation. Segmentation is the process of identifying and classifying data found in a digitally sampled representation. Typically the sampled representation is a n image acquired from such medical instru- mentation as CT or MRI scanners. Registration is the task of aligning or developing correspondences between data. For example, in the medical environment, a CT s can may be aligned with a MRI scan in order to combine the information contained in both. ITK is a cross-platform software. It uses a build environmen t known as CMake to manage platform- specific project generation and compilation process in a pla tform-independent way. ITK is imple- mented in C++. ITK's implementation style employs generic p rogramming, which involves the use of templates to generate, at compile-time, code that can be applied generically to any class or data-type that supports the operations used by the template . The use of C++ templating means that the code is highly efficient and many issues are discovered at compile-time, rather than at run-time during program execution. It also means that many of ITK's al gorithms can be applied to arbitrary spatial dimensions and pixel types. An automated wrapping system integrated with ITK generates an interface between C++ and a high- level programming language Python . 
This enables rapid prototyping and faster exploration of i deas by shortening the edit-compile-execute cycle. In addition to automated wrapping, the SimpleITK project provides a streamlined interface to ITK that is avai lable for C++, Python, Java, CSharp, R, Tcl and Ruby. Developers from around the world can use, debug, maintain, a nd extend the software because ITK is an open-source project. ITK uses a model of software devel opment known as Extreme Program- ming. Extreme Programming collapses the usual software dev elopment methodology into a simulta- neous iterative process of design-implement-test-releas e. The key features of Extreme Programming are communication and testing. Communication among the mem bers of the ITK community is what helps manage the rapid evolution of the software. Testing is what keeps the software stable. An extensive testing process supported by the system known as CDash measures the quality of ITK code on a daily basis. The ITK Testing Dashboard is updated co ntinuously, reflecting the quality of the code at any moment. The most recent version of this document is available online at http://itk.org/ItkSoftwareGuide.pdf . This book is a guide to developing software with ITK; it is the first of two companion books. This book cove rs building and installation, general architecture and design, as well as the process of contribut ing in the ITK community. The second book covers detailed design and functionality for reading a nd writing images, filtering, registration, segmentation, and performing statistical analysis. }, } |
2015 | Book | Aared L Yarbs, Basil Haigh, Ulrich Ettinger, Veena Kumari, Trevor J. Crawford, Vanja Flak, Tonmoy Sharma, Robert E. Davis, Philip J. Corr, Inti A. Brazil, Ellen R A de Bruijn, Berend H. Bulten, A. Katinka L von Borries, Jacques J D M van Lankveld, Jan K. Buitelaar, Robbert J. Verkes, Hedwig Eisenbarth, Georg W. Alpers, Eric Singler, Omri Gillath, Angela J. Bahns, Hayley A. Burghart, Charles Clifton, Fernanda Ferreira, John M. Henderson, Albrecht W. Inhoff, Simon P. Liversedge, Erik D. Reichle, Elizabeth R. Schotter, Nicolas Carvalho, Eric Laurent, Nicolas Noiret, Gilles Chopard, Emmanuel Haffen, Djamila Bennabi, Pierre Vandel, R L Gregory (2015). Third edition : revised and updated World, NA 2015, ISBN: 1931-1516. (link) (bib) x @book{Yarbs2015, year = { 2015 }, volume = { 86 }, url = { http://doi.apa.org/getdoi.cfm?doi=10.1037/a0022758{\%}0Ahttp://dx.doi.org/10.1016/j.biopsych.2008.08.011 }, title = { Third edition : revised and updated World }, pmid = { 15312695 }, pages = { 860--865 }, number = { 1 }, keywords = { Antisaccade,Attraction,Automatic processing,Bipolar depression,Chest,E-Z Reader,Emotion,Endophenotype,Eye movement,Eye movement control,Eye-tracking,Friendship,Keith Rayner,Mating,Negative emotionality,Neuroticism,Oculomotor control,Prosaccade (reflexive saccade,Saccade,Schizophrenia,Schizotypal personality traits,Sentences and discourses,Unipolar depression,Visual scenes,Waist-to-hip ratio,Word recognition,behavioral adaptation,dimberg,e,emotion,error positivity,error signaling,error-related negativity,eye-tracking,facial expressions,facial expressions contain information,fast and automatic processing,g,havior,http://www.archive.org/details/eyebrainpsycholo00r,information is optimized,psychopathy,relevant for social be-,scan path,the processing of facial,thunberg,thus,visually-guided saccade) }, issn = { 1931-1516 }, isbn = { 0749-596X }, doi = { 10.1016/j.biopsycho.2004.03.014 }, booktitle = { Problems of Information Transmission }, 
author = { Yarbs and Haigh and Ettinger and Kumari and Crawford and Flak and Sharma and Davis and Corr and Brazil and Bruijn and Bulten and Borries and Lankveld and Buitelaar and Verkes and Eisenbarth and Alpers and Singler and Gillath and Bahns and Burghart and Clifton and Ferreira and Henderson and Inhoff and Liversedge and Reichle and Schotter and Carvalho and Laurent and Noiret and Chopard and Haffen and Bennabi and Vandel and Gregory }, abstract = { BACKGROUND: The analysis of eye movements (EM) by eye-tracking has been carried out for several decades to investigate mood regulation, emotional information processing, and psychomotor disturbances in depressive disorders.$\backslash$nMETHOD: A systematic review of all English language PubMed articles using the terms "saccadic eye movements" OR "eye-tracking" AND "depression" OR "bipolar disorders" was conducted using PRISMA guidelines. The aim of this review was to characterize the specific alterations of EM in unipolar and bipolar depression.$\backslash$nRESULTS: Findings regarding psychomotor disturbance showed an increase in reaction time in prosaccade and antisaccade tasks in both unipolar and bipolar disorders. In both disorders, patients have been reported to have an attraction for negative emotions, especially for negative pictures in unipolar and threatening images in bipolar disorder. However, the pattern could change with aging, elderly unipolar patients disengaging key features of sad and neutral stimuli. METHODological limitations generally include small sample sizes with mixed unipolar and bipolar depressed patients.$\backslash$nCONCLUSION: Eye movement analysis can be used to discriminate patients with depressive disorders from controls, as well as patients with bipolar disorder from patients with unipolar depression. General knowledge concerning psychomotor alterations and affective regulation strategies associated with each disorder can also be gained thanks to the analysis. 
Future directions for research on eye movement and depression are proposed in this review. }, } |
2015 | Book | Martin Schober, Philipp Schlömer, Markus Cremer, Hartmut Mohlberg, Anh Minh Huynh, Nicole Schubert, Mehmet E. Kirlangic, Katrin Amunts, Markus Axer (2015). Reference volume generation for subsequent 3D reconstruction of histological sections, Springer-Verlag Berlin, 2015, ISBN: 1431472X. (link) (bib) x @book{Schober2015, year = { 2015 }, url = { {\%}3CGo to }, type = { Book }, title = { Reference volume generation for subsequent 3D reconstruction of histological sections }, series = { Bildverarbeitung Fur Die Medizin 2015: Algorithmen - Systeme - Anwendungen }, publisher = { Springer-Verlag Berlin }, pages = { 143--148 }, issn = { 1431472X }, isbn = { 9783662462232 }, doi = { 10.1007/978-3-662-46224-9_26 }, booktitle = { Informatik aktuell }, author = { Schober and Schl{\"{o}}mer and Cremer and Mohlberg and Huynh and Schubert and Kirlangic and Amunts and Axer }, address = { Berlin }, abstract = { Anatomical reference brains are indispensable tools in human brain mapping, enabling the integration of multimodal data or the alignment of a series of adjacent histological brain sections into an anatomically realistic space. This study describes a robust and efficient method for an automatic 3D reconstruction of blockface images taken from postmortem brains during cutting as a prerequisite for high-quality 3D reconstruction of brain sections. The refinement technique used in this registration method is applicable for a broad range of pre-registered histological stacks. }, } |
2015 | Book | Hans J. Johnson, Matthew M McCormick, Luis Ibanez (2015). The ITK Software Guide Book 1: Introduction and Development Guidelines Fourth Edition Updated for ITK version 4.7, NA 2015. (link) (bib) x @book{johnson2014itk, year = { 2015 }, url = { https://itk.org/ }, title = { The ITK Software Guide Book 1: Introduction and Development Guidelines Fourth Edition Updated for ITK version 4.7 }, isbn = { 978-1930934276 }, edition = { 4.7 }, booktitle = { Kitware, Inc.(January 2015) }, author = { Johnson and McCormick and Ibanez }, abstract = { The Insight Toolkit (ITK) is an open-source software toolkit for performing registration and segmen- tation. Segmentation is the process of identifying and classifying data found in a digitally sampled representation. Typically the sampled representation is an image acquired fromsuchmedical instru- mentation as CT orMRI scanners. Registration is the task of aligning or developing correspondences between data. For example, in themedical environment, a CT scan may be aligned with aMRI scan in order to combine the information contained in both. }, } |
2015 | Book | Hans J. Johnson, Matthew M McCormick, Luis Ibanez (2015). The ITK Software Guide Book 1: Introduction and Development Guidelines Fourth Edition Updated for ITK version 4.7, Kitware, Inc., 2015. (link) (bib) x @book{johnson2015itk, year = { 2015 }, url = { https://itk.org/ }, title = { The ITK Software Guide Book 1: Introduction and Development Guidelines Fourth Edition Updated for ITK version 4.7 }, publisher = { Kitware, Inc. }, pages = { 248 }, keywords = { Guide,Registration,Segmentation }, isbn = { 978-1930934276 }, edition = { 4.7 }, booktitle = { Kitware, Inc.(January 2015) }, author = { Johnson and McCormick and Ibanez }, abstract = { The Insight Toolkit (ITK) is an open-source software toolkit for performing registration and segmen- tation. Segmentation is the process of identifying and classifying data found in a digitally sampled representation. Typically the sampled representation is an image acquired fromsuchmedical instru- mentation as CT orMRI scanners. Registration is the task of aligning or developing correspondences between data. For example, in themedical environment, a CT scan may be aligned with aMRI scan in order to combine the information contained in both. }, } |
2015 | Book chapter | S Meesters, P Ossenblok, A Colon, O Schijns, L Florack, P Boon, L Wagner, A Fuster (2015). NA in AUTOMATED IDENTIFICATION OF INTRACRANIAL DEPTH ELECTRODES IN COMPUTED TOMOGRAPHY DATA, Ieee, pp. 976–979, IEEE International Symposium on Biomedical Imaging. (link) (bib) x @inbook{Meesters2015, year = { 2015 }, type = { Book Section }, title = { AUTOMATED IDENTIFICATION OF INTRACRANIAL DEPTH ELECTRODES IN COMPUTED TOMOGRAPHY DATA }, series = { IEEE International Symposium on Biomedical Imaging }, publisher = { Ieee }, pages = { 976--979 }, isbn = { 978-1-4799-2374-8 }, booktitle = { 2015 IEEE 12th International Symposium on Biomedical Imaging }, author = { Meesters and Ossenblok and Colon and Schijns and Florack and Boon and Wagner and Fuster }, address = { New York }, } |
2015 | Book chapter | Nelson Velasco Toledo, Eduardo Romero Castro (2015). NA in Fast high resolution reconstruction in multi-slice and multi-view cMRI, Edited by E Romero, N Lepore, Spie-Int Soc Optical Engineering, pp. 92871C, Proceedings of SPIE, Vol. 9287, ISBN: 16057422. (link) (bib) x @inbook{Toledo2015, year = { 2015 }, volume = { 9287 }, type = { Book Section }, title = { Fast high resolution reconstruction in multi-slice and multi-view cMRI }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 92871C }, issn = { 16057422 }, isbn = { 9781628413625 }, editor = { Romero and Lepore }, doi = { 10.1117/12.2073319 }, booktitle = { 10th International Symposium on Medical Information Processing and Analysis }, author = { {Velasco Toledo} and {Romero Castro} }, address = { Bellingham }, } |
2015 | Book chapter | Mathias Polfliet, Jef Vandemeulebroucke, Gert Van Gompel, Nico Buls, Rudi Deklerck, Thierry Scheerlinck (2015). NA in Estimation of hip prosthesis migration: A study of zero migration, Edited by I Lackovic, D Vasic, Springer-Verlag Berlin, pp. 126–129, IFMBE Proceedings, Vol. 45, ISBN: 16800737. (link) (bib) x @inbook{Polfliet2015, year = { 2015 }, volume = { 45 }, type = { Book Section }, title = { Estimation of hip prosthesis migration: A study of zero migration }, series = { IFMBE Proceedings }, publisher = { Springer-Verlag Berlin }, pages = { 126--129 }, keywords = { Hip Prosthesis,Migration,Rigid Registration }, issn = { 16800737 }, isbn = { 9783319111278 }, editor = { Lackovic and Vasic }, doi = { 10.1007/978-3-319-11128-5_32 }, booktitle = { IFMBE Proceedings }, author = { Polfliet and Vandemeulebroucke and {Van Gompel} and Buls and Deklerck and Scheerlinck }, address = { Berlin }, abstract = { We present a method for automatically estimating prosthesis migration from previous and follow-up CT image data. The method consists of the segmentation of the bone and prosthesis in both images, followed by a registration of both substructures. The migration is found by computing the difference between both transforms. In this work we assess the accuracy of the method for zero migration. The method was applied on data from a mechanical phantom and on patient data, both with zero migration. Our experiments show that an accuracy of less than 0.3 mm can be achieved in a clinical setting. }, } |
2015 | Journal | Brian B. Avants, Hans J. Johnson, Nicholas J. Tustison (2015). Neuroinformatics and the The Insight ToolKit. Frontiers in neuroinformatics, 9(March), pp. 1–3. (link) (bib) x @article{avants2015neuroinformatics, year = { 2015 }, volume = { 9 }, url = { http://www.frontiersin.org/Neuroinformatics/10.3389/fninf.2015.00005/full }, title = { Neuroinformatics and the The Insight ToolKit }, publisher = { Frontiers Media SA }, pmid = { 25859213 }, pages = { 1--3 }, number = { March }, keywords = { C++,ITK,c,edited and reviewed by,itk,open source,registration,segmentation }, journal = { Frontiers in neuroinformatics }, issn = { 1662-5196 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Avants, Johnson, Tustison/Frontiers in neuroinformatics/Avants, Johnson, Tustison - 2015 - Neuroinformatics and the The Insight ToolKit.pdf:pdf }, doi = { 10.3389/fninf.2015.00005 }, author = { Avants and Johnson and Tustison }, annote = { From Duplicate 1 (Neuroinformatics and the The Insight ToolKit - Avants, Brian B.; Johnson, Hans J.; Tustison, Nicholas J.) {\#}{\#}CONTRIBUTIONS: As a contributing guest editor for a special issue of "Frontiers in Neruoinformatics" I had substantial oversight in the criteria for selection, review and ultimate publication of materials in the issue. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Editorial :{\#}{\#} }, } |
2015 | Journal | Hans J. Johnson, Matthew M Mccormick, Luis Ibanez (2015). The ITK Software Guide Book 1: Introduction and Development Guidelines-Volume 1. NA NA pp. NA (bib) x @article{johnson2015itk, year = { 2015 }, title = { The ITK Software Guide Book 1: Introduction and Development Guidelines-Volume 1 }, publisher = { Kitware, Inc. }, keywords = { Guide,Registration,Segmentation }, isbn = { 978-1-930934-28-3 }, author = { Johnson and Mccormick and Ibanez }, abstract = { The Insight Toolkit (ITK) is an open-source software toolkit for performing registra tion and segmen- tation. Segmentation is the process of identifying and classifying data found in a digitally sampled representation. Typically the sampled representation is a n image acquired from such medical instru- mentation as CT or MRI scanners. Registration is the task of aligning or developing correspondences between data. For example, in the medical environment, a CT s can may be aligned with a MRI scan in order to combine the information contained in both. ITK is a cross-platform software. It uses a build environmen t known as CMake to manage platform- specific project generation and compilation process in a pla tform-independent way. ITK is imple- mented in C++. ITK's implementation style employs generic p rogramming, which involves the use of templates to generate, at compile-time, code that can be applied generically to any class or data-type that supports the operations used by the template . The use of C++ templating means that the code is highly efficient and many issues are discovered at compile-time, rather than at run-time during program execution. It also means that many of ITK's al gorithms can be applied to arbitrary spatial dimensions and pixel types. An automated wrapping system integrated with ITK generates an interface between C++ and a high- level programming language Python . 
This enables rapid prototyping and faster exploration of i deas by shortening the edit-compile-execute cycle. In addition to automated wrapping, the SimpleITK project provides a streamlined interface to ITK that is avai lable for C++, Python, Java, CSharp, R, Tcl and Ruby. Developers from around the world can use, debug, maintain, a nd extend the software because ITK is an open-source project. ITK uses a model of software devel opment known as Extreme Program- ming. Extreme Programming collapses the usual software dev elopment methodology into a simulta- neous iterative process of design-implement-test-releas e. The key features of Extreme Programming are communication and testing. Communication among the mem bers of the ITK community is what helps manage the rapid evolution of the software. Testing is what keeps the software stable. An extensive testing process supported by the system known as CDash measures the quality of ITK code on a daily basis. The ITK Testing Dashboard is updated co ntinuously, reflecting the quality of the code at any moment. The most recent version of this document is available online at http://itk.org/ItkSoftwareGuide.pdf . This book is a guide to developing software with ITK; it is the first of two companion books. This book cove rs building and installation, general architecture and design, as well as the process of contribut ing in the ITK community. The second book covers detailed design and functionality for reading a nd writing images, filtering, registration, segmentation, and performing statistical analysis. }, } |
2015 | Journal | Brian B. Avants, Hans J. Johnson, Nicholas J. Tustison (2015). Neuroinformatics and the the insight toolkit. Frontiers in Neuroinformatics, 9(MAR), pp. 1–3. (link) (bib) x @article{avants2015neuroinformaticsb, year = { 2015 }, volume = { 9 }, url = { http://www.frontiersin.org/Neuroinformatics/10.3389/fninf.2015.00005/full }, title = { Neuroinformatics and the the insight toolkit }, publisher = { Frontiers Media SA }, pmid = { 25859213 }, pages = { 1--3 }, number = { MAR }, keywords = { C++,ITK,Open source,Registration,Segmentation }, journal = { Frontiers in Neuroinformatics }, issn = { 16625196 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Avants, Johnson, Tustison/Frontiers in neuroinformatics/Avants, Johnson, Tustison - 2015 - Neuroinformatics and the The Insight ToolKit.pdf:pdf }, doi = { 10.3389/fninf.2015.00005 }, author = { Avants, Brian B. and Johnson, Hans J. and Tustison, Nicholas J. }, annote = { From Duplicate 1 (Neuroinformatics and the The Insight ToolKit - Avants, Brian B.; Johnson, Hans J.; Tustison, Nicholas J.) From Duplicate 1 (Neuroinformatics and the The Insight ToolKit - Avants, Brian B.; Johnson, Hans J.; Tustison, Nicholas J.) {\#}{\#}CONTRIBUTIONS: As a contributing guest editor for a special issue of "Frontiers in Neuroinformatics" I had substantial oversight in the criteria for selection, review and ultimate publication of materials in the issue. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Editorial :{\#}{\#} From Duplicate 2 (Neuroinformatics and the The Insight ToolKit - Avants, Brian B.; Johnson, Hans J.; Tustison, Nicholas J.) From Duplicate 2 (Neuroinformatics and the The Insight ToolKit - Avants, Brian B.; Johnson, Hans J.; Tustison, Nicholas J.) {\#}{\#}CONTRIBUTIONS: As a contributing guest editor for a special issue of "Frontiers in Neuroinformatics" I had substantial oversight in the criteria for selection, review and ultimate publication of materials in the issue. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Editorial :{\#}{\#} }, } |
2015 | Journal | Hans J. Johnson, Matthew M McCormick, Luis Ibanez (2015). The ITK Software Guide Book 1: Introduction and Development Guidelines Fourth Edition Updated for ITK version 4.7. Kitware, Inc.(January 2015), NA pp. NA (link) (bib) x @article{johnson2015itkb, year = { 2015 }, url = { https://itk.org/ }, title = { The ITK Software Guide Book 1: Introduction and Development Guidelines Fourth Edition Updated for ITK version 4.7 }, publisher = { Kitware, Inc. }, keywords = { Guide,Registration,Segmentation }, journal = { Kitware, Inc.(January 2015) }, isbn = { 978-1930934276 }, author = { Johnson, Hans J. and McCormick, Matthew M. and Ibanez, Luis }, abstract = { The Insight Toolkit (ITK) is an open-source software toolkit for performing registration and segmentation. Segmentation is the process of identifying and classifying data found in a digitally sampled representation. Typically the sampled representation is an image acquired from such medical instrumentation as CT or MRI scanners. Registration is the task of aligning or developing correspondences between data. For example, in the medical environment, a CT scan may be aligned with a MRI scan in order to combine the information contained in both. }, } |
2015 | Journal | Francesco Santini, N. Kawel-Boehm, A. Greiser, J. Bremerich, O. Bieri (2015). Simultaneous T1 and T2 quantification of the myocardium using cardiac balanced-SSFP inversion recovery with interleaved sampling acquisition (CABIRIA). Magnetic Resonance in Medicine, 74(2), pp. 365–371. (link) (bib) x @article{Santini2015, year = { 2015 }, volume = { 74 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Simultaneous T1 and T2 quantification of the myocardium using cardiac balanced-SSFP inversion recovery with interleaved sampling acquisition (CABIRIA) }, pages = { 365--371 }, number = { 2 }, keywords = { balanced steady-state free precession,cardiac MRI,cardiac relaxometry,inversion recovery }, journal = { Magnetic Resonance in Medicine }, issn = { 15222594 }, doi = { 10.1002/mrm.25402 }, author = { Santini and Kawel-Boehm and Greiser and Bremerich and Bieri }, abstract = { Purpose To develop a novel sequence for simultaneous quantification of T1 and T2 relaxation times in the myocardium based on the transient phase of the balanced steady-state free precession. Methods A new prototype sequence, named "cardiac balanced-SSFP inversion recovery with interleaved sampling acquisition" (CABIRIA) was developed based on a single-shot bSSFP readout following an inversion pulse. With this method, T1 and T2 values can be calculated from the analysis of signal evolution. The scan duration for a single slice in vivo was 8 heartbeats, thus feasible in a breath-hold. The sequence was validated both in vitro by comparing it to conventional inversion recovery and multi-echo spin-echo methods and in 5 healthy volunteers by comparing it to the Modified Look-Locker Inversion Recovery (MOLLI) sequence and to a T2 quantification sequence based on multi-T2-prepared bSSFP. Results The method showed good agreement with conventional methods for both T1 and T2 measurements (concordance correlation coefficient ≥ 0.99) in vitro. 
In healthy volunteers the measured T1 values were 1227 ± 68 ms and T2 values 37.9 ± 2.4 ms, with similar inter- and intrasubject variability with respect to existing methods. Conclusion The proposed CABIRIA method enables simultaneous quantification of myocardial T1 and T2 values with good accuracy and precision. Magn Reson Med 74:365-371, 2015. }, } |
2015 | Journal | Daniel F. Gaddy, Helen Lee, Jinzi Zheng, David A. Jaffray, Thomas J. Wickham, Bart S. Hendriks (2015). Whole-body organ-level and kidney micro-dosimetric evaluations of 64Cu-loaded HER2/ErbB2-targeted liposomal doxorubicin (64Cu-MM-302) in rodents and primates. EJNMMI Research, 5(1), pp. 10. (link) (bib) x @article{Gaddy2015, year = { 2015 }, volume = { 5 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84928392739{\&}doi=10.1186{\%}2Fs13550-015-0096-0{\&}partnerID=40{\&}md5=9be19ccea7a7aa4683c93949d1fae115 }, type = { Journal Article }, title = { Whole-body organ-level and kidney micro-dosimetric evaluations of 64Cu-loaded HER2/ErbB2-targeted liposomal doxorubicin (64Cu-MM-302) in rodents and primates }, pages = { 10 }, number = { 1 }, keywords = { Copper-64,Dosimetry,Nanotherapeutics,Positron emission tomography }, journal = { EJNMMI Research }, issn = { 2191219X }, doi = { 10.1186/s13550-015-0096-0 }, author = { Gaddy and Lee and Zheng and Jaffray and Wickham and Hendriks }, abstract = { Background: Features of the tumor microenvironment influence the efficacy of cancer nanotherapeutics. The ability to directly radiolabel nanotherapeutics offers a valuable translational tool to obtain biodistribution and tumor deposition data, testing the hypothesis that the extent of delivery predicts therapeutic outcome. In support of a first in-human clinical trial with 64Cu-labeled HER2-targeted liposomal doxorubicin (64Cu-MM-302), a preclinical dosimetric analysis was performed. Methods: Whole-body biodistribution and pharmacokinetic data were obtained in mice that received 64Cu-MM-302 and used to estimate absorbed radiation doses in normal human organs. PET/CT imaging revealed non-uniform distribution of 64Cu signal in mouse kidneys. Kidney micro-dosimetry analysis was performed in mice and squirrel monkeys, using a physiologically based pharmacokinetic model to estimate the full dynamics of the 64Cu signal in monkeys. 
Results: Organ-level dosimetric analysis of mice receiving 64Cu-MM-302 indicated that the heart was the organ receiving the highest radiation absorbed dose, due to extended liposomal circulation. However, PET/CT imaging indicated that 64Cu-MM-302 administration resulted in heterogeneous exposure in the kidney, with a focus of 64Cu activity in the renal pelvis. This result was reproduced in primates. Kidney micro-dosimetry analysis illustrated that the renal pelvis was the maximum exposed tissue in mice and squirrel monkeys, due to the highly concentrated signal within the small renal pelvis surface area. Conclusions: This study was used to select a starting clinical radiation dose of 64Cu-MM-302 for PET/CT in patients with advanced HER2-positive breast cancer. Organ-level dosimetry and kidney micro-dosimetry results predicted that a radiation dose of 400 MBq of 64Cu-MM-302 should be acceptable in patients. }, } |
2015 | Journal | Alper Willführ, Christina Brandenberger, Tanja Piatkowski, Roman Grothausmann, Jens Randel Nyengaard, Matthias Ochs, Christian Mühlfeld (2015). Estimation of the number of alveolar capillaries by the euler number (Euler-poincaré characteristic). American Journal of Physiology - Lung Cellular and Molecular Physiology, 309(11), pp. L1286–L1293. (link) (bib) x @article{Willfuhr2015, year = { 2015 }, volume = { 309 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Estimation of the number of alveolar capillaries by the euler number (Euler-poincar{\'{e}} characteristic) }, pages = { L1286--L1293 }, number = { 11 }, keywords = { Capillary number,Euler number,Stereology }, journal = { American Journal of Physiology - Lung Cellular and Molecular Physiology }, issn = { 15221504 }, doi = { 10.1152/ajplung.00410.2014 }, author = { Willf{\"{u}}hr and Brandenberger and Piatkowski and Grothausmann and Nyengaard and Ochs and M{\"{u}}hlfeld }, abstract = { The lung parenchyma provides a maximal surface area of blood-containing capillaries that are in close contact with a large surface areaof the air-containing alveoli. Volume and surface area of capillaries are the classic stereological parameters to characterize the alveolar capillary network (ACN) and have provided essential structure-function information of the lung. When loss (rarefaction) or gain (angiogenesis) of capillaries occurs, these parameters may not be sufficient to provide mechanistic insight. Therefore, it would be desirable to estimate the number of capillaries, as it contains more distinct and mechanistically oriented information. Here, we present a new stereological method to estimate the number of capillary loops in the ACN. One advantage of this method is that it is independent of the shape, size, or distribution of the capillaries. We used consecutive, 1 {\_}mthick sections from epoxy resin-embedded material as a physical disector. 
The Euler-Poincar{\'{e}} characteristic of capillary networks can be estimated by counting the easily recognizable topological constellations of “islands,” “bridges,” and “holes.” The total number of capillary loops in the ACN can then be calculated from the Euler-Poincar{\'{e}} characteristic. With the use of the established estimator of alveolar number, it is possible to obtain the mean number of capillary loops per alveolus. In conclusion, estimation of alveolar capillaries by design-based stereology is an efficient and unbiased method to characterize the ACN and may be particularly useful for studies on emphysema, pulmonary hypertension, or lung development. }, } |
2015 | Journal | Y. X. Yang, S. K. Teo, E. Van Reeth, C. H. Tan, I. W.K. Tham, C. L. Poh (2015). A hybrid approach for fusing 4D-MRI temporal information with 3D-CT for the study of lung and lung tumor motion. Medical Physics, 42(8), pp. 4484–4496. (link) (bib) x @article{Yang2015, year = { 2015 }, volume = { 42 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A hybrid approach for fusing 4D-MRI temporal information with 3D-CT for the study of lung and lung tumor motion }, pages = { 4484--4496 }, number = { 8 }, keywords = { DIR,FEM,image fusion,lung cancer,radiotherapy }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4923167 }, author = { Yang and Teo and {Van Reeth} and Tan and Tham and Poh }, abstract = { Purpose: Accurate visualization of lung motion is important in many clinical applications, such as radiotherapy of lung cancer. Advancement in imaging modalities [e.g., computed tomography (CT) and MRI] has allowed dynamic imaging of lung and lung tumor motion. However, each imaging modality has its advantages and disadvantages. The study presented in this paper aims at generating synthetic 4D-CT dataset for lung cancer patients by combining both continuous three-dimensional (3D) motion captured by 4D-MRI and the high spatial resolution captured by CT using the author's proposed approach.Methods: A novel hybrid approach based on deformable image registration (DIR) and finite element method simulation was developed to fuse a static 3D-CT volume (acquired under breath-hold) and the 3D motion information extracted from 4D-MRI dataset, creating a synthetic 4D-CT dataset. Results: The study focuses on imaging of lung and lung tumor. Comparing the synthetic 4D-CT dataset with the acquired 4D-CT dataset of six lung cancer patients based on 420 landmarks, accurate results (average error {\textless}2 mm) were achieved using the authors' proposed approach. 
Their hybrid approach achieved a 40{\%} error reduction (based on landmarks assessment) over using only DIR techniques. Conclusions: The synthetic 4D-CT dataset generated has high spatial resolution, has excellent lung details, and is able to show movement of lung and lung tumor over multiple breathing cycles. }, } |
2015 | Journal | Yiwen Xu, J. Geoffrey Pickering, Zengxuan Nong, Eli Gibson, John Michael Arpino, Hao Yin, Aaron D. Ward (2015). A method for 3D histopathology reconstruction supporting mouse microvasculature analysis. PLoS ONE, 10(5), pp. 24. (link) (bib) x @article{Xu2015a, year = { 2015 }, volume = { 10 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A method for 3D histopathology reconstruction supporting mouse microvasculature analysis }, pages = { 24 }, number = { 5 }, journal = { PLoS ONE }, issn = { 19326203 }, doi = { 10.1371/journal.pone.0126817 }, author = { Xu and Pickering and Nong and Gibson and Arpino and Yin and Ward }, abstract = { Structural abnormalities of the microvasculature can impair perfusion and function. Conventional histology provides good spatial resolution with which to evaluate the microvascular structure but affords no 3-dimensional information; this limitation could lead to misinterpretations of the complex microvessel network in health and disease. The objective of this study was to develop and evaluate an accurate, fully automated 3D histology reconstruction method to visualize the arterioles and venules within the mouse hind-limb. Sections of the tibialis anterior muscle from C57BL/J6 mice (both normal and subjected to femoral artery excision) were reconstructed using pairwise rigid and affine registrations of 5 $\mu$m-thick, paraffin-embedded serial sections digitized at 0.25 $\mu$m/pixel. Low-resolution intensity-based rigid registration was used to initialize the nucleus landmark-based registration, and conventional high-resolution intensity-based registration method. The affine nucleus landmark-based registration was developed in this work and was compared to the conventional affine high-resolution intensity-based registration method. 
Target registration errors were measured between adjacent tissue sections (pairwise error), as well as with respect to a 3D reference reconstruction (accumulated error, to capture propagation of error through the stack of sections). Accumulated error measures were lower (p{\textless}0.01) for the nucleus landmark technique and superior vasculature continuity was observed. These findings indicate that registration based on automatic extraction and correspondence of small, homologous landmarks may support accurate 3D histology reconstruction. This technique avoids the otherwise problematic "banana-into-cylinder" effect observed using conventional methods that optimize the pairwise alignment of salient structures, forcing them to be section-orthogonal. This approach will provide a valuable tool for high-accuracy 3D histology tissue reconstructions for analysis of diseased microvasculature. }, } |
2015 | Journal | Helen Xu, Andras Lasso, Andriy Fedorov, Kemal Tuncali, Clare Tempany, Gabor Fichtinger (2015). Multi-slice-to-volume registration for MRI-guided transperineal prostate biopsy. International Journal of Computer Assisted Radiology and Surgery, 10(5), pp. 563–572. (link) (bib) x @article{Xu2015, year = { 2015 }, volume = { 10 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Multi-slice-to-volume registration for MRI-guided transperineal prostate biopsy }, pages = { 563--572 }, number = { 5 }, keywords = { Image registration,MRI-guidance,Prostate biopsy,Target localization }, journal = { International Journal of Computer Assisted Radiology and Surgery }, issn = { 18616429 }, doi = { 10.1007/s11548-014-1108-7 }, author = { Xu and Lasso and Fedorov and Tuncali and Tempany and Fichtinger }, abstract = { Purpose: Prostate needle biopsy is a commonly performed procedure since it is the most definitive form of cancer diagnosis. Magnetic resonance imaging (MRI) allows target-specific biopsies to be performed. However, needle placements are often inaccurate due to intra-operative prostate motion and the lack of motion compensation techniques. This paper detects and determines the extent of tissue displacement during an MRI-guided biopsy so that the needle insertion plan can be adjusted accordingly. Methods: A multi-slice-to-volume registration algorithm was developed to align the pre-operative planning image volume with three intra-operative orthogonal image slices of the prostate acquired immediately before needle insertion. The algorithm consists of an initial rigid transformation followed by a deformable step. Results: A total of 14 image sets from 10 patients were studied. Based on prostate contour alignment, the registrations were accurate to within 2 mm. Conclusion: This algorithm can be used to increase the needle targeting accuracy by alerting the clinician if the biopsy target has moved significantly prior to needle insertion. 
The proposed method demonstrated feasibility of intra-operative target localization and motion compensation for MRI-guided prostate biopsy. }, } |
2015 | Journal | Andreas Wibmer, Hedvig Hricak, Tatsuo Gondo, Kazuhiro Matsumoto, Harini Veeraraghavan, Duc Fehr, Junting Zheng, Debra Goldman, Chaya Moskowitz, Samson W. Fine, Victor E. Reuter, James Eastham, Evis Sala, Hebert Alberto Vargas (2015). Haralick texture analysis of prostate MRI: utility for differentiating non-cancerous prostate from prostate cancer and differentiating prostate cancers with different Gleason scores. European Radiology, 25(10), pp. 2840–2850. (link) (bib) x @article{Wibmer2015, year = { 2015 }, volume = { 25 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Haralick texture analysis of prostate MRI: utility for differentiating non-cancerous prostate from prostate cancer and differentiating prostate cancers with different Gleason scores }, pages = { 2840--2850 }, number = { 10 }, keywords = { Adenocarcinoma,Computer-assisted,Gleason grading,Image processing,Magnetic resonance imaging,Prostatic neoplasm }, journal = { European Radiology }, issn = { 14321084 }, doi = { 10.1007/s00330-015-3701-8 }, author = { Wibmer and Hricak and Gondo and Matsumoto and Veeraraghavan and Fehr and Zheng and Goldman and Moskowitz and Fine and Reuter and Eastham and Sala and Vargas }, abstract = { Objectives: To investigate Haralick texture analysis of prostate MRI for cancer detection and differentiating Gleason scores (GS). Methods: One hundred and forty-seven patients underwent T2- weighted (T2WI) and diffusion-weighted prostate MRI. Cancers ≥0.5 ml and non-cancerous peripheral (PZ) and transition (TZ) zone tissue were identified on T2WI and apparent diffusion coefficient (ADC) maps, using whole-mount pathology as reference. Texture features (Energy, Entropy, Correlation, Homogeneity, Inertia) were extracted and analysed using generalized estimating equations. 
Results: PZ cancers (n = 143) showed higher Entropy and Inertia and lower Energy, Correlation and Homogeneity compared to non-cancerous tissue on T2WI and ADC maps (p-values: {\textless}.0001–0.008). In TZ cancers (n = 43) we observed significant differences for all five texture features on the ADC map (all p-values: {\textless}.0001) and for Correlation (p = 0.041) and Inertia (p = 0.001) on T2WI. On ADC maps, GS was associated with higher Entropy (GS 6 vs. 7: p = 0.0225; 6 vs. {\textgreater}7: p = 0.0069) and lower Energy (GS 6 vs. 7: p = 0.0116, 6 vs. {\textgreater}7: p = 0.0039). ADC map Energy (p = 0.0102) and Entropy (p = 0.0019) were significantly different in GS ≤3 + 4 versus ≥4 + 3 cancers; ADC map Entropy remained significant after controlling for the median ADC (p = 0.0291). Conclusion: Several Haralick-based texture features appear useful for prostate cancer detection and GS assessment. Key Points: • Several Haralick texture features may differentiate non-cancerous and cancerous prostate tissue. • Tumour Energy and Entropy on ADC maps correlate with Gleason score. • T2w-image-derived texture features are not associated with the Gleason score. }, } |
2015 | Journal | Jared A. Weis, Katelyn M. Flint, Violeta Sanchez, Thomas E. Yankeelov, Michael I. Miga (2015). Assessing the accuracy and reproducibility of modality independent elastography in a murine model of breast cancer. Journal of Medical Imaging, 2(3), pp. 036001. (link) (bib) x @article{Weis2015, year = { 2015 }, volume = { 2 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Assessing the accuracy and reproducibility of modality independent elastography in a murine model of breast cancer }, pages = { 036001 }, number = { 3 }, journal = { Journal of Medical Imaging }, issn = { 2329-4302 }, doi = { 10.1117/1.jmi.2.3.036001 }, author = { Weis and Flint and Sanchez and Yankeelov and Miga }, abstract = { {\textcopyright} 2015 Society of Photo-Optical Instrumentation Engineers (SPIE).Cancer progression has been linked to mechanics. Therefore, there has been recent interest in developing noninvasive imaging tools for cancer assessment that are sensitive to changes in tissue mechanical properties. We have developed one such method, modality independent elastography (MIE), that estimates the relative elastic properties of tissue by fitting anatomical image volumes acquired before and after the application of compression to biomechanical models. The aim of this study was to assess the accuracy and reproducibility of the method using phantoms and a murine breast cancer model. Magnetic resonance imaging data were acquired, and the MIE method was used to estimate relative volumetric stiffness. Accuracy was assessed using phantom data by comparing to gold-standard mechanical testing of elasticity ratios. Validation error was {\textless}12{\%}. Reproducibility analysis was performed on animal data, and within-subject coefficients of variation ranged from 2 to 13{\%} at the bulk level and 32{\%} at the voxel level. To our knowledge, this is the first study to assess the reproducibility of an elasticity imaging metric in a preclinical cancer model. 
Our results suggest that the MIE method can reproducibly generate accurate estimates of the relative mechanical stiffness and provide guidance on the degree of change needed in order to declare biological changes rather than experimental error in future therapeutic studies. }, } |
2015 | Journal | Sébastien Tourbier, Xavier Bresson, Patric Hagmann, Jean Philippe Thiran, Reto Meuli, Meritxell Bach Cuadra (2015). An efficient total variation algorithm for super-resolution in fetal brain MRI with adaptive regularization. NeuroImage, 118, pp. 584–597. (link) (bib) x @article{Tourbier2015, year = { 2015 }, volume = { 118 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84940440329{\&}doi=10.1016{\%}2Fj.neuroimage.2015.06.018{\&}partnerID=40{\&}md5=58941cfc11c891f903f91a0bf8aa005c }, type = { Journal Article }, title = { An efficient total variation algorithm for super-resolution in fetal brain MRI with adaptive regularization }, pmid = { 26072252 }, pages = { 584--597 }, keywords = { Fast convex optimization,Fetal brain MRI,Super-resolution,Total variation }, journal = { NeuroImage }, issn = { 10959572 }, doi = { 10.1016/j.neuroimage.2015.06.018 }, author = { Tourbier and Bresson and Hagmann and Thiran and Meuli and Cuadra }, abstract = { Although fetal anatomy can be adequately viewed in new multi-slice MR images, many critical limitations remain for quantitative data analysis. To this end, several research groups have recently developed advanced image processing methods, often denoted by super-resolution (SR) techniques, to reconstruct from a set of clinical low-resolution (LR) images, a high-resolution (HR) motion-free volume. It is usually modeled as an inverse problem where the regularization term plays a central role in the reconstruction quality. Literature has been quite attracted by Total Variation energies because of their ability in edge preserving but only standard explicit steepest gradient techniques have been applied for optimization. In a preliminary work, it has been shown that novel fast convex optimization techniques could be successfully applied to design an efficient Total Variation optimization algorithm for the super-resolution problem. In this work, two major contributions are presented. 
Firstly, we will briefly review the Bayesian and Variational dual formulations of current state-of-the-art methods dedicated to fetal MRI reconstruction. Secondly, we present an extensive quantitative evaluation of our SR algorithm previously introduced on both simulated fetal and real clinical data (with both normal and pathological subjects). Specifically, we study the robustness of regularization terms in front of residual registration errors and we also present a novel strategy for automatically select the weight of the regularization as regards the data fidelity term. Our results show that our TV implementation is highly robust in front of motion artifacts and that it offers the best trade-off between speed and accuracy for fetal MRI recovery as in comparison with state-of-the art methods. }, } |
2015 | Journal | Rachel Sparks, B. Nicolas Bloch, Ernest Feleppa, Dean Barratt, Daniel Moses, Lee Ponsky, Anant Madabhushi (2015). Multiattribute probabilistic prostate elastic registration (MAPPER): Application to fusion of ultrasound and magnetic resonance imaging. Medical Physics, 42(3), pp. 1153–1163. (link) (bib) x @article{Sparks2015, year = { 2015 }, volume = { 42 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Multiattribute probabilistic prostate elastic registration (MAPPER): Application to fusion of ultrasound and magnetic resonance imaging }, pages = { 1153--1163 }, number = { 3 }, keywords = { MRI-ultrasound fusion,image registration,image-guided biopsy,prostate cancer,prostate imaging }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4905104 }, author = { Sparks and {Nicolas Bloch} and Feleppa and Barratt and Moses and Ponsky and Madabhushi }, abstract = { Purpose: Transrectal ultrasound (TRUS)-guided needle biopsy is the current gold standard for prostate cancer diagnosis. However, up to 40{\%} of prostate cancer lesions appears isoechoic on TRUS. Hence, TRUS-guided biopsy has a high false negative rate for prostate cancer diagnosis. Magnetic resonance imaging (MRI) is better able to distinguish prostate cancer from benign tissue. However, MRI-guided biopsy requires special equipment and training and a longer procedure time. MRI-TRUS fusion, where MRI is acquired preoperatively and then aligned to TRUS, allows for advantages of both modalities to be leveraged during biopsy. MRI-TRUS-guided biopsy increases the yield of cancer positive biopsies. In this work, the authors present multiattribute probabilistic postate elastic registration (MAPPER) to align prostate MRI and TRUS imagery. 
Methods: MAPPER involves (1) segmenting the prostate on MRI, (2) calculating a multiattribute probabilistic map of prostate location on TRUS, and (3) maximizing overlap between the prostate segmentation on MRI and the multiattribute probabilistic map on TRUS, thereby driving registration of MRI onto TRUS. MAPPER represents a significant advancement over the current state-of-the-art as it requires no user interaction during the biopsy procedure by leveraging texture and spatial information to determine the prostate location on TRUS. Although MAPPER requires manual interaction to segment the prostate on MRI, this step is performed prior to biopsy and will not substantially increase biopsy procedure time. Results: MAPPER was evaluated on 13 patient studies from two independent datasets. Dataset 1 has 6 studies acquired with a side-firing TRUS probe and a 1.5 T pelvic phased-array coil MRI; Dataset 2 has 7 studies acquired with a volumetric end-firing TRUS probe and a 3.0 T endorectal coil MRI. MAPPER has a root-mean-square error (RMSE) for expert selected fiducials of 3.36±1.10 mm for Dataset 1 and 3.14±0.75 mm for Dataset 2. State-of-the-art MRI-TRUS fusion methods report RMSE of 3.06–2.07 mm. Conclusions: MAPPER aligns MRI and TRUS imagery without manual intervention ensuring efficient, reproducible registration. MAPPER has a similar RMSE to state-of-the-art methods that require manual intervention. }, } |
2015 | Journal | Johannes Schindelin, Curtis T. Rueden, Mark C. Hiner, Kevin W. Eliceiri (2015). The ImageJ ecosystem: An open platform for biomedical image analysis. Molecular Reproduction and Development, 82(7-8), pp. 518–529. (link) (bib) x @article{Schindelin2015, year = { 2015 }, volume = { 82 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { The ImageJ ecosystem: An open platform for biomedical image analysis }, pages = { 518--529 }, number = { 7-8 }, journal = { Molecular Reproduction and Development }, issn = { 10982795 }, doi = { 10.1002/mrd.22489 }, author = { Schindelin and Rueden and Hiner and Eliceiri }, abstract = { Technology in microscopy advances rapidly, enabling increasingly affordable, faster, and more precise quantitative biomedical imaging, which necessitates correspondingly more-advanced image processing and analysis techniques. A wide range of software is available-from commercial to academic, special-purpose to Swiss army knife, small to large-but a key characteristic of software that is suitable for scientific inquiry is its accessibility. Open-source software is ideal for scientific endeavors because it can be freely inspected, modified, and redistributed; in particular, the open-software platform ImageJ has had a huge impact on the life sciences, and continues to do so. From its inception, ImageJ has grown significantly due largely to being freely available and its vibrant and helpful user community. Scientists as diverse as interested hobbyists, technical assistants, students, scientific staff, and advanced biology researchers use ImageJ on a daily basis, and exchange knowledge via its dedicated mailing list. Uses of ImageJ range from data visualization and teaching to advanced image processing and statistical analysis. The software's extensibility continues to attract biologists at all career stages as well as computer scientists who wish to effectively implement specific image-processing algorithms. 
In this review, we use the ImageJ project as a case study of how open-source software fosters its suites of software tools, making multitudes of image-analysis technology easily accessible to the scientific community. We specifically explore what makes ImageJ so popular, how it impacts the life sciences, how it inspires other projects, and how it is self-influenced by coevolving projects within the ImageJ ecosystem. }, } |
2015 | Journal | Julia Reckfort, Hendrik Wiese, Uwe Pietrzyk, Karl Zilles, Katrin Amunts, Markus Axer (2015). A multiscale approach for the reconstruction of the fiber architecture of the human brain based on 3D-PLI. Frontiers in Neuroanatomy, 9(september), pp. 11. (link) (bib) x @article{Reckfort2015, year = { 2015 }, volume = { 9 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A multiscale approach for the reconstruction of the fiber architecture of the human brain based on 3D-PLI }, pages = { 11 }, number = { september }, keywords = { Brain,Connectome,Fiber orientation,Multiscale approach,Polarized light imaging }, journal = { Frontiers in Neuroanatomy }, issn = { 16625129 }, doi = { 10.3389/fnana.2015.00118 }, author = { Reckfort and Wiese and Pietrzyk and Zilles and Amunts and Axer }, abstract = { Structural connectivity of the brain can be conceptionalized as a multiscale organization. The present study is built on 3D-Polarized Light Imaging (3D-PLI), a neuroimaging technique targeting the reconstruction of nerve fiber orientations and therefore contributing to the analysis of brain connectivity. Spatial orientations of the fibers are derived from birefringence measurements of unstained histological sections that are interpreted by means of a voxel-based analysis. This implies that a single fiber orientation vector is obtained for each voxel, which reflects the net effect of all comprised fibers. We have utilized two polarimetric setups providing an object space resolution of 1.3 $\mu$m/px (microscopic setup) and 64 $\mu$m/px (macroscopic setup) to carry out 3D-PLI and retrieve fiber orientations of the same tissue samples, but at complementary voxel sizes (i.e., scales). The present study identifies the main sources which cause a discrepancy of the measured fiber orientations observed when measuring the same sample with the two polarimetric systems. 
As such sources the differing optical resolutions and diverging retardances of the implemented waveplates were identified. A methodology was implemented that enables the compensation of measured different systems' responses to the same birefringent sample. This opens up new ways to conduct multiscale analysis in brains by means of 3D-PLI and to provide a reliable basis for the transition between different scales of the nerve fiber architecture. }, } |
2015 | Journal | Lei Qu, Hanchuan Peng (2015). LittleQuickWarp: An ultrafast image warping tool. Methods, 73, pp. 38–42. (link) (bib) x @article{Qu2015, year = { 2015 }, volume = { 73 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { LittleQuickWarp: An ultrafast image warping tool }, pages = { 38--42 }, keywords = { B-spline,Image registration,Thin plate spline,Warping }, journal = { Methods }, issn = { 10959130 }, doi = { 10.1016/j.ymeth.2014.09.002 }, author = { Qu and Peng }, abstract = { Warping images into a standard coordinate space is critical for many image computing related tasks. However, for multi-dimensional and high-resolution images, an accurate warping operation itself is often very expensive in terms of computer memory and computational time. For high-throughput image analysis studies such as brain mapping projects, it is desirable to have high performance image warping tools that are compatible with common image analysis pipelines. In this article, we present LittleQuickWarp, a swift and memory efficient tool that boosts 3D image warping performance dramatically and at the same time has high warping quality similar to the widely used thin plate spline (TPS) warping. Compared to the TPS, LittleQuickWarp can improve the warping speed 2-5 times and reduce the memory consumption 6-20 times. We have implemented LittleQuickWarp as an Open Source plug-in program on top of the Vaa3D system (http://vaa3d.org). The source code and a brief tutorial can be found in the Vaa3D plugin source code repository. }, } |
2015 | Journal | Dino Podlesek, Tobias Meyer, Ute Morgenstern, Gabriele Schackert, Matthias Kirsch (2015). Improved visualization of intracranial vessels with intraoperative coregistration of rotational digital subtraction angiography and intraoperative 3D ultrasound. PLoS ONE, 10(3), pp. 16. (link) (bib) x @article{Podlesek2015, year = { 2015 }, volume = { 10 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Improved visualization of intracranial vessels with intraoperative coregistration of rotational digital subtraction angiography and intraoperative 3D ultrasound }, pages = { 16 }, number = { 3 }, journal = { PLoS ONE }, issn = { 19326203 }, doi = { 10.1371/journal.pone.0121345 }, author = { Podlesek and Meyer and Morgenstern and Schackert and Kirsch }, abstract = { Introduction: Ultrasound can visualize and update the vessel status in real time during cerebral vascular surgery. We studied the depiction of parent vessels and aneurysms with a high-resolution 3D intraoperative ultrasound imaging system during aneurysm clipping using rotational digital subtraction angiography as a reference. Methods: We analyzed 3D intraoperative ultrasound in 39 patients with cerebral aneurysms to visualize the aneurysm intraoperatively and the nearby vascular tree before and after clipping. Simultaneous coregistration of preoperative subtraction angiography data with 3D intraoperative ultrasound was performed to verify the anatomical assignment. Results: Intraoperative ultrasound detected 35 of 43 aneurysms (81{\%}) in 39 patients. Thirty-nine intraoperative ultrasound measurements were matched with rotational digital subtraction angiography and were successfully reconstructed during the procedure. In 7 patients, the aneurysm was partially visualized by 3D-ioUS or was not in field of view. Post-clipping intraoperative ultrasound was obtained in 26 and successfully reconstructed in 18 patients (69{\%}) despite clip related artefacts. 
The overlap between 3D-ioUS aneurysm volume and preoperative rDSA aneurysm volume resulted in a mean accuracy of 0.71 (Dice coefficient). Conclusions: Intraoperative coregistration of 3D intraoperative ultrasound data with preoperative rotational digital subtraction angiography is possible with high accuracy. It allows the immediate visualization of vessels beyond the microscopic field, as well as parallel assessment of blood velocity, aneurysm and vascular tree configuration. Although spatial resolution is lower than for standard angiography, the method provides an excellent vascular overview, advantageous interpretation of 3D-ioUS and immediate intraoperative feedback of the vascular status. A prerequisite for understanding vascular intraoperative ultrasound is image quality and a successful match with preoperative rotational digital subtraction angiography. }, } |
2015 | Journal | Pietro Nardelli, Kashif A. Khan, Alberto Corvò, Niamh Moore, Mary J. Murphy, Maria Twomey, Owen J. O'Connor, Marcus P. Kennedy, Raúl San José Estépar, Michael M. Maher, Pádraig Cantillon-Murphy (2015). Optimizing parameters of an open-source airway segmentation algorithm using different CT images. BioMedical Engineering Online, 14(1), pp. 24. (link) (bib) x @article{Nardelli2015, year = { 2015 }, volume = { 14 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Optimizing parameters of an open-source airway segmentation algorithm using different CT images }, pages = { 24 }, number = { 1 }, keywords = { 3D Slicer,Airway segmentation,Computed tomography (CT),ITK,Image processing,Lung,Region growing }, journal = { BioMedical Engineering Online }, issn = { 1475925X }, doi = { 10.1186/s12938-015-0060-2 }, author = { Nardelli and Khan and Corv{\`{o}} and Moore and Murphy and Twomey and O'Connor and Kennedy and Est{\'{e}}par and Maher and Cantillon-Murphy }, abstract = { Background: Computed tomography (CT) helps physicians locate and diagnose pathological conditions. In some conditions, having an airway segmentation method which facilitates reconstruction of the airway from chest CT images can help hugely in the assessment of lung diseases. Many efforts have been made to develop airway segmentation algorithms, but methods are usually not optimized to be reliable across different CT scan parameters. Methods: In this paper, we present a simple and reliable semi-automatic algorithm which can segment tracheal and bronchial anatomy using the open-source 3D Slicer platform. The method is based on a region growing approach where trachea, right and left bronchi are cropped and segmented independently using three different thresholds. The algorithm and its parameters have been optimized to be efficient across different CT scan acquisition parameters. 
The performance of the proposed method has been evaluated on EXACT'09 cases and local clinical cases as well as on a breathing pig lung phantom using multiple scans and changing parameters. In particular, to investigate multiple scan parameters reconstruction kernel, radiation dose and slice thickness have been considered. Volume, branch count, branch length and leakage presence have been evaluated. A new method for leakage evaluation has been developed and correlation between segmentation metrics and CT acquisition parameters has been considered. Results: All the considered cases have been segmented successfully with good results in terms of leakage presence. Results on clinical data are comparable to other teams' methods, as obtained by evaluation against the EXACT09 challenge, whereas results obtained from the phantom prove the reliability of the method across multiple CT platforms and acquisition parameters. As expected, slice thickness is the parameter affecting the results the most, whereas reconstruction kernel and radiation dose seem not to particularly affect airway segmentation. Conclusion: The system represents the first open-source airway segmentation platform. The quantitative evaluation approach presented represents the first repeatable system evaluation tool for like-for-like comparison between different airway segmentation platforms. Results suggest that the algorithm can be considered stable across multiple CT platforms and acquisition parameters and can be considered as a starting point for the development of a complete airway segmentation algorithm. }, } |
2015 | Journal | Tinashe Mutsvangwa, Valerie Burdin, Cedric Schwartz, Christian Roux (2015). An Automated Statistical Shape Model Developmental Pipeline: Application to the Human Scapula and Humerus. IEEE Transactions on Biomedical Engineering, 62(4), pp. 1098–1107. (link) (bib) x @article{Mutsvangwa2015, year = { 2015 }, volume = { 62 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { An Automated Statistical Shape Model Developmental Pipeline: Application to the Human Scapula and Humerus }, pages = { 1098--1107 }, number = { 4 }, keywords = { humerus,scapula,statistical shape model }, journal = { IEEE Transactions on Biomedical Engineering }, issn = { 15582531 }, doi = { 10.1109/TBME.2014.2368362 }, author = { Mutsvangwa and Burdin and Schwartz and Roux }, abstract = { This paper presents development of statistical shape models based on robust and rigid-groupwise registration followed by pointset nonrigid registration. The main advantages of the pipeline include automation in that the method does not rely on manual landmarks or a regionalization step; there is no bias in the choice of reference during the correspondence steps and the use of the probabilistic principal component analysis framework increases the domain of the shape variability. A comparison between the widely used expectation maximization-iterative closest point algorithm and a recently reported groupwise method on publicly available data (hippocampus) using the well-known criteria of generality, specificity, and compactness is also presented. The proposed method gives similar values but the curves of generality and specificity are superior to those of the other two methods. Finally, the method is applied to the human scapula, which is a known difficult structure, and the human humerus. }, } |
2015 | Journal | Abdallah S.R. Mohamed, Manee Naad Ruangskul, Musaddiq J. Awan, Charles A. Baron, Jayashree Kalpathy-Cramer, Richard Castillo, Edward Castillo, Thomas M. Guerrero, Esengul Kocak-Uzel, Jinzhong Yang, Laurence E. Court, Michael E. Kantor, G. Brandon Gunn, Rivka R. Colen, Steven J. Frank, Adam S. Garden, David I. Rosenthal, Clifton D. Fuller (2015). Quality assurance assessment of diagnostic and radiation therapy-simulation CT image registration for head and neck radiation therapy: Anatomic region of interest-based comparison of rigid and deformable algorithms. Radiology, 274(3), pp. 752–763. (link) (bib) x @article{Mohamed2015, year = { 2015 }, volume = { 274 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Quality assurance assessment of diagnostic and radiation therapy-simulation CT image registration for head and neck radiation therapy: Anatomic region of interest-based comparison of rigid and deformable algorithms }, pages = { 752--763 }, number = { 3 }, journal = { Radiology }, issn = { 15271315 }, doi = { 10.1148/radiol.14132871 }, author = { Mohamed and Ruangskul and Awan and Baron and Kalpathy-Cramer and Castillo and Castillo and Guerrero and Kocak-Uzel and Yang and Court and Kantor and Gunn and Colen and Frank and Garden and Rosenthal and Fuller }, abstract = { Purpose: To develop a quality assurance (QA) workflow by using a robust, curated, manually segmented anatomic region-of-interest (ROI) library as a benchmark for quantitative assessment of different image registration techniques used for head and neck radiation therapy-simulation computed tomography (CT) with diagnostic CT coregistration. Materials and Methods: Radiation therapy-simulation CT images and diagnostic CT images in 20 patients with head and neck squamous cell carcinoma treated with curative-intent intensity-modulated radiation therapy between August 2011 and May 2012 were retrospectively retrieved with institutional review board approval. 
Sixty-eight reference anatomic ROIs with gross tumor and nodal targets were then manually contoured on images from each examination. Diagnostic CT images were registered with simulation CT images rigidly and by using four deformable image registration (DIR) algorithms: atlas based, B-spline, demons, and optical flow. The resultant deformed ROIs were compared with manually contoured reference ROIs by using similarity coefficient metrics (ie, Dice similarity coefficient) and surface distance metrics (ie, 95{\%} maximum Hausdorff distance). The nonparametric Steel test with control was used to compare different DIR algorithms with rigid image registration (RIR) by using the post hoc Wilcoxon signed-rank test for stratified metric comparison.. Results: A total of 2720 anatomic and 50 tumor and nodal ROIs were delineated. All DIR algorithms showed improved performance over RIR for anatomic and target ROI conformance, as shown for most comparison metrics (Steel test, P {\textless} .008 after Bonferroni correction). The performance of different algorithms varied substantially with stratification by specific anatomic structures or category and simulation CT section thickness. Conclusion: Development of a formal ROI-based QA workflow for registration assessment demonstrated improved performance with DIR techniques over RIR. After QA, DIR implementation should be the standard for head and neck diagnostic CT and simulation CT allineation, especially for target delineation. }, } |
2015 | Journal | Cynthia Ménard, Douglas Iupati, Julia Publicover, Jenny Lee, Jessamine Abed, Gerald O'Leary, Anna Simeonov, Warren D. Foltz, Michael Milosevic, Charles Catton, Gerard Morton, Robert Bristow, Andrew Bayley, Eshetu G. Atenafu, Andrew J. Evans, David A. Jaffray, Peter Chung, Kristy K. Brock, Masoom A. Haider (2015). MR-guided prostate biopsy for planning of focal salvage after radiation therapy. Radiology, 274(1), pp. 181–191. (link) (bib) x @article{Menard2015, year = { 2015 }, volume = { 274 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { MR-guided prostate biopsy for planning of focal salvage after radiation therapy }, pages = { 181--191 }, number = { 1 }, journal = { Radiology }, issn = { 15271315 }, doi = { 10.1148/radiol.14122681 }, author = { M{\'{e}}nard and Iupati and Publicover and Lee and Abed and O'Leary and Simeonov and Foltz and Milosevic and Catton and Morton and Bristow and Bayley and Atenafu and Evans and Jaffray and Chung and Brock and Haider }, abstract = { Purpose: To determine if the integration of diagnostic magnetic resonance (MR) imaging and MR-guided biopsy would improve target delineation for focal salvage therapy in men with prostate cancer. Materials and Methods: Between September 2008 and March 2011, 30 men with biochemical failure after radiation therapy for prostate cancer provided written informed consent and were enrolled in a prospective clinical trial approved by the institutional research ethics board. An integrated diagnostic MR imaging and interventional biopsy procedure was performed with a 1.5-T MR imager by using a prototype table and stereotactic transperineal template. Multiparametric MR imaging (T2-weighted, dynamic contrast material-enhanced, and diffusion-weighted sequences) was followed by targeted biopsy of suspicious regions and systematic sextant sampling. Biopsy needle locations were imaged and registered to diagnostic images. 
Two observers blinded to clinical data and the results of prior imaging studies delineated tumor boundaries. Area under the receiver operating characteristic curve (Az) was calculated based on generalized linear models by using biopsy as the reference standard to distinguish benign from malignant lesions. Results: Twenty-eight patients were analyzed. Most patients (n = 22) had local recurrence, with 82{\%} (18 of 22) having unifocal disease. When multiparametric volumes from two observers were combined, it increased the apparent overall tumor volume by 30{\%}; however, volumes remained small (mean, 2.9 mL; range, 0.5-8.3 mL). Tumor target boundaries differed between T2-weighted, dynamic contrast-enhanced, and diffusion-weighted sequences (mean Dice coefficient, 0.13-0.35). Diagnostic accuracy in the identification of tumors improved with a multiparametric approach versus a strictly T2-weighted or dynamic contrast-enhanced approach through an improvement in sensitivity (observer 1, 0.65 vs 0.35 and 0.44, respectively; observer 2, 0.82 vs 0.64 and 0.53, respectively; P {\textless} .05) and improved further with a 5-mm expansion margin (Az = 0.85 vs 0.91 for observer 2). After excluding three patients with fewer than six informative biopsy cores and six patients with inadequately stained margins, MR-guided biopsy enabled more accurate delineation of the tumor target volume by means of exclusion of false-positive results in 26{\%} (five of 19 patients), false-negative results in 11{\%} (two of 19 patients) and by guiding extension of tumor boundaries in 16{\%} (three of 19 patients). Conclusion: The integration of guided biopsy with diagnostic MR imaging is feasible and alters delineation of the tumor target boundary in a substantial proportion of patients considering focal salvage. }, } |
2015 | Journal | Sarah A. Mattonen, Shyama Tetar, David A. Palma, Alexander V. Louie, Suresh Senan, Aaron D. Ward (2015). Imaging texture analysis for automated prediction of lung cancer recurrence after stereotactic radiotherapy. Journal of Medical Imaging, 2(4), pp. 041010. (link) (bib) x @article{Mattonen2015, year = { 2015 }, volume = { 2 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Imaging texture analysis for automated prediction of lung cancer recurrence after stereotactic radiotherapy }, pages = { 041010 }, number = { 4 }, journal = { Journal of Medical Imaging }, issn = { 2329-4302 }, doi = { 10.1117/1.jmi.2.4.041010 }, author = { Mattonen and Tetar and Palma and Louie and Senan and Ward }, abstract = { Benign radiation-induced lung injury (RILI) is not uncommon following stereotactic ablative radiotherapy (SABR) for lung cancer and can be difficult to differentiate from tumor recurrence on follow-up imaging. We previously showed the ability of computed tomography (CT) texture analysis to predict recurrence. The aim of this study was to evaluate and compare the accuracy of recurrence prediction using manual region-of-interest segmentation to that of a semiautomatic approach. We analyzed 22 patients treated for 24 lesions (11 recurrences, 13 RILI). Consolidative and ground-glass opacity (GGO) regions were manually delineated. The longest axial diameter of the consolidative region on each post-SABR CT image was measured. This line segment is routinely obtained as part of the clinical imaging workflow and was used as input to automatically delineate the consolidative region and subsequently derive a periconsolidative region to sample GGO tissue. 
Texture features were calculated, and at two to five months post-SABR, the entropy texture measure within the semiautomatic segmentations showed prediction accuracies [areas under the receiver operating characteristic curve (AUC): 0.70 to 0.73] similar to those of manual GGO segmentations (AUC: 0.64). After integration into the clinical workflow, this decision support system has the potential to support earlier salvage for patients with recurrence and fewer investigations of benign RILI. }, } |
2015 | Journal | Caroline Magnain, Jean C. Augustinack, Ender Konukoglu, Matthew P. Frosch, Sava Sakadžic, Ani Varjabedian, Nathalie Garcia, Van J. Wedeen, David A. Boas, Bruce Fischl (2015). Optical coherence tomography visualizes neurons in human entorhinal cortex. Neurophotonics, 2(1), pp. 015004. (link) (bib) x @article{Magnain2015, year = { 2015 }, volume = { 2 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Optical coherence tomography visualizes neurons in human entorhinal cortex }, pages = { 015004 }, number = { 1 }, journal = { Neurophotonics }, issn = { 2329-423X }, doi = { 10.1117/1.nph.2.1.015004 }, author = { Magnain and Augustinack and Konukoglu and Frosch and Sakad{\v{z}}ic and Varjabedian and Garcia and Wedeen and Boas and Fischl }, abstract = { Abstract. The cytoarchitecture of the human brain is of great interest in diverse fields: neuroanatomy, neurology, neuroscience, and neuropathology. Traditional histology is a method that has been historically used to assess cell and fiber content in the ex vivo human brain. However, this technique suffers from significant distortions. We used a previously demonstrated optical coherence microscopy technique to image individual neurons in several square millimeters of en-face tissue blocks from layer II of the human entorhinal cortex, over 50 $\mu$m in depth. The same slices were then sectioned and stained for Nissl substance. We registered the optical coherence tomography (OCT) images with the corresponding Nissl stained slices using a nonlinear transformation. The neurons were then segmented in both images and we quantified the overlap. We show that OCT images contain information about neurons that is comparable to what can be obtained from Nissl staining, and thus can be used to assess the cytoarchitecture of the ex vivo human brain with minimal distortion. 
With the future integration of a vibratome into the OCT imaging rig, this technique can be scaled up to obtain undistorted volumetric data of centimeter cube tissue blocks in the near term, and entire human hemispheres in the future. }, } |
2015 | Journal | Sidong Liu, Weidong Cai, Siqi Liu, Fan Zhang, Michael Fulham, Dagan Feng, Sonia Pujol, Ron Kikinis (2015). Multimodal neuroimaging computing: the workflows, methods, and platforms. Brain Informatics, 2(3), pp. 181–195. (link) (bib) x @article{Liu2015, year = { 2015 }, volume = { 2 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84977927257{\&}doi=10.1007{\%}2Fs40708-015-0020-4{\&}partnerID=40{\&}md5=5b2c05f1154939f3d1a0412038a423e1 }, type = { Journal Article }, title = { Multimodal neuroimaging computing: the workflows, methods, and platforms }, pages = { 181--195 }, number = { 3 }, keywords = { Medical image computing,Multimodal,Neuroimaging }, journal = { Brain Informatics }, issn = { 21984026 }, doi = { 10.1007/s40708-015-0020-4 }, author = { Liu and Cai and Liu and Zhang and Fulham and Feng and Pujol and Kikinis }, abstract = { The last two decades have witnessed the explosive growth in the development and use of noninvasive neuroimaging technologies that advance the research on human brain under normal and pathological conditions. Multimodal neuroimaging has become a major driver of current neuroimaging research due to the recognition of the clinical benefits of multimodal data, and the better access to hybrid devices. Multimodal neuroimaging computing is very challenging, and requires sophisticated computing to address the variations in spatiotemporal resolution and merge the biophysical/biochemical information. We review the current workflows and methods for multimodal neuroimaging computing, and also demonstrate how to conduct research using the established neuroimaging computing packages and platforms. }, } |
2015 | Journal | Robert Korez, Bulat Ibragimov, Bostjan Likar, Franjo Pernus, Tomaz Vrtovec (2015). A Framework for Automated Spine and Vertebrae Interpolation-Based Detection and Model-Based Segmentation. IEEE Transactions on Medical Imaging, 34(8), pp. 1649–1662. (link) (bib) x @article{Korez2015, year = { 2015 }, volume = { 34 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A Framework for Automated Spine and Vertebrae Interpolation-Based Detection and Model-Based Segmentation }, pages = { 1649--1662 }, number = { 8 }, keywords = { Computed tomography,deformable models,image segmentation,interpolation theory,object detection,spine,vertebra }, journal = { IEEE Transactions on Medical Imaging }, issn = { 1558254X }, doi = { 10.1109/TMI.2015.2389334 }, author = { Korez and Ibragimov and Likar and Pernus and Vrtovec }, abstract = { Automated and semi-automated detection and segmentation of spinal and vertebral structures from computed tomography (CT) images is a challenging task due to a relatively high degree of anatomical complexity, presence of unclear boundaries and articulation of vertebrae with each other, as well as due to insufficient image spatial resolution, partial volume effects, presence of image artifacts, intensity variations and low signal-to-noise ratio. In this paper, we describe a novel framework for automated spine and vertebrae detection and segmentation from 3-D CT images. A novel optimization technique based on interpolation theory is applied to detect the location of the whole spine in the 3-D image and, using the obtained location of the whole spine, to further detect the location of individual vertebrae within the spinal column. The obtained vertebra detection results represent a robust and accurate initialization for the subsequent segmentation of individual vertebrae, which is performed by an improved shape-constrained deformable model approach. 
The framework was evaluated on two publicly available CT spine image databases of 50 lumbar and 170 thoracolumbar vertebrae. Quantitative comparison against corresponding reference vertebra segmentations yielded an overall mean centroid-to-centroid distance of 1.1 mm and Dice coefficient of 83.6{\%} for vertebra detection, and an overall mean symmetric surface distance of 0.3 mm and Dice coefficient of 94.6{\%} for vertebra segmentation. The results indicate that by applying the proposed automated detection and segmentation framework, vertebrae can be successfully detected and accurately segmented in 3-D from CT spine images. }, } |
2015 | Journal | Philipp Kainz, Michael Mayrhofer-Reinhartshuber, Helmut Ahammer (2015). IQM: An extensible and portable open source application for image and signal analysis in java. PLoS ONE, 10(1), pp. 28. (link) (bib) x @article{Kainz2015, year = { 2015 }, volume = { 10 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { IQM: An extensible and portable open source application for image and signal analysis in java }, pages = { 28 }, number = { 1 }, journal = { PLoS ONE }, issn = { 19326203 }, doi = { 10.1371/journal.pone.0116329 }, author = { Kainz and Mayrhofer-Reinhartshuber and Ahammer }, abstract = { Image and signal analysis applications are substantial in scientific research. Both open source and commercial packages provide a wide range of functions for image and signal analysis, which are sometimes supported very well by the communities in the corresponding fields. Commercial software packages have the major drawback of being expensive and having undisclosed source code, which hampers extending the functionality if there is no plugin interface or similar option available. However, both variants cannot cover all possible use cases and sometimes custom developments are unavoidable, requiring open source applications. In this paper we describe IQM, a completely free, portable and open source (GNU GPLv3) image and signal analysis application written in pure Java. IQM does not depend on any natively installed libraries and is therefore runnable out-of-the-box. Currently, a continuously growing repertoire of 50 image and 16 signal analysis algorithms is provided. The modular functional architecture based on the three-tier model is described along the most important functionality. Extensibility is achieved using operator plugins, and the development of more complex workflows is provided by a Groovy script interface to the JVM. 
We demonstrate IQM's image and signal processing capabilities in a proof-of-principle analysis and provide example implementations to illustrate the plugin framework and the scripting interface. IQM integrates with the popular ImageJ image processing software and is aiming at complementing functionality rather than competing with existing open source software. Machine learning can be integrated into more complex algorithms via the WEKA software package as well, enabling the development of transparent and robust methods for image and signal analysis. }, } |
2015 | Journal | Cory Jones, Ting Liu, Nathaniel Wood Cohan, Mark Ellisman, Tolga Tasdizen (2015). Efficient semi-automatic 3D segmentation for neuron tracing in electron microscopy images. Journal of Neuroscience Methods, 246, pp. 13–21. (link) (bib) x @article{Jones2015, year = { 2015 }, volume = { 246 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Efficient semi-automatic 3D segmentation for neuron tracing in electron microscopy images }, pages = { 13--21 }, keywords = { 3D segmentation,Connectomics,Electron microscopy,Image segmentation,Neuron reconstruction,Semi-automatic segmentation }, journal = { Journal of Neuroscience Methods }, issn = { 1872678X }, doi = { 10.1016/j.jneumeth.2015.03.005 }, author = { Jones and Liu and Cohan and Ellisman and Tasdizen }, abstract = { Background: In the area of connectomics, there is a significant gap between the time required for data acquisition and dense reconstruction of the neural processes contained in the same dataset. Automatic methods are able to eliminate this timing gap, but the state-of-the-art accuracy so far is insufficient for use without user corrections. If completed naively, this process of correction can be tedious and time consuming. New method: We present a new semi-automatic method that can be used to perform 3D segmentation of neurites in EM image stacks. It utilizes an automatic method that creates a hierarchical structure for recommended merges of superpixels. The user is then guided through each predicted region to quickly identify errors and establish correct links. Results: We tested our method on three datasets with both novice and expert users. Accuracy and timing were compared with published automatic, semi-automatic, and manual results. Comparison with existing methods: Post-automatic correction methods have also been used in Mishchenko et al. (2010) and Haehn et al. (2014). These methods do not provide navigation or suggestions in the manner we present. 
Other semi-automatic methods require user input prior to the automatic segmentation such as Jeong et al. (2009) and Cardona et al. (2010) and are inherently different than our method. Conclusion: Using this method on the three datasets, novice users achieved accuracy exceeding state-of-the-art automatic results, and expert users achieved accuracy on par with full manual labeling but with a 70{\%} time improvement when compared with other examples in publication. }, } |
2015 | Journal | Iván Gómez-Conde, Susana S. Caetano, Carlos E. Tadokoro, David N. Olivieri (2015). Stabilizing 3D in vivo intravital microscopy images with an iteratively refined soft-tissue model for immunology experiments. Computers in Biology and Medicine, 64, pp. 246–260. (link) (bib) x @article{GomezConde2015,
year = { 2015 },
volume = { 64 },
url = { https://doi.org/10.1016/j.compbiomed.2015.07.001 },
type = { Journal Article },
title = { Stabilizing {3D} in vivo intravital microscopy images with an iteratively refined soft-tissue model for immunology experiments },
pages = { 246--260 },
keywords = { Bioimaging,Biomedical image stabilization,Image registration,In vivo two photon microscopy,Lymphocyte tracking,Soft-tissue deformations },
journal = { Computers in Biology and Medicine },
issn = { 18790534 },
doi = { 10.1016/j.compbiomed.2015.07.001 },
author = { G{\'{o}}mez-Conde, Iv{\'{a}}n and Caetano, Susana S. and Tadokoro, Carlos E. and Olivieri, David N. },
abstract = { We describe a set of new algorithms and a software tool, StabiTissue, for stabilizing in vivo intravital microscopy images that suffer from soft-tissue background movement. Because these images lack predetermined anchors and are dominated by noise, we use a pixel weighted image alignment together with a correction for nonlinear tissue deformations. We call this correction a poor man's diffeomorphic map since it ascertains the nonlinear regions of the image without resorting to a full integral equation method. To determine the quality of the image stabilization, we developed an ensemble sampling method that quantifies the coincidence between image pairs from randomly distributed image regions. We obtain global stabilization alignment through an iterative constrained simulated annealing optimization procedure. To show the accuracy of our algorithm with existing software, we measured the misalignment error rate in datasets taken from two different organs and compared the results to a similar and popular open-source solution. Present open-source stabilization software tools perform poorly because they do not treat the specific needs of the IV-2pM datasets with soft-tissue deformation, speckle noise, full 5D inter- and intra-stack motion error correction, and NA anchors. In contrast, the results of our tests demonstrate that our method is more immune to noise and provides better performance for datasets possessing nonlinear tissue deformations. As a practical application of our software, we show how our stabilization improves cell tracking, where the presence of background movement would degrade track information. We also provide a qualitative comparison of our software with other open-source libraries/applications. Our software is freely available at the open source repository http://sourceforge.net/projects/stabitissue/. },
} |
2015 | Journal | Duc Fehr, Harini Veeraraghavan, Andreas Wibmer, Tatsuo Gondo, Kazuhiro Matsumoto, Herbert Alberto Vargas, Evis Sala, Hedvig Hricak, Joseph O. Deasy (2015). Automatic classification of prostate cancer Gleason scores from multiparametric magnetic resonance images. Proceedings of the National Academy of Sciences of the United States of America, 112(46), pp. E6265–E6273. (link) (bib) x @article{Fehr2015, year = { 2015 }, volume = { 112 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Automatic classification of prostate cancer Gleason scores from multiparametric magnetic resonance images }, pages = { E6265--E6273 }, number = { 46 }, keywords = { Gleason score classification,Learning from unbalanced data,Multiparametric mri,PCa gleason (3+4) vs. (4+3) cancers,PCa gleason 6 vs. ≥7 }, journal = { Proceedings of the National Academy of Sciences of the United States of America }, issn = { 10916490 }, doi = { 10.1073/pnas.1505935112 }, author = { Fehr and Veeraraghavan and Wibmer and Gondo and Matsumoto and Vargas and Sala and Hricak and Deasy }, abstract = { Noninvasive, radiological image-based detection and stratification of Gleason patterns can impact clinical outcomes, treatment selection, and the determination of disease status at diagnosis without subjecting patients to surgical biopsies. We present machine learning-based automatic classification of prostate cancer aggressiveness by combining apparent diffusion coefficient (ADC) and T2-weighted (T2-w) MRI-based texture features. Our approach achieved reasonably accurate classification of Gleason scores (GS) 6(3 + 3) vs. ≥7 and 7(3 + 4) vs. 7(4 + 3) despite the presence of highly unbalanced samples by using two different sample augmentation techniques followed by feature selection-based classification. 
Our method distinguished between GS 6(3 + 3) and ≥7 cancers with 93{\%} accuracy for cancers occurring in both peripheral (PZ) and transition (TZ) zones and 92{\%} for cancers occurring in the PZ alone. Our approach distinguished the GS 7(3 + 4) from GS 7(4 + 3) with 92{\%} accuracy for cancers occurring in both the PZ and TZ and with 93{\%} for cancers occurring in the PZ alone. In comparison, a classifier using only the ADC mean achieved a top accuracy of 58{\%} for distinguishing GS 6(3 + 3) vs. GS ≥7 for cancers occurring in PZ and TZ and 63{\%} for cancers occurring in PZ alone. The same classifier achieved an accuracy of 59{\%} for distinguishing GS 7(3 + 4) from GS 7(4 + 3) occurring in the PZ and TZ and 60{\%} for cancers occurring in PZ alone. Separate analysis of the cancers occurring in TZ alone was not performed owing to the limited number of samples. Our results suggest that texture features derived from ADC and T2-w MRI together with sample augmentation can help to obtain reasonably accurate classification of Gleason patterns. }, } |
2015 | Journal | Pierre Barbier de Reuille, Anne Lise Routier-Kierzkowska, Daniel Kierzkowski, George W. Bassel, Thierry Schüpbach, Gerardo Tauriello, Namrata Bajpai, Sören Strauss, Alain Weber, Annamaria Kiss, Agata Burian, Hugo Hofhuis, Aleksandra Sapala, Marcin Lipowczan, Maria B. Heimlicher, Sarah Robinson, Emmanuelle M. Bayer, Konrad Basler, Petros Koumoutsakos, Adrienne H.K. Roeder, Tinri Aegerter-Wilmsen, Naomi Nakayama, Miltos Tsiantis, Angela Hay, Dorota Kwiatkowska, Ioannis Xenarios, Cris Kuhlemeier, Richard S. Smith (2015). MorphoGraphX: A platform for quantifying morphogenesis in 4D. eLife, 4(MAY), pp. 1–20. (link) (bib) x @article{Reuille2015, year = { 2015 }, volume = { 4 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { MorphoGraphX: A platform for quantifying morphogenesis in 4D }, pmid = { 25946108 }, pages = { 1--20 }, number = { MAY }, journal = { eLife }, issn = { 2050084X }, doi = { 10.7554/eLife.05864 }, author = { Reuille and Routier-Kierzkowska and Kierzkowski and Bassel and Sch{\"{u}}pbach and Tauriello and Bajpai and Strauss and Weber and Kiss and Burian and Hofhuis and Sapala and Lipowczan and Heimlicher and Robinson and Bayer and Basler and Koumoutsakos and Roeder and Aegerter-Wilmsen and Nakayama and Tsiantis and Hay and Kwiatkowska and Xenarios and Kuhlemeier and Smith }, abstract = { Morphogenesis emerges from complex multiscale interactions between genetic and mechanical processes. To understand these processes, the evolution of cell shape, proliferation and gene expression must be quantified. This quantification is usually performed either in full 3D, which is computationally expensive and technically challenging, or on 2D planar projections, which introduces geometrical artifacts on highly curved organs. Here we present MorphoGraphX (www.MorphoGraphX.org), a software that bridges this gap by working directly with curved surface images extracted from 3D data. 
In addition to traditional 3D image analysis, we have developed algorithms to operate on curved surfaces, such as cell segmentation, lineage tracking and fluorescence signal quantification. The software's modular design makes it easy to include existing libraries, or to implement new algorithms. Cell geometries extracted with MorphoGraphX can be exported and used as templates for simulation models, providing a powerful platform to investigate the interactions between shape, genes and growth. }, } |
2015 | Journal | Benjamin De Leener, Julien Cohen-Adad, Samuel Kadoury (2015). Automatic Segmentation of the Spinal Cord and Spinal Canal Coupled with Vertebral Labeling. IEEE Transactions on Medical Imaging, 34(8), pp. 1705–1718. (link) (bib) x @article{DeLeener2015, year = { 2015 }, volume = { 34 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Automatic Segmentation of the Spinal Cord and Spinal Canal Coupled with Vertebral Labeling }, pages = { 1705--1718 }, number = { 8 }, keywords = { Automatic segmentation,CSF,MRI,Vertebral labeling,deformable model,spinal canal,spinal cord }, journal = { IEEE Transactions on Medical Imaging }, issn = { 1558254X }, doi = { 10.1109/TMI.2015.2437192 }, author = { {De Leener} and Cohen-Adad and Kadoury }, abstract = { Quantifying spinal cord (SC) atrophy in neurodegenerative and traumatic diseases brings important diagnosis and prognosis information for the clinician. We recently developed the PropSeg method, which allows for fast, accurate and automatic segmentation of the SC on different types of MRI contrast (e.g.,T1-,T2-and T2z.ast;-weighted sequences) and any field of view. However, comparing measurements from the SC between subjects is hindered by the lack of a generic coordinate system for the SC. In this paper, we present a new framework combining PropSeg and a vertebral level identification method, thereby enabling direct inter-and intra-subject comparison of SC measurements for large cohort studies as well as for longitudinal studies. Our segmentation method is based on the multi-resolution propagation of tubular deformable models. Coupled with an automatic intervertebral disk identification method, our segmentation pipeline provides quantitative metrics of the SC and spinal canal such as cross-sectional areas and volumes in a generic coordinate system based on vertebral levels. This framework was validated on 17 healthy subjects and on one patient with SC injury against manual segmentation. 
Results have been compared with an existing active surface method and show high local and global accuracy for both SC and spinal canal (Dice coefficients =0.91 ± 0.02) segmentation. Having a robust and automatic framework for SC segmentation and vertebral-based normalization opens the door to bias-free measurement of SC atrophy in large cohorts. }, } |
2015 | Journal | Michael D. De Bellis, Stephen R. Hooper, Steven D. Chen, James M. Provenzale, Brian D. Boyd, Christopher E. Glessner, James R. Macfall, Martha E. Payne, Robert Rybczynski, Donald P. Woolley (2015). Posterior structural brain volumes differ in maltreated youth with and without chronic posttraumatic stress disorder. Development and Psychopathology, 27(4), pp. 1555–1576. (link) (bib) x @article{DeBellis2015, year = { 2015 }, volume = { 27 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Posterior structural brain volumes differ in maltreated youth with and without chronic posttraumatic stress disorder }, pages = { 1555--1576 }, number = { 4 }, journal = { Development and Psychopathology }, issn = { 14692198 }, doi = { 10.1017/S0954579415000942 }, author = { {De Bellis} and Hooper and Chen and Provenzale and Boyd and Glessner and Macfall and Payne and Rybczynski and Woolley }, abstract = { Magnetic resonance imaging studies of maltreated children with posttraumatic stress disorder (PTSD) suggest that maltreatment-related PTSD is associated with adverse brain development. Maltreated youth resilient to chronic PTSD were not previously investigated and may elucidate neuromechanisms of the stress diathesis that leads to resilience to chronic PTSD. In this cross-sectional study, anatomical volumetric and corpus callosum diffusion tensor imaging measures were examined using magnetic resonance imaging in maltreated youth with chronic PTSD (N = 38), without PTSD (N = 35), and nonmaltreated participants (n = 59). Groups were sociodemographically similar. Participants underwent assessments for strict inclusion/exclusion criteria and psychopathology. Maltreated youth with PTSD were psychobiologically different from maltreated youth without PTSD and nonmaltreated controls. Maltreated youth with PTSD had smaller posterior cerebral and cerebellar gray matter volumes than did maltreated youth without PTSD and nonmaltreated participants. 
Cerebral and cerebellar gray matter volumes inversely correlated with PTSD symptoms. Posterior corpus callosum microstructure in pediatric maltreatment-related PTSD differed compared to maltreated youth without PTSD and controls. The group differences remained significant when controlling for psychopathology, numbers of Axis I disorders, and trauma load. Alterations of these posterior brain structures may result from a shared trauma-related mechanism or an inherent vulnerability that mediates the pathway from chronic PTSD to comorbidity. }, } |
2015 | Journal | Gabriella Captur, Audrey L. Karperien, Chunming Li, Filip Zemrak, Catalina Tobon-Gomez, Xuexin Gao, David A. Bluemke, Perry M. Elliott, Steffen E. Petersen, James C. Moon (2015). Fractal frontiers in cardiovascular magnetic resonance: Towards clinical implementation. Journal of Cardiovascular Magnetic Resonance, 17(1), pp. 10. (link) (bib) x @article{Captur2015, year = { 2015 }, volume = { 17 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Fractal frontiers in cardiovascular magnetic resonance: Towards clinical implementation }, pages = { 10 }, number = { 1 }, keywords = { Cardiovascular magnetic resonance,Image processing,Segmentation }, journal = { Journal of Cardiovascular Magnetic Resonance }, issn = { 1532429X }, doi = { 10.1186/s12968-015-0179-0 }, author = { Captur and Karperien and Li and Zemrak and Tobon-Gomez and Gao and Bluemke and Elliott and Petersen and Moon }, abstract = { Many of the structures and parameters that are detected, measured and reported in cardiovascular magnetic resonance (CMR) have at least some properties that are fractal, meaning complex and self-similar at different scales. To date however, there has been little use of fractal geometry in CMR; by comparison, many more applications of fractal analysis have been published in MR imaging of the brain. This review explains the fundamental principles of fractal geometry, places the fractal dimension into a meaningful context within the realms of Euclidean and topological space, and defines its role in digital image processing. It summarises the basic mathematics, highlights strengths and potential limitations of its application to biomedical imaging, shows key current examples and suggests a simple route for its successful clinical implementation by the CMR community. 
By simplifying some of the more abstract concepts of deterministic fractals, this review invites CMR scientists (clinicians, technologists, physicists) to experiment with fractal analysis as a means of developing the next generation of intelligent quantitative cardiac imaging tools. }, } |
2015 | Journal | Olivier Bernus, Aleksandra Radjenovic, Mark L. Trew, Ian J. Legrice, Gregory B. Sands, Derek R. Magee, Bruce H. Smaill, Stephen H. Gilbert (2015). Comparison of diffusion tensor imaging by cardiovascular magnetic resonance and gadolinium enhanced 3D image intensity approaches to investigation of structural anisotropy in explanted rat hearts. Journal of Cardiovascular Magnetic Resonance, 17(1), pp. 27. (link) (bib) x @article{Bernus2015, year = { 2015 }, volume = { 17 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Comparison of diffusion tensor imaging by cardiovascular magnetic resonance and gadolinium enhanced 3D image intensity approaches to investigation of structural anisotropy in explanted rat hearts }, pages = { 27 }, number = { 1 }, keywords = { Cardiovascular magnetic resonance,Diffusion tensor imaging,Myocardium,Myolaminar }, journal = { Journal of Cardiovascular Magnetic Resonance }, issn = { 1532429X }, doi = { 10.1186/s12968-015-0129-x }, author = { Bernus and Radjenovic and Trew and Legrice and Sands and Magee and Smaill and Gilbert }, abstract = { Background: Cardiovascular magnetic resonance (CMR) can through the two methods 3D FLASH and diffusion tensor imaging (DTI) give complementary information on the local orientations of cardiomyocytes and their laminar arrays. Methods: Eight explanted rat hearts were perfused with Gd-DTPA contrast agent and fixative and imaged in a 9.4T magnet by two types of acquisition: 3D fast low angle shot (FLASH) imaging, voxels 50∈×∈50∈×∈50 $\mu$m, and 3D spin echo DTI with monopolar diffusion gradients of 3.6 ms duration at 11.5 ms separation, voxels 200∈×∈200∈×∈200 $\mu$m. The sensitivity of each approach to imaging parameters was explored. Results: The FLASH data showed laminar alignments of voxels with high signal, in keeping with the presumed predominance of contrast in the interstices between sheetlets. 
It was analysed, using structure-tensor (ST) analysis, to determine the most (v 1ST ), intermediate (v 2ST ) and least (v 3ST ) extended orthogonal directions of signal continuity. The DTI data was analysed to determine the most (e 1DTI ), intermediate (e 2DTI ) and least (e 3DTI ) orthogonal eigenvectors of extent of diffusion. The correspondence between the FLASH and DTI methods was measured and appraised. The most extended direction of FLASH signal (v 1ST ) agreed well with that of diffusion (e 1DTI ) throughout the left ventricle (representative discrepancy in the septum of 13.3∈±∈6.7°: median∈±∈absolute deviation) and both were in keeping with the expected local orientations of the long-axis of cardiomyocytes. However, the orientation of the least directions of FLASH signal continuity (v 3ST ) and diffusion (e 3ST ) showed greater discrepancies of up to 27.9∈±∈17.4°. Both FLASH (v 3ST ) and DTI (e 3DTI ) where compared to directly measured laminar arrays in the FLASH images. For FLASH the discrepancy between the structure-tensor calculated v 3ST and the directly measured FLASH laminar array normal was of 9∈±∈7° for the lateral wall and 7∈±∈9°for the septum (median∈±∈inter quartile range), and for DTI the discrepancy between the calculated v 3DTI and the directly measured FLASH laminar array normal was 22∈±∈14°and 61∈±∈53.4°. DTI was relatively insensitive to the number of diffusion directions and to time up to 72 hours post fixation, but was moderately affected by b-value (which was scaled by modifying diffusion gradient pulse strength with fixed gradient pulse separation). Optimal DTI parameters were b∈=∈1000 mm/s2 and 12 diffusion directions. FLASH acquisitions were relatively insensitive to the image processing parameters explored. Conclusions: We show that ST analysis of FLASH is a useful and accurate tool in the measurement of cardiac microstructure. 
While both FLASH and the DTI approaches appear promising for mapping of the alignments of myocytes throughout myocardium, marked discrepancies between the cross myocyte anisotropies deduced from each method call for consideration of their respective limitations. }, } |
2015 | Journal | Peter Bajcsy, Antonio Cardone, Joe Chalfoun, Michael Halter, Derek Juba, Marcin Kociolek, Michael Majurski, Adele Peskin, Carl Simon, Mylene Simon, Antoine Vandecreme, Mary Brady (2015). Survey statistics of automated segmentations applied to optical imaging of mammalian cells. BMC Bioinformatics, 16(1), pp. 28. (link) (bib) x @article{Bajcsy2015, year = { 2015 }, volume = { 16 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Survey statistics of automated segmentations applied to optical imaging of mammalian cells }, pages = { 28 }, number = { 1 }, keywords = { Accelerated execution of segmentation for high-thr,Cell segmentation,Cellular measurements,Segmentation evaluation,Segmented objects }, journal = { BMC Bioinformatics }, issn = { 14712105 }, doi = { 10.1186/s12859-015-0762-2 }, author = { Bajcsy and Cardone and Chalfoun and Halter and Juba and Kociolek and Majurski and Peskin and Simon and Simon and Vandecreme and Brady }, abstract = { Background: The goal of this survey paper is to overview cellular measurements using optical microscopy imaging followed by automated image segmentation. The cellular measurements of primary interest are taken from mammalian cells and their components. They are denoted as two- or three-dimensional (2D or 3D) image objects of biological interest. In our applications, such cellular measurements are important for understanding cell phenomena, such as cell counts, cell-scaffold interactions, cell colony growth rates, or cell pluripotency stability, as well as for establishing quality metrics for stem cell therapies. In this context, this survey paper is focused on automated segmentation as a software-based measurement leading to quantitative cellular measurements. Methods: We define the scope of this survey and a classification schema first. 
Next, all found and manually filteredpublications are classified according to the main categories: (1) objects of interests (or objects to be segmented), (2) imaging modalities, (3) digital data axes, (4) segmentation algorithms, (5) segmentation evaluations, (6) computational hardware platforms used for segmentation acceleration, and (7) object (cellular) measurements. Finally, all classified papers are converted programmatically into a set of hyperlinked web pages with occurrence and co-occurrence statistics of assigned categories. Results: The survey paper presents to a reader: (a) the state-of-the-art overview of published papers about automated segmentation applied to optical microscopy imaging of mammalian cells, (b) a classification of segmentation aspects in the context of cell optical imaging, (c) histogram and co-occurrence summary statistics about cellular measurements, segmentations, segmented objects, segmentation evaluations, and the use of computational platforms for accelerating segmentation execution, and (d) open research problems to pursue. Conclusions: The novel contributions of this survey paper are: (1) a new type of classification of cellular measurements and automated segmentation, (2) statistics about the published literature, and (3) a web hyperlinked interface to classification statistics of the surveyed papers at https://isg.nist.gov/deepzoomweb/resources/survey/index.html. }, } |
2015 | Journal | Francisco P.M. Oliveira, Miguel Castelo-Branco (2015). Computer-aided diagnosis of Parkinson's disease based on [123I]FP-CIT SPECT binding potential images, using the voxels-as-features approach and support vector machines. Journal of Neural Engineering, 12(2), pp. NA (bib) x @article{Oliveira2015, year = { 2015 }, volume = { 12 }, title = { Computer-aided diagnosis of Parkinson's disease based on [123I]FP-CIT SPECT binding potential images, using the voxels-as-features approach and support vector machines }, publisher = { Institute of Physics Publishing }, number = { 2 }, month = { apr }, keywords = { DaTSCAN,automated image analysis,binding potential,classification }, journal = { Journal of Neural Engineering }, issn = { 17412552 }, doi = { 10.1088/1741-2560/12/2/026008 }, author = { Oliveira and Castelo-Branco }, abstract = { Objective. The aim of the present study was to develop a fully-automated computational solution for computer-aided diagnosis in Parkinson syndrome based on [123I]FP-CIT single photon emission computed tomography (SPECT) images. Approach. A dataset of 654 [123I]FP-CIT SPECT brain images from the Parkinson's Progression Markers Initiative were used. Of these, 445 images were of patients with Parkinson's disease at an early stage and the remainder formed a control group. The images were pre-processed using automated template-based registration followed by the computation of the binding potential at a voxel level. Then, the binding potential images were used for classification, based on the voxel-as-feature approach and using the support vector machines paradigm. Main results. The obtained estimated classification accuracy was 97.86{\%}, the sensitivity was 97.75{\%} and the specificity 98.09{\%}. Significance. The achieved classification accuracy was very high and, in fact, higher than accuracies found in previous studies reported in the literature. 
In addition, results were obtained on a large dataset of early Parkinson's disease subjects. In summation, the information provided by the developed computational solution potentially supports clinical decision-making in nuclear medicine, using important additional information beyond the commonly used uptake ratios and respective statistical comparisons. (ClinicalTrials.gov Identifier: NCT01141023) }, } |
2015 | Journal | Alper Willführ, Christina Brandenberger, Tanja Piatkowski, Roman Grothausmann, Jens Randel Nyengaard, Matthias Ochs, Christian Mühlfeld (2015). Estimation of the number of alveolar capillaries by the euler number (Euler-poincaré characteristic). American Journal of Physiology - Lung Cellular and Molecular Physiology, 309(11), pp. L1286–L1293. (link) (bib) x @article{RN949, year = { 2015 }, volume = { 309 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84949526543{\&}doi=10.1152{\%}2Fajplung.00410.2014{\&}partnerID=40{\&}md5=56c6058ebfd732edb1588ab2e55b6ec9 }, type = { Journal Article }, title = { Estimation of the number of alveolar capillaries by the euler number (Euler-poincar{\'{e}} characteristic) }, pages = { L1286--L1293 }, number = { 11 }, keywords = { Capillary number,Euler number,Stereology }, journal = { American Journal of Physiology - Lung Cellular and Molecular Physiology }, issn = { 15221504 }, doi = { 10.1152/ajplung.00410.2014 }, author = { Willf{\"{u}}hr and Brandenberger and Piatkowski and Grothausmann and Nyengaard and Ochs and M{\"{u}}hlfeld }, abstract = { The lung parenchyma provides a maximal surface area of blood-containing capillaries that are in close contact with a large surface areaof the air-containing alveoli. Volume and surface area of capillaries are the classic stereological parameters to characterize the alveolar capillary network (ACN) and have provided essential structure-function information of the lung. When loss (rarefaction) or gain (angiogenesis) of capillaries occurs, these parameters may not be sufficient to provide mechanistic insight. Therefore, it would be desirable to estimate the number of capillaries, as it contains more distinct and mechanistically oriented information. Here, we present a new stereological method to estimate the number of capillary loops in the ACN. One advantage of this method is that it is independent of the shape, size, or distribution of the capillaries. 
We used consecutive, 1 {\_}mthick sections from epoxy resin-embedded material as a physical disector. The Euler-Poincar{\'{e}} characteristic of capillary networks can be estimated by counting the easily recognizable topolog ical constellations of “islands,” “bridges,” and “holes.” The total number of capillary loops in the ACN can then be calculated from the Euler- Poincar{\'{e}} characteristic. With the use of the established estimator of alveolar number, it is possible to obtain the mean number of capillary loops per alveolus. In conclusion, estimation of alveolar capillaries by design-based stereology is an efficient and unbiased method to characterize the ACN and may be particularly useful for studies on emphysema, pulmonary hypertension, or lung development. }, } |
2015 | Journal | A. Vignati, S. Mazzetti, V. Giannini, F. Russo, E. Bollito, F. Porpiglia, M. Stasi, D. Regge (2015). Texture features on T2-weighted magnetic resonance imaging: New potential biomarkers for prostate cancer aggressiveness. Physics in Medicine and Biology, 60(7), pp. 2685–2701. (link) (bib) x @article{RN940, year = { 2015 }, volume = { 60 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84925609486{\&}doi=10.1088{\%}2F0031-9155{\%}2F60{\%}2F7{\%}2F2685{\&}partnerID=40{\&}md5=4b5ae2306deef2604c6595214b20549c }, type = { Journal Article }, title = { Texture features on T2-weighted magnetic resonance imaging: New potential biomarkers for prostate cancer aggressiveness }, pages = { 2685--2701 }, number = { 7 }, keywords = { ADC maps,GLCM texture feature,T2-weighted MR imaging,pathologic Gleason score correlation,prostate cancer aggressiveness }, journal = { Physics in Medicine and Biology }, issn = { 13616560 }, doi = { 10.1088/0031-9155/60/7/2685 }, author = { Vignati and Mazzetti and Giannini and Russo and Bollito and Porpiglia and Stasi and Regge }, abstract = { To explore contrast (C) and homogeneity (H) gray-level co-occurrence matrix texture features on T2-weighted (T2w) Magnetic Resonance (MR) images and apparent diffusion coefficient (ADC) maps for predicting prostate cancer (PCa) aggressiveness, and to compare them with traditional ADC metrics for differentiating low- from intermediate/high-grade PCas. The local Ethics Committee approved this prospective study of 93 patients (median age, 65 years), who underwent 1.5 T multiparametric endorectal MR imaging before prostatectomy. Clinically significant (volume≥0.5 ml) peripheral tumours were outlined on histological sections, contoured on T2w and ADC images, and their pathological Gleason Score (pGS) was recorded. 
C, H, and traditional ADC metrics (mean, median, 10th and 25th percentile) were calculated on the largest lesion slice, and correlated with the pGS through the Spearman correlation coefficient. The area under the receiver operating characteristic curve (AUC) assessed how parameters differentiate pGS = 6 from pGS≥7. The dataset included 49 clinically significant PCas with a balanced distribution of pGS. The Spearman $\rho$ and AUC values on ADC were:-0.489, 0.823 (mean);-0.522, 0.821 (median);-0.569, 0.854 (10th percentile);-0.556, 0.854 (25th percentile);-0.386, 0.871 (C); 0.533, 0.923 (H); while on T2w they were:-0.654, 0.945 (C); 0.645, 0.962 (H). AUC of H on ADC and T2w, and C on T2w were significantly higher than that of the mean ADC (p = 0.05). H and C calculated on T2w images outperform ADC parameters in correlating with pGS and differentiating low- from intermediate/high-risk PCas, supporting the role of T2w MR imaging in assessing PCa biological aggressiveness. }, } |
2015 | Journal | Tuomas Turpeinen, Markko Myllys, Pekka Kekalainen, Jussi Timonen (2015). Interface Detection Using a Quenched-Noise Version of the Edwards-Wilkinson Equation. IEEE Transactions on Image Processing, 24(12), pp. 5696–5705. (link) (bib) x @article{RN803, year = { 2015 }, volume = { 24 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Interface Detection Using a Quenched-Noise Version of the Edwards-Wilkinson Equation }, pages = { 5696--5705 }, number = { 12 }, keywords = { Gray-scale,Image segmentation,Mathematical model,Noise,Surface morphology,Surface topography,Three-dimensional displays }, journal = { IEEE Transactions on Image Processing }, issn = { 10577149 }, doi = { 10.1109/TIP.2015.2484061 }, author = { Turpeinen and Myllys and Kekalainen and Timonen }, abstract = { We report here a multipurpose dynamic-interface-based segmentation tool, suitable for segmenting planar, cylindrical, and spherical surfaces in 3D. The method is fast enough to be used conveniently even for large images. Its implementation is straightforward and can be easily realized in many environments. Its memory consumption is low, and the set of parameters is small and easy to understand. The method is based on the Edwards-Wilkinson equation, which is traditionally used to model the equilibrium fluctuations of a propagating interface under the influence of temporally and spatially varying noise. We report here an adaptation of this equation into multidimensional image segmentation, and its efficient discretization. }, } |
2015 | Journal | Chun Chien Shieh, Paul J. Keall, Zdenka Kuncic, Chen Yu Huang, Ilana Feain (2015). Markerless tumor tracking using short kilovoltage imaging arcs for lung image-guided radiotherapy. Physics in Medicine and Biology, 60(24), pp. 9437–9454. (link) (bib) x @article{RN962, year = { 2015 }, volume = { 60 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84957922433{\&}doi=10.1088{\%}2F0031-9155{\%}2F60{\%}2F24{\%}2F9437{\&}partnerID=40{\&}md5=b33e8bc47270bdc2d818b5af2cf039d3 }, type = { Journal Article }, title = { Markerless tumor tracking using short kilovoltage imaging arcs for lung image-guided radiotherapy }, pages = { 9437--9454 }, number = { 24 }, keywords = { IGRT,lung cancer,markerless,tumor tracking }, journal = { Physics in Medicine and Biology }, issn = { 13616560 }, doi = { 10.1088/0031-9155/60/24/9437 }, author = { Shieh and Keall and Kuncic and Huang and Feain }, abstract = { The ability to monitor tumor motion without implanted markers is clinically advantageous for lung image-guided radiotherapy (IGRT). Existing markerless tracking methods often suffer from overlapping structures and low visibility of tumors on kV projection images. We introduce the short arc tumor tracking (SATT) method to overcome these issues. The proposed method utilizes multiple kV projection images selected from a nine-degree imaging arc to improve tumor localization, and respiratory-correlated 4D cone-beam CT (CBCT) prior knowledge to minimize the effects of overlapping anatomies. The 3D tumor position is solved as an optimization problem with prior knowledge incorporated via regularization. We retrospectively validated SATT on 11 clinical scans from four patients with central tumors. These patients represent challenging scenarios for markerless tumor tracking due to the inferior adjacent contrast. The 3D trajectories of implanted fiducial markers were used as the ground truth for tracking accuracy evaluation. 
In all cases, the tumors were successfully tracked at all gantry angles. Compared to standard pre-treatment CBCT guidance alone, trajectory errors were significantly smaller with tracking in all cases, and the improvements were the most prominent in the superior-inferior direction. The mean 3D tracking error ranged from 2.2-9.9 mm, which was 0.4-2.6 mm smaller compared to pre-treatment CBCT. In conclusion, we were able to directly track tumors with inferior visibility on kV projection images using SATT. Tumor localization accuracies are significantly better with tracking compared to the current standard of care of lung IGRT. Future work involves the prospective evaluation and clinical implementation of SATT. }, } |
2015 | Journal | Micha\"el Sdika (2015). Enhancing atlas based segmentation with multiclass linear classifiers. Medical Physics, 42(12), pp. 7169–7181. (link) (bib) x @article{RN781, year = { 2015 }, volume = { 42 }, url = { https://doi.org/10.1118/1.4935946 }, type = { Journal Article }, title = { Enhancing atlas based segmentation with multiclass linear classifiers }, pages = { 7169--7181 }, number = { 12 }, keywords = { atlas based segmentation,machine learning,multiple atlas,support vector machine }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4935946 }, author = { Sdika }, abstract = { Purpose: To present a method to enrich atlases for atlas based segmentation. Such enriched atlases can then be used as a single atlas or within a multiatlas framework. Methods: In this paper, machine learning techniques have been used to enhance the atlas based segmentation approach. The enhanced atlas defined in this work is a pair composed of a gray level image alongside an image of multiclass classifiers with one classifier per voxel. Each classifier embeds local information from the whole training dataset that allows for the correction of some systematic errors in the segmentation and accounts for the possible local registration errors. The authors also propose to use these images of classifiers within a multiatlas framework: results produced by a set of such local classifier atlases can be combined using a label fusion method. Results: Experiments have been made on the in vivo images of the IBSR dataset and a comparison has been made with several state-of-the-art methods such as FreeSurfer and the multiatlas nonlocal patch based method of Coup{\'{e}} or Rousseau. These experiments show that their method is competitive with state-of-the-art methods while having a low computational cost. Further enhancement has also been obtained with a multiatlas version of their method. It is also shown that, in this case, nonlocal fusion is unnecessary. 
The multiatlas fusion can therefore be done efficiently. Conclusions: The single atlas version has similar quality as state-of-the-arts multiatlas methods but with the computational cost of a naive single atlas segmentation. The multiatlas version offers an improvement in quality and can be done efficiently without a nonlocal strategy. }, } |
2015 | Journal | Bastien Rigaud, Antoine Simon, Jo\"el Castelli, Maxime Gobeli, Juan David Ospina Arango, Guillaume Cazoulat, Olivier Henry, Pascal Haigron, Renaud De Crevoisier (2015). Evaluation of deformable image registration methods for dose monitoring in head and neck radiotherapy. BioMed Research International, 2015, pp. NA (link) (bib) x @article{RN948, year = { 2015 }, volume = { 2015 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84924164245{\&}doi=10.1155{\%}2F2015{\%}2F726268{\&}partnerID=40{\&}md5=d9d5d935280b872c6cac8a60ef5e6f65 }, type = { Journal Article }, title = { Evaluation of deformable image registration methods for dose monitoring in head and neck radiotherapy }, journal = { BioMed Research International }, issn = { 23146141 }, doi = { 10.1155/2015/726268 }, author = { Rigaud and Simon and Castelli and Gobeli and {Ospina Arango} and Cazoulat and Henry and Haigron and {De Crevoisier} }, abstract = { In the context of head and neck cancer (HNC) adaptive radiation therapy (ART), the two purposes of the study were to compare the performance of multiple deformable image registration (DIR) methods and to quantify their impact for dose accumulation, in healthy structures. Fifteen HNC patients had a planning computed tomography (CT0) and weekly CTs during the 7 weeks of intensity-modulated radiation therapy (IMRT). Ten DIR approaches using different registration methods (demons or B-spline free form deformation (FFD)), preprocessing, and similarity metrics were tested. Two observers identified 14 landmarks (LM) on each CT-scan to compute LM registration error. The cumulated doses estimated by each method were compared. The two most effective DIR methods were the demons and the FFD, with both the mutual information (MI) metric and the filtered CTs. The corresponding LM registration accuracy (precision) was 2.44 mm (1.30 mm) and 2.54 mm (1.33 mm), respectively. 
The corresponding LM estimated cumulated dose accuracy (dose precision) was 0.85 Gy (0.93 Gy) and 0.88 Gy (0.95 Gy), respectively. The mean uncertainty (difference between maximal and minimal dose considering all the 10 methods) to estimate the cumulated mean dose to the parotid gland (PG) was 4.03 Gy (SD = 2.27 Gy, range: 1.06-8.91 Gy). }, } |
2015 | Journal | Dominik Neumann, Sasa Grbic, Matthias John, Nassir Navab, Joachim Hornegger, Razvan Ionasec (2015). Probabilistic sparse matching for robust 3D/3D fusion in minimally invasive surgery. IEEE Transactions on Medical Imaging, 34(1), pp. 49–60. (link) (bib) x @article{RN947, year = { 2015 }, volume = { 34 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84920159378{\&}doi=10.1109{\%}2FTMI.2014.2343936{\&}partnerID=40{\&}md5=a81867f10d05f5ddb53549f54f45df01 }, type = { Journal Article }, title = { Probabilistic sparse matching for robust 3D/3D fusion in minimally invasive surgery }, pmid = { 25095250 }, pages = { 49--60 }, number = { 1 }, keywords = { Anatomical overlay,Procedure guidance,computed tomography (CT),model-based cardiac image registration }, journal = { IEEE Transactions on Medical Imaging }, issn = { 1558254X }, doi = { 10.1109/TMI.2014.2343936 }, author = { Neumann and Grbic and John and Navab and Hornegger and Ionasec }, abstract = { Classical surgery is being overtaken by minimally invasive and transcatheter procedures. As there is no direct view or access to the affected anatomy, advanced imaging techniques such as 3D C-arm computed tomography (CT) and C-arm fluoroscopy are routinely used in clinical practice for intraoperative guidance. However, due to constraints regarding acquisition time and device configuration, intraoperative modalities have limited soft tissue image quality and reliable assessment of the cardiac anatomy typically requires contrast agent, which is harmful to the patient and requires complex acquisition protocols. We propose a probabilistic sparse matching approach to fuse high-quality preoperative CT images and nongated, noncontrast intraoperative C-arm CT images by utilizing robust machine learning and numerical optimization techniques. 
Thus, high-quality patient-specific models can be extracted from the preoperative CT and mapped to the intraoperative imaging environment to guide minimally invasive procedures. Extensive quantitative experiments on 95 clinical datasets demonstrate that our model-based fusion approach has an average execution time of 1.56 s, while the accuracy of 5.48 mm between the anchor anatomy in both images lies within expert user confidence intervals. In direct comparison with image-to-image registration based on an open-source state-of-the-art medical imaging library and a recently proposed quasi-global, knowledge-driven multi-modal fusion approach for thoracic-abdominal images, our model-based method exhibits superior performance in terms of registration accuracy and robustness with respect to both target anatomy and anchor anatomy alignment errors. }, } |
2015 | Journal | S. Koho, T. Deguchi, P. E. Hänninen (2015). A software tool for tomographic axial superresolution in STED microscopy. Journal of Microscopy, 260(2), pp. 208–218. (link) (bib) x @article{RN946, year = { 2015 }, volume = { 260 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84945471664{\&}doi=10.1111{\%}2Fjmi.12287{\&}partnerID=40{\&}md5=a793356576a358ca87cce6a3b305aea9 }, type = { Journal Article }, title = { A software tool for tomographic axial superresolution in STED microscopy }, pages = { 208--218 }, number = { 2 }, keywords = { Axial tomography,Image fusion,Image processing,Image registration,Open source software,Reconstruction algorithms,STED,Superresolution microscopy }, journal = { Journal of Microscopy }, issn = { 13652818 }, doi = { 10.1111/jmi.12287 }, author = { Koho and Deguchi and H{\"{a}}nninen }, abstract = { A method for generating three-dimensional tomograms from multiple three-dimensional axial projections in STimulated Emission Depletion (STED) superresolution microscopy is introduced. Our STED{\textless} method, based on the use of a micromirror placed on top of a standard microscopic sample, is used to record a three-dimensional projection at an oblique angle in relation to the main optical axis. Combining the STED{\textless} projection with the regular STED image into a single view by tomographic reconstruction, is shown to result in a tomogram with three-to-four-fold improved apparent axial resolution. Registration of the different projections is based on the use of a mutual-information histogram similarity metric. Fusion of the projections into a single view is based on Richardson-Lucy iterative deconvolution algorithm, modified to work with multiple projections. 
Our tomographic reconstruction method is demonstrated to work with real biological STED superresolution images, including a data set with a limited signal-to-noise ratio (SNR); the reconstruction software (SuperTomo) and its source code will be released under BSD open-source license. }, } |
2015 | Journal | A. Kaceniauskas, R. Pacevic, M. Sta\vskuniene, V. Starikovicius, G. Davidavicius (2015). Development of cloud software services for computational analysis of blood flows. Civil-Comp Proceedings, 107, pp. NA (link) (bib) x @article{RN945, year = { 2015 }, volume = { 107 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84971597818{\&}partnerID=40{\&}md5=313cdcaa420e54b4e9dad1c9f2228cc8 }, type = { Journal Article }, title = { Development of cloud software services for computational analysis of blood flows }, keywords = { ANSYS Fluent,Aortic valve,Blood flows,Cloud computing,OpenStack,Performance analysis,Software as a service }, journal = { Civil-Comp Proceedings }, issn = { 17593433 }, doi = { 10.4203/ccp.107.23 }, author = { Kaceniauskas and Pacevic and Sta{\v{s}}kuniene and Starikovicius and Davidavicius }, abstract = { This paper presents the development of the cloud software services for computational analysis of blood flows on a private university cloud. The main focus is on the software service level built on the top of the computational platform provided. Moreover, user friendly management tools have been developed by using the Apache jclouds API to enhance the management of OpenStack cloud infrastructure and to increase the accessibility of engineering software. The blood flow through an aortic valve is considered as a pilot application of the private cloud infrastructure. The investigated flows can be described using numerical models based on viscous incompressible Navier-Stokes equations. The modelling software environment based on ANSYS Fluent is developed as a software service (SaaS) for the numerical analysis of low flow, low pressure gradient aortic stenosis. The performance of the developed cloud infrastructure has been assessed testing CPU, memory IO, disk IO, network and the developed software service for computations of blood flow through an aortic valve. 
The results obtained have been compared with the performance obtained using the native hardware. }, } |
2015 | Journal | Valentina Giannini, Simone Mazzetti, Anna Vignati, Filippo Russo, Enrico Bollito, Francesco Porpiglia, Michele Stasi, Daniele Regge (2015). A fully automatic computer aided diagnosis system for peripheral zone prostate cancer detection using multi-parametric magnetic resonance imaging. Computerized Medical Imaging and Graphics, 46, pp. 219–226. (link) (bib) x @article{RN939, year = { 2015 }, volume = { 46 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84983127840{\&}doi=10.1016{\%}2Fj.compmedimag.2015.09.001{\&}partnerID=40{\&}md5=3a43e32746c5917a989d8eb7f9942bb2 }, type = { Journal Article }, title = { A fully automatic computer aided diagnosis system for peripheral zone prostate cancer detection using multi-parametric magnetic resonance imaging }, pages = { 219--226 }, keywords = { Computer aided detection,Image analysis,Multiparametric MRI,Prostate cancer,SVM classifier }, journal = { Computerized Medical Imaging and Graphics }, issn = { 18790771 }, doi = { 10.1016/j.compmedimag.2015.09.001 }, author = { Giannini and Mazzetti and Vignati and Russo and Bollito and Porpiglia and Stasi and Regge }, abstract = { Multiparametric (mp)-Magnetic Resonance Imaging (MRI) is emerging as a powerful test to diagnose and stage prostate cancer (PCa). However, its interpretation is a time consuming and complex feat requiring dedicated radiologists. Computer-aided diagnosis (CAD) tools could allow better integration of data deriving from the different MRI sequences in order to obtain accurate, reproducible, non-operator dependent information useful to identify and stage PCa. In this paper, we present a fully automatic CAD system conceived as a 2-stage process. First, a malignancy probability map for all voxels within the prostate is created. Then, a candidate segmentation step is performed to highlight suspected areas, thus evaluating both the sensitivity and the number of false positive (FP) regions detected by the system. 
Training and testing of the CAD scheme is performed using whole-mount histological sections as the reference standard. On a cohort of 56 patients (i.e. 65 lesions) the area under the ROC curve obtained during the voxel-wise step was 0.91, while, in the second step, a per-patient sensitivity of 97{\%} was reached, with a median number of FP equal to 3 in the whole prostate. The system here proposed could be potentially used as first or second reader to manage patients suspected to have PCa, thus reducing both the radiologist's reporting time and the inter-reader variability. As an innovative setup, it could also be used to help the radiologist in setting the MRI-guided biopsy target. }, } |
2015 | Journal | Chantal M.J. de Bakker, Allison R. Altman, Wei Ju Tseng, Mary Beth Tribble, Connie Li, Abhishek Chandra, Ling Qin, X. Sherry Liu (2015). $\mu$CT-based, in vivo dynamic bone histomorphometry allows 3D evaluation of the early responses of bone resorption and formation to PTH and alendronate combination therapy. Bone, 73, pp. 198–207. (link) (bib) x @article{RN941, year = { 2015 }, volume = { 73 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84920973040{\&}doi=10.1016{\%}2Fj.bone.2014.12.061{\&}partnerID=40{\&}md5=ce16efe68357b4c7c5a21d428c6efe79 }, type = { Journal Article }, title = { $\mu$CT-based, in vivo dynamic bone histomorphometry allows 3D evaluation of the early responses of bone resorption and formation to PTH and alendronate combination therapy }, pages = { 198--207 }, keywords = { Animal models/rodent,Anti-resorptive treatment,Bone stiffness,In vivo $\mu$CT,Parathyroid hormone,Trabecular bone microstructure }, journal = { Bone }, issn = { 87563282 }, doi = { 10.1016/j.bone.2014.12.061 }, author = { Bakker and Altman and Tseng and Tribble and Li and Chandra and Qin and Liu }, abstract = { Current osteoporosis treatments improve bone mass by increasing net bone formation: anti-resorptive drugs such as bisphosphonates block osteoclast activity, while anabolic agents such as parathyroid hormone (PTH) increase bone remodeling, with a greater effect on formation. Although these drugs are widely used, their role in modulating formation and resorption is not fully understood, due in part to technical limitations in the ability to longitudinally assess bone remodeling. Importantly, it is not known whether or not PTH-induced bone formation is independent of resorption, resulting in controversy over the effectiveness of combination therapies that use both PTH and an anti-resorptive. 
In this study, we developed a $\mu$CT-based, in vivo dynamic bone histomorphometry technique for rat tibiae, and applied this method to longitudinally track changes in bone resorption and formation as a result of treatment with alendronate (ALN), PTH, or combination therapy of both PTH and ALN (PTH+ALN). Correlations between our $\mu$CT-based measures of bone formation and measures of bone formation based on calcein-labeled histology (r = 0.72-0.83) confirm the accuracy of this method. Bone remodeling parameters measured through $\mu$CT-based in vivo dynamic bone histomorphometry indicate an increased rate of bone formation in rats treated with PTH and PTH+ALN, together with a decrease in bone resorption measures in rats treated with ALN and PTH+ALN. These results were further supported by traditional histology-based measurements, suggesting that PTH was able to induce bone formation while bone resorption was suppressed. }, } |
2015 | Journal | Iman Aganj, Martin Reuter, Mert R. Sabuncu, Bruce Fischl (2015). Avoiding symmetry-breaking spatial non-uniformity in deformable image registration via a quasi-volume-preserving constraint. NeuroImage, 106, pp. 238–251. (link) (bib) x @article{RN973, year = { 2015 }, volume = { 106 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84920130398{\&}doi=10.1016{\%}2Fj.neuroimage.2014.10.059{\&}partnerID=40{\&}md5=537fee2b2ea9e7af63a87f230be26ded }, type = { Journal Article }, title = { Avoiding symmetry-breaking spatial non-uniformity in deformable image registration via a quasi-volume-preserving constraint }, pages = { 238--251 }, keywords = { Deformable image registration,Integral non-uniformity,Inverse-consistency,Symmetry,Volume-preserving constraints }, journal = { NeuroImage }, issn = { 10959572 }, doi = { 10.1016/j.neuroimage.2014.10.059 }, author = { Aganj and Reuter and Sabuncu and Fischl }, abstract = { The choice of a reference image typically influences the results of deformable image registration, thereby making it asymmetric. This is a consequence of a spatially non-uniform weighting in the cost function integral that leads to general registration inaccuracy. The inhomogeneous integral measure - which is the local volume change in the transformation, thus varying through the course of the registration - causes image regions to contribute differently to the objective function. More importantly, the optimization algorithm is allowed to minimize the cost function by manipulating the volume change, instead of aligning the images. The approaches that restore symmetry to deformable registration successfully achieve inverse-consistency, but do not eliminate the regional bias that is the source of the error. In this work, we address the root of the problem: the non-uniformity of the cost function integral. 
We introduce a new quasi-volume-preserving constraint that allows for volume change only in areas with well-matching image intensities, and show that such a constraint puts a bound on the error arising from spatial non-uniformity. We demonstrate the advantages of adding the proposed constraint to standard (asymmetric and symmetrized) demons and diffeomorphic demons algorithms through experiments on synthetic images, and real X-ray and 2D/3D brain MRI data. Specifically, the results show that our approach leads to image alignment with more accurate matching of manually defined neuroanatomical structures, better tradeoff between image intensity matching and registration-induced distortion, improved native symmetry, and lower susceptibility to local optima. In summary, the inclusion of this space- and time-varying constraint leads to better image registration along every dimension that we have measured it. }, } |
2015 | In Conf. Proceedings | Regina E Y Kim, Peg Nopoulos, Jane Paulsen, Hans Johnson (2015). Efficient and extensible workflow: Reliable whole brain segmentation for large-scale, multi-center longitudinal human MRI analysis using high performance/throughput computing resources. In Workshop on Clinical Image-Based Procedures, pp. 54–61. (bib) x @inproceedings{kim2015efficient, year = { 2015 }, title = { Efficient and extensible workflow: Reliable whole brain segmentation for large-scale, multi-center longitudinal human MRI analysis using high performance/throughput computing resources }, pages = { 54--61 }, organization = { Springer, Cham }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Kim et al/Workshop on Clinical Image-Based Procedures/Kim et al. - 2015 - Efficient and extensible workflow Reliable whole brain segmentation for large-scale, multi-center longitudinal human.pdf:pdf }, booktitle = { Workshop on Clinical Image-Based Procedures }, author = { Kim and Nopoulos and Paulsen and Johnson }, } |
2015 | In Conf. Proceedings | Stephan Meesters, Pauly Ossenblok, Albert Colon, Olaf Schijns, Luc Florack, Paul Boon, Louis Wagner, Andrea Fuster (2015). Automated identification of intracranial depth electrodes in computed tomography data. In Proceedings - International Symposium on Biomedical Imaging, pp. 976–979, New York. (link) (bib) x @inproceedings{Meesters2015, year = { 2015 }, volume = { 2015-July }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84944328676{\&}doi=10.1109{\%}2FISBI.2015.7164034{\&}partnerID=40{\&}md5=3989bdde301290c2ae9d67f2142f6235 }, type = { Conference Proceedings }, title = { Automated identification of intracranial depth electrodes in computed tomography data }, series = { IEEE International Symposium on Biomedical Imaging }, publisher = { IEEE }, pages = { 976--979 }, keywords = { Computed tomography (CT),Computer-aided detection and diagnosis (CAD),Visualization }, issn = { 19458452 }, isbn = { 9781479923748 }, doi = { 10.1109/ISBI.2015.7164034 }, booktitle = { Proceedings - International Symposium on Biomedical Imaging }, author = { Meesters and Ossenblok and Colon and Schijns and Florack and Boon and Wagner and Fuster }, address = { New York }, abstract = { Intracranial depth electrodes are commonly used to identify the regions of the brain that are responsible for epileptic seizures. Knowledge of the exact location of the electrodes is important as to properly interpret the EEG in relation to the anatomy. In order to provide fast and accurate identification of these electrodes, a procedure has been developed for automatic detection and localization in computed tomography data. Results indicate that in the vast majority of cases the depth electrodes can be automatically found. The localization of the electrodes versus the anatomy showed an acceptably small error when compared to manual positioning. 
Furthermore, interactive visualization software is developed to show the detected electrodes together with pre-operative MRI images, which enables the physician to confirm that the electrode is placed at the expected anatomical location. }, } |
2015 | In Conf. Proceedings | Sébastien Tourbier, Patric Hagmann, Maud Cagneaux, Laurent Guibaud, Subrahmanyam Gorthi, Marie Schaer, Jean-Philippe Thiran, Reto Meuli, Meritxell Bach Cuadra (2015). Automatic brain extraction in fetal MRI using multi-atlas-based segmentation. In Medical Imaging 2015: Image Processing, pp. 94130Y, Bellingham. (link) (bib) x @inproceedings{Tourbier2015a, year = { 2015 }, volume = { 9413 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84943378886{\&}doi=10.1117{\%}2F12.2081777{\&}partnerID=40{\&}md5=1e5911cbeee0e1cf86d55e9988c697b9 }, type = { Conference Proceedings }, title = { Automatic brain extraction in fetal MRI using multi-atlas-based segmentation }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 94130Y }, issn = { 16057422 }, isbn = { 9781628415032 }, editor = { Ourselin, S{\'{e}}bastien and Styner, Martin A. }, doi = { 10.1117/12.2081777 }, booktitle = { Medical Imaging 2015: Image Processing }, author = { Tourbier and Hagmann and Cagneaux and Guibaud and Gorthi and Schaer and Thiran and Meuli and {Bach Cuadra} }, address = { Bellingham }, abstract = { {\textcopyright} 2015 SPIE. In fetal brain MRI, most of the high-resolution reconstruction algorithms rely on brain segmentation as a preprocessing step. Manual brain segmentation is however highly time-consuming and therefore not a realistic solution. In this work, we assess on a large dataset the performance of Multiple Atlas Fusion (MAF) strategies to automatically address this problem. Firstly, we show that MAF significantly increase the accuracy of brain segmentation as regards single-atlas strategy. Secondly, we show that MAF compares favorably with the most recent approach (Dice above 0.90). Finally, we show that MAF could in turn provide an enhancement in terms of reconstruction quality. }, } |
2015 | In Conf. Proceedings | Caroline Magnain, Jean C. Augustinack, Ender Konukoglu, David Boas, Bruce Fischl (2015). Visualization of the cytoarchitecture of Ex vivo human brain by optical coherence tomography. In Optics and the Brain, BRAIN 2015, pp. NA (link) (bib) x @inproceedings{Magnain, year = { 2015 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84930804493{\&}partnerID=40{\&}md5=a7e7e7e3b1f380d82e3ea7c3f1960a06 }, type = { Conference Proceedings }, title = { Visualization of the cytoarchitecture of Ex vivo human brain by optical coherence tomography }, isbn = { 9781557529541 }, doi = { 10.1364/brain.2015.brt4b.5 }, booktitle = { Optics and the Brain, BRAIN 2015 }, author = { Magnain and Augustinack and Konukoglu and Boas and Fischl }, abstract = { Optical coherence tomography visualizes the structure of the human brain, from the cortical laminar structure to the individual neurons. }, } |
2015 | In Conf. Proceedings | Kuldeep Kumar Khajwaniya, Vibha Tiwari (2015). Satellite image denoising using Weiner filter with SPEA2 algorithm. In Proceedings of 2015 IEEE 9th International Conference on Intelligent Systems and Control, ISCO 2015, pp. NA (link) (bib) x @inproceedings{Khajwaniya, year = { 2015 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84959108119{\&}doi=10.1109{\%}2FISCO.2015.7282324{\&}partnerID=40{\&}md5=3cac197663c657dd57bf87eb47941101 }, type = { Conference Proceedings }, title = { Satellite image denoising using Weiner filter with SPEA2 algorithm }, keywords = { Bilateral Filter,Image denoising,SPEA2 Algorithm,Weiner Filter }, isbn = { 9781479964802 }, doi = { 10.1109/ISCO.2015.7282324 }, booktitle = { Proceedings of 2015 IEEE 9th International Conference on Intelligent Systems and Control, ISCO 2015 }, author = { Khajwaniya and Tiwari }, abstract = { The paper proposes a new methodology in order to improve the quality of satellite images. It is demonstrated that it is possible to achieve a better performance than that of Bilateral filter in a variety of noise levels. We have proposed Weiner filter in accordance with SPEA2 algorithm which removes pre-filtering and high noise level: therefore it improves the Peak Signal-to-Noise Ratio (PSNR) and visual quality gets improved and complexities and processing time are reduced. This improved algorithm is extended and used to denoise satellite images. Output results show that the performance has upgraded in comparison with current methods of denoising satellite. }, } |
2015 | In Conf. Proceedings | Christophe Van Dijck, Faes Kerkhof, Evie Vereecke, Roel Wirix-Speetjens, Jos Vander Sloten (2015). Segmentation of 4D CT bone images by sequential registration. In Proceedings - International Symposium on Biomedical Imaging, pp. 621–624. (link) (bib) x @inproceedings{RN942, year = { 2015 }, volume = { 2015-July }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84944313955{\&}doi=10.1109{\%}2FISBI.2015.7163950{\&}partnerID=40{\&}md5=7b264d4c8cefb0383cb157bd19741c43 }, type = { Conference Proceedings }, title = { Segmentation of 4D CT bone images by sequential registration }, publisher = { IEEE Computer Society }, pages = { 621--624 }, keywords = { 4D CT,Registration,Segmentation }, issn = { 19458452 }, isbn = { 9781479923748 }, doi = { 10.1109/ISBI.2015.7163950 }, booktitle = { Proceedings - International Symposium on Biomedical Imaging }, author = { {Van Dijck} and Kerkhof and Vereecke and Wirix-Speetjens and {Vander Sloten} }, abstract = { The introduction of 4D image acquisition techniques has made it possible to analyse anatomical motion in vivo. With 4D computed tomography (CT), it is now possible to study the motion of joints leading to a deeper understanding of the role of morphology on joint motion and a better assessment of pathologies. Although 4D CT shows a lot of opportunities, the workload to process these 4D acquisitions has increased dramatically. A major part of this process is segmentation, the delineation of the objects of interest within the image volume. This paper presents an algorithm to accelerate this step by registering the segmentation of one frame onto the others. This results in a fast segmentation of the whole 4D dataset, all identical in shape. We show that the proposed algorithm is able to segment two carpal bones, the trapezoid and the scaphoid, with results close to a manual segmentation in less than 5{\%} of the processing time. }, } |
2015 | In Conf. Proceedings | A. Nikonorov, A. Kolsanov, M. Petrov, Y. Yuzifovich, E. Prilepin, K. Bychenkov (2015). Contrast-to-noise based metric of denoising algorithms for liver vein segmentation. In SIGMAP 2015 - 12th International Conference on Signal Processing and Multimedia Applications, Proceedings; Part of 12th International Joint Conference on e-Business and Telecommunications, ICETE 2015, pp. 59–67. (link) (bib) x @inproceedings{RN944, year = { 2015 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84970959837{\&}partnerID=40{\&}md5=0d3090fef893d468c72b58001dc5eceb }, type = { Conference Proceedings }, title = { Contrast-to-noise based metric of denoising algorithms for liver vein segmentation }, publisher = { SciTePress }, pages = { 59--67 }, keywords = { CUDA,Contrast to noise ratio,Fast marching,GPGPU,Geodesic active contours,Liver,Proximal algorithms,Total variance de-noising,Vessels segmentation,Xeon phi }, isbn = { 9789897581182 }, editor = { [object Object],[object Object],[object Object] }, doi = { 10.5220/0005542400590067 }, booktitle = { SIGMAP 2015 - 12th International Conference on Signal Processing and Multimedia Applications, Proceedings; Part of 12th International Joint Conference on e-Business and Telecommunications, ICETE 2015 }, author = { Nikonorov and Kolsanov and Petrov and Yuzifovich and Prilepin and Bychenkov }, abstract = { We analyse CT image denoising when applied to vessel segmentation. Proposed semi-global quality metric based on the contrast-to-noise ratio allowed us to estimate initial image quality and efficiency of denoising procedures without prior knowledge about a noise-free image. We show that the total variance filtering in L1 metric provides the best denoising when compared to other well-known denoising procedures such as nonlocal means denoising or anisotropic diffusion. 
Computational complexity of this denoising algorithm is addressed by comparing its implementation for Intel MIC and for NVIDIA CUDA HPC systems. }, } |
2015 | In Conf. Proceedings | Lay Khoon Lee, Siau Chuin Liew (2015). A survey of medical image processing tools. In 2015 4th International Conference on Software Engineering and Computer Systems, ICSECS 2015: Virtuous Software Solutions for Big Data, pp. 171–176. (link) (bib) x @inproceedings{RN972, year = { 2015 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962073514{\&}doi=10.1109{\%}2FICSECS.2015.7333105{\&}partnerID=40{\&}md5=67e62e688838868692dd48230c893599 }, type = { Conference Proceedings }, title = { A survey of medical image processing tools }, publisher = { Institute of Electrical and Electronics Engineers Inc. }, pages = { 171--176 }, keywords = { computer vision,image processing,tools component }, isbn = { 9781467367226 }, editor = { [object Object],[object Object],[object Object],[object Object],[object Object],[object Object] }, doi = { 10.1109/ICSECS.2015.7333105 }, booktitle = { 2015 4th International Conference on Software Engineering and Computer Systems, ICSECS 2015: Virtuous Software Solutions for Big Data }, author = { Lee and Liew }, abstract = { A precise analysis of medical image is an important stage in the contouring phase throughout radiotherapy preparation. Medical images are mostly used as radiographic techniques in diagnosis, clinical studies and treatment planning Medical image processing tool are also similarly as important. With a medical image processing tool, it is possible to speed up and enhance the operation of the analysis of the medical image. This paper describes medical image processing software tool which attempts to secure the same kind of programmability advantage for exploring applications of the pipelined processors. These tools simulate complete systems consisting of several of the proposed processing components, in a configuration described by a graphical schematic diagram. In this paper, fifteen different medical image processing tools will be compared in several aspects. 
The main objective of the comparison is to gather and analysis on the tool in order to recommend users of different operating systems on what type of medical image tools to be used when analysing different types of imaging. A result table was attached and discussed in the paper. }, } |
2015 | In Conf. Proceedings | Alan Kuntz, Luis G. Torres, Richard H. Feins, Robert J. Webster, Ron Alterovitz (2015). Motion planning for a three-stage multilumen transoral lung access system. In IEEE International Conference on Intelligent Robots and Systems, pp. 3255–3261. (link) (bib) x @inproceedings{RN937, year = { 2015 }, volume = { 2015-Decem }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84958176573{\&}doi=10.1109{\%}2FIROS.2015.7353829{\&}partnerID=40{\&}md5=7513ff75c060dfaaf113a4487f0d9a72 }, type = { Conference Proceedings }, title = { Motion planning for a three-stage multilumen transoral lung access system }, publisher = { Institute of Electrical and Electronics Engineers Inc. }, pages = { 3255--3261 }, keywords = { Biomedical imaging,Biopsy,Electron tubes,Lungs,Needles,Planning,Robots }, issn = { 21530866 }, isbn = { 9781479999941 }, doi = { 10.1109/IROS.2015.7353829 }, booktitle = { IEEE International Conference on Intelligent Robots and Systems }, author = { Kuntz and Torres and Feins and Webster and Alterovitz }, abstract = { Lung cancer is the leading cause of cancer-related death, and early-stage diagnosis is critical to survival. Biopsy is typically required for a definitive diagnosis, but current low-risk clinical options for lung biopsy cannot access all biopsy sites. We introduce a motion planner for a multilumen transoral lung access system, a new system that has the potential to perform safe biopsies anywhere in the lung, which could enable more effective early-stage diagnosis of lung cancer. The system consists of three stages in which a bronchoscope is deployed transorally to the lung, a concentric tube robot pierces through the bronchial tubes into the lung parenchyma, and a steerable needle deploys through a properly oriented concentric tube and steers through the lung parenchyma to the target site while avoiding anatomical obstacles such as significant blood vessels. 
A sampling-based motion planner computes actions for each stage of the system and considers the coupling of the stages in an efficient manner. We demonstrate the motion planner's fast performance and ability to compute plans with high clearance from obstacles in simulated anatomical scenarios. }, } |
2015 | In Conf. Proceedings | Alejandro De Le\'on Cuevas, Sa\'ul Tovar-Arriaga, Arturo González-Gutiérrez, Marco Antonio Aceves-Fernández (2015). Trajectory planning for keyhole neurosurgery using fuzzy logic for risk evaluation. In 2015 12th International Conference on Electrical Engineering, Computing Science and Automatic Control, CCE 2015, pp. NA (link) (bib) x @inproceedings{RN936, year = { 2015 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962882000{\&}doi=10.1109{\%}2FICEEE.2015.7357927{\&}partnerID=40{\&}md5=2ec0aab6774734025966c01139e1fb7a }, type = { Conference Proceedings }, title = { Trajectory planning for keyhole neurosurgery using fuzzy logic for risk evaluation }, publisher = { Institute of Electrical and Electronics Engineers Inc. }, keywords = { Image guided surgery,Trajectory planning,artificial intelligence in surgery,fuzzy logic }, isbn = { 9781467378390 }, doi = { 10.1109/ICEEE.2015.7357927 }, booktitle = { 2015 12th International Conference on Electrical Engineering, Computing Science and Automatic Control, CCE 2015 }, author = { {De Le{\'{o}}n Cuevas} and Tovar-Arriaga and Gonz{\'{a}}lez-Guti{\'{e}}rrez and Aceves-Fern{\'{a}}ndez }, abstract = { Planning safe trajectories in keyhole neurosurgery requires a high level of accuracy in order to access to small structures either by biopsies, stimulating deep brain and others. We propose a computer system that carries out decision making based on rules using fuzzy logic to plan safe trajectories for preoperative neurosurgery. The processes to generate input values of membership functions, and implementation of the system for decision function will be explained. The results of risk weights for each candidate trajectory are evaluated and the safest calculated trajectories taking into account the risk structures that there are in the brain from the insertion points to the target point are visualized. }, } |
2015 | In Conf. Proceedings | Bastian Bier, Firas Mualla, Stefan Steidl, Christopher Bohr, Helmut Neumann, Andreas Maier, Joachim Hornegger (2015). Band-pass filter design by segmentation in frequency domain for detection of epithelial cells in endomicroscope images. In Informatik aktuell, pp. 413–418. (link) (bib) x @inproceedings{RN943, year = { 2015 }, volume = { 0 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85012231802{\&}doi=10.1007{\%}2F978-3-662-46224-9{\_}71{\&}partnerID=40{\&}md5=a2320e0e42ae26842cd9d13f26bc03b8 }, type = { Conference Proceedings }, title = { Band-pass filter design by segmentation in frequency domain for detection of epithelial cells in endomicroscope images }, publisher = { Kluwer Academic Publishers }, pages = { 413--418 }, issn = { 1431472X }, isbn = { 9783662462232 }, editor = { [object Object],[object Object],[object Object],[object Object] }, doi = { 10.1007/978-3-662-46224-9_71 }, booktitle = { Informatik aktuell }, author = { Bier and Mualla and Steidl and Bohr and Neumann and Maier and Hornegger }, abstract = { Voice hoarseness can have various reasons, one of them is a change of the vocal fold mucus. This change can be examined with micro endoscopes. Cell detection in these images is a difficult task, due to bad image quality, caused by noise and illumination variations. In previous works, it was observed that the repetitive pattern of the cell walls cause an elliptical shape in the Fourier domain [1, 2]. A manual segmentation and back transformation of this shape results in filtered images, where the cell detection is much easier [3]. The goal of this work is to automatically segment the elliptical shape in Fourier domain. Two different approaches are developed to get a suitable band-pass filter: a thresholding and an active contour method. After the band-pass filter is applied, the achieved results are superior to the manual segmentation case. }, } |
2014 | Book | E Pastorelli, H Herrmann (2014). Virtual Reality Visualization for Short Fibre Orientation Analysis, IEEE, 2014. (link) (bib) x @book{Pastorelli2014, year = { 2014 }, type = { Book }, title = { Virtual Reality Visualization for Short Fibre Orientation Analysis }, series = { 2014 Proceedings of the 14th Biennial Baltic Electronics Conference }, publisher = { IEEE }, pages = { 201--204 }, isbn = { 978-1-4673-9539-7 }, author = { Pastorelli and Herrmann }, address = { New York }, } |
2014 | Book chapter | S Tourbier, X Bresson, P Hagmann, J P Thiran, R Meuli, M B Cuadra (2014). Efficient Total Variation Algorithm for Fetal Brain MRI Reconstruction, in Medical Image Computing and Computer-Assisted Intervention - MICCAI 2014, Pt II, Edited by P Golland, N Hata, C Barillot, J Hornegger, R Howe, Springer-Verlag Berlin, pp. 252–259, Lecture Notes in Computer Science, Vol. 8674. (link) (bib) x @inbook{Tourbier2014, year = { 2014 }, volume = { 8674 }, type = { Book Section }, title = { Efficient Total Variation Algorithm for Fetal Brain MRI Reconstruction }, series = { Lecture Notes in Computer Science }, publisher = { Springer-Verlag Berlin }, pages = { 252--259 }, isbn = { 978-3-319-10470-6; 978-3-319-10469-0 }, editor = { Golland, P. and Hata, N. and Barillot, C. and Hornegger, J. and Howe, R. }, booktitle = { Medical Image Computing and Computer-Assisted Intervention - Miccai 2014, Pt Ii }, author = { Tourbier and Bresson and Hagmann and Thiran and Meuli and Cuadra }, address = { Berlin }, } |
2014 | Book chapter | J Zhang, H Sorby, J Clement, C D L Thomas, P Hunter, P Nielsen, D Lloyd, M Taylor, T Besier (2014). The MAP Client: User-Friendly Musculoskeletal Modelling Workflows, in Biomedical Simulation, Edited by F Bello, S Cotin, Springer International Publishing Ag, pp. 182–192, Lecture Notes in Computer Science, Vol. 8789. (link) (bib) x @inbook{Zhang2014, year = { 2014 }, volume = { 8789 }, type = { Book Section }, title = { The MAP Client: User-Friendly Musculoskeletal Modelling Workflows }, series = { Lecture Notes in Computer Science }, publisher = { Springer International Publishing Ag }, pages = { 182--192 }, isbn = { 978-3-319-12057-7; 978-3-319-12056-0 }, editor = { Bello, F. and Cotin, S. }, booktitle = { Biomedical Simulation }, author = { Zhang and Sorby and Clement and Thomas and Hunter and Nielsen and Lloyd and Taylor and Besier }, address = { Cham }, } |
2014 | Book chapter | Zhen Huan Zhou (2014). Comparison and assessment of different image registration algorithms based on ITK, in Applied Mechanics and Materials, Edited by G Yang, Trans Tech Publications Ltd, pp. 515–519, Applied Mechanics and Materials, Vol. 442, ISSN: 16609336. (link) (bib) x @inbook{Zhou2014, year = { 2014 }, volume = { 442 }, type = { Book Section }, title = { Comparison and assessment of different image registration algorithms based on ITK }, series = { Applied Mechanics and Materials }, publisher = { Trans Tech Publications Ltd }, pages = { 515--519 }, keywords = { Algorithms comparison,ITK,Image registration,Registration framework }, issn = { 16609336 }, isbn = { 9783037859018 }, editor = { Yang, G. }, doi = { 10.4028/www.scientific.net/AMM.442.515 }, booktitle = { Applied Mechanics and Materials }, author = { Zhou }, address = { Durnten-Zurich }, abstract = { A lot of image registration algorithms are proposed in recent year, among these algorithms, which one is better or faster than the other can be only validated by experiments. In this paper, ITK (Insight Segmentation and Registration Toolkit) is used for verifying different algorithms as a framework. ITK framework requires the following components: a fixed image, a moving image, a transform, a metric, an interpolator and an optimizer. Dozens of classical algorithms are tested under the same conditions and their experimental results are demonstrated with different metrics, interpolators or optimizers. By comparison of registration time and accuracy, those practical and useful algorithms are selected for developing software in image analysis. These kinds of experiments are very valuable for software engineering, they can shorten the cycle of software development and greatly reduce the development costs. {\textcopyright} (2014) Trans Tech Publications, Switzerland. }, } |
2014 | Book chapter | Peter R. Mouton (2014). NA in Quantitative anatomy using design-based stereology, NA pp. 217–228. (link) (bib) x @inbook{Veress2014, year = { 2014 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85054614475{\&}doi=10.1201{\%}2Fb17566{\&}partnerID=40{\&}md5=c910ea6ed3c6c6ab22591fe45986254f }, type = { Book Section }, title = { Quantitative anatomy using design-based stereology }, pages = { 217--228 }, isbn = { 9781466588141 }, doi = { 10.1201/b17566 }, booktitle = { Handbook of Imaging in Biological Mechanics }, author = { Mouton }, abstract = { In 1961, scientists from biology, geology, and material sciences convened a small meeting at a mountaintop retreat in Germany. Their purpose was to discuss an obscure problem common to all of their research: how to quantify the morphological features of arbitrary-shaped 3-D objects based on their 2-D appearance on cut surfaces. To describe this topic, they selected the term stereology, from the Greek stereos for “the study of objects in three dimensions,” and a year later formed the International Society for Stereology (ISS). After more than a decade of debating the relative merits of different stereology approaches in their respective fields of study, the ISS categorically rejected older methods based on assumption-and model-based Euclidean geometry in favor of innovative new methods designed to avoid methodological bias. Today unbiased or design-based stereology is the sine qua non for the reliable quantification of structure in many fields of biological and biomedical research. This chapter examines key concepts of design-based stereology using systematic random sampling and objective geometry probes in comparison with biased methods. To exemplify an application to neurosciences (neurostereology), we review the results and discussion from our recently published study to quantify differences in postmortem brains from autistic and normal children. 
Continuing advances in computer-assisted microscopy, image segmentation, and whole slide scanning support the growing trend toward higher throughput and greater efficiency in computerized stereology, with the promise of fully automatic and accurate stereoanalysis of tissue sections and in vivo images in the near future. }, } |
2014 | Journal | Ipek Oguz, Mahshid Farzinfar, Joy T. Matsui, Francois Budin, Zhexing Liu, Guido Gerig, Hans J. Johnson, Martin Styner (2014). DTIPrep: quality control of diffusion-weighted images. Frontiers in neuroinformatics, 8(January), pp. 4. (link) (bib) x @article{oguz2014dtiprep, year = { 2014 }, volume = { 8 }, url = { http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=3906573{\&}tool=pmcentrez{\&}rendertype=abstract http://journal.frontiersin.org/Journal/10.3389/fninf.2014.00004/full http://journal.frontiersin.org/article/10.3389/fninf.2014.00004/abstract{\%}0Ahttp://www }, title = { DTIPrep: quality control of diffusion-weighted images }, publisher = { Frontiers Media SA }, pmid = { 24523693 }, pages = { 4 }, number = { January }, keywords = { diffusion MRI,diffusion mri,diffusion tensor imaging,open-source,preprocessing,quality c,quality control,software }, journal = { Frontiers in neuroinformatics }, issn = { 1662-5196 }, isbn = { 1662-5196 (Electronic){\$}\backslash{\$}r1662-5196 (Linking) }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Oguz et al/Frontiers in neuroinformatics/Oguz et al. - 2014 - DTIPrep quality control of diffusion-weighted images.pdf:pdf }, doi = { 10.3389/fninf.2014.00004 }, author = { Oguz and Farzinfar and Matsui and Budin and Liu and Gerig and Johnson and Styner }, annote = { From Duplicate 1 (DTIPrep: quality control of diffusion-weighted images - Oguz, Ipek; Farzinfar, Mahshid; Matsui, Joy T.; Budin, Francois; Liu, Zhexing; Gerig, Guido; Johnson, Hans J.; Styner, Martin) From Duplicate 2 (DTIPrep: quality control of diffusion-weighted images - Oguz, Ipek; Farzinfar, Mahshid; Matsui, Joy T.; Budin, Francois; Liu, Zhexing; Gerig, Guido; Johnson, Hans J.; Styner, Martin) {\#}{\#}CONTRIBUTIONS: As a member of the National Alliance for Medical Imaging Computing (NAMIC) I collaborated on many software engineering projets. 
I had substantial contributions to the software methods development, interpretation of validation results for this work. I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} }, abstract = { In the last decade, diffusion MRI (dMRI) studies of the human and animal brain have been used to investigate a multitude of pathologies and drug-related effects in neuroscience research. Study after study identifies white matter (WM) degeneration as a crucial biomarker for all these diseases. The tool of choice for studying WM is dMRI. However, dMRI has inherently low signal-to-noise ratio and its acquisition requires a relatively long scan time; in fact, the high loads required occasionally stress scanner hardware past the point of physical failure. As a result, many types of artifacts implicate the quality of diffusion imagery. Using these complex scans containing artifacts without quality control (QC) can result in considerable error and bias in the subsequent analysis, negatively affecting the results of research studies using them. However, dMRI QC remains an under-recognized issue in the dMRI community as there are no user-friendly tools commonly available to comprehensively address the issue of dMRI QC. As a result, current dMRI studies often perform a poor job at dMRI QC. Thorough QC of dMRI will reduce measurement noise and improve reproducibility, and sensitivity in neuroimaging studies; this will allow researchers to more fully exploit the power of the dMRI technique and will ultimately advance neuroscience. Therefore, in this manuscript, we present our open-source software, DTIPrep, as a unified, user friendly platform for thorough QC of dMRI data. 
These include artifacts caused by eddy-currents, head motion, bed vibration and pulsation, venetian blind artifacts, as well as slice-wise and gradient-wise intensity inconsistencies. This paper summarizes a basic set of features of DTIPrep described earlier and focuses on newly added capabilities related to directional artifacts and bias analysis. }, } |
2014 | Journal | Brian B. Avants, Nicholas J. Tustison, Michael Stauffer, Gang Song, Baohua Wu, James C. Gee (2014). The Insight ToolKit image registration framework. Frontiers in Neuroinformatics, 8(APR), pp. NA (bib) x @article{Avants2014a, year = { 2014 }, volume = { 8 }, title = { The Insight ToolKit image registration framework }, number = { APR }, keywords = { Brain,Death,MRI,Open-source,Registration }, journal = { Frontiers in Neuroinformatics }, issn = { 16625196 }, doi = { 10.3389/fninf.2014.00044 }, author = { Avants and Tustison and Stauffer and Song and Wu and Gee }, abstract = { Publicly available scientific resources help establish evaluation standards, provide a platform for teaching and improve reproducibility. Version 4 of the Insight ToolKit (ITK4) seeks to establish new standards in publicly available image registration methodology. ITK4 makes several advances in comparison to previous versions of ITK. ITK4 supports both multivariate images and objective functions; it also unifies high-dimensional (deformation field) and low-dimensional (affine) transformations with metrics that are reusable across transform types and with composite transforms that allow arbitrary series of geometric mappings to be chained together seamlessly. Metrics and optimizers take advantage of multi-core resources, when available. Furthermore, ITK4 reduces the parameter optimization burden via principled heuristics that automatically set scaling across disparate parameter types (rotations vs. translations). A related approach also constrains steps sizes for gradient-based optimizers. The result is that tuning for different metrics and/or image pairs is rarely necessary allowing the researcher to more easily focus on design/comparison of registration strategies. 
In total, the ITK4 contribution is intended as a structure to support reproducible research practices, will provide a more extensive foundation against which to evaluate new work in image registration and also enable application level programmers a broad suite of tools on which to build. Finally, we contextualize this work with a reference registration evaluation study with application to pediatric brain labeling. {\textcopyright} 2014 Avants, Tustison, Stauffer, Song, Wuand Gee. }, } |
2014 | Journal | Eun Young Kim, Vincent A. Magnotta, Dawei Liu, Hans J. Johnson (2014). Stable Atlas-based Mapped Prior (STAMP) machine-learning segmentation for multicenter large-scale MRI data. Magnetic Resonance Imaging, 32(7), pp. 832–844. (link) (bib) x @article{Kim2014, year = { 2014 }, volume = { 32 }, url = { http://www.ncbi.nlm.nih.gov/pubmed/24818817 }, title = { Stable Atlas-based Mapped Prior (STAMP) machine-learning segmentation for multicenter large-scale MRI data }, publisher = { Elsevier Inc. }, pmid = { 24818817 }, pages = { 832--844 }, number = { 7 }, month = { may }, keywords = { Machine learning,Multicenter study,Random forest,Segmentation }, journal = { Magnetic Resonance Imaging }, issn = { 18735894 }, isbn = { 3193213152 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Kim et al/Magnetic Resonance Imaging/Kim et al. - 2014 - Stable Atlas-based Mapped Prior (STAMP) machine-learning segmentation for multicenter large-scale MRI data.pdf:pdf }, doi = { 10.1016/j.mri.2014.04.016 }, author = { Kim and Magnotta and Liu and Johnson }, annote = { From Duplicate 1 (Stable Atlas-based Mapped Prior (STAMP) machine-learning segmentation for multicenter large-scale MRI data - Kim, Regina EY; Magnotta, Vincent A.; Liu, Dawei; Johnson, Hans J.) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. 
:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (Stable Atlas-based Mapped Prior (STAMP) machine-learning segmentation for multicenter large-scale MRI data - Kim, Regina EY; Magnotta, Vincent A.; Liu, Dawei; Johnson, Hans J.; Kim, Eun Young; Magnotta, Vincent A.; Liu, Dawei; Johnson, Hans J.) From Duplicate 1 (Stable Atlas-based Mapped Prior (STAMP) machine-learning segmentation for multicenter large-scale MRI data - Kim, Regina EY; Magnotta, Vincent A.; Liu, Dawei; Johnson, Hans J.; Kim, Eun Young; Magnotta, Vincent A.; Liu, Dawei; Johnson, Hans J.) From Duplicate 2 (Stable Atlas-based Mapped Prior (STAMP) machine-learning segmentation for multicenter large-scale MRI data - Kim, Regina EY; Magnotta, Vincent A.; Liu, Dawei; Johnson, Hans J.) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (Stable Atlas-based Mapped Prior (STAMP) machine-learning segmentation for multicenter large-scale MRI data - Kim, Regina EY; Magnotta, Vincent A.; Liu, Dawei; Johnson, Hans J.) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. 
:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} }, abstract = { Machine learning (ML)-based segmentation methods are a common technique in the medical image processing field. In spite of numerous research groups that have investigated ML-based segmentation frameworks, there remains unanswered aspects of performance variability for the choice of two key components: ML algorithm and intensity normalization. This investigation reveals that the choice of those elements plays a major part in determining segmentation accuracy and generalizability. The approach we have used in this study aims to evaluate relative benefits of the two elements within a subcortical MRI segmentation framework. Experiments were conducted to contrast eight machine-learning algorithm configurations and 11 normalization strategies for our brain MR segmentation framework. For the intensity normalization, a Stable Atlas-based Mapped Prior (STAMP) was utilized to take better account of contrast along boundaries of structures. Comparing eight machine learning algorithms on down-sampled segmentation MR data, it was obvious that a significant improvement was obtained using ensemble-based ML algorithms (i.e., random forest) or ANN algorithms. Further investigation between these two algorithms also revealed that the random forest results provided exceptionally good agreement with manual delineations by experts. Additional experiments showed that the effect of STAMP-based intensity normalization also improved the robustness of segmentation for multicenter data sets. The constructed framework obtained good multicenter reliability and was successfully applied on a large multicenter MR data set (n{\textgreater}. 3000). Less than 10{\%} of automated segmentations were recommended for minimal expert intervention. These results demonstrate the feasibility of using the ML-based segmentation tools for processing large amount of multicenter MR images. 
We demonstrated dramatically different result profiles in segmentation accuracy according to the choice of ML algorithm and intensity normalization chosen. {\textcopyright} 2014 Elsevier Inc. }, } |
2014 | Journal | Ipek Oguz, Mahshid Farzinfar, Joy Matsui, Francois Budin, Zhexing Liu, Guido Gerig, Hans J. Johnson, Martin Styner (2014). DTIPrep: Quality control of diffusion-weighted images. Frontiers in Neuroinformatics, 8(JAN), pp. 4. (link) (bib) x @article{oguz2014dtiprep, year = { 2014 }, volume = { 8 }, url = { http://journal.frontiersin.org/article/10.3389/fninf.2014.00004/abstract{\%}0Ahttp://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=3906573{\&}tool=pmcentrez{\&}rendertype=abstract{\%}5Cnhttp://journal.frontiersin.org/Journal/10.3389/fninf.2014.00004/full http:// }, title = { DTIPrep: Quality control of diffusion-weighted images }, publisher = { Frontiers Media SA }, pmid = { 24523693 }, pages = { 4 }, number = { JAN }, keywords = { Diffusion MRI,Diffusion tensor imaging,Open-source,Preprocessing,Quality control,Software }, journal = { Frontiers in Neuroinformatics }, issn = { 16625196 }, isbn = { 1662-5196 (Electronic)$\backslash$r1662-5196 (Linking) }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Oguz et al/Frontiers in neuroinformatics/Oguz et al. - 2014 - DTIPrep quality control of diffusion-weighted images.pdf:pdf }, doi = { 10.3389/fninf.2014.00004 }, author = { Oguz and Farzinfar and Matsui and Budin and Liu and Gerig and Johnson and Styner }, annote = { From Duplicate 1 (DTIPrep: quality control of diffusion-weighted images - Oguz, Ipek; Farzinfar, Mahshid; Matsui, Joy T.; Budin, Francois; Liu, Zhexing; Gerig, Guido; Johnson, Hans J.; Styner, Martin) From Duplicate 1 (DTIPrep: quality control of diffusion-weighted images - Oguz, Ipek; Farzinfar, Mahshid; Matsui, Joy T.; Budin, Francois; Liu, Zhexing; Gerig, Guido; Johnson, Hans J.; Styner, Martin) {\#}{\#}CONTRIBUTIONS: As a member of the National Alliance for Medical Imaging Computing (NAMIC) I collaborated on many software engineering projets. I had substantial contributions to the software methods development, interpretation of validation results for this work. 
I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} From Duplicate 2 (DTIPrep: quality control of diffusion-weighted images - Oguz, Ipek; Farzinfar, Mahshid; Matsui, Joy T.; Budin, Francois; Liu, Zhexing; Gerig, Guido; Johnson, Hans J.; Styner, Martin) From Duplicate 1 (DTIPrep: quality control of diffusion-weighted images - Oguz, Ipek; Farzinfar, Mahshid; Matsui, Joy T.; Budin, Francois; Liu, Zhexing; Gerig, Guido; Johnson, Hans J.; Styner, Martin) From Duplicate 2 (DTIPrep: quality control of diffusion-weighted images - Oguz, Ipek; Farzinfar, Mahshid; Matsui, Joy T.; Budin, Francois; Liu, Zhexing; Gerig, Guido; Johnson, Hans J.; Styner, Martin) {\#}{\#}CONTRIBUTIONS: As a member of the National Alliance for Medical Imaging Computing (NAMIC) I collaborated on many software engineering projets. I had substantial contributions to the software methods development, interpretation of validation results for this work. I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. 
:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} From Duplicate 2 (DTIPrep: quality control of diffusion-weighted images - Oguz, Ipek; Farzinfar, Mahshid; Matsui, Joy T.; Budin, Francois; Liu, Zhexing; Gerig, Guido; Johnson, Hans J.; Styner, Martin) From Duplicate 1 (DTIPrep: quality control of diffusion-weighted images - Oguz, Ipek; Farzinfar, Mahshid; Matsui, Joy T.; Budin, Francois; Liu, Zhexing; Gerig, Guido; Johnson, Hans J.; Styner, Martin) From Duplicate 2 (DTIPrep: quality control of diffusion-weighted images - Oguz, Ipek; Farzinfar, Mahshid; Matsui, Joy T.; Budin, Francois; Liu, Zhexing; Gerig, Guido; Johnson, Hans J.; Styner, Martin) {\#}{\#}CONTRIBUTIONS: As a member of the National Alliance for Medical Imaging Computing (NAMIC) I collaborated on many software engineering projets. I had substantial contributions to the software methods development, interpretation of validation results for this work. I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} }, abstract = { In the last decade, diffusion MRI (dMRI) studies of the human and animal brain have been used to investigate a multitude of pathologies and drug-related effects in neuroscience research. Study after study identifies white matter (WM) degeneration as a crucial biomarker for all these diseases. The tool of choice for studying WM is dMRI. However, dMRI has inherently low signal-to-noise ratio and its acquisition requires a relatively long scan time; in fact, the high loads required occasionally stress scanner hardware past the point of physical failure. As a result, many types of artifacts implicate the quality of diffusion imagery. 
Using these complex scans containing artifacts without quality control (QC) can result in considerable error and bias in the subsequent analysis, negatively affecting the results of research studies using them. However, dMRI QC remains an under-recognized issue in the dMRI community as there are no user-friendly tools commonly available to comprehensively address the issue of dMRI QC. As a result, current dMRI studies often perform a poor job at dMRI QC. Thorough QC of dMRI will reduce measurement noise and improve reproducibility, and sensitivity in neuroimaging studies; this will allow researchers to more fully exploit the power of the dMRI technique and will ultimately advance neuroscience. Therefore, in this manuscript, we present our open-source software, DTIPrep, as a unified, user friendly platform for thorough QC of dMRI data. These include artifacts caused by eddy-currents, head motion, bed vibration and pulsation, venetian blind artifacts, as well as slice-wise and gradient-wise intensity inconsistencies. This paper summarizes a basic set of features of DTIPrep described earlier and focuses on newly added capabilities related to directional artifacts and bias analysis. {\textcopyright} 2014 Oguz, Farzinfar, Matsui, Budin, Liu, Gerig, Johnson and Styner. }, } |
2014 | Journal | Francesca Pennati, James D. Quirk, Dmitriy A. Yablonskiy, Mario Castro, Andrea Aliverti, Jason C. Woods (2014). Assessment of regional lung function with multivolume 1H MR imaging in health and obstructive lung Disease: Comparison with 3He MR imaging. Radiology, 273(2), pp. 580–590. (link) (bib) x @article{Pennati2014a, year = { 2014 }, volume = { 273 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84910050100{\&}doi=10.1148{\%}2Fradiol.14132470{\&}partnerID=40{\&}md5=75e8d2666ef74110c8fda5fcbf082a75 }, type = { Journal Article }, title = { Assessment of regional lung function with multivolume 1H MR imaging in health and obstructive lung Disease: Comparison with 3He MR imaging }, pages = { 580--590 }, number = { 2 }, journal = { Radiology }, issn = { 15271315 }, doi = { 10.1148/radiol.14132470 }, author = { Pennati and Quirk and Yablonskiy and Castro and Aliverti and Woods }, abstract = { Purpose: To introduce a method based on multivolume proton (hydrogen [1H]) magnetic resonance (MR) imaging for the regional assessment of lung ventilatory function, investigating its use in healthy volunteers and patients with obstructive lung disease and comparing the outcome with the outcome of the research standard helium 3 (3He) MR imaging. Materials and Methods: The institutional review board approved the HIPAA-compliant protocol, and informed written consent was obtained from each subject. Twenty-six subjects, including healthy volunteers (n = 6) and patients with severe asthma (n = 11) and mild (n = 6) and severe (n = 3) emphysema, were imaged with a 1.5-T whole-body MR unit at four lung volumes (residual volume [RV], functional residual capacity [FRC], 1 L above FRC [FRC+1 L], total lung capacity [TLC]) with breath holds of 10-11 seconds, by using volumetric interpolated breath-hold examination. Each pair of volumes were registered, resulting in maps of 1H signal change between the two lung volumes. 
3He MR imaging was performed at FRC+1 L by using a two-dimensional gradient-echo sequence. 1H signal change and 3He signal were measured and compared in corresponding regions of interest selected in ventral, intermediate, and dorsal areas. Results: In all volunteers and patients combined, proton signal difference between TLC and RV correlated positively with 3He signal (correlation coefficient R2 = 0.64, P {\textless} .001). Lower (P {\textless} .001) but positive correlation results from 1H signal difference between FRC and FRC+1 L (R2 = 0.44, P {\textless} .001). In healthy volunteers, 1H signal changes show a higher median and interquartile range compared with patients with obstructive disease and significant differences between nondependent and dependent regions. Conclusion: Findings in this study demonstrate that multivolume 1H MR imaging, without contrast material, can be used as a biomarker for regional ventilation, both in healthy volunteers and patients with obstructive lung disease. }, } |
2014 | Journal | Yong Tae Kim, Melis Hazar, Deepthi S. Vijayraghavan, Jiho Song, Timothy R. Jackson, Sagar D. Joshi, William C. Messner, Lance A. Davidson, Philip R. LeDuc (2014). Mechanochemcal actuators of embryonic epithelial contractility. Proceedings of the National Academy of Sciences of the United States of America, 111(40), pp. 14366–14371. (link) (bib) x @article{Kim2014a, year = { 2014 }, volume = { 111 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-85047696684{\&}doi=10.1073{\%}2Fpnas.1405209111{\&}partnerID=40{\&}md5=21fb602c98b9aba0f290a278894f3d81 }, type = { Journal Article }, title = { Mechanochemcal actuators of embryonic epithelial contractility }, pages = { 14366--14371 }, number = { 40 }, keywords = { Mechanotransduction,Microfluidics,Multicellular,Signaling }, journal = { Proceedings of the National Academy of Sciences of the United States of America }, issn = { 10916490 }, doi = { 10.1073/pnas.1405209111 }, author = { Kim and Hazar and Vijayraghavan and Song and Jackson and Joshi and Messner and Davidson and LeDuc }, abstract = { Spatiotemporal regulation of cell contractility coordinates cell shape change to construct tissue architecture and ultimately directs the morphology and function of the organism. Here we show that contractility responses to spatially and temporally controlled chemical stimuli depend much more strongly on intercellular mechanical connections than on biochemical cues in both stimulated tissues and adjacent cells. We investigate how the cell contractility is triggered within an embryonic epithelial sheet by local ligand stimulation and coordinates a long-range contraction response. Our custom microfluidic control system allows spatiotemporally controlled stimulation with extracellular ATP, which results in locally distinct contractility followed by mechanical strain pattern formation. 
The stimulation-response circuit exposed here provides a better understanding of how morphogenetic processes integrate responses to stimulation and how intercellular responses are transmitted across multiple cells. These findings may enable one to create a biological actuator that actively drives morphogenesis. }, } |
2014 | Journal | Bernhard Hesse, Max Langer, Peter Varga, Alexandra Pacureanu, Pei Dong, Susanne Schrof, Nils Männicke, Heikki Suhonen, Cecile Olivier, Peter Maurer, Galateia J. Kazakia, Kay Raum, Francoise Peyrin (2014). Alterations of mass density and 3D osteocyte lacunar properties in bisphosphonate-related osteonecrotic human jaw bone, a synchrotron $\mu$CT study. PLoS ONE, 9(2), pp. 11. (link) (bib) x @article{Hesse2014, year = { 2014 }, volume = { 9 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Alterations of mass density and 3D osteocyte lacunar properties in bisphosphonate-related osteonecrotic human jaw bone, a synchrotron $\mu$CT study }, pages = { 11 }, number = { 2 }, journal = { PLoS ONE }, issn = { 19326203 }, doi = { 10.1371/journal.pone.0088481 }, author = { Hesse and Langer and Varga and Pacureanu and Dong and Schrof and M{\"{a}}nnicke and Suhonen and Olivier and Maurer and Kazakia and Raum and Peyrin }, abstract = { Osteonecrosis of the jaw, in association with bisphosphonates (BRONJ) used for treating osteoporosis or cancer, is a severe and most often irreversible side effect whose underlying pathophysiological mechanisms remain largely unknown. Osteocytes are involved in bone remodeling and mineralization where they orchestrate the delicate equilibrium between osteoclast and osteoblast activity and through the active process called osteocytic osteolysis. Here, we hypothesized that (i) changes of the mineralized tissue matrix play a substantial role in the pathogenesis of BRONJ, and (ii) the osteocyte lacunar morphology is altered in BRONJ. Synchrotron $\mu$CT with phase contrast is an appropriate tool for assessing both the 3D morphology of the osteocyte lacunae and the bone matrix mass density. Here, we used this technique to investigate the mass density distribution and 3D osteocyte lacunar properties at the sub-micrometer scale in human bone samples from the jaw, femur and tibia. 
First, we compared healthy human jaw bone to human tibia and femur in order to assess the specific differences and address potential explanations of why the jaw bone is exclusively targeted by the necrosis as a side effect of BP treatment. Second, we investigated the differences between BRONJ and control jaw bone samples to detect potential differences which could aid an improved understanding of the course of BRONJ. We found that the apparent mass density of jaw bone was significantly smaller compared to that of tibia, consistent with a higher bone turnover in the jaw bone. The variance of the lacunar volume distribution was significantly different depending on the anatomical site. The comparison between BRONJ and control jaw specimens revealed no significant increase in mineralization after BP. We found a significant decrease in osteocyte-lacunar density in the BRONJ group compared to the control jaw. Interestingly, the osteocyte-lacunar volume distribution was not altered after BP treatment. {\textcopyright} 2014 Hesse et al. }, } |
2014 | Journal | Johannes Stegmaier, Jens C. Otte, Andrei Kobitski, Andreas Bartschat, Ariel Garcia, G. Ulrich Nienhaus, Uwe Strähle, Ralf Mikut (2014). Fast segmentation of stained nuclei in terabyte-scale, time resolved 3D microscopy image stacks. PLoS ONE, 9(2), pp. 11. (link) (bib) x @article{Stegmaier2014, year = { 2014 }, volume = { 9 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Fast segmentation of stained nuclei in terabyte-scale, time resolved 3D microscopy image stacks }, pages = { 11 }, number = { 2 }, journal = { PLoS ONE }, issn = { 19326203 }, doi = { 10.1371/journal.pone.0090036 }, author = { Stegmaier and Otte and Kobitski and Bartschat and Garcia and Nienhaus and Str{\"{a}}hle and Mikut }, abstract = { Automated analysis of multi-dimensional microscopy images has become an integral part of modern research in life science. Most available algorithms that provide sufficient segmentation quality, however, are infeasible for a large amount of data due to their high complexity. In this contribution we present a fast parallelized segmentation method that is especially suited for the extraction of stained nuclei from microscopy images, e.g., of developing zebrafish embryos. The idea is to transform the input image based on gradient and normal directions in the proximity of detected seed points such that it can be handled by straightforward global thresholding like Otsu's method. We evaluate the quality of the obtained segmentation results on a set of real and simulated benchmark images in 2D and 3D and show the algorithm's superior performance compared to other state-of-the-art algorithms. We achieve an up to ten-fold decrease in processing times, allowing us to process large data sets while still providing reasonable segmentation results. {\textcopyright} 2014 Stegmaier et al. }, } |
2014 | Journal | Maria Francesca Spadea, Aurora Fassi, Paolo Zaffino, Marco Riboldi, Guido Baroni, Nicolas Depauw, Joao Seco (2014). Contrast-enhanced proton radiography for patient set-up by using x-ray CT prior knowledge. International Journal of Radiation Oncology Biology Physics, 90(3), pp. 628–636. (link) (bib) x @article{Spadea2014, year = { 2014 }, volume = { 90 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Contrast-enhanced proton radiography for patient set-up by using x-ray CT prior knowledge }, pages = { 628--636 }, number = { 3 }, journal = { International Journal of Radiation Oncology Biology Physics }, issn = { 1879355X }, doi = { 10.1016/j.ijrobp.2014.06.057 }, author = { Spadea and Fassi and Zaffino and Riboldi and Baroni and Depauw and Seco }, abstract = { Methods and Materials: Six lung cancer patients CT scans were preprocessed by masking out the gross tumor volume (GTV), and digitally reconstructed radiographs along the planned beams eye view (BEV) were generated, for a total of 27 projections. Proton radiographies (PR) were also computed for the same BEV through Monte Carlo simulations. The digitally reconstructed radiograph was subtracted from the corresponding proton image, resulting in a contrast-enhanced proton radiography (CEPR). Michelson contrast analysis was performed both on PR and CEPR. The tumor region was then automatically segmented on CEPR and compared to the ground truth (GT) provided by physicians in terms of Dice coefficient, accuracy, precision, sensitivity, and specificity. Purpose: To obtain a contrasted image of the tumor region during the setup for proton therapy in lung patients, by using proton radiography and x-ray computed tomography (CT) prior knowledge. Results: Contrast on CEPR was, on average, 4 times better than on PR. For 10 lateral projections (±45 off of 90 or 270), although it was not possible to distinguish the tumor region in the PR, CEPR offers excellent GTV visibility. 
The median ± quartile values of Dice, precision, and accuracy indexes were 0.86 ± 0.03, 0.86 ± 0.06, and 0.88 ± 0.02, respectively, thus confirming the reliability of the method in highlighting tumor boundaries. Sensitivity and specificity analysis demonstrated that there is no systematic over- or underestimation of the tumor region. Identification of the tumor boundaries using CEPR resulted in a more accurate and precise definition of GTV compared to that obtained from pretreatment CT. Conclusions: In most proton centers, the current clinical protocol is to align the patient using kV imaging with bony anatomy as a reference. We demonstrated that CEPR can significantly improve tumor visualization, allowing better patient set-up and permitting image guided proton therapy (IGPT). }, } |
2014 | Journal | Zhenzhou Shao, Jianda Han, Wei Liang, Jindong Tan, Yong Guan (2014). Robust and fast initialization for intensity-based 2D/3D registration. Advances in Mechanical Engineering, 2014, pp. 12. (link) (bib) x @article{Shao2014, year = { 2014 }, volume = { 2014 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Robust and fast initialization for intensity-based 2D/3D registration }, pages = { 12 }, journal = { Advances in Mechanical Engineering }, issn = { 16878140 }, doi = { 10.1155/2014/989254 }, author = { Shao and Han and Liang and Tan and Guan }, abstract = { Intensity-based 2D/3D registration is a key technique using digitally reconstructed radiographs (DRRs) to register the preoperative volume to the patient setup during the operation. Although DRR-based method provides a high accuracy, the small capture range hinders its clinical use. In this paper, such problem was addressed by a robust and fast initialization method using a two-level scheme including automatic tracking-based initialization (Level I) and multiresolution estimation based on central-slice theorem and phase correlation (Level II). It provided almost optimal transformation parameters for intensity-based registration. Experiments using a public gold standard data set and a spinal phantom have been conducted. The mean target registration error (mTRE) was limited in the range from 2.12 mm to 22.57 mm after tracking-based initialization. The capture range based on level II only was 20.1 mm and the mTRE in this capture range was 2.92 ± 2.21 mm. The intensity-based 2D/3D registration using proposed two-level initialization achieved the successful rate of 84.8{\%} with the average error of 2.36 mm. The experimental results showed that the proposed method yielded the robust and fast initialization for intensity-based registration methods. In a similar way, it can be applied to other registration methods to enable a larger capture range and robust implementation. 
{\textcopyright} 2014 Zhenzhou Shao et al. }, } |
2014 | Journal | Mirabela Rusu, B. Nicolas Bloch, Carl C. Jaffe, Elizabeth M. Genega, Robert E. Lenkinski, Neil M. Rofsky, Ernest Feleppa, Anant Madabhushi (2014). Prostatome: A combined anatomical and disease based MRI atlas of the prostate. Medical Physics, 41(7), pp. 12. (link) (bib) x @article{Rusu2014, year = { 2014 }, volume = { 41 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Prostatome: A combined anatomical and disease based MRI atlas of the prostate }, pages = { 12 }, number = { 7 }, keywords = { anatomic imaging atlas,guided biopsy,image processing,in vivo imaging,prostate cancer }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4881515 }, author = { Rusu and Bloch and Jaffe and Genega and Lenkinski and Rofsky and Feleppa and Madabhushi }, abstract = { Purpose: In this work, the authors introduce a novel framework, the anatomically constrained registration (AnCoR) scheme and apply it to create a fused anatomic-disease atlas of the prostate which the authors refer to as the prostatome. The prostatome combines a MRI based anatomic and a histology based disease atlas. Statistical imaging atlases allow for the integration of information across multiple scales and imaging modalities into a single canonical representation, in turn enabling a fused anatomical-disease representation which may facilitate the characterization of disease appearance relative to anatomic structures. While statistical atlases have been extensively developed and studied for the brain, approaches that have attempted to combine pathology and imaging data for study of prostate pathology are not extant. This works seeks to address this gap. Methods: The AnCoR framework optimizes a scoring function composed of two surface (prostate and central gland) misalignment measures and one intensity-based similarity term. 
This ensures the correct mapping of anatomic regions into the atlas, even when regional MRI intensities are inconsistent or highly variable between subjects. The framework allows for creation of an anatomic imaging and a disease atlas, while enabling their fusion into the anatomic imaging-disease atlas. The atlas presented here was constructed using 83 subjects with biopsy confirmed cancer who had pre-operative MRI (collected at two institutions) followed by radical prostatectomy. The imaging atlas results from mapping the in vivo MRI into the canonical space, while the anatomic regions serve as domain constraints. Elastic co-registration of MRI and corresponding ex vivo histology provides "ground truth" mapping of cancer extent on in vivo imaging for 23 subjects. Results: AnCoR was evaluated relative to alternative construction strategies that use either MRI intensities or the prostate surface alone for registration. The AnCoR framework yielded a central gland Dice similarity coefficient (DSC) of 90{\%}, and prostate DSC of 88{\%}, while the misalignment of the urethra and verumontanum was found to be 3.45 mm, and 4.73 mm, respectively, which were measured to be significantly smaller compared to the alternative strategies. As might have been anticipated from our limited cohort of biopsy confirmed cancers, the disease atlas showed that most of the tumor extent was limited to the peripheral zone. Moreover, central gland tumors were typically larger in size, possibly because they are only discernible at a much later stage. Conclusions: The authors presented the AnCoR framework to explicitly model anatomic constraints for the construction of a fused anatomic imaging-disease atlas. The framework was applied to constructing a preliminary version of an anatomic-disease atlas of the prostate, the prostatome. The prostatome could facilitate the quantitative characterization of gland morphology and imaging features of prostate cancer. 
These techniques may be applied on a large sample size data set to create a fully developed prostatome that could serve as a spatial prior for targeted biopsies by urologists. Additionally, the AnCoR framework could allow for incorporation of complementary imaging and molecular data, thereby enabling their careful correlation for population based radio-omics studies. {\textcopyright} 2014 American Association of Physicists in Medicine. }, } |
2014 | Journal | Peter Rautek, Stefan Bruckner, M. Eduard Gröller, Markus Hadwiger (2014). ViSlang: A system for interpreted domain-specific languages for scientific visualization. IEEE Transactions on Visualization and Computer Graphics, 20(12), pp. 2388–2396. (link) (bib) x @article{Rautek2014, year = { 2014 }, volume = { 20 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { ViSlang: A system for interpreted domain-specific languages for scientific visualization }, pages = { 2388--2396 }, number = { 12 }, keywords = { Domain-specific languages,Volume visualization,Volume visualization framework }, journal = { IEEE Transactions on Visualization and Computer Graphics }, issn = { 10772626 }, doi = { 10.1109/TVCG.2014.2346318 }, author = { Rautek and Bruckner and Gr{\"{o}}ller and Hadwiger }, abstract = { Researchers from many domains use scientific visualization in their daily practice. Existing implementations of algorithms usually come with a graphical user interface (high-level interface), or as software library or source code (low-level interface). In this paper we present a system that integrates domain-specific languages (DSLs) and facilitates the creation of new DSLs. DSLs provide an effective interface for domain scientists avoiding the difficulties involved with low-level interfaces and at the same time offering more flexibility than high-level interfaces. We describe the design and implementation of ViSlang, an interpreted language specifically tailored for scientific visualization. A major contribution of our design is the extensibility of the ViSlang language. Novel DSLs that are tailored to the problems of the domain can be created and integrated into ViSlang. We show that our approach can be added to existing user interfaces to increase the flexibility for expert users on demand, but at the same time does not interfere with the user experience of novice users. 
To demonstrate the flexibility of our approach we present new DSLs for volume processing, querying and visualization. We report the implementation effort for new DSLs and compare our approach with Matlab and Python implementations in terms of run-time performance. }, } |
2014 | Journal | Francesca Pennati, Caterina Salito, Guido Baroni, Jason Woods, Andrea Aliverti (2014). Comparison Between Multivolume CT-Based Surrogates of Regional Ventilation in Healthy Subjects. Academic Radiology, 21(10), pp. 1268–1275. (link) (bib) x @article{Pennati2014, year = { 2014 }, volume = { 21 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Comparison Between Multivolume CT-Based Surrogates of Regional Ventilation in Healthy Subjects }, pages = { 1268--1275 }, number = { 10 }, keywords = { CT imaging,Gravity,Healthy lung,Ventilation }, journal = { Academic Radiology }, issn = { 18784046 }, doi = { 10.1016/j.acra.2014.05.022 }, author = { Pennati and Salito and Baroni and Woods and Aliverti }, abstract = { Rationale and Objectives: The assessment of regional ventilation is of critical importance when investigating lung function during disease progression and planning of pulmonary interventions. Recently, different computed tomography (CT)-based parameters have been proposed as surrogates of lung ventilation. The aim of the present study was to compare these parameters, namely variations of density ($\delta$HU), specific volume (sVol), and specific gas volume ($\delta$SVg) between different lung volumes, in relation to their topographic distribution within the lung. Materials and Methods: Ten healthy volunteers were scanned via high-resolution CT at residual volume (RV) and total lung capacity (TLC); $\delta$HU, sVol, and $\delta$SVg were mapped voxel by voxel after registering TLC onto RV. Variations of the three parameters along the vertical and horizontal directions were analyzed. Results: Along the vertical direction (from ventral to dorsal regions), a strong dependence on gravity was found in $\delta$HU and sVol, with greater values in the dorsal regions of the lung (P{\textless}.001), whereas $\delta$SVg was more homogeneously distributed within the lung. 
Conversely, along the caudocranial direction (from lung bases to apexes) where no gravitational gradient is present, the three parameters behaved similarly, with lower values at the apices. Conclusions: $\delta$HU, sVol, and $\delta$SVg behave differently along the gravity direction. As the greater amount of air delivered to the dependent portion of the lung supplies a larger number of alveoli, the amount of gas delivered to alveoli compared to the mass of tissue is not gravity dependent. The minimization of gravity dependence in the distribution of ventilation when using $\delta$SVg suggests that this parameter is more reliable to discriminate healthy from pathologic regions. }, } |
2014 | Journal | Mike Meyer, David Elliott, Andrew D. Wood, Nicholas F. Polys, Matthew Colbert, Jessica A. Maisano, Patricia Vickers-Rich, Michael Hall, Karl H. Hoffman, Gabi Schneider, Shuhai Xiao (2014). Three-dimensional microCT analysis of the Ediacara fossil Pteridinium simplex sheds new light on its ecology and phylogenetic affinity. Precambrian Research, 249, pp. 79–87. (link) (bib) x @article{Meyer2014, year = { 2014 }, volume = { 249 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Three-dimensional microCT analysis of the Ediacara fossil Pteridinium simplex sheds new light on its ecology and phylogenetic affinity }, pages = { 79--87 }, keywords = { Aar member,Ediacaran,MicroCT,Namibia,Pteridinium,Taphonomy }, journal = { Precambrian Research }, issn = { 03019268 }, doi = { 10.1016/j.precamres.2014.04.013 }, author = { Meyer and Elliott and Wood and Polys and Colbert and Maisano and Vickers-Rich and Hall and Hoffman and Schneider and Xiao }, abstract = { Ediacara fossils often exhibit enigmatic taphonomy that complicates morphological characterization and ecological and phylogenetic interpretation; such is the case with Pteridinium simplex from the late Ediacaran Aar Member in southern Namibia. P. simplex is often preserved as three-dimensional (3D) casts and molds in coarse-grained quartzites, making detailed morphological characterization difficult. In addition, P. simplex is often transported, distorted, and embedded in gutter fills or channel deposits, further obscuring its morphologies. By utilizing microfocus X-ray computed tomography (microCT) techniques, we are able to trace individual specimens and their vanes in order to digitally restore the 3D morphology of this enigmatic fossil. Our analysis shows that P. simplex has a very flexible integument that can be bent, folded, twisted, stretched, and torn, indicating a certain degree of elasticity. 
In the analyzed specimens, we find no evidence for vane identity change or penetrative growth that were previously used as evidence to support a fully endobenthic lifestyle of P. simplex; instead, evidence is consistent with the traditional interpretation of a semi-endobenthic or epibenthic lifestyle. This interpretation needs to be further tested through microCT analysis of P. simplex specimens preserved in situ rather than transported ones. The elastic integument of P. simplex is inconsistent with a phylogenetic affinity with xenophyophore protists; instead, its physical property is consistent with the presence of collagen, chitin, and cellulose, an inference that would provide constraints on the phylogenetic affinity of P. simplex. {\textcopyright} 2014 Elsevier B.V. }, } |
2014 | Journal | Sarah A. Mattonen, David A. Palma, Cornelis J.A. Haasbeek, Suresh Senan, Aaron D. Ward (2014). Early prediction of tumor recurrence based on CT texture changes after stereotactic ablative radiotherapy (SABR) for lung cancer. Medical Physics, 41(3), pp. 14. (link) (bib) x @article{Mattonen2014, year = { 2014 }, volume = { 41 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Early prediction of tumor recurrence based on CT texture changes after stereotactic ablative radiotherapy (SABR) for lung cancer }, pages = { 14 }, number = { 3 }, keywords = { cancer recurrence,computed tomography,lung,stereotactic radiation therapy,texture analysis }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4866219 }, author = { Mattonen and Palma and Haasbeek and Senan and Ward }, abstract = { Purpose: Benign computed tomography (CT) changes due to radiation induced lung injury (RILI) are common following stereotactic ablative radiotherapy (SABR) and can be difficult to differentiate from tumor recurrence. The authors measured the ability of CT image texture analysis, compared to more traditional measures of response, to predict eventual cancer recurrence based on CT images acquired within 5 months of treatment. Methods: A total of 24 lesions from 22 patients treated with SABR were selected for this study: 13 with moderate to severe benign RILI, and 11 with recurrence. Three-dimensional (3D) consolidative and ground-glass opacity (GGO) changes were manually delineated on all follow-up CT scans. Two size measures of the consolidation regions (longest axial diameter and 3D volume) and nine appearance features of the GGO were calculated: 2 first-order features [mean density and standard deviation of density (first-order texture)], and 7 second-order texture features [energy, entropy, correlation, inverse difference moment (IDM), inertia, cluster shade, and cluster prominence]. 
For comparison, the corresponding response evaluation criteria in solid tumors measures were also taken for the consolidation regions. Prediction accuracy was determined using the area under the receiver operating characteristic curve (AUC) and two-fold cross validation (CV). Results: For this analysis, 46 diagnostic CT scans scheduled for approximately 3 and 6 months post-treatment were binned based on their recorded scan dates into 2-5 month and 5-8 month follow-up time ranges. At 2-5 months post-treatment, first-order texture, energy, and entropy provided AUCs of 0.79-0.81 using a linear classifier. On two-fold CV, first-order texture yielded 73{\%} accuracy versus 76{\%}-77{\%} with the second-order features. The size measures of the consolidative region, longest axial diameter and 3D volume, gave two-fold CV accuracies of 60{\%} and 57{\%}, and AUCs of 0.72 and 0.65, respectively. Conclusions: Texture measures of the GGO appearance following SABR demonstrated the ability to predict recurrence in individual patients within 5 months of SABR treatment. Appearance changes were also shown to be more accurately predictive of recurrence, as compared to size measures within the same time period. With further validation, these results could form the substrate for a clinically useful computer-aided diagnosis tool which could provide earlier salvage of patients with recurrence. {\textcopyright} 2014 American Association of Physicists in Medicine. }, } |
2014 | Journal | Yixun Liu, Samira M. Sadowski, Allison B. Weisbrod, Electron Kebebew, Ronald M. Summers, Jianhua Yao (2014). Patient specific tumor growth prediction using multimodal images. Medical Image Analysis, 18(3), pp. 555–566. (link) (bib) x @article{Liu2014a, year = { 2014 }, volume = { 18 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Patient specific tumor growth prediction using multimodal images }, pages = { 555--566 }, number = { 3 }, keywords = { Intracellular volume fraction,Metabolic rate,Multimodal images,Tumor growth modeling }, journal = { Medical Image Analysis }, issn = { 13618423 }, doi = { 10.1016/j.media.2014.02.005 }, author = { Liu and Sadowski and Weisbrod and Kebebew and Summers and Yao }, abstract = { Personalized tumor growth model is valuable in tumor staging and therapy planning. In this paper, we present a patient specific tumor growth model based on longitudinal multimodal imaging data including dual-phase CT and FDG-PET. The proposed Reaction-Advection-Diffusion model is capable of integrating cancerous cell proliferation, infiltration, metabolic rate and extracellular matrix biomechanical response. To bridge the model with multimodal imaging data, we introduce Intracellular Volume Fraction (ICVF) measured from dual-phase CT and Standardized Uptake Value (SUV) measured from FDG-PET into the model. The patient specific model parameters are estimated by fitting the model to the observation, which leads to an inverse problem formalized as a coupled Partial Differential Equations (PDE)-constrained optimization problem. The optimality system is derived and solved by the Finite Difference Method. 
The model was evaluated by comparing the predicted tumors with the observed tumors in terms of average surface distance (ASD), root mean square difference (RMSD) of the ICVF map, average ICVF difference (AICVFD) of tumor surface and tumor relative volume difference (RVD) on six patients with pathologically confirmed pancreatic neuroendocrine tumors. The ASD between the predicted tumor and the reference tumor was 2.4 ± 0.5 mm, the RMSD was 4.3 ± 0.4{\%}, the AICVFD was 2.6 ± 0.6{\%}, and the RVD was 7.7 ± 1.3{\%}. {\textcopyright} 2014. }, } |
2014 | Journal | Ting Liu, Cory Jones, Mojtaba Seyedhosseini, Tolga Tasdizen (2014). A modular hierarchical approach to 3D electron microscopy image segmentation. Journal of Neuroscience Methods, 226, pp. 88–102. (link) (bib) x @article{Liu2014, year = { 2014 }, volume = { 226 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A modular hierarchical approach to 3D electron microscopy image segmentation }, pages = { 88--102 }, keywords = { Electron microscopy,Hierarchical segmentation,Image segmentation,Neuron reconstruction,Semi-automatic segmentation }, journal = { Journal of Neuroscience Methods }, issn = { 01650270 }, doi = { 10.1016/j.jneumeth.2014.01.022 }, author = { Liu and Jones and Seyedhosseini and Tasdizen }, abstract = { The study of neural circuit reconstruction, i.e., connectomics, is a challenging problem in neuroscience. Automated and semi-automated electron microscopy (EM) image analysis can be tremendously helpful for connectomics research. In this paper, we propose a fully automatic approach for intra-section segmentation and inter-section reconstruction of neurons using EM images. A hierarchical merge tree structure is built to represent multiple region hypotheses and supervised classification techniques are used to evaluate their potentials, based on which we resolve the merge tree with consistency constraints to acquire final intra-section segmentation. Then, we use a supervised learning based linking procedure for the inter-section neuron reconstruction. Also, we develop a semi-automatic method that utilizes the intermediate outputs of our automatic algorithm and achieves intra-segmentation with minimal user intervention. The experimental results show that our automatic method can achieve close-to-human intra-segmentation accuracy and state-of-the-art inter-section reconstruction accuracy. We also show that our semi-automatic method can further improve the intra-segmentation accuracy. {\textcopyright} 2014 Elsevier B.V. }, } |
2014 | Journal | Yann Le Poul, Annabel Whibley, Mathieu Chouteau, Florence Prunier, Violaine Llaurens, Mathieu Joron (2014). Evolution of dominance mechanisms at a butterfly mimicry supergene. Nature Communications, 5, pp. 8. (link) (bib) x @article{LePoul2014, year = { 2014 }, volume = { 5 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Evolution of dominance mechanisms at a butterfly mimicry supergene }, pages = { 8 }, journal = { Nature Communications }, issn = { 20411723 }, doi = { 10.1038/ncomms6644 }, author = { {Le Poul} and Whibley and Chouteau and Prunier and Llaurens and Joron }, abstract = { Genetic dominance in polymorphic loci may respond to selection; however, the evolution of dominance in complex traits remains a puzzle. We analyse dominance at a wing-patterning supergene controlling local mimicry polymorphism in the butterfly Heliconius numata. Supergene alleles are associated with chromosomal inversion polymorphism, defining ancestral versus derived alleles. Using controlled crosses and the new procedure, Colour Pattern Modelling, allowing whole-wing pattern comparisons, we estimate dominance coefficients between alleles. Here we show strict dominance in sympatry favouring mimicry and inconsistent dominance throughout the wing between alleles from distant populations. Furthermore, dominance among derived alleles is uncoordinated across wing-pattern elements, producing mosaic heterozygous patterns determined by a hierarchy in colour expression. By contrast, heterozygotes with an ancestral allele show complete, coordinated dominance of the derived allele, independently of colours. Therefore, distinct dominance mechanisms have evolved in association with supergene inversions, in response to strong selection on mimicry polymorphism. }, } |
2014 | Journal | Samaneh Kazemifar, John J. Drozd, Nagalingam Rajakumar, Michael J. Borrie, Robert Bartha (2014). Automated algorithm to measure changes in medial temporal lobe volume in Alzheimer disease. Journal of Neuroscience Methods, 227, pp. 35–46. (link) (bib) x @article{Kazemifar2014, year = { 2014 }, volume = { 227 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Automated algorithm to measure changes in medial temporal lobe volume in Alzheimer disease }, pages = { 35--46 }, keywords = { Alzheimer disease,Hippocampus,MRI,Medial temporal lobe segmentation,Multi-atlas,Shape }, journal = { Journal of Neuroscience Methods }, issn = { 1872678X }, doi = { 10.1016/j.jneumeth.2014.01.033 }, author = { Kazemifar and Drozd and Rajakumar and Borrie and Bartha }, abstract = { Background: The change in volume of anatomic structures is as a sensitive indicator of Alzheimer disease (AD) progression. Although several methods are available to measure brain volumes, improvements in speed and automation are required. Our objective was to develop a fully automated, fast, and reliable approach to measure change in medial temporal lobe (MTL) volume, including primarily hippocampus. Methods: The MTL volume defined in an atlas image was propagated onto each baseline image and a level set algorithm was applied to refine the shape and smooth the boundary. The MTL of the baseline image was then mapped onto the corresponding follow-up image to measure volume change ($\delta$MTL). Baseline and 24 months 3D T1-weighted images from the Alzheimer Disease Neuroimaging Initiative (ADNI) were randomly selected for 50 normal elderly controls (NECs), 50 subjects with mild cognitive impairment (MCI) and 50 subjects with AD to test the algorithm. The method was compared to the FreeSurfer segmentation tools. 
Results: The average $\delta$MTL (mean±SEM) was 68±35mm3 in NEC, 187±38mm3 in MCI and 300±34mm3 in the AD group and was significantly different (p{\textless}0.0001) between all three groups. The $\delta$MTL was correlated with cognitive decline. Comparison with existing method(s): Results for the FreeSurfer software were similar but did not detect significant differences between the MCI and AD groups. Conclusion: This novel segmentation approach is fully automated and provides a robust marker of brain atrophy that shows different rates of atrophy over 2 years between NEC, MCI, and AD groups. {\textcopyright} 2014 Elsevier B.V. }, } |
2014 | Journal | Florian Jug, Tobias Pietzsch, Stephan Preibisch, Pavel Tomancak (2014). Bioimage Informatics in the context of Drosophila research. Methods, 68(1), pp. 60–73. (link) (bib) x @article{Jug2014, year = { 2014 }, volume = { 68 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Bioimage Informatics in the context of Drosophila research }, pages = { 60--73 }, number = { 1 }, keywords = { Drosophila,Image analysis,Processing,Registration,Segmentation,Tracking }, journal = { Methods }, issn = { 10959130 }, doi = { 10.1016/j.ymeth.2014.04.004 }, author = { Jug and Pietzsch and Preibisch and Tomancak }, abstract = { Modern biological research relies heavily on microscopic imaging. The advanced genetic toolkit of Drosophila makes it possible to label molecular and cellular components with unprecedented level of specificity necessitating the application of the most sophisticated imaging technologies. Imaging in Drosophila spans all scales from single molecules to the entire populations of adult organisms, from electron microscopy to live imaging of developmental processes. As the imaging approaches become more complex and ambitious, there is an increasing need for quantitative, computer-mediated image processing and analysis to make sense of the imagery. Bioimage Informatics is an emerging research field that covers all aspects of biological image analysis from data handling, through processing, to quantitative measurements, analysis and data presentation. Some of the most advanced, large scale projects, combining cutting edge imaging with complex bioimage informatics pipelines, are realized in the Drosophila research community. In this review, we discuss the current research in biological image analysis specifically relevant to the type of systems level image datasets that are uniquely available for the Drosophila model system. 
We focus on how state-of-the-art computer vision algorithms are impacting the ability of Drosophila researchers to analyze biological systems in space and time. We pay particular attention to how these algorithmic advances from computer science are made usable to practicing biologists through open source platforms and how biologists can themselves participate in their further development. {\textcopyright} 2014 The Authors. }, } |
2014 | Journal | Yuanyuan Ge, Ricky T. O'Brien, Chun Chien Shieh, Jeremy T. Booth, Paul J. Keall (2014). Toward the development of intrafraction tumor deformation tracking using a dynamic multi-leaf collimator. Medical Physics, 41(6), pp. 10. (link) (bib) x @article{Ge2014, year = { 2014 }, volume = { 41 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Toward the development of intrafraction tumor deformation tracking using a dynamic multi-leaf collimator }, pages = { 10 }, number = { 6 }, keywords = { DMLC tracking,adaptation,tumor deformation }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4873682 }, author = { Ge and O'Brien and Shieh and Booth and Keall }, abstract = { Purpose: Intrafraction deformation limits targeting accuracy in radiotherapy. Studies show tumor deformation of over 10 mm for both single tumor deformation and system deformation (due to differential motion between primary tumors and involved lymph nodes). Such deformation cannot be adapted to with current radiotherapy methods. The objective of this study was to develop and experimentally investigate the ability of a dynamic multi-leaf collimator (DMLC) tracking system to account for tumor deformation. Methods: To compensate for tumor deformation, the DMLC tracking strategy is to warp the planned beam aperture directly to conform to the new tumor shape based on real time tumor deformation input. Two deformable phantoms that correspond to a single tumor and a tumor system were developed. The planar deformations derived from the phantom images in beam's eye view were used to guide the aperture warping. An in-house deformable image registration software was developed to automatically trigger the registration once new target image was acquired and send the computed deformation to the DMLC tracking software. 
Because the registration speed is not fast enough to implement the experiment in real-time manner, the phantom deformation only proceeded to the next position until registration of the current deformation position was completed. The deformation tracking accuracy was evaluated by a geometric target coverage metric defined as the sum of the area incorrectly outside and inside the ideal aperture. The individual contributions from the deformable registration algorithm and the finite leaf width to the tracking uncertainty were analyzed. Clinical proof-of-principle experiment of deformation tracking using previously acquired MR images of a lung cancer patient was implemented to represent the MRI-Linac environment. Intensity-modulated radiation therapy (IMRT) treatment delivered with enabled deformation tracking was simulated and demonstrated. Results: The first experimental investigation of adapting to tumor deformation has been performed using simple deformable phantoms. For the single tumor deformation, the Au+Ao was reduced over 56{\%} when deformation was larger than 2 mm. Overall, the total improvement was 82{\%}. For the tumor system deformation, the Au+Ao reductions were all above 75{\%} and the total Au+Ao improvement was 86{\%}. Similar coverage improvement was also found in simulating deformation tracking during IMRT delivery. The deformable image registration algorithm was identified as the dominant contributor to the tracking error rather than the finite leaf width. The discrepancy between the warped beam shape and the ideal beam shape due to the deformable registration was observed to be partially compensated during leaf fitting due to the finite leaf width. The clinical proof-of-principle experiment demonstrated the feasibility of intrafraction deformable tracking for clinical scenarios. Conclusions: For the first time, we developed and demonstrated an experimental system that is capable of adapting the MLC aperture to account for tumor deformation. 
This work provides a potentially widely available management method to effectively account for intrafractional tumor deformation. This proof-of-principle study is the first experimental step toward the development of an image-guided radiotherapy system to treat deforming tumors in real-time. {\textcopyright} 2014 American Association of Physicists in Medicine. }, } |
2014 | Journal | F. Fusseis, X. Xiao, C. Schrank, F. De Carlo (2014). A brief guide to synchrotron radiation-based microtomography in (structural) geology and rock mechanics. Journal of Structural Geology, 65, pp. 1–16. (link) (bib) x @article{Fusseis2014, year = { 2014 }, volume = { 65 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A brief guide to synchrotron radiation-based microtomography in (structural) geology and rock mechanics }, pages = { 1--16 }, keywords = { 3D petrography,Digital rock physics,Synchrotron,X-ray microtomography }, journal = { Journal of Structural Geology }, issn = { 01918141 }, doi = { 10.1016/j.jsg.2014.02.005 }, author = { Fusseis and Xiao and Schrank and {De Carlo} }, abstract = { This contribution outlines Synchrotron-based X-ray micro-tomography and its potential use in structural geology and rock mechanics. The paper complements several recent reviews of X-ray microtomography. We summarize the general approach to data acquisition, post-processing as well as analysis and thereby aim to provide an entry point for the interested reader. The paper includes tables listing relevant beamlines, a list of all available imaging techniques, and available free and commercial software packages for data visualization and quantification. We highlight potential applications in a review of relevant literature including time-resolved experiments and digital rock physics. The paper concludes with a report on ongoing developments and upgrades at synchrotron facilities to frame the future possibilities for imaging sub-second processes in centimetre-sized samples. {\textcopyright} 2014 Elsevier Ltd. }, } |
2014 | Journal | Olivier Ertz, Sergio J. Rey, Stéphane Joost (2014). The open source dynamics in geospatial research and education. Journal of Spatial Information Science, 8(2014), pp. 67–71. (link) (bib) x @article{Ertz2014, year = { 2014 }, volume = { 8 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84919960299{\&}doi=10.5311{\%}2FJOSIS.2014.8.182{\&}partnerID=40{\&}md5=a7c671f5acc3d9aeaea0fe07a547c7fb }, type = { Journal Article }, title = { The open source dynamics in geospatial research and education }, pages = { 67--71 }, number = { 2014 }, journal = { Journal of Spatial Information Science }, issn = { 1948660X }, doi = { 10.5311/JOSIS.2014.8.182 }, author = { Ertz and Rey and Joost }, } |
2014 | Journal | S. Dietrich, M. Koch, P. Elsner, K. Weidenmann (2014). Measurement of Sub-Surface Core Damage in Sandwich Structures Using In-situ Hertzian Indentation During X-ray Computed Tomography. Experimental Mechanics, 54(8), pp. 1385–1393. (link) (bib) x @article{Dietrich2014, year = { 2014 }, volume = { 54 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Measurement of Sub-Surface Core Damage in Sandwich Structures Using In-situ Hertzian Indentation During X-ray Computed Tomography }, pages = { 1385--1393 }, number = { 8 }, keywords = { Contact modelling,GFRP honeycomb sandwich,In-situ loading,Indentation,Micro-computed tomography }, journal = { Experimental Mechanics }, issn = { 17412765 }, doi = { 10.1007/s11340-014-9902-2 }, author = { Dietrich and Koch and Elsner and Weidenmann }, abstract = { Composite sandwich structures with honeycomb cores show varying properties in geometry and mechanical behavior depending on the studied scale. Herein a new test and evaluation method for sub-surface core damage in the indentation area of honeycomb sandwich structures using computed tomography is presented. The combination of X-ray micro-computed tomography (X-$\mu$CT) and an image analysis procedure adjusted to the detection of core deformation mechanisms allows the extraction and quantification of externally invisible, sub-surface damage in the sandwich composite. For this specific contact or indentation loading case on the sandwich face sheet an in-situ device is introduced, enabling a 3D analysis of the structural change during progressing indentation depth. }, } |
2014 | Journal | Benjamin De Leener, Samuel Kadoury, Julien Cohen-Adad (2014). Robust, accurate and fast automatic segmentation of the spinal cord. NeuroImage, 98, pp. 528–536. (link) (bib) x @article{DeLeener2014, year = { 2014 }, volume = { 98 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Robust, accurate and fast automatic segmentation of the spinal cord }, pages = { 528--536 }, keywords = { Automatic,Deformable model,MRI,Propagation,Spinal cord segmentation }, journal = { NeuroImage }, issn = { 10959572 }, doi = { 10.1016/j.neuroimage.2014.04.051 }, author = { {De Leener} and Kadoury and Cohen-Adad }, abstract = { Spinal cord segmentation provides measures of atrophy and facilitates group analysis via inter-subject correspondence. Automatizing this procedure enables studies with large throughput and minimizes user bias. Although several automatic segmentation methods exist, they are often restricted in terms of image contrast and field-of-view. This paper presents a new automatic segmentation method (PropSeg) optimized for robustness, accuracy and speed. The algorithm is based on the propagation of a deformable model and is divided into three parts: firstly, an initialization step detects the spinal cord position and orientation using a circular Hough transform on multiple axial slices rostral and caudal to the starting plane and builds an initial elliptical tubular mesh. Secondly, a low-resolution deformable model is propagated along the spinal cord. To deal with highly variable contrast levels between the spinal cord and the cerebrospinal fluid, the deformation is coupled with a local contrast-to-noise adaptation at each iteration. Thirdly, a refinement process and a global deformation are applied on the propagated mesh to provide an accurate segmentation of the spinal cord. 
Validation was performed in 15 healthy subjects and two patients with spinal cord injury, using T1- and T2-weighted images of the entire spinal cord and on multiecho T2*-weighted images. Our method was compared against manual segmentation and against an active surface method. Results show high precision for all the MR sequences. Dice coefficients were 0.9 for the T1- and T2-weighted cohorts and 0.86 for the T2*-weighted images. The proposed method runs in less than 1min on a normal computer and can be used to quantify morphological features such as cross-sectional area along the whole spinal cord. {\textcopyright} 2014 Elsevier Inc. }, } |
2014 | Journal | Eric M. Bultman, Ethan K. Brodsky, Debra E. Horng, Pablo Irarrazaval, William R. Schelman, Walter F. Block, Scott B. Reeder (2014). Quantitative hepatic perfusion modeling using DCE-MRI with sequential breathholds. Journal of Magnetic Resonance Imaging, 39(4), pp. 853–865. (link) (bib) x @article{Bultman2014, year = { 2014 }, volume = { 39 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Quantitative hepatic perfusion modeling using DCE-MRI with sequential breathholds }, pmid = { 24395144 }, pages = { 853--865 }, number = { 4 }, keywords = { DCE-MRI,hepatic perfusion modeling,hepatocellular carcinoma,quantitative perfusion MRI,tumor perfusion modeling }, journal = { Journal of Magnetic Resonance Imaging }, issn = { 15222586 }, doi = { 10.1002/jmri.24238 }, author = { Bultman and Brodsky and Horng and Irarrazaval and Schelman and Block and Reeder }, abstract = { Purpose To develop and demonstrate the feasibility of a new formulation for quantitative perfusion modeling in the liver using interrupted DCE-MRI data acquired during multiple sequential breathholds. Materials and Methods A new mathematical formulation to estimate quantitative perfusion parameters using interrupted data was developed. Using this method, we investigated whether a second degree-of-freedom in the tissue residue function (TRF) improves quality-of-fit criteria when applied to a dual-input single-compartment perfusion model. We subsequently estimated hepatic perfusion parameters using DCE-MRI data from 12 healthy volunteers and 9 cirrhotic patients with a history of hepatocellular carcinoma (HCC); and examined the utility of these estimates in differentiating between healthy liver, cirrhotic liver, and HCC. Results Quality-of-fit criteria in all groups were improved using a Weibull TRF (2 degrees-of-freedom) versus an exponential TRF (1 degree-of-freedom), indicating nearer concordance of source DCE-MRI data with the Weibull model. 
Using the Weibull TRF, arterial fraction was greater in cirrhotic versus normal liver (39 ± 23{\%} versus 15 ± 14{\%}, P = 0.07). Mean transit time (20.6 ± 4.1 s versus 9.8 ± 3.5 s, P = 0.01) and arterial fraction (39 ± 23{\%} versus 73 ± 14{\%}, P = 0.04) were both significantly different between cirrhotic liver and HCC, while differences in total perfusion approached significance. Conclusion This work demonstrates the feasibility of estimating hepatic perfusion parameters using interrupted data acquired during sequential breathholds. {\textcopyright} 2013 Wiley Periodicals, Inc. }, } |
2014 | Journal | Brian B. Avants, Nicholas J. Tustison, Michael Stauffer, Gang Song, Baohua Wu, James C. Gee (2014). The Insight ToolKit image registration framework. Frontiers in Neuroinformatics, 8(APR), pp. 13. (link) (bib) x @article{Avants2014, year = { 2014 }, volume = { 8 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { The Insight ToolKit image registration framework }, pages = { 13 }, number = { APR }, keywords = { Brain,Death,MRI,Open-source,Registration }, journal = { Frontiers in Neuroinformatics }, issn = { 16625196 }, doi = { 10.3389/fninf.2014.00044 }, author = { Avants and Tustison and Stauffer and Song and Wu and Gee }, abstract = { Publicly available scientific resources help establish evaluation standards, provide a platform for teaching and improve reproducibility. Version 4 of the Insight ToolKit (ITK4) seeks to establish new standards in publicly available image registration methodology. ITK4 makes several advances in comparison to previous versions of ITK. ITK4 supports both multivariate images and objective functions; it also unifies high-dimensional (deformation field) and low-dimensional (affine) transformations with metrics that are reusable across transform types and with composite transforms that allow arbitrary series of geometric mappings to be chained together seamlessly. Metrics and optimizers take advantage of multi-core resources, when available. Furthermore, ITK4 reduces the parameter optimization burden via principled heuristics that automatically set scaling across disparate parameter types (rotations vs. translations). A related approach also constrains steps sizes for gradient-based optimizers. The result is that tuning for different metrics and/or image pairs is rarely necessary allowing the researcher to more easily focus on design/comparison of registration strategies. 
In total, the ITK4 contribution is intended as a structure to support reproducible research practices, will provide a more extensive foundation against which to evaluate new work in image registration and also enable application level programmers a broad suite of tools on which to build. Finally, we contextualize this work with a reference registration evaluation study with application to pediatric brain labeling. {\textcopyright} 2014 Avants, Tustison, Stauffer, Song, Wuand Gee. }, } |
2014 | Journal | Daniil P. Aksenov, Limin Li, Gheorghe Iordanescu, Michael J. Miller, Alice M. Wyrwicz (2014). Volume effect of localized injection in functional MRI and electrophysiology. Magnetic Resonance in Medicine, 72(4), pp. 1170–1175. (link) (bib) x @article{Aksenov2014, year = { 2014 }, volume = { 72 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Volume effect of localized injection in functional MRI and electrophysiology }, pmid = { 24273205 }, pages = { 1170--1175 }, number = { 4 }, keywords = { ACSF,Brain,Injection,Single unit,Volume effect,fMRI }, journal = { Magnetic Resonance in Medicine }, issn = { 15222594 }, doi = { 10.1002/mrm.24996 }, author = { Aksenov and Li and Iordanescu and Miller and Wyrwicz }, abstract = { Purpose: The local injection of neurotransmitter agonists and antagonists to modulate recorded neurons in awake animals has long been an important and widely used technique in neuroscience. Combined with functional magnetic resonance imaging (fMRI) and simultaneous electrophysiology, local injection enables the study of specific brain regions under precise modulations of their neuronal activity. However, localized injections are often accompanied by mechanical displacement of the tissue, known as volume effect (VE), which can induce changes in electrophysiological recordings as well as artifacts that are particular to fMRI studies. Methods: We characterize the changes produced by VE in an agarose phantom as well as during stimulus-evoked and resting-state fMRI and simultaneously acquired electrophysiology in awake rabbits. Results: Our results demonstrate that localized injection can produce significant intensity changes in fMRI data, even while effects on electrophysiological recordings are minimized. These changes are localized to the vicinity of the injection needle and diminish over time due to diffusion of the injected volume. 
Conclusion: Sufficient time should be allowed for drug diffusion to ensure stable results, particularly for resting-state fMRI experiments. }, } |
2014 | Journal | Troy K. Adebar, Ashley E. Fletcher, Allison M. Okamura (2014). 3-D ultrasound-guided robotic needle steering in biological tissue. IEEE Transactions on Biomedical Engineering, 61(12), pp. 2899–2910. (link) (bib) x @article{Adebar2014, year = { 2014 }, volume = { 61 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { 3-D ultrasound-guided robotic needle steering in biological tissue }, pages = { 2899--2910 }, number = { 12 }, keywords = { Image-guided intervention,robotic needle steering,ultrasound Doppler,ultrasound imaging }, journal = { IEEE Transactions on Biomedical Engineering }, issn = { 15582531 }, doi = { 10.1109/TBME.2014.2334309 }, author = { Adebar and Fletcher and Okamura }, abstract = { Robotic needle steering systems have the potential to greatly improve medical interventions, but they require new methods for medical image guidance. Three-dimensional (3-D) ultrasound is a widely available, low-cost imaging modality that may be used to provide real-time feedback to needle steering robots. Unfortunately, the poor visibility of steerable needles in standard grayscale ultrasound makes automatic segmentation of the needles impractical. A new imaging approach is proposed, in which high-frequency vibration of a steerable needle makes it visible in ultrasound Doppler images. Experiments demonstrate that segmentation from this Doppler data is accurate to within 1-2 mm. An image-guided control algorithm that incorporates the segmentation data as feedback is also described. In experimental tests in ex vivo bovine liver tissue, a robotic needle steering system implementing this control scheme was able to consistently steer a needle tip to a simulated target with an average error of 1.57 mm. Implementation of 3-D ultrasound-guided needle steering in biological tissue represents a significant step toward the clinical application of robotic needle steering. }, } |
2014 | Journal | George Teodoro, Tony Pan, Tahsin Kurc, Jun Kong, Lee Cooper, Scott Klasky, Joel Saltz (2014). Region templates: Data representation and management for high-throughput image analysis. Parallel Computing, 40(10), pp. 589–610. (link) (bib) x @article{RN953, year = { 2014 }, volume = { 40 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84908545293{\&}doi=10.1016{\%}2Fj.parco.2014.09.003{\&}partnerID=40{\&}md5=811558beee3d6bf9758c3001bf55bb7b }, type = { Journal Article }, title = { Region templates: Data representation and management for high-throughput image analysis }, pages = { 589--610 }, number = { 10 }, keywords = { GPGPU,Heterogeneous environments,Image analysis,Microscopy imaging,Storage and I/O }, journal = { Parallel Computing }, issn = { 01678191 }, eprint = { 1405.7958 }, doi = { 10.1016/j.parco.2014.09.003 }, author = { Teodoro and Pan and Kurc and Kong and Cooper and Klasky and Saltz }, arxivid = { 1405.7958 }, archiveprefix = { arXiv }, abstract = { We introduce a region template abstraction and framework for the efficient storage, management and processing of common data types in analysis of large datasets of high resolution images on clusters of hybrid computing nodes. The region template abstraction provides a generic container template for common data structures, such as points, arrays, regions, and object sets, within a spatial and temporal bounding box. It allows for different data management strategies and I/O implementations, while providing a homogeneous, unified interface to applications for data storage and retrieval. A region template application is represented as a hierarchical dataflow in which each computing stage may be represented as another dataflow of finer-grain tasks. 
The execution of the application is coordinated by a runtime system that implements optimizations for hybrid machines, including performance-aware scheduling for maximizing the utilization of computing devices and techniques to reduce the impact of data transfers between CPUs and GPUs. An experimental evaluation on a state-of-the-art hybrid cluster using a microscopy imaging application shows that the abstraction adds negligible overhead (about 3{\%}) and achieves good scalability and high data transfer rates. Optimizations in a high speed disk based storage implementation of the abstraction to support asynchronous data transfers and computation result in an application performance gain of about 1.13x. Finally, a processing rate of 11,730 4K x 4K tiles per minute was achieved for the microscopy imaging application on a cluster with 100 nodes (300 GPUs and 1200 CPU cores). This computation rate enables studies with very large datasets. }, } |
2014 | In Collection | J Zhang, H Sorby, J Clement, C D L Thomas, P Hunter, P Nielsen, D Lloyd, M Taylor, T Besier (2014). The MAP client: User-friendly musculoskeletal modelling workflows. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 182–192. (link) (bib) x @incollection{Zhang2014a, year = { 2014 }, volume = { 8789 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84911390478{\&}partnerID=40{\&}md5=d52525096590e132a0f2bdfbee3e3a8e }, type = { Serial }, title = { The MAP client: User-friendly musculoskeletal modelling workflows }, pages = { 182--192 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Zhang and Sorby and Clement and Thomas and Hunter and Nielsen and Lloyd and Taylor and Besier }, } |
2014 | In Collection | S Tourbier, X Bresson, P Hagmann, J P Thiran, R Meuli, M B Cuadra (2014). Efficient total variation algorithm for fetal brain MRI reconstruction. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 252–259. (link) (bib) x @incollection{Tourbier2014a, year = { 2014 }, volume = { 8674 LNCS }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84906979454{\&}doi=10.1007{\%}2F978-3-319-10470-6{\_}32{\&}partnerID=40{\&}md5=d90bc6bfffc46271b10c0f02d0ea2f2a }, type = { Serial }, title = { Efficient total variation algorithm for fetal brain MRI reconstruction }, pages = { 252--259 }, doi = { 10.1007/978-3-319-10470-6_32 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Tourbier and Bresson and Hagmann and Thiran and Meuli and Cuadra }, } |
2014 | In Collection | Hans J. Johnson, Luis Ib, Matthew Mccormick, Software Consortium (2014). The ITK Software Guide Book 2 : Design and Functionality Fourth Edition Updated for ITK version 4.6. In The ITK Software GUIDE, pp. 805. (bib) x @incollection{Johnson2014a, year = { 2014 }, title = { The ITK Software Guide Book 2 : Design and Functionality Fourth Edition Updated for ITK version 4.6 }, pages = { 805 }, keywords = { Guide,Registration,Segmentation }, isbn = { 978-1-930934-28-3 }, doi = { 1�930934-15�7 }, booktitle = { The ITK Software GUIDE }, author = { Johnson and Ib and Mccormick and Consortium }, abstract = { The Insight Toolkit (ITK) is an open-source software toolkit for performing registration and segmentation. Segmentation is the process of identifying and classifying data found in a digitally sampled representation. Typically the sampled representation is an image acquired from such medical instrumentation as CT or MRI scanners. Registration is the task of aligning or developing correspondences between data. For example, in the medical environment, a CT scan may be aligned with a MRI scan in order to combine the information contained in both. ITK is a cross-platform software. It uses a build environment known as CMake to manage platform-specific project generation and compilation process in a platform-independent way. ITK is implemented in C++. ITK's implementation style employs generic programming, which involves the use of templates to generate, at compile-time, code that can be applied generically to any class or data-type that supports the operations used by the template. The use of C++ templating means that the code is highly efficient and many issues are discovered at compile- time, rather than at run-time during program execution. It also means that many of ITK's algorithms can be applied to arbitrary spatial dimensions and pixel types. 
An automated wrapping system integrated with ITK generates an interface between C++ and a high-level programming language Python. This enables rapid prototyping and faster exploration of ideas by shortening the edit-compile-execute cycle. In addition to automated wrapping, the SimpleITK project provides a streamlined interface to ITK that is available for C++, Python, Java, CSharp, R, Tcl and Ruby. Developers from around the world can use, debug, maintain, and extend the software because ITK is an open-source project. ITK uses a model of software development known as Extreme Programming. Extreme Programming collapses the usual software development methodology into a simultaneous iterative process of design-implement-test-release. The key features of Extreme Programming are communication and testing. Communication among the members of the ITK community is what helps manage the rapid evolution of the software. Testing is what keeps the software stable. An extensive testing process supported by the system known as CDash measures the quality of ITK code on a daily basis. The ITK Testing Dashboard is updated continuously, reflecting the quality of the code at any moment. The most recent version of this document is available online at http://itk.org/ItkSoftwareGuide.pdf. This book is a guide to developing soft- ware with ITK; it is the first of two companion books. This book covers building and installation, general architecture and design, as well as the process of contributing in the ITK community. The second book covers detailed design and functionality for reading and and writing images, filtering, registration, segmentation, and performing statistical analysis. }, } |
2014 | In Collection | Sébastien Tourbier, Xavier Bresson, Patric Hagmann, Jean Philippe Thiran, Reto Meuli, Meritxell Bach Cuadra (2014). Efficient total variation algorithm for fetal brain MRI reconstruction. In P Golland, N Hata, C Barillot, J Hornegger, R Howe, editor, Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 252–259, Berlin. (link) (bib) x @incollection{Tourbier2014, year = { 2014 }, volume = { 8674 LNCS }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84906979454{\&}doi=10.1007{\%}2F978-3-319-10470-6{\_}32{\&}partnerID=40{\&}md5=d90bc6bfffc46271b10c0f02d0ea2f2a {\%}3CGo to }, type = { Serial }, title = { Efficient total variation algorithm for fetal brain MRI reconstruction }, series = { Lecture Notes in Computer Science }, publisher = { Springer-Verlag Berlin }, pages = { 252--259 }, number = { PART 2 }, issn = { 16113349 }, isbn = { 9783319104690 }, editor = { [object Object],[object Object],[object Object],[object Object],[object Object] }, doi = { 10.1007/978-3-319-10470-6_32 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Tourbier and Bresson and Hagmann and Thiran and Meuli and Cuadra }, address = { Berlin }, abstract = { Fetal MRI reconstruction aims at finding a high-resolution image given a small set of low-resolution images. It is usually modeled as an inverse problem where the regularization term plays a central role in the reconstruction quality. Literature has considered several regularization terms s.a. Dirichlet/Laplacian energy [1], Total Variation (TV)-based energies [2,3] and more recently non-local means [4]. Although TV energies are quite attractive because of their ability in edge preservation, standard explicit steepest gradient techniques have been applied to optimize fetal-based TV energies. 
The main contribution of this work lies in the introduction of a well-posed TV algorithm from the point of view of convex optimization. Specifically, our proposed TV optimization algorithm for fetal reconstruction is optimal w.r.t. the asymptotic and iterative convergence speeds O(1/n 2) and O(1/√$\epsilon$), while existing techniques are in O(1/n) and O(1/$\epsilon$). We apply our algorithm to (1) clinical newborn data, considered as ground truth, and (2) clinical fetal acquisitions. Our algorithm compares favorably with the literature in terms of speed and accuracy. {\textcopyright} 2014 Springer International Publishing. }, } |
2014 | In Collection | Ju Zhang, Hugh Sorby, John Clement, C. David L. Thomas, Peter Hunter, Poul Nielsen, David Lloyd, Mark Taylor, Thor Besier (2014). The MAP client: User-friendly musculoskeletal modelling workflows. In F Bello, S Cotin, editor, Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 182–192, Cham. (link) (bib) x @incollection{Zhang2014, year = { 2014 }, volume = { 8789 }, url = { {\%}3CGo to https://www.scopus.com/inward/record.uri?eid=2-s2.0-84911390478{\&}partnerID=40{\&}md5=d52525096590e132a0f2bdfbee3e3a8e }, type = { Serial }, title = { The MAP client: User-friendly musculoskeletal modelling workflows }, series = { Lecture Notes in Computer Science }, publisher = { Springer International Publishing Ag }, pages = { 182--192 }, keywords = { Biomechanical modelling,Musculoskeletal atlas project,Musculoskeletal modelling,Open source software,Personalised simulation,Pipelines,workflows }, issn = { 16113349 }, isbn = { 9783319120560 }, editor = { [object Object],[object Object] }, doi = { 10.1007/978-3-319-12057-7_21 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Zhang and Sorby and Clement and Thomas and Hunter and Nielsen and Lloyd and Taylor and Besier }, address = { Cham }, abstract = { Subject-specific models of the musculoskeletal system are capable of accurately estimating function and loads and show promise for clinical use. However, creating subject-specific models is time-consuming and requires high levels of expertise. To address these issues, we have developed the open source Musculoskeletal Atlas Project (MAP) Client software. The MAP Client provides a user-friendly interface for creating musculoskeletal modelling workflows using community-created plug-ins. 
In this paper, we discuss the design of the MAP Client, its plug-in architecture and its integration with the Physiome Model Repository. We demonstrate the use of MAP Client with a subject-specific femur modeling workflow using a set of modular open source plug-ins for image segmentation, landmark prediction, model registration and customisation. Our long-term goal is to foster a community of MAP users and plug-in developers to accelerate the clinical use of computational models. }, } |
2014 | In Collection | Amit Shah, Oliver Zettinig, Tobias Maurer, Cristina Precup, Christian Schulte Zu Berge, Jakob Weiss, Benjamin Frisch, Nassir Navab (2014). An Open Source Multimodal Image-Guided Prostate Biopsy Framework. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. NA (link) (bib) x @incollection{Shah2014, year = { 2014 }, volume = { 8680 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84921489612{\&}doi=10.1007{\%}2F978-3-319-13909-8{\_}1{\&}partnerID=40{\&}md5=dda55817f88614188e848aa38b528c3b }, type = { Serial }, title = { An Open Source Multimodal Image-Guided Prostate Biopsy Framework }, keywords = { MRI,Multimodal image-guided biopsy,Open source software,PET,Prostate cancer,TRUS }, issn = { 16113349 }, doi = { 10.1007/978-3-319-13909-8_1 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Shah and Zettinig and Maurer and Precup and {Schulte Zu Berge} and Weiss and Frisch and Navab }, abstract = { Although variousmodalities are used in prostate cancer imaging, transrectal ultrasound (TRUS) guided biopsy remains the gold standard for diagnosis. However, TRUS suffers from low sensitivity, leading to an elevated rate of false negative results. Magnetic Resonance Imaging (MRI) on the other hand provides currently the most accurate imagebased evaluation of the prostate. Thus, TRUS/MRI fusion image-guided biopsy has evolved to be the method of choice to circumvent the limitations of TRUS-only biopsy. Most commercial frameworks that offer such a solution rely on rigid TRUS/MRI fusion and rarely use additional information from other modalities such as Positron Emission Tomography (PET). Other frameworks require long interaction times and are complex to integrate with the clinical workflow. 
Available solutions are not fully able to meet the clinical requirements of speed and high precision at low cost simultaneously.We introduce an open source fusion biopsy framework that is low cost, simple to use and has minimal overhead in clinical workflow. Hence, it is ideal as a research platform for the implementation and rapid bench to bedside translation of new image registration and visualization approaches. We present the current status of the framework that uses pre-interventional PET and MRI rigidly registered with 3D TRUS for prostate biopsy guidance and discuss results from first clinical cases. }, } |
2014 | In Collection | Hans J. Johnson, Luis Ib, Matthew Mccormick, Software Consortium (2014). The ITK Software Guide Book 2 : Design and Functionality Fourth Edition Updated for ITK version 4.6. In The ITK Software GUIDE, pp. 805. (bib) x @incollection{Johnson2014, year = { 2014 }, title = { The ITK Software Guide Book 2 : Design and Functionality Fourth Edition Updated for ITK version 4.6 }, pages = { 805 }, keywords = { Guide,Registration,Segmentation }, isbn = { 978-1-930934-28-3 }, doi = { 1�930934-15�7 }, booktitle = { The ITK Software GUIDE }, author = { Johnson and Ib and Mccormick and Consortium }, abstract = { The Insight Toolkit (ITK) is an open-source software toolkit for performing registration and segmentation. Segmentation is the process of identifying and classifying data found in a digitally sampled representation. Typically the sampled representation is an image acquired from such medical instrumentation as CT or MRI scanners. Registration is the task of aligning or developing correspondences between data. For example, in the medical environment, a CT scan may be aligned with a MRI scan in order to combine the information contained in both. ITK is a cross-platform software. It uses a build environment known as CMake to manage platform-specific project generation and compilation process in a platform-independent way. ITK is implemented in C++. ITK's implementation style employs generic programming, which involves the use of templates to generate, at compile-time, code that can be applied generically to any class or data-type that supports the operations used by the template. The use of C++ templating means that the code is highly efficient and many issues are discovered at compile- time, rather than at run-time during program execution. It also means that many of ITK's algorithms can be applied to arbitrary spatial dimensions and pixel types. 
An automated wrapping system integrated with ITK generates an interface between C++ and a high-level programming language Python. This enables rapid prototyping and faster exploration of ideas by shortening the edit-compile-execute cycle. In addition to automated wrapping, the SimpleITK project provides a streamlined interface to ITK that is available for C++, Python, Java, CSharp, R, Tcl and Ruby. Developers from around the world can use, debug, maintain, and extend the software because ITK is an open-source project. ITK uses a model of software development known as Extreme Programming. Extreme Programming collapses the usual software development methodology into a simultaneous iterative process of design-implement-test-release. The key features of Extreme Programming are communication and testing. Communication among the members of the ITK community is what helps manage the rapid evolution of the software. Testing is what keeps the software stable. An extensive testing process supported by the system known as CDash measures the quality of ITK code on a daily basis. The ITK Testing Dashboard is updated continuously, reflecting the quality of the code at any moment. The most recent version of this document is available online at http://itk.org/ItkSoftwareGuide.pdf. This book is a guide to developing soft- ware with ITK; it is the first of two companion books. This book covers building and installation, general architecture and design, as well as the process of contributing in the ITK community. The second book covers detailed design and functionality for reading and and writing images, filtering, registration, segmentation, and performing statistical analysis. }, } |
2014 | In Conf. Proceedings | Emiliano Pastorelli, Heiko Herrmann (2014). Virtual Reality visualization for short fibre orientation analysis. In Proceedings of the Biennial Baltic Electronics Conference, BEC, pp. 201–204, New York. (link) (bib) x @inproceedings{Pastorelli2014, year = { 2014 }, volume = { 2015-Novem }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84971310302{\&}doi=10.1109{\%}2FBEC.2014.7320591{\&}partnerID=40{\&}md5=3476efbc0430ba44724fcc8006637b70 {\%}3CGo to }, type = { Book }, title = { Virtual Reality visualization for short fibre orientation analysis }, series = { 2014 Proceedings of the 14th Biennial Baltic Electronics Conference }, publisher = { Ieee }, pages = { 201--204 }, issn = { 17363705 }, isbn = { 9781467395397 }, doi = { 10.1109/BEC.2014.7320591 }, booktitle = { Proceedings of the Biennial Baltic Electronics Conference, BEC }, author = { Pastorelli and Herrmann }, address = { New York }, abstract = { The paper investigates the beneficial contribution of visual feedback in the development of an algorithm for the automatized analysis of fibre orientations in short fibre reinforced composites. Of special interest was steel fibre reinforced concrete (SFRC), a multi-disciplinary research area involving material sciences, physics and civil engineering. More in detail, this paper explains how scientific visualization techniques, employed on a Virtual Reality environment, contribute to the understanding of the SFRC properties, both for research and educational aims. Furthermore, the analysis algorithm to obtain fibre orientation distributions from noisy tomography scans is presented. }, } |
2014 | In Conf. Proceedings | Florence Kremer, Joachim Giard, Merence Sibomana, Jose Seabra, Jonathan Orban De Xivry, Rudi Labarbe, Benoit MacQ (2014). Feasibility and preliminary validation of 2D/3D image registration using fixed 2-D X-ray devices in image-guided radiotherapy. In 2014 1st International Conference on Advanced Technologies for Signal and Image Processing, ATSIP 2014, pp. 172–176. (link) (bib) x @inproceedings{Kremera, year = { 2014 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84903775128{\&}doi=10.1109{\%}2FATSIP.2014.6834600{\&}partnerID=40{\&}md5=a48a78e38dae1ddafa3b391003173fad }, type = { Conference Proceedings }, title = { Feasibility and preliminary validation of 2D/3D image registration using fixed 2-D X-ray devices in image-guided radiotherapy }, pages = { 172--176 }, keywords = { patient positioning,radiotherapy,stereoscopic geometry }, isbn = { 9781479948888 }, doi = { 10.1109/ATSIP.2014.6834600 }, booktitle = { 2014 1st International Conference on Advanced Technologies for Signal and Image Processing, ATSIP 2014 }, author = { Kremer and Giard and Sibomana and Seabra and {De Xivry} and Labarbe and MacQ }, abstract = { In radiotherapy, fixed 2-D X-ray imaging devices have several advantages compared to gantry-mounted systems, such as less geometrical deformations and the possibility to monitor 3-D markers motion in real-time. However, there is a lack of studies concerning the geometry of these systems. For example, in the case of a non-orthogonal geometry, the effect of the angle between the X-ray axes has not been investigated yet. In the first part of this study, the optimal angle was analyzed theoretically. Results showed that 60° between the axes still enables displacements of the order of 0.35 mm to be detected. In a second step, the performance of the registration method for such oblique configuration was evaluated on phantom data sets. 
It was found that using images separated by 60° rather than 90° required more than twice as much the number of iterations to obtain sufficient accuracy (i.e. 0.7 mm and 0.5°). {\textcopyright} 2014 IEEE. }, } |
2014 | In Conf. Proceedings | Luis G. Torres, Cenk Baykal, Ron Alterovitz (2014). Interactive-rate motion planning for concentric tube robots. In Proceedings - IEEE International Conference on Robotics and Automation, pp. 1915–1921. (link) (bib) x @inproceedings{RN952, year = { 2014 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84929208407{\&}doi=10.1109{\%}2FICRA.2014.6907112{\&}partnerID=40{\&}md5=fda323933ad36f2a50b9d3d5493cdb34 }, type = { Conference Proceedings }, title = { Interactive-rate motion planning for concentric tube robots }, publisher = { Institute of Electrical and Electronics Engineers Inc. }, pages = { 1915--1921 }, issn = { 10504729 }, isbn = { 10504729 (ISSN) }, doi = { 10.1109/ICRA.2014.6907112 }, booktitle = { Proceedings - IEEE International Conference on Robotics and Automation }, author = { Torres and Baykal and Alterovitz }, abstract = { Concentric tube robots may enable new, safer minimally invasive surgical procedures by moving along curved paths to reach difficult-to-reach sites in a patient's anatomy. Operating these devices is challenging due to their complex, unintuitive kinematics and the need to avoid sensitive structures in the anatomy. In this paper, we present a motion planning method that computes collision-free motion plans for concentric tube robots at interactive rates. Our method's high speed enables a user to continuously and freely move the robot's tip while the motion planner ensures that the robot's shaft does not collide with any anatomical obstacles. Our approach uses a highly accurate mechanical model of tube interactions, which is important since small movements of the tip position may require large changes in the shape of the device's shaft. Our motion planner achieves its high speed and accuracy by combining offline precomputation of a collision-free roadmap with online position control. 
We demonstrate our interactive planner in a simulated neurosurgical scenario where a user guides the robot's tip through the environment while the robot automatically avoids collisions with the anatomical obstacles. }, } |
2014 | In Conf. Proceedings | Sokratis Makrogiannis, Luigi Ferrucci (2014). Software system for computing material and structural properties of bone and muscle in the lower extremity from pQCT. In Sensing Technologies for Global Health, Military Medicine, and Environmental Monitoring IV, pp. 911216. (link) (bib) x @inproceedings{RN951, year = { 2014 }, volume = { 9112 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84907042843{\&}doi=10.1117{\%}2F12.2050790{\&}partnerID=40{\&}md5=f686ccba07e29e0052a7bb5edd216e5b }, type = { Conference Proceedings }, title = { Software system for computing material and structural properties of bone and muscle in the lower extremity from pQCT }, publisher = { SPIE }, pages = { 911216 }, issn = { 1996756X }, isbn = { 9781628410495 }, doi = { 10.1117/12.2050790 }, booktitle = { Sensing Technologies for Global Health, Military Medicine, and Environmental Monitoring IV }, author = { Makrogiannis and Ferrucci }, abstract = { Peripheral Quantitative Computed Tomography (pQCT) is a non-invasive imaging technology that is well-suited for quantification of bone structural and material properties. Because of its increasing use and applicability, the development of automated quantification methods for pQCT images is an appealing field of research. In this paper we introduce a software system for hard and soft tissue quantification in the lower leg using pQCT imaging data. The main stages of our approach are the segmentation and identification of bone, muscle and fat, and the computation of densitometric and geometric variables of each regional tissue type. Our system was validated against reference area and densitometric measurements over a set of test images and produced encouraging results. {\textcopyright} 2014 Copyright SPIE. }, } |
2014 | In Conf. Proceedings | Iago Landesa-Vazquez, Jose Luis Alba-Castro, Moises Mera-Iglesias, David Aramburu-Nunez, Antonio Lopez-Medina, Victor Munoz-Garzon (2014). ARTFIBio: A cross-platform image registration tool for tumor response quantification in head and neck cancer. In 2014 IEEE-EMBS International Conference on Biomedical and Health Informatics, BHI 2014, pp. 149–152. (link) (bib) x @inproceedings{RN950, year = { 2014 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84906848785{\&}doi=10.1109{\%}2FBHI.2014.6864326{\&}partnerID=40{\&}md5=edbfdf06c7b7a66d48a98f667bc3d4ad }, type = { Conference Proceedings }, title = { ARTFIBio: A cross-platform image registration tool for tumor response quantification in head and neck cancer }, publisher = { IEEE Computer Society }, pages = { 149--152 }, isbn = { 9781479921317 }, doi = { 10.1109/BHI.2014.6864326 }, booktitle = { 2014 IEEE-EMBS International Conference on Biomedical and Health Informatics, BHI 2014 }, author = { Landesa-Vazquez and Alba-Castro and Mera-Iglesias and Aramburu-Nunez and Lopez-Medina and Munoz-Garzon }, abstract = { In this paper we present a novel image registration software that has been specifically designed to suit the needs of the study of tumor response quantification. Joining a cross-platform architecture, the exclusive use of open-source libraries and some unique features, our tool is being successfully used in the frame of the ARTFIBio project, focused on the study of predictive individualized models of head and neck tumor response to radiotherapy. {\textcopyright} 2014 IEEE. }, } |
2013 | Book | E Kugu (2013). Satellite Image Denoising Using Bilateral Filter with SPEA2 Optimized Parameters, IEEE, 2013. (link) (bib) x @book{Kugu2013, year = { 2013 }, type = { Book }, title = { Satellite Image Denoising Using Bilateral Filter with SPEA2 Optimized Parameters }, series = { Proceedings of 6th International Conference on Recent Advances in Space Technologies }, publisher = { IEEE }, pages = { 217--223 }, isbn = { 978-1-4673-6396-9; 978-1-4673-6395-2 }, author = { Kugu }, address = { New York }, } |
2013 | Book | Hans J. Johnson, Matt Mccormick, Luis Ibanez, Insight Software Consortium (2013). The ITK Software Guide Third Edition - Updated for ITK version 4.5, NA 2013. (pdf) (bib) x @book{johnson2013itk, year = { 2013 }, url = { http://itk.org/ItkSoftwareGuide.pdf }, title = { The ITK Software Guide Third Edition - Updated for ITK version 4.5 }, author = { Johnson and Mccormick and Ibanez and Consortium }, abstract = { of ideas by shortening the edit-compile-execute cycle. In addition to automated wrapping, the SimpleITK project provides a streamlined interface to ITK that is available for C++, Python, Java, CSharp, R, Tcl and Ruby. Developers from around the world can use, debug, maintain, and extend the software because ITK is an open-source project. ITK uses a model of software development known as Extreme Programming. Extreme Programming collapses the usual software development methodology into a simultaneous iterative process of design-implement-test-release. The key features of Extreme Programming are communication and testing. Communication among the members of the ITK community is what helps manage the rapid evolution of the software. Testing is what keeps the software stable. An extensive testing process supported by the system known as CDash measures the quality of ITK code on a daily basis. The ITK Testing Dashboard is updated continuously, reflecting the quality of the code at any moment. This book is a guide to developing software with ITK. It covers building and installation, ar- chitecture and design, image analysis theory and its applications, as well as the process of contributing to the ITK community. The most recent version of this document http://itk.org/ItkSoftwareGuide.pdf. }, } |
2013 | Book chapter | A Rezaei, J Nuyts (2013). NA in Joint registration of attenuation and activity images in gated TOF-PET, IEEE, pp. NA IEEE Nuclear Science Symposium and Medical Imaging Conference. (link) (bib) x @inbook{Rezaei2013, year = { 2013 }, type = { Book Section }, title = { Joint registration of attenuation and activity images in gated TOF-PET }, series = { IEEE Nuclear Science Symposium and Medical Imaging Conference }, publisher = { IEEE }, isbn = { 978-1-4799-0534-8 }, booktitle = { 2013 IEEE Nuclear Science Symposium and Medical Imaging Conference }, author = { Rezaei and Nuyts }, address = { New York }, } |
2013 | Book chapter | T Seidel, T Draebing, G Seemann, F B Sachse (2013). NA in A Semi-automatic Approach for Segmentation of Three-Dimensional Microscopic Image Stacks of Cardiac Tissue, Edited by S Ourselin, D Rueckert, N Smith, Springer-Verlag Berlin, pp. 300–307, Lecture Notes in Computer Science, Vol. 7945. (link) (bib) x @inbook{Seidel2013, year = { 2013 }, volume = { 7945 }, type = { Book Section }, title = { A Semi-automatic Approach for Segmentation of Three-Dimensional Microscopic Image Stacks of Cardiac Tissue }, series = { Lecture Notes in Computer Science }, publisher = { Springer-Verlag Berlin }, pages = { 300--307 }, isbn = { 978-3-642-38899-6; 978-3-642-38898-9 }, editor = { Ourselin, S. and Rueckert, D. and Smith, N. }, booktitle = { Functional Imaging and Modeling of the Heart }, author = { Seidel and Draebing and Seemann and Sachse }, address = { Berlin }, } |
2013 | Book chapter | Nelson Velasco Toledo, Andrea Rueda, Cristina Santa Marta, Eduardo Romero (2013). NA in Super-resolution in cardiac MRI using a Bayesian approach, Edited by S Ourselin, D R Haynor, Spie-Int Soc Optical Engineering, pp. 866932, Proceedings of SPIE, Vol. 8669, ISBN: 16057422. (link) (bib) x @inbook{Toledo2013, year = { 2013 }, volume = { 8669 }, url = { {\%}3CGo to }, type = { Book Section }, title = { Super-resolution in cardiac MRI using a Bayesian approach }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 866932 }, issn = { 16057422 }, isbn = { 9780819494436 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2007074 }, booktitle = { Medical Imaging 2013: Image Processing }, author = { {Velasco Toledo} and Rueda and {Santa Marta} and Romero }, address = { Bellingham }, abstract = { Acquisition of proper cardiac MR images is highly limited by continued heart motion and apnea periods. A typical acquisition results in volumes with inter-slice separations of up to 8 mm. This paper presents a super-resolution strategy that estimates a high-resolution image from a set of low-resolution image series acquired in different non- orthogonal orientations. The proposal is based on a Bayesian approach that implements a Maximum a Posteriori (MAP) estimator combined with a Wiener filter. A pre-processing stage was also included, to correct or eliminate differences in the image intensities and to transform the low-resolution images to a common spatial reference system. The MAP estimation includes an observation image model that represents the different contributions to the voxel intensities based on a 3D Gaussian function. A quantitative and qualitative assessment was performed using synthetic and real images, showing that the proposed approach produces a high-resolution image with significant improvements (about 3dB in PSNR) with respect to a simple trilinear interpolation. 
The Wiener filter shows little contribution to the final result, demonstrating that the MAP uniformity prior is able to filter out a large amount of the acquisition noise. {\textcopyright} 2013 SPIE. }, } |
2013 | Book chapter | Rachel Sparks, B. Nicholas Bloch, Ernest Feleppa, Dean Barratt, Anant Madabhushi (2013). NA in Fully automated prostate magnetic resonance imaging and transrectal ultrasound fusion via a probabilistic registration metric, Edited by D R Holmes, Z R Yaniv, Spie-Int Soc Optical Engineering, pp. 86710A, Proceedings of SPIE, Vol. 8671, ISBN: 0277-786X. (link) (bib) x @inbook{Sparks2013, year = { 2013 }, volume = { 8671 }, url = { {\%}3CGo to }, type = { Book Section }, title = { Fully automated prostate magnetic resonance imaging and transrectal ultrasound fusion via a probabilistic registration metric }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 86710A }, issn = { 0277-786X }, isbn = { 9780819494450 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2007610 }, booktitle = { Medical Imaging 2013: Image-Guided Procedures, Robotic Interventions, and Modeling }, author = { Sparks and Bloch and Feleppa and Barratt and Madabhushi }, address = { Bellingham }, abstract = { In this work, we present a novel, automated, registration method to fuse magnetic resonance imaging (MRI) and transrectal ultrasound (TRUS) images of the prostate. Our methodology consists of: (1) delineating the prostate on MRI, (2) building a probabilistic model of prostate location on TRUS, and (3) aligning the MRI prostate segmentation to the TRUS probabilistic model. TRUS-guided needle biopsy is the current gold standard for prostate cancer (CaP) diagnosis. Up to 40{\%} of CaP lesions appear isoechoic on TRUS, hence TRUS-guided biopsy cannot reliably target CaP lesions and is associated with a high false negative rate. MRI is better able to distinguish CaP from benign prostatic tissue, but requires special equipment and training. MRI-TRUS fusion, whereby MRI is acquired pre-operatively and aligned to TRUS during the biopsy procedure, allows for information from both modalities to be used to help guide the biopsy. 
The use of MRI and TRUS in combination to guide biopsy at least doubles the yield of positive biopsies. Previous work on MRI-TRUS fusion has involved aligning manually determined fiducials or prostate surfaces to achieve image registration. The accuracy of these methods is dependent on the reader's ability to determine fiducials or prostate surfaces with minimal error, which is a difficult and time-consuming task. Our novel, fully automated MRI-TRUS fusion method represents a significant advance over the current state-of-the-art because it does not require manual intervention after TRUS acquisition. All necessary preprocessing steps (i.e. delineation of the prostate on MRI) can be performed offline prior to the biopsy procedure. We evaluated our method on seven patient studies, with B-mode TRUS and a 1.5 T surface coil MRI. Our method has a root mean square error (RMSE) for expertly selected fiducials (consisting of the urethra, calcifications, and the centroids of CaP nodules) of 3.39 ± 0.85 mm. {\textcopyright} 2013 SPIE. }, } |
2013 | Book chapter | Mirabela Rusu, B. Nicolas Bloch, Carl C. Jaffe, Neil M. Rofsky, Elizabeth M. Genega, Ernest Feleppa, Robert E. Lenkinski, Anant Madabhushi (2013). NA in Statistical 3D prostate imaging atlas construction via anatomically constrained registration, Edited by S Ourselin, D R Haynor, Spie-Int Soc Optical Engineering, pp. 866913, Proceedings of SPIE, Vol. 8669, ISBN: 0277-786X. (link) (bib) x @inbook{Rusu2013, year = { 2013 }, volume = { 8669 }, url = { {\%}3CGo to }, type = { Book Section }, title = { Statistical 3D prostate imaging atlas construction via anatomically constrained registration }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 866913 }, issn = { 0277-786X }, isbn = { 9780819494436 }, editor = { Ourselin, S. and Haynor, D. R. }, doi = { 10.1117/12.2006941 }, booktitle = { Medical Imaging 2013: Image Processing }, author = { Rusu and Bloch and Jaffe and Rofsky and Genega and Feleppa and Lenkinski and Madabhushi }, address = { Bellingham }, abstract = { Statistical imaging atlases allow for integration of information from multiple patient studies collected across different image scales and modalities, such as multi-parametric (MP) MRI and histology, providing population statistics regarding a specific pathology within a single canonical representation. Such atlases are particularly valuable in the identification and validation of meaningful imaging signatures for disease characterization in vivo within a population. Despite the high incidence of prostate cancer, an imaging atlas focused on different anatomic structures of the prostate, i.e. an anatomic atlas, has yet to be constructed. In this work we introduce a novel framework for MRI atlas construction that uses an iterative, anatomically constrained registration (AnCoR) scheme to enable the proper alignment of the prostate (Pr) and central gland (CG) boundaries. 
Our current implementation uses endorectal, 1.5T or 3T, T2-weighted MRI from 51 patients with biopsy confirmed cancer; however, the prostate atlas is seamlessly extensible to include additional MRI parameters. In our cohort, radical prostatectomy is performed following MP-MR image acquisition; thus ground truth annotations for prostate cancer are available from the histological specimens. Once mapped onto MP-MRI through elastic registration of histological slices to corresponding T2-w MRI slices, the annotations are utilized by the AnCoR framework to characterize the 3D statistical distribution of cancer per anatomic structure. Such distributions are useful for guiding biopsies toward regions of higher cancer likelihood and understanding imaging profiles for disease extent in vivo. We evaluate our approach via the Dice similarity coefficient (DSC) for different anatomic structures (delineated by expert radiologists): Pr, CG and peripheral zone (PZ). The AnCoR-based atlas had a CG DSC of 90.36{\%}, and Pr DSC of 89.37{\%}. Moreover, we evaluated the deviation of anatomic landmarks, the urethra and veromontanum, and found 3.64 mm and respectively 4.31 mm. Alternative strategies that use only the T2-w MRI or the prostate surface to drive the registration were implemented as comparative approaches. The AnCoR framework outperformed the alternative strategies by providing the lowest landmark deviations. {\textcopyright} 2013 SPIE. }, } |
2013 | Journal | Bradley C. Lowekamp, David T. Chen, Luis Ibá\~nez, Daniel Blezek (2013). The design of simpleITK. Frontiers in Neuroinformatics, 7(DEC), pp. 45. (link) (bib) x @article{Lowekamp2013, year = { 2013 }, volume = { 7 }, url = { http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=3874546{\&}tool=pmcentrez{\&}rendertype=abstract https://www.frontiersin.org/article/10.3389/fninf.2013.00045 }, title = { The design of simpleITK }, pmid = { 24416015 }, pages = { 45 }, number = { DEC }, keywords = { Image processing and analysis,Image processing software,Insight toolkit,Segmentation,Software design,Software development }, journal = { Frontiers in Neuroinformatics }, issn = { 16625196 }, isbn = { 2762010004 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Lowekamp et al/Frontiers in Neuroinformatics/Lowekamp et al. - 2013 - The design of simpleITK.pdf:pdf }, doi = { 10.3389/fninf.2013.00045 }, author = { Lowekamp and Chen and Ib{\'{a}}{\~{n}}ez and Blezek }, abstract = { SimpleITK is a new interface to the Insight Segmentation and Registration Toolkit (ITK) designed to facilitate rapid prototyping, education and scientific activities via high level programming languages. ITK is a templated C++ library of image processing algorithms and frameworks for biomedical and other applications, and it was designed to be generic, flexible and extensible. Initially, ITK provided a direct wrapping interface to languages such as Python and Tcl through the WrapITK system. Unlike WrapITK, which exposed ITK's complex templated interface, SimpleITK was designed to provide an easy to use and simplified interface to ITK's algorithms. It includes procedural methods, hides ITK's demand driven pipeline, and provides a template-less layer. Also SimpleITK provides practical conveniences such as binary distribution packages and overloaded operators. 
Our user-friendly design goals dictated a departure from the direct interface wrapping approach of WrapITK, toward a new facade class structure that only exposes the required functionality, hiding ITK's extensive template use. Internally SimpleITK utilizes a manual description of each filter with code-generation and advanced C++ meta-programming to provide the higher-level interface, bringing the capabilities of ITK to a wider audience. SimpleITK is licensed as an open source software library under the Apache License Version 2.0 and more information about downloading it can be found at http://www.simpleitk.org. {\textcopyright} 2013 Lowekamp, Chen, Ib{\'{a}}{\~{n}}ez and Blezek. }, } |
2013 | Journal | Stephan B. Sobottka, Tobias Meyer, Matthias Kirsch, Edmund Koch, Ralf Steinmeier, Ute Morgenstern, Gabriele Schackert (2013). Intraoperative optical imaging of intrinsic signals: A reliable method for visualizing stimulated functional brain areas during surgery. Journal of Neurosurgery, 119(4), pp. 853–863. (link) (bib) x @article{Sobottka2013, year = { 2013 }, volume = { 119 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84881515898{\&}doi=10.3171{\%}2F2013.5.JNS122155{\&}partnerID=40{\&}md5=8ed4109f65f8364384e1830b43ef2107 }, type = { Journal Article }, title = { Intraoperative optical imaging of intrinsic signals: A reliable method for visualizing stimulated functional brain areas during surgery }, pages = { 853--863 }, number = { 4 }, keywords = { Brain tumors,Functional brain mapping,Functional imaging,Functional neurosurgery,Image-guided surgery,Intraoperative optical imaging,Intrinsic signals,Oncology,Somatosensory cortex }, journal = { Journal of Neurosurgery }, issn = { 00223085 }, doi = { 10.3171/2013.5.JNS122155 }, author = { Sobottka and Meyer and Kirsch and Koch and Steinmeier and Morgenstern and Schackert }, abstract = { Object. Intraoperative optical imaging (IOI) is an experimental technique used for visualizing functional brain areas after surgical exposure of the cerebral cortex. This technique identifies areas of local changes in blood volume and oxygenation caused by stimulation of specific brain functions. The authors describe a new IOI method, including innovative data analysis, that can facilitate intraoperative functional imaging on a routine basis. To evaluate the reliability and validity of this approach, they used the new IOI method to demonstrate visualization of the median nerve area of the somatosensory cortex. Methods. In 41 patients with tumor lesions adjacent to the postcentral gyrus, lesions were surgically removed by using IOI during stimulation of the contralateral median nerve. 
Optical properties of the cortical tissue were measured with a sensitive camera system connected to a surgical microscope. Imaging was performed by using 9 cycles of alternating prolonged stimulation and rest periods of 30 seconds. Intraoperative optical imaging was based on blood volume changes detected by using a filter at an isosbestic wavelength ($\lambda$ = 568 nm). A spectral analysis algorithm was used to improve computation of the activity maps. Movement artifacts were compensated for by an elastic registration algorithm. For validation, intraoperative conduction of the phase reversal over the central sulcus and postoperative evaluation of the craniotomy site were used. Results. The new method and analysis enabled significant differentiation (p {\textless} 0.005) between functional and nonfunctional tissue. The identification and visualization of functionally intact somatosensory cortex was highly reliable; sensitivity was 94.4{\%} and specificity was almost 100{\%}. The surgeon was provided with a 2D high-resolution activity map within 12 minutes. No method-related side effects occurred in any of the 41 patients. Conclusions. The authors' new approach makes IOI a contact-free and label-free optical technique that can be used safely in a routine clinical setup. Intraoperative optical imaging can be used as an alternative to other methods for the identification of sensory cortex areas and offers the added benefit of a high-resolution map of functional activity. It has great potential for visualizing and monitoring additional specific functional brain areas such as the visual, motor, and speech cortex. A prospective national multicenter clinical trial is currently being planned. {\textcopyright} AANS, 2013. }, } |
2013 | Journal | Tobias Pietzsch, Stephan Preibisch, Pavel Tomančák, Stephan Saalfeld (2013). Erratum: ImgLib2 - Generic image processing in Java (Bioinformatics (2012) 28:22 (3009-3011) DOI: 10.1093/bioinformatics/bts543). Bioinformatics, 29(2), pp. 298. (link) (bib) x @article{Pietzsch2013, year = { 2013 }, volume = { 29 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Erratum: ImgLib2 - Generic image processing in Java (Bioinformatics (2012) 28:22 (3009-3011) DOI: 10.1093/bioinformatics/bts543) }, pages = { 298 }, number = { 2 }, journal = { Bioinformatics }, issn = { 13674803 }, doi = { 10.1093/bioinformatics/bts685 }, author = { Pietzsch and Preibisch and Toman{\v{c}}{\'{a}}k and Saalfeld }, } |
2013 | Journal | Oliver Bieri (2013). Ultra-fast steady state free precession and its application to in vivo 1H morphological and functional lung imaging at 1.5 tesla. Magnetic Resonance in Medicine, 70(3), pp. 657–663. (link) (bib) x @article{Bieri2013, year = { 2013 }, volume = { 70 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84883229895{\&}doi=10.1002{\%}2Fmrm.24858{\&}partnerID=40{\&}md5=8542d776ecb40faea69bd7bf6e63ebf1 }, type = { Journal Article }, title = { Ultra-fast steady state free precession and its application to in vivo 1H morphological and functional lung imaging at 1.5 tesla }, pages = { 657--663 }, number = { 3 }, keywords = { SSFP,imaging,lung,steady state,ultra-fast }, journal = { Magnetic Resonance in Medicine }, issn = { 07403194 }, doi = { 10.1002/mrm.24858 }, author = { Bieri }, abstract = { Purpose The speed limit for three-dimensional Fourier-encoded steady state free precession (SSFP) imaging is explored on a clinical whole body system and pushed toward a pulse repetition time (TR) close to or even below the 1 ms regime; in the following referred to as ultra-fast SSFP imaging. Methods To this end, contemporary optimization strategies, such as efficient gradient switching patterns, partial echoes, ramp sampling techniques, and a target-related design of excitation pulses were applied to explore the lower boundaries in TR for SSFP-based Cartesian imaging. Results Generally, minimal TR was limited in vivo by peripheral nerve stimulation, allowing a TR ∼1 ms for isotropic resolutions down to about 2 mm. As a result, ultra-fast balanced SSFP provides artifact-free images even for targets with severe susceptibility variations, and native high-resolution structural and functional in vivo 1H imaging of the human lung is demonstrated at 1.5 T. Conclusion On clinical whole body MRI systems, the TR of SSFP-based Cartesian imaging can be pushed toward the 1 ms regime. 
As a result, ultra-fast SSFP protocols might represent a promising new powerful approach for SSFP-based imaging, not only for lung but also in a variety of clinical and scientific applications. Copyright {\textcopyright} 2013 Wiley Periodicals, Inc. }, } |
2013 | Journal | Helen Xu, Andras Lasso, Peter Guion, Axel Krieger, Aradhana Kaushal, Anurag K. Singh, Peter A. Pinto, Jonathan Coleman, Robert L. Grubb, Jean Baptiste Lattouf, Cynthia Menard, Louis L. Whitcomb, Gabor Fichtinger (2013). Accuracy analysis in MRI-guided robotic prostate biopsy. International Journal of Computer Assisted Radiology and Surgery, 8(6), pp. 937–944. (link) (bib) x @article{Xu2013, year = { 2013 }, volume = { 8 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Accuracy analysis in MRI-guided robotic prostate biopsy }, pages = { 937--944 }, number = { 6 }, keywords = { Accuracy validation,Image registration,MRI-guidance,Prostate biopsy }, journal = { International Journal of Computer Assisted Radiology and Surgery }, issn = { 18616429 }, doi = { 10.1007/s11548-013-0831-9 }, author = { Xu and Lasso and Guion and Krieger and Kaushal and Singh and Pinto and Coleman and Grubb and Lattouf and Menard and Whitcomb and Fichtinger }, abstract = { Purpose: To assess retrospectively the clinical accuracy of an magnetic resonance imaging-guided robotic prostate biopsy system that has been used in the US National Cancer Institute for over 6 years. Methods: Series of 2D transverse volumetric MR image slices of the prostate both pre (high-resolution T2-weighted)- and post (low-resolution)- needle insertions were used to evaluate biopsy accuracy. A three-stage registration algorithm consisting of an initial two-step rigid registration followed by a B-spline deformable alignment was developed to capture prostate motion during biopsy. The target displacement (distance between planned and actual biopsy target), needle placement error (distance from planned biopsy target to needle trajectory), and biopsy error (distance from actual biopsy target to needle trajectory) were calculated as accuracy assessment. Results: A total of 90 biopsies from 24 patients were studied. 
The registrations were validated by checking prostate contour alignment using image overlay, and the results were accurate to within 2 mm. The mean target displacement, needle placement error, and clinical biopsy error were 5.2, 2.5, and 4.3 mm, respectively. Conclusion: The biopsy error reported suggests that quantitative imaging techniques for prostate registration and motion compensation may improve prostate biopsy targeting accuracy. {\textcopyright} 2013 CARS. }, } |
2013 | Journal | Gert Wollny, Peter Kellman, María Jesus Ledesma-Carbayo, Matthew M. Skinner, Jean Jaques Hublin, Thomas Hierl (2013). MIA - A free and open source software for gray scale medical image analysis. Source Code for Biology and Medicine, 8, pp. NA (link) (bib) x @article{Wollny2013, year = { 2013 }, volume = { 8 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84885357120{\&}doi=10.1186{\%}2F1751-0473-8-20{\&}partnerID=40{\&}md5=67c49f93d1bf780334ae506f97a71ff2 }, type = { Journal Article }, title = { MIA - A free and open source software for gray scale medical image analysis }, journal = { Source Code for Biology and Medicine }, issn = { 17510473 }, doi = { 10.1186/1751-0473-8-20 }, author = { Wollny and Kellman and Ledesma-Carbayo and Skinner and Hublin and Hierl }, abstract = { Background: Gray scale images make the bulk of data in bio-medical image analysis, and hence, the main focus of many image processing tasks lies in the processing of these monochrome images. With ever improving acquisition devices, spatial and temporal image resolution increases, and data sets become very large. Various image processing frameworks exist that make the development of new algorithms easy by using high level programming languages or visual programming. These frameworks are also accessible to researchers that have little or no background in software development because they take care of otherwise complex tasks. Specifically, the management of working memory is taken care of automatically, usually at the price of requiring more of it. As a result, processing large data sets with these tools becomes increasingly difficult on work station class computers. 
One alternative to using these high level processing tools is the development of new algorithms in a language like C++, that gives the developer full control over how memory is handled, but the resulting workflow for the prototyping of new algorithms is rather time intensive, and also not appropriate for a researcher with little or no knowledge in software development. Another alternative is in using command line tools that run image processing tasks, use the hard disk to store intermediate results, and provide automation by using shell scripts. Although not as convenient as, e.g. visual programming, this approach is still accessible to researchers without a background in computer science. However, only a few tools exist that provide this kind of processing interface, they are usually quite task specific, and don't provide a clear approach when one wants to shape a new command line tool from a prototype shell script. Results: The proposed framework, MIA, provides a combination of command line tools, plug-ins, and libraries that make it possible to run image processing tasks interactively in a command shell and to prototype by using the according shell scripting language. Since the hard disk becomes the temporal storage, memory management is usually a non-issue in the prototyping phase. By using string-based descriptions for filters, optimizers, and the likes, the transition from shell scripts to full fledged programs implemented in C++ is also made easy. In addition, its design based on atomic plug-ins and single tasks command line tools makes it easy to extend MIA, usually without the requirement to touch or recompile existing code. Conclusion: In this article, we describe the general design of MIA, a general purpose framework for gray scale image processing. 
We demonstrated the applicability of the software with example applications from three different research scenarios, namely motion compensation in myocardial perfusion imaging, the processing of high resolution image data that arises in virtual anthropology, and retrospective analysis of treatment outcome in orthognathic surgery. With MIA prototyping algorithms by using shell scripts that combine small, single-task command line tools is a viable alternative to the use of high level languages, an approach that is especially useful when large data sets need to be processed. {\textcopyright} 2013 Wollny et al.; licensee BioMed Central Ltd. }, } |
2013 | Journal | Stephan Wienert, Daniel Heim, Manato Kotani, Björn Lindequist, Albrecht Stenzinger, Masaru Ishii, Peter Hufnagl, Michael Beil, Manfred Dietel, Carsten Denkert, Frederick Klauschen (2013). CognitionMaster: An object-based image analysis framework. Diagnostic Pathology, 8(1), pp. 8. (link) (bib) x @article{Wienert2013, year = { 2013 }, volume = { 8 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { CognitionMaster: An object-based image analysis framework }, pages = { 8 }, number = { 1 }, keywords = { Image analysis,Object-based image analysis,Open source,Software }, journal = { Diagnostic Pathology }, issn = { 17461596 }, doi = { 10.1186/1746-1596-8-34 }, author = { Wienert and Heim and Kotani and Lindequist and Stenzinger and Ishii and Hufnagl and Beil and Dietel and Denkert and Klauschen }, abstract = { Background: Automated image analysis methods are becoming more and more important to extract and quantify image features in microscopy-based biomedical studies and several commercial or open-source tools are available. However, most of the approaches rely on pixel-wise operations, a concept that has limitations when high-level object features and relationships between objects are studied and if user-interactivity on the object-level is desired.Results: In this paper we present an open-source software that facilitates the analysis of content features and object relationships by using objects as basic processing unit instead of individual pixels. Our approach enables also users without programming knowledge to compose " analysis pipelines" that exploit the object-level approach. 
We demonstrate the design and use of example pipelines for the immunohistochemistry-based cell proliferation quantification in breast cancer and two-photon fluorescence microscopy data about bone-osteoclast interaction, which underline the advantages of the object-based concept.Conclusions: We introduce an open source software system that offers object-based image analysis. The object-based concept allows for a straight-forward development of object-related interactive or fully automated image analysis solutions. The presented software may therefore serve as a basis for various applications in the field of digital image analysis. {\textcopyright} 2013 Wienert et al.; licensee BioMed Central Ltd. }, } |
2013 | Journal | Kenneth L. Urish, Ashley A. Williams, John R. Durkin, Constance R. Chu (2013). Registration of Magnetic Resonance Image Series for Knee Articular Cartilage Analysis: Data from the Osteoarthritis Initiative. Cartilage, 4(1), pp. 20–27. (link) (bib) x @article{Urish2013, year = { 2013 }, volume = { 4 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Registration of Magnetic Resonance Image Series for Knee Articular Cartilage Analysis: Data from the Osteoarthritis Initiative }, pages = { 20--27 }, number = { 1 }, keywords = { DESS,MRI,T2,cartilage,registration }, journal = { Cartilage }, issn = { 19476035 }, doi = { 10.1177/1947603512451745 }, author = { Urish and Williams and Durkin and Chu }, abstract = { Objective: Although conventional radiography is used to assess osteoarthritis in a clinical setting, it has limitations, including an inability to stage early cartilage degeneration. There is a growing interest in using quantitative magnetic resonance imaging to identify degenerative changes in articular cartilage, including the large multicentered study, the Osteoarthritis Initiative (OAI). There is a demand for suitable image registration and segmentation software to complete this analysis. The objective of this study was to develop and validate the open source software, ImageK, that registers 3 T MRI T2 mapping and double echo steady state (DESS) knee MRI sequences acquired in the OAI protocol. Methods: A C++ library, the insight toolkit, was used to develop open source software to register DESS and T2 mapping image MRI sequences using Mattes's Multimodality Mutual information metric. Results: Registration was assessed using three separate methods. A checkerboard layout demonstrated acceptable visual alignment. Fiducial markers placed in cadaveric knees measured a registration error of 0.85 voxels. 
Measuring the local variation in Mattes's Mutual Information metric in the local area of the registered solution showed precision within 1 pixel. In this group, the registered solution required a transform of 56 voxels in translation and 1 degree of rotation. Conclusion: The software we have developed, ImageK, provides free, open source image analysis software that registers DESS and T2 mapping sequences of knee articular cartilage within 1 voxel accuracy. This image registration software facilitates quantitative MRI analyses of knee articular cartilage. {\textcopyright} The Author(s) 2013. }, } |
2013 | Journal | Tao Sun, Tung Hsin Wu, Shyh Jen Wang, Bang Hung Yang, Nien Yun Wu, Greta S.P. Mok (2013). Low dose interpolated average CT for thoracic PET/CT attenuation correction using an active breathing controller. Medical Physics, 40(10), pp. 9. (link) (bib) x @article{Sun2013, year = { 2013 }, volume = { 40 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Low dose interpolated average CT for thoracic PET/CT attenuation correction using an active breathing controller }, pages = { 9 }, number = { 10 }, keywords = { Active breathing controller,Attenuation correction,PET/CT,Respiratory artifacts }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4820976 }, author = { Sun and Wu and Wang and Yang and Wu and Mok }, abstract = { Purpose: The temporal mismatch between PET and standard helical CT (HCT) causes substantial respiratory artifacts in PET reconstructed images when using HCT as the attenuation map. Previously we developed an interpolated average CT (IACT) method for attenuation correction (AC) and demonstrated its merits in simulations. In this study we aim to apply IACT in patients with thoracic lesions using an active breathing controller (ABC). Methods: Under local ethics approval, we recruited 15 patients with a total of 18 lesions in different thoracic regions: left upper lobe (2), right upper lobe (4), right hilum (3), right lower lobe (3), left hilum (2), and esophagus (4). All patients underwent whole body PET scans 1 h after 300-480 MBq 18F-FDG injection, depending on the patients' weight. The PET sinograms were reconstructed with AC using: (i) standard HCT [120 kV, smart mA (30-150 mA), 0.984:1 pitch] and (ii) IACT obtained from end-inspiration and end-expiration breath-hold HCTs (120 kV, 10 mA, 0.984:1 pitch) aided by ABC. 
IACT was obtained by averaging the intensity of two extreme phases and the interpolated phases between them, where the nonlinear interpolation was obtained by B-spline registration and with an empirical sinusoidal function. The SUVmax, SUVmean, and the differences of centroid-of-lesion (d) between PET and different CT schemes were measured for each lesion. Results: From visual inspection, the respiratory artifacts and blurring generally reduced in the thoracic region for PET IACT. Matching between CT and PET improved for PETIACT, with an average decrease of d for 1.34 ± 1.79 mm as compared to PET HCT. The SUVmax and SUVmean were consistently higher for PETIACT versus PETHCT for all lesions, with (30.95 ± 18.63){\%} and (22.39 ± 15.91){\%} average increase, respectively. Conclusions: IACT-ABC reduces respiratory artifacts, PET/CT misregistration and enhances lesion quantitation. This technique is a robust and low dose AC protocol for clinical oncology application especially in the thoracic region. {\textcopyright} 2013 American Association of Physicists in Medicine. }, } |
2013 | Journal | C. Sjöberg, A. Ahnesjö (2013). Multi-atlas based segmentation using probabilistic label fusion with adaptive weighting of image similarity measures. Computer Methods and Programs in Biomedicine, 110(3), pp. 308–319. (link) (bib) x @article{Sjoberg2013, year = { 2013 }, volume = { 110 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Multi-atlas based segmentation using probabilistic label fusion with adaptive weighting of image similarity measures }, pages = { 308--319 }, number = { 3 }, keywords = { Atlas based segmentation,Deformable registration,Label fusion,Multi-atlas segmentation,Radiotherapy prostate,Segmentation }, journal = { Computer Methods and Programs in Biomedicine }, issn = { 01692607 }, doi = { 10.1016/j.cmpb.2012.12.006 }, author = { Sj{\"{o}}berg and Ahnesj{\"{o}} }, abstract = { Label fusion multi-atlas approaches for image segmentation can give better segmentation results than single atlas methods. We present a multi-atlas label fusion strategy based on probabilistic weighting of distance maps. Relationships between image similarities and segmentation similarities are estimated in a learning phase and used to derive fusion weights that are proportional to the probability for each atlas to improve the segmentation result. The method was tested using a leave-one-out strategy on a database of 21 pre-segmented prostate patients for different image registrations combined with different image similarity scorings. The probabilistic weighting yields results that are equal or better compared to both fusion with equal weights and results using the STAPLE algorithm. Results from the experiments demonstrate that label fusion by weighted distance maps is feasible, and that probabilistic weighted fusion improves segmentation quality more the stronger the individual atlas segmentation quality depends on the corresponding registered image similarity. 
The regions used for evaluation of the image similarity measures were found to be more important than the choice of similarity measure. {\textcopyright} 2013 Elsevier Ireland Ltd. }, } |
2013 | Journal | Sauli Savolainen, Mika Kortesniemi, Marjut Timonen, Vappu Reijonen, Linda Kuusela, Jouni Uusi-Simola, Eero Salli, Hanna Koivunoro, Tiina Seppälä, Nadja Lönnroth, Petteri Välimäki, Heini Hyvönen, Petri Kotiluoto, Tom Serén, Antti Kuronen, Sami Heikkinen, Antti Kosunen, Iiro Auterinen (2013). Boron neutron capture therapy (BNCT) in Finland: Technological and physical prospects after 20 years of experiences. Physica Medica, 29(3), pp. 233–248. (link) (bib) x @article{Savolainen2013, year = { 2013 }, volume = { 29 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Boron neutron capture therapy (BNCT) in Finland: Technological and physical prospects after 20 years of experiences }, pages = { 233--248 }, number = { 3 }, keywords = { Beam dosimetry,Boron imaging and determination,Diffusion imaging,Dose calculation,Image registration,Neutron dosimetry,Neutron sources,Treatment planning }, journal = { Physica Medica }, issn = { 11201797 }, doi = { 10.1016/j.ejmp.2012.04.008 }, author = { Savolainen and Kortesniemi and Timonen and Reijonen and Kuusela and Uusi-Simola and Salli and Koivunoro and Sepp{\"{a}}l{\"{a}} and L{\"{o}}nnroth and V{\"{a}}lim{\"{a}}ki and Hyv{\"{o}}nen and Kotiluoto and Ser{\'{e}}n and Kuronen and Heikkinen and Kosunen and Auterinen }, abstract = { Boron Neutron Capture Therapy (BNCT) is a binary radiotherapy method developed to treat patients with certain malignant tumours. To date, over 300 treatments have been carried out at the Finnish BNCT facility in various on-going and past clinical trials. In this technical review, we discuss our research work in the field of medical physics to form the groundwork for the Finnish BNCT patient treatments, as well as the possibilities to further develop and optimize the method in the future. 
Accordingly, the following aspects are described: neutron sources, beam dosimetry, treatment planning, boron imaging and determination, and finally the possibilities to detect the efficacy and effects of BNCT on patients. {\textcopyright} 2012 Associazione Italiana di Fisica Medica. }, } |
2013 | Journal | Martin Oelschlagel, Tobias Meyer, Hannes Wahl, Stephan B. Sobottka, Matthias Kirsch, Gabriele Schackert, Ute Morgenstern (2013). Evaluation of intraoperative optical imaging analysis methods by phantom and patient measurements. Biomedizinische Technik, 58(3), pp. 257–267. (link) (bib) x @article{Oelschlagel2013, year = { 2013 }, volume = { 58 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Evaluation of intraoperative optical imaging analysis methods by phantom and patient measurements }, pages = { 257--267 }, number = { 3 }, keywords = { Data analysis,Intrinsic signals,Optical imaging,Spectral analysis }, journal = { Biomedizinische Technik }, issn = { 00135585 }, doi = { 10.1515/bmt-2012-0077 }, author = { Oelschlagel and Meyer and Wahl and Sobottka and Kirsch and Schackert and Morgenstern }, abstract = { Intraoperative optical imaging (IOI) is a localization method for functional areas of the human brain cortex during neurosurgical procedures. The aim of the current work was to develop of a new analysis technique for the computation of two-dimensional IOI activity maps that is suited especially for use in clinical routine. The new analysis technique includes a stimulation scheme that comprises 30-s rest and 30-s stimulation conditions, in connection with pixelwise spectral power analysis for activity map calculation. A software phantom was used for verification of the implemented algorithms as well as for the comparison with the commonly used relative difference imaging method. Furthermore, the analysis technique was tested using intraoperative measurements on eight patients. The comparison with the relative difference algorithm revealed an averaged improvement of the signal-to-noise ratio between 95{\%} and 130{\%} for activity maps computed from intraoperatively acquired patient datasets. 
The results show that the new imaging technique improves the activity map quality of IOI especially under difficult intraoperative imaging conditions and is therefore especially suited for use in clinical routine. {\textcopyright} 2013 Walter de Gruyter GmbH. }, } |
2013 | Journal | Nilesh N. Mistry, Tejan Diwanji, Xiutao Shi, Sabin Pokharel, Steven Feigenberg, Steven M. Scharf, Warren D. D'Souza (2013). Evaluation of fractional regional ventilation using 4D-CT and effects of breathing maneuvers on ventilation. International Journal of Radiation Oncology Biology Physics, 87(4), pp. 825–831. (link) (bib) x @article{Mistry2013, year = { 2013 }, volume = { 87 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Evaluation of fractional regional ventilation using 4D-CT and effects of breathing maneuvers on ventilation }, pages = { 825--831 }, number = { 4 }, journal = { International Journal of Radiation Oncology Biology Physics }, issn = { 03603016 }, doi = { 10.1016/j.ijrobp.2013.07.032 }, author = { Mistry and Diwanji and Shi and Pokharel and Feigenberg and Scharf and D'Souza }, abstract = { Purpose: Current implementations of methods based on Hounsfield units to evaluate regional lung ventilation do not directly incorporate tissue-based mass changes that occur over the respiratory cycle. To overcome this, we developed a 4-dimensional computed tomography (4D-CT)-based technique to evaluate fractional regional ventilation (FRV) that uses an individualized ratio of tidal volume to end-expiratory lung volume for each voxel. We further evaluated the effect of different breathing maneuvers on regional ventilation. The results from this work will help elucidate the relationship between global and regional lung function. Methods and Materials: Eight patients underwent 3 sets of 4D-CT scans during 1 session using free-breathing, audiovisual guidance, and active breathing control. FRV was estimated using a density-based algorithm with mass correction. Internal validation between global and regional ventilation was performed by use of the imaging data collected during the use of active breathing control. The impact of breathing maneuvers on FRV was evaluated comparing the tidal volume from 3 breathing methods. 
Results: Internal validation through comparison between the global and regional changes in ventilation revealed a strong linear correlation (slope of 1.01, R2 of 0.97) between the measured global lung volume and the regional lung volume calculated by use of the "mass corrected" FRV. A linear relationship was established between the tidal volume measured with the automated breathing control system and FRV based on 4D-CT imaging. Consistently larger breathing volumes were observed when coached breathing techniques were used. Conclusions: The technique presented improves density-based evaluation of lung ventilation and establishes a link between global and regional lung ventilation volumes. Furthermore, the results obtained are comparable with those of other techniques of functional evaluation such as spirometry and hyperpolarized-gas magnetic resonance imaging. These results were demonstrated on retrospective analysis of patient data, and further research using prospective data is under way to validate this technique against established clinical tests. {\textcopyright} 2013 Elsevier Inc. }, } |
2013 | Journal | Tobias Meyer, Stephan B. Sobottka, Matthias Kirsch, Gabriele Schackert, Ralf Steinmeier, Edmund Koch, Ute Morgenstern (2013). Intraoperative optical imaging of functional brain areas for improved image-guided surgery. Biomedizinische Technik, 58(3), pp. 225–236. (link) (bib) x @article{Meyer2013a, year = { 2013 }, volume = { 58 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Intraoperative optical imaging of functional brain areas for improved image-guided surgery }, pages = { 225--236 }, number = { 3 }, keywords = { Functional imaging,Intrinsic signals,Somatosensory cortex }, journal = { Biomedizinische Technik }, issn = { 00135585 }, doi = { 10.1515/bmt-2012-0072 }, author = { Meyer and Sobottka and Kirsch and Schackert and Steinmeier and Koch and Morgenstern }, abstract = { Intraoperative optical imaging of intrinsic signals can improve the localization of functional areas of the cortex. On the basis of a review of the current state of technology, a setup was developed and evaluated. The aim was to implement an easy-to-use and robust imaging setup that can be used in clinical routine with standard hardware equipment (surgical microscope, high-resolution camera, stimulator for peripheral nerve stimulation) and custom-made software for intraoperative and postoperative data analysis. Evaluation of different light sources (halogen, xenon) showed a sufficient temporal behavior of xenon light without using a stabilized power supply. Spatial binning (2 ×?2) of the camera reduces temporal variations in the images by preserving a high spatial resolution. The setup was tested in eight patients. Images were acquired continuously for 9 min with alternating 30-s rest and 30-s stimulation conditions. Intraoperative measurement and visualization of high-resolution two-dimensional activity maps could be achieved in 15 min. The detected functional regions corresponded with anatomical and electrophysiological validation. 
The integration of optical imaging in clinical routine could successfully be achieved using standard hardware, which improves guidance for the surgeon during interventions near the eloquent areas of the brain. {\textcopyright} 2013 Walter de Gruyter GmbH. }, } |
2013 | Journal | Tobias Meyer, Julia Ku\ss, Falk Uhlemann, Stefan Wagner, Matthias Kirsch, Stephan B. Sobottka, Ralf Steinmeier, Gabriele Schackert, Ute Morgenstern (2013). Autostereoscopic 3D visualization and image processing system for neurosurgery. Biomedizinische Technik, 58(3), pp. 281–291. (link) (bib) x @article{Meyer2013, year = { 2013 }, volume = { 58 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Autostereoscopic 3D visualization and image processing system for neurosurgery }, pages = { 281--291 }, number = { 3 }, keywords = { Autostereoscopy,Diffusion tensor imaging,Interaction,Multimodality phantoms,Visualization }, journal = { Biomedizinische Technik }, issn = { 00135585 }, doi = { 10.1515/bmt-2012-0079 }, author = { Meyer and Ku{\ss} and Uhlemann and Wagner and Kirsch and Sobottka and Steinmeier and Schackert and Morgenstern }, abstract = { A demonstrator system for planning neurosurgical procedures was developed based on commercial hardware and software. The system combines an easyto- use environment for surgical planning with highend visualization and the opportunity to analyze data sets for research purposes. The demonstrator system is based on the software AMIRA. Specific algorithms for segmentation, elastic registration, and visualization have been implemented and adapted to the clinical workflow. Modules from AMIRA and the image processing library Insight Segmentation and Registration Toolkit (ITK) can be combined to solve various image processing tasks. Customized modules tailored to specific clinical problems can easily be implemented using the AMIRA application programming interface and a self-developed framework for ITK filters. Visualization is done via autostereoscopic displays, which provide a 3D impression without viewing aids. A Spaceball device allows a comfortable, intuitive way of navigation in the data sets. Via an interface to a neurosurgical navigation system, the demonstrator system can be used intraoperatively. 
The precision, applicability, and benefit of the demonstrator system for planning of neurosurgical interventions and for neurosurgical research were successfully evaluated by neurosurgeons using phantom and patient data sets. {\textcopyright} 2013 Walter de Gruyter GmbH. }, } |
2013 | Journal | Matthew M. McCormick, Tomy Varghese (2013). An approach to unbiased subsample interpolation for motion tracking. Ultrasonic Imaging, 35(2), pp. 76–89. (link) (bib) x @article{McCormick2013, year = { 2013 }, volume = { 35 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { An approach to unbiased subsample interpolation for motion tracking }, pages = { 76--89 }, number = { 2 }, keywords = { motion tracking,sinc reconstruction,strain imaging,subsample interpolation }, journal = { Ultrasonic Imaging }, issn = { 01617346 }, doi = { 10.1177/0161734613476176 }, author = { McCormick and Varghese }, abstract = { Accurate subsample displacement estimation is necessary for ultrasound elastography because of the small deformations that occur and the subsequent application of a derivative operation on local displacements. Many of the commonly used subsample estimation techniques introduce significant bias errors. This article addresses a reduced bias approach to subsample displacement estimations that consists of a two-dimensional windowed-sinc interpolation with numerical optimization. It is shown that a Welch or Lanczos window with a Nelder-Mead simplex or regular-step gradient-descent optimization is well suited for this purpose. Little improvement results from a sinc window radius greater than four data samples. The strain signal-to-noise ratio (SNR) obtained in a uniformly elastic phantom is compared with other parabolic and cosine interpolation methods; it is found that the strain SNR ratio is improved over parabolic interpolation from 11.0 to 13.6 in the axial direction and 0.7 to 1.1 in the lateral direction for an applied 1{\%} axial deformation. The improvement was most significant for small strains and displacement tracking in the lateral direction. 
This approach does not rely on special properties of the image or similarity function, which is demonstrated by its effectiveness with the application of a previously described regularization technique. {\textcopyright} The Author(s) 2013. }, } |
2013 | Journal | Andreas Maier, Hannes G. Hofmann, Martin Berger, Peter Fischer, Chris Schwemmer, Haibo Wu, Kerstin Müller, Joachim Hornegger, Jang Hwan Choi, Christian Riess, Andreas Keil, Rebecca Fahrig (2013). CONRAD - A software framework for cone-beam imaging in radiology. Medical Physics, 40(11), pp. 8. (link) (bib) x @article{Maier2013, year = { 2013 }, volume = { 40 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { CONRAD - A software framework for cone-beam imaging in radiology }, pages = { 8 }, number = { 11 }, keywords = { C-arm computed tomography (CT),GPU,cone-beam,hardware acceleration,open-source,software frameworks }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.4824926 }, author = { Maier and Hofmann and Berger and Fischer and Schwemmer and Wu and M{\"{u}}ller and Hornegger and Choi and Riess and Keil and Fahrig }, abstract = { Purpose: In the community of x-ray imaging, there is a multitude of tools and applications that are used in scientific practice. Many of these tools are proprietary and can only be used within a certain lab. Often the same algorithm is implemented multiple times by different groups in order to enable comparison. In an effort to tackle this problem, the authors created CONRAD, a software framework that provides many of the tools that are required to simulate basic processes in x-ray imaging and perform image reconstruction with consideration of nonlinear physical effects. Methods: CONRAD is a Java-based state-of-the-art software platform with extensive documentation. It is based on platform-independent technologies. Special libraries offer access to hardware acceleration such as OpenCL. There is an easy-to-use interface for parallel processing. The software package includes different simulation tools that are able to generate up to 4D projection and volume data and respective vector motion fields. Well known reconstruction algorithms such as FBP, DBP, and ART are included. 
All algorithms in the package are referenced to a scientific source. Results: A total of 13 different phantoms and 30 processing steps have already been integrated into the platform at the time of writing. The platform comprises 74,000 nonblank lines of code out of which 19{\%} are used for documentation. The software package is available for download at http://conrad.stanford.edu. To demonstrate the use of the package, the authors reconstructed images from two different scanners, a table top system and a clinical C-arm system. Runtimes were evaluated using the RabbitCT platform and demonstrate state-of-the-art runtimes with 2.5 s for the 256 problem size and 12.4 s for the 512 problem size. Conclusions: As a common software framework, CONRAD enables the medical physics community to share algorithms and develop new ideas. In particular this offers new opportunities for scientific collaboration and quantitative performance comparison between the methods of different groups. {\textcopyright} 2013 American Association of Physicists in Medicine. }, } |
2013 | Journal | Antonio Latorre, Lidia Alonso-Nanclares, Santiago Muelas, José Mar\'ia Pe\~na, Javier Defelipe (2013). 3D segmentations of neuronal nuclei from confocal microscope image stacks. Frontiers in Neuroanatomy, 7(DEC), pp. 10. (link) (bib) x @article{LaTorre2013, year = { 2013 }, volume = { 7 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { 3D segmentations of neuronal nuclei from confocal microscope image stacks }, pages = { 10 }, number = { DEC }, keywords = { 3D reconstruction,Automatic segmentation,Cerebral cortex,Image processing,Neuron }, journal = { Frontiers in Neuroanatomy }, issn = { 16625129 }, doi = { 10.3389/fnana.2013.00049 }, author = { Latorre and Alonso-Nanclares and Muelas and Pe{\~{n}}a and Defelipe }, abstract = { In this paper, we present an algorithm to create 3D segmentations of neuronal cells from stacks of previously segmented 2D images. The idea behind this proposal is to provide a general method to reconstruct 3D structures from 2D stacks, regardless of how these 2D stacks have been obtained. The algorithm not only reuses the information obtained in the 2D segmentation, but also attempts to correct some typical mistakes made by the 2D segmentation algorithms (for example, under segmentation of tightly-coupled clusters of cells). We have tested our algorithm in a real scenario-the segmentation of the neuronal nuclei in different layers of the rat cerebral cortex. Several representative images from different layers of the cerebral cortex have been considered and several 2D segmentation algorithms have been compared. Furthermore, the algorithm has also been compared with the traditional 3D Watershed algorithm and the results obtained here show better performance in terms of correctly identified neuronal nuclei. {\textcopyright} 2013 LaTorre, Alonso-Nanclares, Muelas, Pe{\~{n}}a and DeFelipe. }, } |
2013 | Journal | Pencilla Lang, Michael W.A. Chu, Dan Bainbridge, Gerard M. Guiraudon, Douglas L. Jones, Terry M. Peters (2013). Surface-based CT-TEE registration of the aortic root. IEEE Transactions on Biomedical Engineering, 60(12), pp. 3382–3390. (link) (bib) x @article{Lang2013, year = { 2013 }, volume = { 60 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Surface-based CT-TEE registration of the aortic root }, pages = { 3382--3390 }, number = { 12 }, keywords = { CT,image registration,image-guided procedure,transcatheter aortic valve replacement,transesophageal echo }, journal = { IEEE Transactions on Biomedical Engineering }, issn = { 00189294 }, doi = { 10.1109/TBME.2013.2249582 }, author = { Lang and Chu and Bainbridge and Guiraudon and Jones and Peters }, abstract = { Transcatheter aortic valve implantation (TAVI) is a minimally invasive alternative to conventional aortic valve replacement for severe aortic stenosis in high-risk patients in which a stent-based bioprosthetic valve is delivered into the heart via a catheter. TAVI relies largely on single-plane fluoroscopy for intraoperative navigation and guidance, which provides only gross imaging of anatomical structures. Inadequate imaging leading to suboptimal valve positioning contributes to many of the early complications experienced by TAVI patients, including valve embolism, coronary ostia obstruction, paravalvular leak, heart block, and secondary nephrotoxicity from excessive contrast use. Improved visualization can be provided using intraoperative registration of a CT-derived surface to transesophageal echo (TEE) images. In this study, the accuracy and robustness of a surface-based registration method suitable for intraoperative use are evaluated, and the performances of different TEE surface extraction methods are compared. The use of cross-plane TEE contours demonstrated the best accuracy, with registration errors of less than 5 mm. 
This guidance system uses minimal intraoperative interaction and workflow modification, does not require tool calibration or additional intraoperative hardware, and can be implemented at all cardiac centers at extremely low cost. {\textcopyright} 2013 IEEE. }, } |
2013 | Journal | Florence Kremer, Tom Dresselaers, Brecht Heyde, Vesselina Ferferieva, Ellen Caluwé, Hon Fai Choi, Piet Claus, Wouter Oosterlinck, Stefan Janssens, Uwe Himmelreich, Jan D'hooge (2013). 2-D Strain Assessment in the Mouse Through Spatial Compounding of Myocardial Velocity Data: InVivo Feasibility. Ultrasound in Medicine and Biology, 39(10), pp. 1848–1860. (link) (bib) x @article{Kremer2013, year = { 2013 }, volume = { 39 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84883465702{\&}doi=10.1016{\%}2Fj.ultrasmedbio.2013.04.004{\&}partnerID=40{\&}md5=7e4e566e8788b13da38aa8c29cc1271b }, type = { Journal Article }, title = { 2-D Strain Assessment in the Mouse Through Spatial Compounding of Myocardial Velocity Data: InVivo Feasibility }, pages = { 1848--1860 }, number = { 10 }, keywords = { 2-D myocardial strain,Doppler,Mouse,Spatial compounding }, journal = { Ultrasound in Medicine and Biology }, issn = { 1879291X }, doi = { 10.1016/j.ultrasmedbio.2013.04.004 }, author = { Kremer and Dresselaers and Heyde and Ferferieva and Caluw{\'{e}} and Choi and Claus and Oosterlinck and Janssens and Himmelreich and D'hooge }, abstract = { Ultrasound assessment of myocardial strain can provide valuable information on regional cardiac function. However, Doppler-based methods often used in practice for strain estimation suffer from angle dependency. In this study, a partial solution to that fundamental limitation is presented. We have previously reported using simulated data sets that spatial compounding of axial velocities obtained at three steering angles can theoretically outperform 2-D speckle tracking for 2-D strain estimation in the mouse heart. In this study, the feasibility of the method was analyzed invivo using spatial compounding of Doppler velocities on six mice with myocardial infarction and five controls, and results were compared with those of tagged microscopic magnetic resonance imaging ($\mu$MRI). 
Circumferential estimates quantified by means of both ultrasound and $\mu$MRI could detect regional dysfunction. Between echocardiography and $\mu$MRI, a good regression coefficient was obtained for circumferential strain estimates (r = 0.69), whereas radial strain estimates correlated only moderately (r = 0.37). A second echocardiography was performed after $\mu$MRI to test the reproducibility of the compounding method. This yielded a higher correlation coefficient for the circumferential component than for the radial component (r = 0.74 circumferentially, r = 0.49 radially). {\textcopyright} 2013 World Federation for Ultrasound in Medicine {\&} Biology. }, } |
2013 | Journal | Antje Christin Knopf, Dirk Boye, Antony Lomax, Shininchiro Mori (2013). Adequate margin definition for scanned particle therapy in the incidence of intrafractional motion. Physics in Medicine and Biology, 58(17), pp. 6079–6094. (link) (bib) x @article{Knopf2013, year = { 2013 }, volume = { 58 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Adequate margin definition for scanned particle therapy in the incidence of intrafractional motion }, pages = { 6079--6094 }, number = { 17 }, journal = { Physics in Medicine and Biology }, issn = { 00319155 }, doi = { 10.1088/0031-9155/58/17/6079 }, author = { Knopf and Boye and Lomax and Mori }, abstract = { Advanced 4D dose calculations (4DDCs) for scanned particle therapy show that in the incidence of motion, it is insufficient to use target contours defined on one reference CT phase. ICRU Report 62 (ICRU 1999 ICRU Report 62 (Bethesda, MD: ICRU)) advises that variations in size, shape and position of CTVs relative to anatomic reference points have to be considered for internal target volumes (ITVs). In addition to geometrical margin adaption, changes of water equivalent path length have to be considered for particle therapy. Different ITV concepts have been applied to six representative patients (liver and lung indications) based on 4DCT. Geometrical ITVs (gITV) were calculated by combining deformed CTVs over all motion phases. To take into account path length changes, range adapted ITVs (raITV) were established as the union of range adapted CTVs in all phases. For gated delivery, gat-gITVs and gat-raITVs were calculated. Extensive 4DDCs have been performed for two exemplary patients to illustrate that neither re-scanning nor gating can sufficiently compensate for motion effects if no appropriate margins are employed and to evaluate the effectiveness of gITVs and raITVs. CTVs significantly differ from gITVs and raITVs in size (up to a factor 2 in volume). 
But also raITVs and gITVs differ significantly in size and are spatially displaced, particularly for lung patients. raITVs show a strong field dependence in shape. All volumes are reduced in size when gating is applied and considered during margin adaption. 4D dose distributions show big improvements when gITV or raITV are used compared to CTVs. However, the use of either gITVs or raITVs do not result in significant differences. If raITVs are used, slightly better target coverage is gained at the cost of more healthy tissue exposure. Our results emphasize that adapted target volumes have to be used for scanned particle therapy in the presence of motion. However, even though gITVs and raITVs differ significantly in shape and size, this difference does not necessarily translate into significant differences in the resultant 4D dose distributions. {\textcopyright} 2013 Institute of Physics and Engineering in Medicine. }, } |
2013 | Journal | Brecht Heyde, Stefaan Bouchez, Sabine Thieren, Michael Vandenheuvel, Ruta Jasaityte, Daniel Barbosa, Piet Claus, Frederik Maes, Patrick Wouters, Jan D'hooge (2013). Elastic image registration to quantify 3-d regional myocardial deformation from volumetric ultrasound: Experimental validation in an animal model. Ultrasound in Medicine and Biology, 39(9), pp. 1688–1697. (link) (bib) x @article{Heyde2013, year = { 2013 }, volume = { 39 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Elastic image registration to quantify 3-d regional myocardial deformation from volumetric ultrasound: Experimental validation in an animal model }, pages = { 1688--1697 }, number = { 9 }, keywords = { Echocardiography,Elastic registration,Invivo,Sonomicrometry,Strain,Validation }, journal = { Ultrasound in Medicine and Biology }, issn = { 1879291X }, doi = { 10.1016/j.ultrasmedbio.2013.02.463 }, author = { Heyde and Bouchez and Thieren and Vandenheuvel and Jasaityte and Barbosa and Claus and Maes and Wouters and D'hooge }, abstract = { Although real-time 3-D echocardiography has the potential to allow more accurate assessment of global and regional ventricular dynamics compared with more traditional 2-D ultrasound examinations, it still requires rigorous testing and validation should it break through as a standard examination in routine clinical practice. However, only a limited number of studies have validated 3-D strain algorithms in an invivo experimental setting. The aim of the present study, therefore, was to validate a registration-based strain estimation methodology in an animal model. Volumetric images were acquired in 14 open-chest sheep instrumented with ultrasonic microcrystals. 
Radial strain ($\epsilon_{RR}$), longitudinal strain ($\epsilon_{LL}$) and circumferential strain ($\epsilon_{CC}$) were estimated during different stages: at rest, during reduced and increased cardiac inotropy induced by esmolol and dobutamine infusion, respectively, and during acute ischemia. Agreement between image-based and microcrystal-based strain estimates was evaluated by their linear correlation, indicating that all strain components could be estimated with acceptable accuracy (r = 0.69 for $\epsilon_{RR}$, r = 0.64 for $\epsilon_{LL}$ and r = 0.62 for $\epsilon_{CC}$). These findings are comparable to the performance of the current state-of-the-art commercial 3-D speckle tracking methods. Furthermore, shape of the strain curves, timing of peak values and location of dysfunctional regions were identified well. Whether 3-D elastic registration performs better than 3-D block matching-based methodologies still remains to be proven. {\textcopyright} 2013 World Federation for Ultrasound in Medicine {\&} Biology. }, } |
2013 | Journal | Richard J. Giuly, Keun Young Kim, Mark H. Ellisman (2013). DP2: Distributed 3D image segmentation using micro-labor workforce. Bioinformatics, 29(10), pp. 1359–1360. (link) (bib) x @article{Giuly2013, year = { 2013 }, volume = { 29 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { DP2: Distributed 3D image segmentation using micro-labor workforce }, pages = { 1359--1360 }, number = { 10 }, journal = { Bioinformatics }, issn = { 13674803 }, doi = { 10.1093/bioinformatics/btt154 }, author = { Giuly and Kim and Ellisman }, abstract = { This application note describes a new scalable semi-automatic approach, the Dual Point Decision Process, for segmentation of 3D structures contained in 3D microscopy. The segmentation problem is distributed to many individual workers such that each receives only simple questions regarding whether two points in an image are placed on the same object. A large pool of micro-labor workers available through Amazon's Mechanical Turk system provides the labor in a scalable manner. {\textcopyright} 2013 The Author. Published by Oxford University Press. All rights reserved. }, } |
2013 | Journal | Anders Garpebring, Patrik Brynolfsson, Jun Yu, Ronnie Wirestam, Adam Johansson, Thomas Asklund, Mikael Karlsson (2013). Uncertainty estimation in dynamic contrast-enhanced MRI. Magnetic Resonance in Medicine, 69(4), pp. 992–1002. (link) (bib) x @article{Garpebring2013, year = { 2013 }, volume = { 69 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Uncertainty estimation in dynamic contrast-enhanced MRI }, pages = { 992--1002 }, number = { 4 }, keywords = { accuracy,dynamic contrast-enhanced-MRI,precision analysis,uncertainty estimation }, journal = { Magnetic Resonance in Medicine }, issn = { 07403194 }, doi = { 10.1002/mrm.24328 }, author = { Garpebring and Brynolfsson and Yu and Wirestam and Johansson and Asklund and Karlsson }, abstract = { Using dynamic contrast-enhanced MRI (DCE-MRI), it is possible to estimate pharmacokinetic (PK) parameters that convey information about physiological properties, e.g., in tumors. In DCE-MRI, errors propagate in a nontrivial way to the PK parameters. We propose a method based on multivariate linear error propagation to calculate uncertainty maps for the PK parameters. Uncertainties in the PK parameters were investigated for the modified Kety model. The method was evaluated with Monte Carlo simulations and exemplified with in vivo brain tumor data. PK parameter uncertainties due to noise in dynamic data were accurately estimated. Noise with standard deviation up to 15{\%} in the baseline signal and the baseline T1 map gave estimated uncertainties in good agreement with the Monte Carlo simulations. Good agreement was also found for up to 15{\%} errors in the arterial input function amplitude. The method was less accurate for errors in the bolus arrival time with disagreements of 23{\%}, 32{\%}, and 29{\%} for Ktrans, ve, and vp, respectively, when the standard deviation of the bolus arrival time error was 5.3 s. 
In conclusion, the proposed method provides efficient means for calculation of uncertainty maps, and it was applicable to a wide range of sources of uncertainty. {\textcopyright} 2012 Wiley Periodicals, Inc. }, } |
2013 | Journal | Berta Mart\'i Fuster, Oscar Esteban, Xavier Planes, Pablo Aguiar, Cristina Crespo, Carles Falcon, Gert Wollny, Sebasti\`a Rub\'i Sureda, Xavier Setoain, Alejandro F. Frangi, Maria J. Ledesma, Andrés Santos, Javier Pav\'ia, Dom\`enec Ros (2013). FocusDET, a new toolbox for SISCOM analysis. Evaluation of the registration accuracy using monte carlo simulation. Neuroinformatics, 11(1), pp. 77–89. (link) (bib) x @article{Fuster2013, year = { 2013 }, volume = { 11 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { FocusDET, a new toolbox for SISCOM analysis. Evaluation of the registration accuracy using monte carlo simulation }, pages = { 77--89 }, number = { 1 }, keywords = { Epilepsy,Monte Carlo simulation,Reconstruction algorithms,Registration assessment,SISCOM }, journal = { Neuroinformatics }, issn = { 15392791 }, doi = { 10.1007/s12021-012-9158-x }, author = { {Mart{\'{i}} Fuster} and Esteban and Planes and Aguiar and Crespo and Falcon and Wollny and {Rub{\'{i}} Sureda} and Setoain and Frangi and Ledesma and Santos and Pav{\'{i}}a and Ros }, abstract = { Subtraction of Ictal SPECT Co-registered to MRI (SISCOM) is an imaging technique used to localize the epileptogenic focus in patients with intractable partial epilepsy. The aim of this study was to determine the accuracy of registration algorithms involved in SISCOM analysis using FocusDET, a new user-friendly application. To this end, Monte Carlo simulation was employed to generate realistic SPECT studies. Simulated sinograms were reconstructed by using the Filtered BackProjection (FBP) algorithm and an Ordered Subsets Expectation Maximization (OSEM) reconstruction method that included compensation for all degradations. Registration errors in SPECT-SPECT and SPECT-MRI registration were evaluated by comparing the theoretical and actual transforms. Patient studies with well-localized epilepsy were also included in the registration assessment. 
Global registration errors including SPECT-SPECT and SPECT-MRI registration errors were less than 1.2 mm on average, exceeding the voxel size (3.32 mm) of SPECT studies in no case. Although images reconstructed using OSEM led to lower registration errors than images reconstructed with FBP, differences after using OSEM or FBP in reconstruction were less than 0.2 mm on average. This indicates that correction for degradations does not play a major role in the SISCOM process, thereby facilitating the application of the methodology in centers where OSEM is not implemented with correction of all degradations. These findings together with those obtained by clinicians from patients via MRI, interictal and ictal SPECT and video-EEG, show that FocusDET is a robust application for performing SISCOM analysis in clinical practice. {\textcopyright} 2012 Springer Science+Business Media, LLC. }, } |
2013 | Journal | Alex A.T. Bui, William Hsu, Corey Arnold, Suzie El-Saden, Denise R. Aberle, Ricky K. Taira (2013). Imaging-based observational databases for clinical problem solving: The role of informatics. Journal of the American Medical Informatics Association, 20(6), pp. 1053–1058. (link) (bib) x @article{Bui2013, year = { 2013 }, volume = { 20 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Imaging-based observational databases for clinical problem solving: The role of informatics }, pages = { 1053--1058 }, number = { 6 }, journal = { Journal of the American Medical Informatics Association }, issn = { 10675027 }, doi = { 10.1136/amiajnl-2012-001340 }, author = { Bui and Hsu and Arnold and El-Saden and Aberle and Taira }, abstract = { Imaging has become a prevalent tool in the diagnosis and treatment of many diseases, providing a unique in vivo, multi-scale view of anatomic and physiologic processes. With the increased use of imaging and its progressive technical advances, the role of imaging informatics is now evolving-from one of managing images, to one of integrating the full scope of clinical information needed to contextualize and link observations across phenotypic and genotypic scales. Several challenges exist for imaging informatics, including the need for methods to transform clinical imaging studies and associated data into structured information that can be organized and analyzed. We examine some of these challenges in establishing imaging-based observational databases that can support the creation of comprehensive disease models. The development of these databases and ensuing models can aid in medical decision making and knowledge discovery and ultimately, transform the use of imaging to support individually-tailored patient care. }, } |
2013 | Journal | A. Akbarzadeh, D. Gutierrez, A. Baskin, M. R. Ay, A. Ahmadian, N. Riahi Alam, K. O. Lövblad, H. Zaidi (2013). Evaluation of whole-body mr to ct deformable image registration. Journal of Applied Clinical Medical Physics, 14(4), pp. 238–253. (link) (bib) x @article{Akbarzadeh2013a, year = { 2013 }, volume = { 14 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Evaluation of whole-body mr to ct deformable image registration }, pages = { 238--253 }, number = { 4 }, keywords = { Attenuationcorrection,Deformable model,Image registration,PET/CT,PET/MRI }, journal = { Journal of Applied Clinical Medical Physics }, issn = { 15269914 }, doi = { 10.1120/jacmp.v14i4.4163 }, author = { Akbarzadeh and Gutierrez and Baskin and Ay and Ahmadian and {Riahi Alam} and L{\"{o}}vblad and Zaidi }, abstract = { Multimodality image registration plays a crucial role in various clinical and research applications. The aim of this study is to present an optimized MR to CT whole-body deformable image registration algorithm and its validation using clinical studies. A 3D intermodality registration technique based on B-spline transformation was performed using optimized parameters of the elastix package based on the Insight Toolkit (ITK) framework. Twenty-eight (17 male and 11 female) clinical studies were used in this work. The registration was evaluated using anatomical landmarks and segmented organs. In addition to 16 anatomical landmarks, three key organs (brain, lungs, and kidneys) and the entire body volume were segmented for evaluation. Several parameters - such as the Euclidean distance between anatomical landmarks, target overlap, Dice and Jaccard coefficients, false positives and false negatives, volume similarity, distance error, and Hausdorff distance - were calculated to quantify the quality of the registration algorithm. 
Dice coefficients for the majority of patients ({\textgreater} 75{\%}) were in the 0.8-1 range for the whole body, brain, and lungs, which satisfies the criteria to achieve excellent alignment. On the other hand, for kidneys, Dice coefficients for volumes of 25{\%} of the patients meet excellent volume agreement requirement, while the majority of patients satisfy good agreement criteria ({\textgreater} 0.6). For all patients, the distance error was in 0-10 mm range for all segmented organs. In summary, we optimized and evaluated the accuracy of an MR to CT deformable registration algorithm. The registered images constitute a useful 3D whole-body MR-CT atlas suitable for the development and evaluation of novel MR-guided attenuation correction procedures on hybrid PET-MR systems. }, } |
2013 | Journal | A. Akbarzadeh, M. R. Ay, A. Ahmadian, N. Riahi Alam, H. Zaidi (2013). MRI-guided attenuation correction in whole-body PET/MR: Assessment of the effect of bone attenuation. Annals of Nuclear Medicine, 27(2), pp. 152–162. (link) (bib) x @article{Akbarzadeh2013, year = { 2013 }, volume = { 27 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { MRI-guided attenuation correction in whole-body PET/MR: Assessment of the effect of bone attenuation }, pages = { 152--162 }, number = { 2 }, keywords = { Attenuation correction,PET/CT,PET/MRI,Quantification,Tissue classification }, journal = { Annals of Nuclear Medicine }, issn = { 09147187 }, doi = { 10.1007/s12149-012-0667-3 }, author = { Akbarzadeh and Ay and Ahmadian and {Riahi Alam} and Zaidi }, abstract = { Objective: Hybrid PET/MRI presents many advantages in comparison with its counterpart PET/CT in terms of improved soft-tissue contrast, decrease in radiation exposure, and truly simultaneous and multi-parametric imaging capabilities. However, the lack of well-established methodology for MR-based attenuation correction is hampering further development and wider acceptance of this technology. We assess the impact of ignoring bone attenuation and using different tissue classes for generation of the attenuation map on the accuracy of attenuation correction of PET data. Methods: This work was performed using simulation studies based on the XCAT phantom and clinical input data. For the latter, PET and CT images of patients were used as input for the analytic simulation model using realistic activity distributions where CT-based attenuation correction was utilized as reference for comparison. 
For both phantom and clinical studies, the reference attenuation map was classified into various numbers of tissue classes to produce three (air, soft tissue and lung), four (air, lungs, soft tissue and cortical bones) and five (air, lungs, soft tissue, cortical bones and spongeous bones) class attenuation maps. Results: The phantom studies demonstrated that ignoring bone increases the relative error by up to 6.8 {\%} in the body and up to 31.0 {\%} for bony regions. Likewise, the simulated clinical studies showed that the mean relative error reached 15 {\%} for lesions located in the body and 30.7 {\%} for lesions located in bones, when neglecting bones. These results demonstrate an underestimation of about 30 {\%} of tracer uptake when neglecting bone, which in turn imposes substantial loss of quantitative accuracy for PET images produced by hybrid PET/MRI systems. Conclusion: Considering bones in the attenuation map will considerably improve the accuracy of MR-guided attenuation correction in hybrid PET/MR to enable quantitative PET imaging on hybrid PET/MR technologies. {\textcopyright} 2012 The Japanese Society of Nuclear Medicine. }, } |
2013 | In Collection | T Seidel, T Draebing, G Seemann, F B Sachse (2013). A semi-automatic approach for segmentation of three-dimensional microscopic image stacks of cardiac tissue. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 300–307. (link) (bib) x @incollection{Seidel2013b, year = { 2013 }, volume = { 7945 LNCS }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84879835400{\&}doi=10.1007{\%}2F978-3-642-38899-6{\_}36{\&}partnerID=40{\&}md5=f7e591d9366417855cbf78f95bd7e1d2 }, type = { Serial }, title = { A semi-automatic approach for segmentation of three-dimensional microscopic image stacks of cardiac tissue }, pages = { 300--307 }, doi = { 10.1007/978-3-642-38899-6_36 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Seidel and Draebing and Seemann and Sachse }, } |
2013 | In Collection | Thomas Seidel, Thomas Draebing, Gunnar Seemann, Frank B. Sachse (2013). A semi-automatic approach for segmentation of three-dimensional microscopic image stacks of cardiac tissue. In S Ourselin, D Rueckert, N Smith, editor, Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 300–307, Berlin. (link) (bib) x @incollection{Seidel2013a, year = { 2013 }, volume = { 7945 LNCS }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84879835400{\&}doi=10.1007{\%}2F978-3-642-38899-6{\_}36{\&}partnerID=40{\&}md5=f7e591d9366417855cbf78f95bd7e1d2 {\%}3CGo to }, type = { Serial }, title = { A semi-automatic approach for segmentation of three-dimensional microscopic image stacks of cardiac tissue }, series = { Lecture Notes in Computer Science }, publisher = { Springer-Verlag Berlin }, pages = { 300--307 }, keywords = { algorithm,cardiac tissue,confocal microscopy,segmentation,three-dimensional }, issn = { 03029743 }, isbn = { 9783642388989 }, editor = { [object Object],[object Object],[object Object] }, doi = { 10.1007/978-3-642-38899-6_36 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Seidel and Draebing and Seemann and Sachse }, address = { Berlin }, abstract = { The segmentation of three-dimensional microscopic images of cardiac tissues provides important parameters for characterizing cardiac diseases and modeling of tissue function. Segmenting these images is, however, challenging. Currently only time-consuming manual approaches have been developed for this purpose. Here, we introduce an efficient approach for the semi-automatic segmentation (SAS) of cardiomyocytes and the extracellular space in image stacks obtained from confocal microscopy. 
The approach is based on a morphological watershed algorithm and iterative creation of watershed seed points on a distance map. Results of SAS were consistent with results from manual segmentation (Dice similarity coefficient: 90.8±2.6{\%}). Cell volume was 4.6±6.5{\%} higher in SAS cells, which mainly resulted from cell branches and membrane protrusions neglected by manual segmentation. We suggest that the novel approach constitutes an important tool for characterizing normal and diseased cardiac tissues. Furthermore, the approach is capable of providing crucial parameters for modeling of tissue structure and function. {\textcopyright} 2013 Springer-Verlag. }, } |
2013 | In Conf. Proceedings | Ahmadreza Rezaei, Johan Nuyts (2013). Joint registration of attenuation and activity images in gated TOF-PET. In IEEE Nuclear Science Symposium Conference Record, pp. NA New York. (link) (bib) x @inproceedings{Rezaei2013, year = { 2013 }, url = { {\%}3CGo to https://www.scopus.com/inward/record.uri?eid=2-s2.0-84904204340{\&}doi=10.1109{\%}2FNSSMIC.2013.6829031{\&}partnerID=40{\&}md5=9a3c352e39a5cde2bb765fec1156cfb1 }, type = { Book Section }, title = { Joint registration of attenuation and activity images in gated TOF-PET }, series = { IEEE Nuclear Science Symposium and Medical Imaging Conference }, publisher = { Ieee }, issn = { 10957863 }, isbn = { 9781479905348 }, doi = { 10.1109/NSSMIC.2013.6829031 }, booktitle = { IEEE Nuclear Science Symposium Conference Record }, author = { Rezaei and Nuyts }, address = { New York }, abstract = { To date, attenuation correction of gated PET emission data remains a challenge. Joint activity and attenuation estimation methods may contribute to solving this challenge. In this work, we demonstrate a framework in which the gated PET activity and attenuation images are jointly reconstructed and then registered to a reference frame by a joint registration approach. The method is studied and compared to common approaches by means of 2D and 3D simulations. {\textcopyright} 2013 IEEE. }, } |
2013 | In Conf. Proceedings | Emin Kugu (2013). Satellite image denoising using Bilateral Filter with SPEA2 optimized parameters. In RAST 2013 - Proceedings of 6th International Conference on Recent Advances in Space Technologies, pp. 217–223, New York. (link) (bib) x @inproceedings{Kugu2013, year = { 2013 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84883888988{\&}doi=10.1109{\%}2FRAST.2013.6581204{\&}partnerID=40{\&}md5=eba34fda9e6d5ba7ef3f98fd189e8ed5 {\%}3CGo to }, type = { Book }, title = { Satellite image denoising using Bilateral Filter with SPEA2 optimized parameters }, series = { Proceedings of 6th International Conference on Recent Advances in Space Technologies }, publisher = { Ieee }, pages = { 217--223 }, keywords = { Image denoising,SPEA2,bilateral filter,parameter optimization }, isbn = { 9781467363938 }, doi = { 10.1109/RAST.2013.6581204 }, booktitle = { RAST 2013 - Proceedings of 6th International Conference on Recent Advances in Space Technologies }, author = { Kugu }, address = { New York }, abstract = { Satellite imaging is being the most attractive source of information for the governmental agencies and the commercial companies in last decade. The quality of the images is very important especially for the military or the police forces to pick the valuable information from the details. Satellite images may have unwanted signals called as noise in addition to useful information for several reasons such as heat generated electrons, bad sensor, wrong ISO settings, vibration and clouds. There are several image enhancement algorithms to reduce the effects of noise over the image to see the details and gather meaningful information. Many of these algorithms accept several parameters from the user to reach the best results. In the process of denoising, there is always a competition between the noise reduction and the fine preservation. 
If there is a competition between the objectives then an evolutionary multi objective optimization (EMO) is needed. In this work, the parameters of the image denoising algorithms have been optimized to minimize the trade-off by using improved Strength Pareto Evolutionary Algorithm (SPEA2). SPEA2 differs from the other EMO algorithms with the fitness assignment, the density estimation and the archive truncation processes. There is no single optimal solution in a multi objective problems instead there is a set of solutions called as Pareto efficient. Four objective functions, namely Mean Square Error (MSE), Entropy, Structural SIMilarity (SSIM) and Second Derivative of the image, have been used in this work. MSE is calculated by taking the square of difference between the noise free image and the deniosed image. Entropy is a measure of randomness of the content of difference image. The lower entropy is the better. The second derivate of an image can be achieved by convolving the image with the Laplacian Mask. SSIM algorithm is based on the similarities of the structures on the noise free image and the structures of the denoised image. For the image enhancement algorithms, Insight Segmentation and Registration Toolkit (ITK) is selected. ITK is an open source project and it is being developed in C++ to provide developers with a rich set of applications for image analysis. It includes tens of image filters for the registration and segmentation purposes. In this work, Bilateral Image Filter is evaluated in the field of satellite imaging for the noise removal process. The evaluated filter receives two parameters from the user side within their predefined ranges. Here, SPEA2 algorithm takes the responsibility to optimize these parameters to reach the best noise free image results. SPEA2 algorithm was implemented in Matlab and executable files of image filter were called in Matlab environment. 
The results of the work were presented graphically to show the effectiveness of the selected method. {\textcopyright} 2013 IEEE. }, } |
2013 | In Conf. Proceedings | Gregory H. Chu, Pechin Lo, Hyun J. Kim, Martin Auerbach, Jonathan Goldin, Keith Henkel, Ashley Banola, Darren Morris, Heidi Coy, Matthew S. Brown (2013). Preliminary results of automated removal of degenerative joint disease in bone scan lesion segmentation. In Medical Imaging 2013: Computer-Aided Diagnosis, pp. 867007, Bellingham. (link) (bib) x @inproceedings{Chu2013, year = { 2013 }, volume = { 8670 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84878418964{\&}doi=10.1117{\%}2F12.2008082{\&}partnerID=40{\&}md5=a9e7952d5bc2b133959d4ad734f5d4ac }, type = { Conference Proceedings }, title = { Preliminary results of automated removal of degenerative joint disease in bone scan lesion segmentation }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 867007 }, issn = { 0277786X }, isbn = { 9780819494443 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.2008082 }, booktitle = { Medical Imaging 2013: Computer-Aided Diagnosis }, author = { Chu and Lo and Kim and Auerbach and Goldin and Henkel and Banola and Morris and Coy and Brown }, address = { Bellingham }, abstract = { Whole-body bone scintigraphy (or bone scan) is a highly sensitive method for visualizing bone metastases and is the accepted standard imaging modality for detection of metastases and assessment of treatment outcomes. The development of a quantitative biomarker using computer-aided detection on bone scans for treatment response assessment may have a significant impact on the evaluation of novel oncologic drugs directed at bone metastases. One of the challenges to lesion segmentation on bone scans is the non-specificity of the radiotracer, manifesting as high activity related to non-malignant processes like degenerative joint disease, sinuses, kidneys, thyroid and bladder. 
In this paper, we developed an automated bone scan lesion segmentation method that implements intensity normalization, a two-threshold model, and automated detection and removal of areas consistent with non-malignant processes from the segmentation. The two-threshold model serves to account for outlier bone scans with elevated and diffuse intensity distributions. Parameters to remove degenerative joint disease were trained using a multi-start Nelder-Mead simplex optimization scheme. The segmentation reference standard was constructed manually by a panel of physicians. We compared the performance of the proposed method against a previously published method. The results of a two-fold cross validation show that the overlap ratio improved in 67.0{\%} of scans, with an average improvement of 5.1{\%} points. {\textcopyright} 2013 SPIE. }, } |
2012 | Book chapter | Alexander Haak, Marijn van Stralen, Gerard van Burken, Stefan Klein, Josien P. W. Pluim, Nico de Jong, Antonius F. W. van der Steen, Johan G. Bosch (2012). NA in Comparison of spatiotemporal interpolators for 4D image reconstruction from 2D transesophageal ultrasound, Edited by J G Bosch, M M Doyley, Spie-Int Soc Optical Engineering, pp. 832007, Proceedings of SPIE, Vol. 8320, ISBN: 16057422. (link) (bib) x @inbook{Haak2012a, year = { 2012 }, volume = { 8320 }, url = { {\%}3CGo to }, type = { Book Section }, title = { Comparison of spatiotemporal interpolators for 4D image reconstruction from 2D transesophageal ultrasound }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 832007 }, issn = { 16057422 }, isbn = { 9780819489692 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.905893 }, booktitle = { Medical Imaging 2012: Ultrasonic Imaging, Tomography, and Therapy }, author = { Haak and Stralen and Burken and Klein and Pluim and Jong and Steen and Bosch }, address = { Bellingham }, } |
2012 | Book chapter | Iván Mac\'ia, Manuel Gra\~na (2012). NA in Vascular section estimation in medical images using combined feature detection and evolutionary optimization, Edited by E Corchado, V Snasel, A Abraham, M Wozniak, M Grana, S B Cho, Springer-Verlag Berlin, pp. 503–513, Lecture Notes in Computer Science, Vol. 7209 LNAI, ISBN: 03029743. (link) (bib) x @inbook{Macia2012, year = { 2012 }, volume = { 7209 LNAI }, url = { {\%}3CGo to }, type = { Book Section }, title = { Vascular section estimation in medical images using combined feature detection and evolutionary optimization }, series = { Lecture Notes in Computer Science }, publisher = { Springer-Verlag Berlin }, pages = { 503--513 }, number = { PART 2 }, keywords = { Evolutionary Optimization,Feature Detectors,Medialness,Medical Image Analysis,Section Estimator,Vascular Analysis,Vascular Tracking,Vesselness,Vessels }, issn = { 03029743 }, isbn = { 9783642289309 }, editor = { [object Object],[object Object],[object Object],[object Object],[object Object],[object Object] }, doi = { 10.1007/978-3-642-28931-6_48 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Mac{\'{i}}a and Gra{\~{n}}a }, address = { Berlin }, abstract = { Accurate detection and extraction of 3D vascular structures is a crucial step for many medical image applications that require vascular analysis. Vessel tracking algorithms iteratively follow vascular branches point by point, obtaining geometric descriptors, such as centerlines and sections of branches, that describe patient-specific vasculature. In order to obtain these descriptors, most approaches use specialized scaled vascular feature detectors. 
However, these detectors may fail due to the presence of nearby spurious structures, incorrect scale or parameter choice or other undesired effects, obtaining incorrect local sections which may lead to unrecoverable errors during the tracking procedure. We propose to combine this approach with an evolutionary optimization framework that use specific modified vascular detectors as cost functions in order to obtain accurate vascular sections when the direct detection approach fails. We demonstrate the validity of this new approach with experiments using real datasets. We also show that, for a family of medialness functions, the procedure can be performed at fixed small scales which is computationally efficient for local kernel-based estimators. {\textcopyright} 2012 Springer-Verlag. }, } |
2012 | Journal | Sean B. Ziegeler, James D. Dykes, Jay F. Shriver (2012). Spatial error metrics for oceanographic model verification. Journal of Atmospheric and Oceanic Technology, 29(2), pp. 260–266. (link) (bib) x @article{Ziegeler2012, year = { 2012 }, volume = { 29 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Spatial error metrics for oceanographic model verification }, pages = { 260--266 }, number = { 2 }, keywords = { Error analysis,Forecast verification,Ocean models }, journal = { Journal of Atmospheric and Oceanic Technology }, issn = { 07390572 }, doi = { 10.1175/JTECH-D-11-00109.1 }, author = { Ziegeler and Dykes and Shriver }, abstract = { A common problem with modern numerical oceanographic models is spatial displacement, including misplacement and misshapenness of ocean circulation features. Traditional error metrics, such as least squares methods, are ineffective in many such cases; for example, only small errors in the location of a frontal pattern are translated to large differences in least squares of intensities. Such problems are common in meteorological forecast verification as well, so the application of spatial error metrics have been a recently popular topic there. Spatial error metrics separate model error into a displacement component and an intensity component, providing a more reliable assessment of model biases and a more descriptive portrayal of numerical model prediction skill. The application of spatial error metrics to oceanographic models has been sparse, and further advances for both meteorology and oceanography exist in the medical imaging field. These advances are presented, along with modifications necessary for oceanographic model output. Standard methods and options for those methods in the literature are explored, and where the best arrangements of options are unclear, comparison studies are conducted. 
These trials require the reproduction of synthetic displacements in conjunction with synthetic intensity perturbations across 480 Navy Coastal Ocean Model (NCOM) temperature fields from various regions of the globe throughout 2009. Study results revealed the success of certain approaches novel to both meteorology and oceanography, including B-spline transforms and mutual information. That, combined with other common methods, such as quasi-Newton optimization and land masking, could best recover the synthetic displacements under various synthetic intensity changes. {\textcopyright} 2012 American Meteorological Society. }, } |
2012 | Journal | Dandan Zheng, Jun Lu, Ariel Jefferson, Cheng Zhang, Jian Wu, William Sleeman, Elisabeth Weiss, Nesrin Dogan, Shiyu Song, Jeffrey Williamson (2012). A protocol to extend the longitudinal coverage of on-board cone-beam CT. Journal of Applied Clinical Medical Physics, 13(4), pp. 141–151. (link) (bib) x @article{Zheng2012, year = { 2012 }, volume = { 13 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A protocol to extend the longitudinal coverage of on-board cone-beam CT }, pages = { 141--151 }, number = { 4 }, keywords = { Cbct,Field of view,Igart,Image registration }, journal = { Journal of Applied Clinical Medical Physics }, issn = { 15269914 }, doi = { 10.1120/jacmp.v13i4.3796 }, author = { Zheng and Lu and Jefferson and Zhang and Wu and Sleeman and Weiss and Dogan and Song and Williamson }, abstract = { The longitudinal coverage of a LINAC-mounted CBCT scan is limited to the corresponding dimensional limits of its flat panel detector, which is often shorter than the length of the treatment field. These limits become apparent when fields are designed to encompass wide regions, as when providing nodal coverage. Therefore, we developed a novel protocol to acquire double orbit CBCT images using a commercial system, and combine the images to extend the longitudinal coverage for image-guided adaptive radiotherapy (IGART). The protocol acquires two CBCT scans with a couch shift similar to the "step-and-shoot" cine CT acquisition, allowing a small longitudinal overlap of the two reconstructed volumes. An in-house DICOM reading/writing software was developed to combine the two image sets into one. Three different approaches were explored to handle the possible misalignment between the two image subsets: simple stacking, averaging the overlapped volumes, and a 3D-3D image registration with the three translational degrees of freedom. 
Using thermoluminescent dosimeters and custom-designed holders for a CTDI phantom set, dose measurements were carried out to assess the resultant imaging dose of the technique and its geometric distribution. Deformable registration was tested on patient images generated with the double-orbit protocol, using both the planning FBCT and the artificially deformed CBCT as source images. The protocol was validated on phantoms and has been employed clinically for IRB-approved IGART studies for head and neck and prostate cancer patients. }, } |
2012 | Journal | Huanmei Wu, Qingya Zhao, Minsong Cao, Indra Das (2012). A line profile-based double partial fusion method for acquiring planning CT of oversized patients in radiation treatment. Journal of Applied Clinical Medical Physics, 13(2), pp. 20–31. (link) (bib) x @article{Wu2012, year = { 2012 }, volume = { 13 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A line profile-based double partial fusion method for acquiring planning CT of oversized patients in radiation treatment }, pages = { 20--31 }, number = { 2 }, keywords = { Double scanning,Oversized patients,Partial registration,Planning CT }, journal = { Journal of Applied Clinical Medical Physics }, issn = { 15269914 }, doi = { 10.1120/jacmp.v13i2.3629 }, author = { Wu and Zhao and Cao and Das }, abstract = { True 3D CT dataset for treatment planning of an oversized patient is difficult to acquire due to the bore size and field of view (FOV) reconstruction. This project aims to provide a simple approach to reconstruct true CT data for oversize patients using CT scanner with limited FOV by acquiring double partial CT (left and right side) images. An efficient line profile-based method has been developed to minimize the difference of the CT numbers in the overlapping region between the right and left images and to generate a complete true 3D CT dataset in the natural state. New image processing modules have been developed and integrated to the Insight Segmentation {\&} Registration Toolkit (ITK 3.6) package. For example, different modules for image cropping, line profile generation, line profile matching, and optimized partial image fusion have been developed. The algorithm has been implemented for images containing the bony structure of the spine and tested on 3D CT planning datasets from both phantom and real patients with satisfactory results in both cases. 
The proposed optimized line profile-based partial registration method provides a simple and accurate way of acquiring a complete true 3D CT dataset for an oversized patient using CT scanning with a small bore size, which can be used for accurate treatment planning. }, } |
2012 | Journal | Markus N. Streicher, Andreas Schäfer, Enrico Reimer, Bibek Dhital, Robert Trampel, Dimo Ivanov, Robert Turner (2012). Effects of air susceptibility on proton resonance frequency MR thermometry. Magnetic Resonance Materials in Physics, Biology and Medicine, 25(1), pp. 41–47. (link) (bib) x @article{Streicher2012, year = { 2012 }, volume = { 25 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Effects of air susceptibility on proton resonance frequency MR thermometry }, pages = { 41--47 }, number = { 1 }, keywords = { Air,PRF,PRFS-based MR thermometry,Susceptibility,Temperature }, journal = { Magnetic Resonance Materials in Physics, Biology and Medicine }, issn = { 09685243 }, doi = { 10.1007/s10334-011-0249-8 }, author = { Streicher and Sch{\"{a}}fer and Reimer and Dhital and Trampel and Ivanov and Turner }, abstract = { Object The temperature dependence of the proton resonance frequency (PRF) is often used in MR thermometry. However, this method is prone to even very small changes in local magnetic field strength. Here, we report on the effects of susceptibility changes of surrounding air on the magnetic field inside an object and their inferred effect on the measured MR temperature. Materials and methods MR phase thermometry was performed on spherical agar phantoms enclosed in cylindrical containers at 7T. The air susceptibility inside the cylindrical container was changed by both heating the air and changing the gas composition. Results Changing the temperature of surrounding air from 23 to 69{\textdegree}C caused an apparent MR temperature error of 2 K. When ambient air was displaced by 100{\%} oxygen, the MR temperature error increased to 40 K. The magnetic field shift and therefore error in inferred MR temperature scales linearly with volume susceptibility change and has a strong and nontrivial dependence on the experimental configuration. 
Conclusion Air susceptibility changes associated with oxygen concentration changes greatly affect PRF MR thermometry measurements. Air temperature changes can also affect these measurements, but to a smaller degree. For uncalibrated MR thermometry, air susceptibility changes may be a significant source of error. {\textcopyright} ESMRMB 2011. }, } |
2012 | Journal | Tobias Pietzsch, Stephan Preibisch, Pavel Tomančák, Stephan Saalfeld (2012). ImgLib2—generic image processing in Java. Bioinformatics, 28(22), pp. 3009–3011. (link) (bib) x @article{Pietzsch2012, year = { 2012 }, volume = { 28 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { ImgLib2---generic image processing in Java }, pages = { 3009--3011 }, number = { 22 }, journal = { Bioinformatics }, issn = { 14602059 }, doi = { 10.1093/bioinformatics/bts543 }, author = { Pietzsch and Preibisch and Toman{\v{c}}{\'{a}}k and Saalfeld }, abstract = { ImgLib2 is an open-source Java library for n-dimensional data representation and manipulation with focus on image processing. It aims at minimizing code duplication by cleanly separating pixel-algebra, data access and data representation in memory. Algorithms can be implemented for classes of pixel types and generic access patterns by which they become independent of the specific dimensionality, pixel type and data representation. ImgLib2 illustrates that an elegant high-level programming interface can be achieved without sacrificing performance. It provides efficient implementations of common data types, storage layouts and algorithms. It is the data model underlying ImageJ2, the KNIME Image Processing toolbox and an increasing number of Fiji-Plugins. {\textcopyright} 2012 The Author. }, } |
2012 | Journal | Coert T. Metz, Nora Baka, Hortense Kirisli, Michiel Schaap, Stefan Klein, Lisan A. Neefjes, Nico R. Mollet, Boudewijn Lelieveldt, Marleen De Bruijne, Wiro J. Niessen, Theo Van Walsum (2012). Regression-based cardiac motion prediction from single-phase CTA. IEEE Transactions on Medical Imaging, 31(6), pp. 1311–1325. (link) (bib) x @article{Metz2012, year = { 2012 }, volume = { 31 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Regression-based cardiac motion prediction from single-phase CTA }, pages = { 1311--1325 }, number = { 6 }, keywords = { Cardiac,heart,motion prediction,principal component regression (PCR),shape,statistical models }, journal = { IEEE Transactions on Medical Imaging }, issn = { 02780062 }, doi = { 10.1109/TMI.2012.2190938 }, author = { Metz and Baka and Kirisli and Schaap and Klein and Neefjes and Mollet and Lelieveldt and {De Bruijne} and Niessen and {Van Walsum} }, abstract = { State of the art cardiac computed tomography (CT) enables the acquisition of imaging data of the heart over the entire cardiac cycle at concurrent high spatial and temporal resolution. However, in clinical practice, acquisition is increasingly limited to 3-D images. Estimating the shape of the cardiac structures throughout the entire cardiac cycle from a 3-D image is therefore useful in applications such as the alignment of preoperative computed tomography angiography (CTA) to intra-operative X-ray images for improved guidance in coronary interventions. We hypothesize that the motion of the heart is partially explained by its shape and therefore investigate the use of three regression methods for motion estimation from single-phase shape information. Quantitative evaluation on 150 4-D CTA images showed a small, but statistically significant, increase in the accuracy of the predicted shape sequences when using any of the regression methods, compared to shape-independent motion prediction by application of the mean motion. 
The best results were achieved using principal component regression resulting in point-to-point errors of 2.3 $\pm$ 0.5 mm, compared to values of 2.7 $\pm$ 0.6 mm for shape-independent motion estimation. Finally, we showed that this significant difference withstands small variations in important parameter settings of the landmarking procedure. {\textcopyright} 2012 IEEE. }, } |
2012 | Journal | Carlos S. Mendoza, Begoña Acha, Carmen Serrano, Tomás Gómez-Cía (2012). Fast parameter-free region growing segmentation with application to surgical planning. Machine Vision and Applications, 23(1), pp. 165–177. (link) (bib) x @article{Mendoza2012, year = { 2012 }, volume = { 23 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Fast parameter-free region growing segmentation with application to surgical planning }, pages = { 165--177 }, number = { 1 }, keywords = { CT,Region growing,Segmentation,Surgical planning,Virtual reality }, journal = { Machine Vision and Applications }, issn = { 09328092 }, doi = { 10.1007/s00138-010-0274-z }, author = { Mendoza and Acha and Serrano and G{\'{o}}mez-C{\'{i}}a }, abstract = { In this paper, we propose a self-assessed adaptive region growing segmentation algorithm. In the context of an experimental virtual-reality surgical planning software platform, our method successfully delineates main tissues relevant for reconstructive surgery, such as fat, muscle, and bone. We rely on a self-tuning approach to deal with a great variety of imaging conditions requiring limited user intervention (one seed). The detection of the optimal parameters is managed internally using a measure of the varying contrast of the growing region, and the stopping criterion is adapted to the noise level in the dataset thanks to the sampling strategy used for the assessment function. Sampling is referred to the statistics of a neighborhood around the seed(s), so that the sampling period becomes greater when images are noisier, resulting in the acquisition of a lower frequency version of the contrast function. Validation is provided for synthetic images, as well as real CT datasets. For the CT test images, validation is referred to manual delineations for 10 cases and to subjective assessment for another 35. 
High values of sensitivity and specificity, as well as Dice's coefficient and Jaccard's index on one hand, and satisfactory subjective evaluation on the other hand, prove the robustness of our contrast-based measure, even suggesting suitability for calibration of other region-based segmentation algorithms. {\textcopyright} 2010 Springer-Verlag. }, } |
2012 | Journal | Iván Macía, Manuel Graña, Celine Paloc (2012). Knowledge management in image-based analysis of blood vessel structures. Knowledge and Information Systems, 30(2), pp. 457–491. (link) (bib) x @article{Macia2012b, year = { 2012 }, volume = { 30 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Knowledge management in image-based analysis of blood vessel structures }, pages = { 457--491 }, number = { 2 }, keywords = { Knowledge representation,Medical image,Vessel analysis }, journal = { Knowledge and Information Systems }, issn = { 02191377 }, doi = { 10.1007/s10115-010-0377-x }, author = { Mac{\'{i}}a and Gra{\~{n}}a and Paloc }, abstract = { We have detected the lack of a widely accepted knowledge representation model in the area of Blood Vessel analysis. We find that such a tool is needed for the future development of the field and our own research efforts. It will allow easy reuse of software pieces through appropriate abstractions, facilitating the development of innovative methods, procedures and applications. We include a thorough review of vascular morphology image analysis. After the identification of the key representation elements and operations, we propose a Vessel Knowledge Representation (VKR) model that would fill this gap. We give insights into its implementation based on standard Object-Oriented Programming tools and paradigms. The VKR would easily integrate with existing medical imaging and visualization software platforms, such as the Insight ToolKit (ITK) and Visualization Toolkit (VTK). {\textcopyright} 2011 Springer-Verlag London Limited. }, } |
2012 | Journal | Mika Kortesniemi, Eero Salli, Raija Seuri (2012). Organ dose calculation in CT based on scout image data and automatic image registration. Acta Radiologica, 53(8), pp. 908–913. (link) (bib) x @article{Kortesniemi2012, year = { 2012 }, volume = { 53 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Organ dose calculation in CT based on scout image data and automatic image registration }, pages = { 908--913 }, number = { 8 }, keywords = { Computed tomography,Dosimetry,Technical aspects }, journal = { Acta Radiologica }, issn = { 02841851 }, doi = { 10.1258/ar.2012.110611 }, author = { Kortesniemi and Salli and Seuri }, abstract = { Background: Computed tomography (CT) has become the main contributor of the cumulative radiation exposure in radiology. Information on cumulative exposure history of the patient should be available for efficient management of radiation exposures and for radiological justification. Purpose: To develop and evaluate automatic image registration for organ dose calculation in CT. Material and Methods: Planning radiograph (scout) image data describing CT scan ranges from 15 thoracic CT examinations (9 men and 6 women) and 10 abdominal CT examinations (6 men and 4 women) were co-registered with the reference trunk CT scout image. 2-D affine transformation and normalized correlation metric was used for image registration. Longitudinal (z-axis) scan range coordinates on the reference scout image were converted into slice locations on the CT-Expo anthropomorphic male and female models, following organ and effective dose calculations. Results: The average deviation of z-location of studied patient images from the corresponding location in the reference scout image was 6.2 mm. The ranges of organ and effective doses with constant exposure parameters were from 0 to 28.0 mGy and from 7.3 to 14.5 mSv, respectively. 
The mean deviation of the doses for fully irradiated organs (inside the scan range), partially irradiated organs and non-irradiated organs (outside the scan range) was 1{\%}, 5{\%}, and 22{\%}, respectively, due to image registration. Conclusion: The automated image processing method to registrate individual chest and abdominal CT scout radiograph with the reference scout radiograph is feasible. It can be used to determine the individual scan range coordinates in z-direction to calculate the organ dose values. The presented method could be utilized in automatic organ dose calculation in CT for radiation exposure tracking of the patients. }, } |
2012 | Journal | B. J. Kopecky, J. S. Duncan, K. L. Elliott, B. Fritzsch (2012). Three-dimensional reconstructions from optical sections of thick mouse inner ears using confocal microscopy. Journal of Microscopy, 248(3), pp. 292–298. (link) (bib) x @article{Kopecky2012, year = { 2012 }, volume = { 248 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Three-dimensional reconstructions from optical sections of thick mouse inner ears using confocal microscopy }, pages = { 292--298 }, number = { 3 }, keywords = { Confocal microscopy,Inner ear,Mouse,STSLIM,Segmentation,Three-dimensional reconstruction }, journal = { Journal of Microscopy }, issn = { 00222720 }, doi = { 10.1111/j.1365-2818.2012.03673.x }, author = { Kopecky and Duncan and Elliott and Fritzsch }, abstract = { Three-dimensional (3D) reconstructions of the vertebrate inner ear have provided novel insights into the development of this complex organ. 3D reconstructions enable superior analysis of phenotypic differences between wild type and mutant ears but can result in laborious work when reconstructed from physically sectioned material. Although nondestructive optical sectioning light sheet microscopy may ultimately prove the ideal solution, these technologies are not yet commercially available, or in many instances are not monetarily feasible. Here we introduce a simple technique to image a fluorescently labelled ear at different stages throughout development at high resolution enabling 3D reconstruction of any component of the inner ear using confocal microscopy. We provide a step-by-step manual from tissue preparation to imaging to 3D reconstruction and analysis including a rationale and troubleshooting guide at each step for researchers with different equipment, protocols, and access to resources to successfully incorporate the principles of this method and customize them to their laboratory settings. {\textcopyright} 2012 Royal Microscopical Society. }, } |
2012 | Journal | Pasi Kankaanpää, Lassi Paavolainen, Silja Tiitta, Mikko Karjalainen, Joacim Päivärinne, Jonna Nieminen, Varpu Marjomäki, Jyrki Heino, Daniel J. White (2012). BioImageXD: An open, general-purpose and high-throughput image-processing platform. Nature Methods, 9(7), pp. 683–689. (link) (bib) x @article{Kankaanpaa2012, year = { 2012 }, volume = { 9 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { BioImageXD: An open, general-purpose and high-throughput image-processing platform }, pages = { 683--689 }, number = { 7 }, journal = { Nature Methods }, issn = { 15487091 }, doi = { 10.1038/nmeth.2047 }, author = { Kankaanp{\"{a}}{\"{a}} and Paavolainen and Tiitta and Karjalainen and P{\"{a}}iv{\"{a}}rinne and Nieminen and Marjom{\"{a}}ki and Heino and White }, abstract = { BioImageXD puts open-source computer science tools for three-dimensional visualization and analysis into the hands of all researchers, through a user-friendly graphical interface tuned to the needs of biologists. BioImageXD has no restrictive licenses or undisclosed algorithms and enables publication of precise, reproducible and modifiable workflows. It allows simple construction of processing pipelines and should enable biologists to perform challenging analyses of complex processes. We demonstrate its performance in a study of integrin clustering in response to selected inhibitors. {\textcopyright} 2012 Nature America, Inc. All rights reserved. }, } |
2012 | Journal | Andrew Godley, Ergun Ahunbay, Cheng Peng, X. Allen Li (2012). Accumulating daily-varied dose distributions of prostate radiation therapy with soft-tissue-based KV CT guidance. Journal of Applied Clinical Medical Physics, 13(3), pp. 98–107. (link) (bib) x @article{Godley2012, year = { 2012 }, volume = { 13 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Accumulating daily-varied dose distributions of prostate radiation therapy with soft-tissue-based KV CT guidance }, pages = { 98--107 }, number = { 3 }, keywords = { Cumulative dose,Deformable image registration,Prostate radiation therapy,Soft-tissue-based registration }, journal = { Journal of Applied Clinical Medical Physics }, issn = { 15269914 }, doi = { 10.1120/jacmp.v13i3.3859 }, author = { Godley and Ahunbay and Peng and {Allen Li} }, abstract = { Even with daily image guidance based on soft tissue registration, deviations of fractional doses can be quite large due to changes in patient anatomy. It is of interest to ascertain the cumulative effect of these deviations on the total delivered dose. Daily kV CT data acquired using an in-room CT for five prostate cancer patients were analyzed. Each daily CT was deformably registered to the planning CT using an in-house tool. The resulting deformation field was used to map the delivered daily dose onto the planning CT, then summed to obtain the cumulative (total delivered) dose to the patient. The delivered cumulative values of prostate D100 on average were only 2.9{\%} less than their planned values, while the PTV D95 were 3.6{\%} less. The delivered rectum and bladder V70s can be twice what was planned. The less than 3{\%} difference between delivered and planned prostate coverage indicates that the PTV margin of 5 mm was sufficient with the soft-tissue-based kV CT guidance for the cases studied. }, } |
2012 | Journal | Wolfgang Busch, Brad T. Moore, Bradley Martsberger, Daniel L. MacE, Richard W. Twigg, Jee Jung, Iulian Pruteanu-Malinici, Scott J. Kennedy, Gregory K. Fricke, Robert L. Clark, Uwe Ohler, Philip N. Benfey (2012). A microfluidic device and computational platform for high-throughput live imaging of gene expression. Nature Methods, 9(11), pp. 1101–1106. (link) (bib) x @article{Busch2012, year = { 2012 }, volume = { 9 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A microfluidic device and computational platform for high-throughput live imaging of gene expression }, pages = { 1101--1106 }, number = { 11 }, journal = { Nature Methods }, issn = { 15487091 }, doi = { 10.1038/nmeth.2185 }, author = { Busch and Moore and Martsberger and MacE and Twigg and Jung and Pruteanu-Malinici and Kennedy and Fricke and Clark and Ohler and Benfey }, abstract = { To fully describe gene expression dynamics requires the ability to quantitatively capture expression in individual cells over time. Automated systems for acquiring and analyzing real-time images are needed to obtain unbiased data across many samples and conditions. We developed a microfluidics device, the RootArray, in which 64 Arabidopsis thaliana seedlings can be grown and their roots imaged by confocal microscopy over several days without manual intervention. To achieve high throughput, we decoupled acquisition from analysis. In the acquisition phase, we obtain images at low resolution and segment to identify regions of interest. Coordinates are communicated to the microscope to record the regions of interest at high resolution. In the analysis phase, we reconstruct three-dimensional objects from stitched high-resolution images and extract quantitative measurements from a virtual medial section of the root. We tracked hundreds of roots to capture detailed expression patterns of 12 transgenic reporter lines under different conditions. {\textcopyright} 2012 Nature America, Inc. 
All rights reserved. }, } |
2012 | Journal | Matthew S. Brown, Gregory H. Chu, Hyun J. Kim, Martin Allen-Auerbach, Cheryce Poon, Juliette Bridges, Adria Vidovic, Bharath Ramakrishna, Judy Ho, Michael J. Morris, Steven M. Larson, Howard I. Scher, Jonathan G. Goldin (2012). Computer-aided quantitative bone scan assessment of prostate cancer treatment response. Nuclear Medicine Communications, 33(4), pp. 384–394. (link) (bib) x @article{Brown2012, year = { 2012 }, volume = { 33 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Computer-aided quantitative bone scan assessment of prostate cancer treatment response }, pages = { 384--394 }, number = { 4 }, keywords = { bone neoplasms,bone scan,computer-assisted detection,computer-assisted image processing,prostate cancer,radionuclide imaging }, journal = { Nuclear Medicine Communications }, issn = { 01433636 }, doi = { 10.1097/MNM.0b013e3283503ebf }, author = { Brown and Chu and Kim and Allen-Auerbach and Poon and Bridges and Vidovic and Ramakrishna and Ho and Morris and Larson and Scher and Goldin }, abstract = { OBJECTIVE: The development and evaluation of a computer-aided bone scan analysis technique to quantify changes in tumor burden and assess treatment effects in prostate cancer clinical trials. METHODS: We have developed and report on a commercial fully automated computer-aided detection (CAD) system. Using this system, scan images were intensity normalized, and then lesions were identified and segmented by anatomic region-specific intensity thresholding. Detected lesions were compared against expert markings to assess the accuracy of the CAD system. The metrics Bone Scan Lesion Area, Bone Scan Lesion Intensity, and Bone Scan Lesion Count were calculated from identified lesions, and their utility in assessing treatment effects was evaluated by analyzing before and after scans from metastatic castration-resistant prostate cancer patients: 10 treated and 10 untreated. 
In this study, patients were treated with cabozantinib, a MET/vascular endothelial growth factor inhibitor resulting in high rates of resolution of bone scan abnormalities. RESULTS: Our automated CAD system identified bone lesion pixels with 94{\%} sensitivity, 89{\%} specificity, and 89{\%} accuracy. Significant differences in changes from baseline were found between treated and untreated groups in all assessed measurements derived by our system. The most significant measure, Bone Scan Lesion Area, showed a median (interquartile range) change from baseline at week 6 of 7.13{\%} (27.61) in the untreated group compared with -73.76{\%} (45.38) in the cabozantinib-treated group (P=0.0003). CONCLUSION: Our system accurately and objectively identified and quantified metastases in bone scans, allowing for interpatient and intrapatient comparison. It demonstrates potential as an objective measurement of treatment effects, laying the foundation for validation against other clinically relevant outcome measures. Copyright {\textcopyright} Lippincott Williams {\&} Wilkins. }, } |
2012 | Journal | Silvain Bériault, Fahd Al Subaie, D. Louis Collins, Abbas F. Sadikot, G. Bruce Pike (2012). A multi-modal approach to computer-assisted deep brain stimulation trajectory planning. International Journal of Computer Assisted Radiology and Surgery, 7(5), pp. 687–704. (link) (bib) x @article{Beriault2012, year = { 2012 }, volume = { 7 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { A multi-modal approach to computer-assisted deep brain stimulation trajectory planning }, pages = { 687--704 }, number = { 5 }, keywords = { Decision-support system,Deep brain stimulation,Image-guided neurosurgery,Parkinson's disease,Preoperative planning }, journal = { International Journal of Computer Assisted Radiology and Surgery }, issn = { 18616429 }, doi = { 10.1007/s11548-012-0768-4 }, author = { B{\'{e}}riault and Subaie and Collins and Sadikot and Pike }, abstract = { Purpose: Both frame-based and frameless approaches to deep brain stimulation (DBS) require planning of insertion trajectories that mitigate hemorrhagic risk and loss of neurological function. Currently, this is done by manual inspection of multiple potential electrode trajectories on MR-imaging data. We propose and validate a method for computer-assisted DBS trajectory planning. Method: Our framework integrates multi-modal MRI analysis (T1w, SWI, TOF-MRA) to compute suitable DBS trajectories that optimize the avoidance of specific critical brain structures. A cylinder model is used to process each trajectory and to evaluate complex surgical constraints described via a combination of binary and fuzzy segmented datasets. The framework automatically aggregates the multiple constraints into a unique ranking of recommended low-risk trajectories. Candidate trajectories are represented as a few well-defined cortical entry patches of best-ranked trajectories and presented to the neurosurgeon for final trajectory selection. 
Results: The proposed algorithm permits a search space containing over 8,000 possible trajectories to be processed in less than 20 s. A retrospective analysis on 14 DBS cases of patients with severe Parkinson's disease reveals that our framework can improve the simultaneous optimization of many pre-formulated surgical constraints. Furthermore, all automatically computed trajectories were evaluated by two neurosurgeons, were judged suitable for surgery and, in many cases, were judged preferable or equivalent to the manually planned trajectories used during the operation. Conclusions: This work provides neurosurgeons with an intuitive and flexible decision-support system that allows objective and patient-specific optimization of DBS lead trajectories, which should improve insertion safety and reduce surgical time. {\textcopyright} 2012 CARS. }, } |
2012 | Journal | Albina Asadulina, Aurora Panzera, Csaba Verasztó, Christian Liebig, Gáspár Jékely (2012). Whole-body gene expression pattern registration in Platynereis larvae. EvoDevo, 3(1), pp. 12. (link) (bib) x @article{Asadulina2012, year = { 2012 }, volume = { 3 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Whole-body gene expression pattern registration in Platynereis larvae }, pages = { 12 }, number = { 1 }, journal = { EvoDevo }, issn = { 20419139 }, doi = { 10.1186/2041-9139-3-27 }, author = { Asadulina and Panzera and Veraszt{\'{o}} and Liebig and J{\'{e}}kely }, abstract = { Background: Digital anatomical atlases are increasingly used in order to depict different gene expression patterns and neuronal morphologies within a standardized reference template. In evo-devo, a discipline in which the comparison of gene expression patterns is a widely used approach, such standardized anatomical atlases would allow a more rigorous assessment of the conservation of and changes in gene expression patterns during micro- and macroevolutionary time scales. Due to its small size and invariant early development, the annelid Platynereis dumerilii is particularly well suited for such studies. Recently a reference template with registered gene expression patterns has been generated for the anterior part (episphere) of the Platynereis trochophore larva and used for the detailed study of neuronal development.Results: Here we introduce and evaluate a method for whole-body gene expression pattern registration for Platynereis trochophore and nectochaete larvae based on whole-mount in situ hybridization, confocal microscopy, and image registration. We achieved high-resolution whole-body scanning using the mounting medium 2,2'-thiodiethanol (TDE), which allows the matching of the refractive index of the sample to that of glass and immersion oil thereby reducing spherical aberration and improving depth penetration. 
This approach allowed us to scan entire whole-mount larvae stained with nitroblue tetrazolium/5-bromo-4-chloro-3-indolyl phosphate (NBT/BCIP) in situ hybridization and counterstained fluorescently with an acetylated-tubulin antibody and the nuclear stain 4'6-diamidino-2-phenylindole (DAPI). Due to the submicron isotropic voxel size whole-mount larvae could be scanned in any orientation. Based on the whole-body scans, we generated four different reference templates by the iterative registration and averaging of 40 individual image stacks using either the acetylated-tubulin or the nuclear-stain signal for each developmental stage. We then registered to these templates the expression patterns of cell-type specific genes. In order to evaluate the gene expression pattern registration, we analyzed the absolute deviation of cell-center positions. Both the acetylated-tubulin- and the nuclear-stain-based templates allowed near-cellular-resolution gene expression registration. Nuclear-stain-based templates often performed significantly better than acetylated-tubulin-based templates. We provide detailed guidelines and scripts for the use and further expansion of the Platynereis gene expression atlas.Conclusions: We established whole-body reference templates for the generation of gene expression atlases for Platynereis trochophore and nectochaete larvae. We anticipate that nuclear-staining-based image registration will be applicable for whole-body alignment of the embryonic and larval stages of other organisms in a similar size range. {\textcopyright} 2012 Asadulina et al.; licensee BioMed Central Ltd. }, } |
2012 | Journal | Chris Allan, Jean Marie Burel, Josh Moore, Colin Blackburn, Melissa Linkert, Scott Loynton, Donald MacDonald, William J. Moore, Carlos Neves, Andrew Patterson, Michael Porter, Aleksandra Tarkowska, Brian Loranger, Jerome Avondo, Ingvar Lagerstedt, Luca Lianas, Simone Leo, Katherine Hands, Ron T. Hay, Ardan Patwardhan, Christoph Best, Gerard J. Kleywegt, Gianluigi Zanetti, Jason R. Swedlow (2012). OMERO: Flexible, model-driven data management for experimental biology. Nature Methods, 9(3), pp. 245–253. (link) (bib) x @article{Allan2012, year = { 2012 }, volume = { 9 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { OMERO: Flexible, model-driven data management for experimental biology }, pages = { 245--253 }, number = { 3 }, journal = { Nature Methods }, issn = { 15487091 }, doi = { 10.1038/nmeth.1896 }, author = { Allan and Burel and Moore and Blackburn and Linkert and Loynton and MacDonald and Moore and Neves and Patterson and Porter and Tarkowska and Loranger and Avondo and Lagerstedt and Lianas and Leo and Hands and Hay and Patwardhan and Best and Kleywegt and Zanetti and Swedlow }, abstract = { Data-intensive research depends on tools that manage multidimensional, heterogeneous datasets. We built OME Remote Objects (OMERO), a software platform that enables access to and use of a wide range of biological data. OMERO uses a server-based middleware application to provide a unified interface for images, matrices and tables. OMERO's design and flexibility have enabled its use for light-microscopy, high-content-screening, electron-microscopy and even non-image-genotype data. OMERO is open-source software, available at http://openmicroscopy.org/. {\textcopyright} 2012 Nature America, Inc. All rights reserved. }, } |
2012 | In Collection | Brian B Avants, Nicholas J Tustison, Gang Song, Baohua Wu, Michael Stauffer, Matthew M McCormick, Hans J Johnson, James C Gee (2012). A unified image registration framework for ITK. In D (Eds. Dawant, editor, Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 266–275. (link) (bib) x @incollection{Avants2012, year = { 2012 }, volume = { 7359 LNCS }, url = { http://link.springer.com/10.1007/978-3-642-31340-0{\_}28 http://link.springer.com/chapter/10.1007{\%}2F978-3-642-31340-0{\_}28 }, title = { A unified image registration framework for ITK }, publisher = { Springer Berlin Heidelberg }, pages = { 266--275 }, organization = { Springer Berlin Heidelberg }, number = { LNCS 7359 }, issn = { 03029743 }, isbn = { 9783642313394 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Avants et al/Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)/Avants et al. - 2012 - A unified image registration framework for ITK.pdf:pdf }, editor = { [object Object] }, edition = { 5th Intern }, doi = { 10.1007/978-3-642-31340-0_28 }, chapter = { A Unified }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Avants and Tustison and Song and Wu and Stauffer and McCormick and Johnson and Gee }, abstract = { Publicly available scientific resources help establish evaluation standards, provide a platform for teaching and may improve reproducibility. Version 4 of the Insight ToolKit ( ITK 4 ) seeks to establish new standards in publicly available image registration methodology. In this work, we provide an overview and preliminary evaluation of the revised toolkit against registration based on the previous major ITK version (3.20). 
Furthermore, we propose a nomenclature that may be used to discuss registration frameworks via schematic representations. In total, the ITK 4 contribution is intended as a structure to support reproducible research practices, will provide a more extensive foundation against which to evaluate new work in image registration and also enable application level programmers a broad suite of tools on which to build. }, } |
2012 | In Collection | Brian B. Avants, Nicholas J. Tustison, Gang Song, Baohua Wu, Michael Stauffer, Matthew M. McCormick, Hans J. Johnson, James C. Gee (2012). A unified image registration framework for ITK. In D (Eds. Dawant, editor, Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 266–275. (link) (bib) x @incollection{Avants2012, year = { 2012 }, volume = { 7359 LNCS }, url = { http://link.springer.com/10.1007/978-3-642-31340-0{\_}28 http://link.springer.com/chapter/10.1007{\%}2F978-3-642-31340-0{\_}28 http://link.springer.com/chapter/10.1007{\%}252F978-3-642-31340-0{\_}28 }, title = { A unified image registration framework for ITK }, publisher = { Springer Berlin Heidelberg }, pages = { 266--275 }, organization = { Springer Berlin Heidelberg }, number = { LNCS 7359 }, issn = { 03029743 }, isbn = { 9783642313394 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Avants et al/Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)/Avants et al. - 2012 - A unified image registration framework for ITK.pdf:pdf }, editor = { [object Object] }, edition = { 5th Intern }, doi = { 10.1007/978-3-642-31340-0_28 }, chapter = { A Unified }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Avants and Tustison and Song and Wu and Stauffer and McCormick and Johnson and Gee }, annote = { From Duplicate 2 (A unified image registration framework for ITK - Avants, Brian B.; Tustison, Nicholas J.; Song, Gang; Wu, Baohua; Stauffer, Michael; McCormick, Matthew M.; Johnson, Hans J.; Gee, James C.) From Duplicate 1 (A unified image registration framework for ITK - Avants, Brian B.; Tustison, Nicholas J.; Song, Gang; Wu, Baohua; Stauffer, Michael; McCormick, Matthew M.; Johnson, Hans J.; Gee, James C.) 
From Duplicate 1 (A unified image registration framework for ITK - Avants, Brian B.; Tustison, Nicholas J.; Song, Gang; Wu, Baohua; Stauffer, Michael; McCormick, Matthew M.; Johnson, Hans J.; Gee, James C.) {\#}{\#}CONTRIBUTIONS: As a member of ITK development team, I worked closely to develop, test, and implement the registration frameworks described in this work.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} From Duplicate 2 (A unified image registration framework for ITK - Avants, Brian B.; Tustison, Nicholas J.; Song, Gang; Wu, Baohua; Stauffer, Michael; McCormick, Matthew M.; Johnson, Hans J.; Gee, James C.) From Duplicate 2 (A unified image registration framework for ITK - Avants, Brian B.; Tustison, Nicholas J.; Song, Gang; Wu, Baohua; Stauffer, Michael; McCormick, Matthew M.; Johnson, Hans J.; Gee, James C.) From Duplicate 1 (A unified image registration framework for ITK - Avants, Brian B.; Tustison, Nicholas J.; Song, Gang; Wu, Baohua; Stauffer, Michael; McCormick, Matthew M.; Johnson, Hans J.; Gee, James C.) {\#}{\#}CONTRIBUTIONS: As a member of ITK development team, I worked closely to develop, test, and implement the registration frameworks described in this work.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#} }, abstract = { Publicly available scientific resources help establish evaluation standards, provide a platform for teaching and may improve reproducibility. Version 4 of the Insight ToolKit ( ITK ) seeks to establish new standards in publicly available image registration methodology. In this work, we provide an overview and preliminary evaluation of the revised toolkit against registration based on the previous major ITK version (3.20). Furthermore, we propose a nomenclature that may be used to discuss registration frameworks via schematic representations. 
In total, the ITK contribution is intended as a structure to support reproducible research practices, will provide a more extensive foundation against which to evaluate new work in image registration and also enable application level programmers a broad suite of tools on which to build. {\textcopyright} 2012 Springer-Verlag. }, } |
2012 | In Collection | Iván Mac\'ia, Manuel Gra\~na (2012). Vascular section estimation in medical images using combined feature detection and evolutionary optimization. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 503–513. (link) (bib) x @incollection{Macia2012a, year = { 2012 }, volume = { 7209 LNAI }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84858807369{\&}doi=10.1007{\%}2F978-3-642-28931-6{\_}48{\&}partnerID=40{\&}md5=1574b66250aeb6a43bb8a54db03d91e3 }, type = { Serial }, title = { Vascular section estimation in medical images using combined feature detection and evolutionary optimization }, pages = { 503--513 }, number = { PART 2 }, keywords = { Evolutionary Optimization,Feature Detectors,Medialness,Medical Image Analysis,Section Estimator,Vascular Analysis,Vascular Tracking,Vesselness,Vessels }, issn = { 03029743 }, isbn = { 9783642289309 }, doi = { 10.1007/978-3-642-28931-6_48 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Mac{\'{i}}a and Gra{\~{n}}a }, abstract = { Accurate detection and extraction of 3D vascular structures is a crucial step for many medical image applications that require vascular analysis. Vessel tracking algorithms iteratively follow vascular branches point by point, obtaining geometric descriptors, such as centerlines and sections of branches, that describe patient-specific vasculature. In order to obtain these descriptors, most approaches use specialized scaled vascular feature detectors. However, these detectors may fail due to the presence of nearby spurious structures, incorrect scale or parameter choice or other undesired effects, obtaining incorrect local sections which may lead to unrecoverable errors during the tracking procedure. 
We propose to combine this approach with an evolutionary optimization framework that use specific modified vascular detectors as cost functions in order to obtain accurate vascular sections when the direct detection approach fails. We demonstrate the validity of this new approach with experiments using real datasets. We also show that, for a family of medialness functions, the procedure can be performed at fixed small scales which is computationally efficient for local kernel-based estimators. {\textcopyright} 2012 Springer-Verlag. }, } |
2012 | In Collection | Jason Kutarnia, Peder C. Pedersen (2012). Generation of 3D ultrasound training volumes from freehand acquired data. In Studies in Health Technology and Informatics, pp. 238–244. (link) (bib) x @incollection{Kutarnia2012, year = { 2012 }, volume = { 173 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84860628403{\&}doi=10.3233{\%}2F978-1-61499-022-2-238{\&}partnerID=40{\&}md5=1799c545e84fbed039abc026f951ab6d }, type = { Serial }, title = { Generation of 3D ultrasound training volumes from freehand acquired data }, pages = { 238--244 }, keywords = { Freehand ultrasound,Non-rigid registration }, issn = { 18798365 }, isbn = { 9781614990215 }, doi = { 10.3233/978-1-61499-022-2-238 }, booktitle = { Studies in Health Technology and Informatics }, author = { Kutarnia and Pedersen }, abstract = { We are developing a low cost ultrasound training system running on a laptop in which the user scans a generic 3D curved surface representing the patient using a 5 DoF sensor. A critical component of this system is the generation of ultrasound training image volumes, which need to cover a complete body region in order to provide a realistic scanning experience. This research attempts to develop stitching techniques to generate large global volumes from smaller overlapping volumes acquired using freehand techniques. {\textcopyright} 2012 The authors and IOS Press. All rights reserved. }, } |
2012 | In Conf. Proceedings | Alexander Haak, Stefan Klein, Gerard Van Burken, Nico De Jong, Antonius F.W. Van Der Steen, Johannes G. Bosch, Marijn Van Stralen, Josien P.W. Pluim (2012). Optimal kernel sizes for 4D image reconstruction using normalized convolution from sparse fast-rotating transesophageal 2D ultrasound images. In IEEE International Ultrasonics Symposium, IUS, pp. 703–706, New York. (link) (bib) x @inproceedings{Haak2012, year = { 2012 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84882446800{\&}doi=10.1109{\%}2FULTSYM.2012.0175{\&}partnerID=40{\&}md5=2eab7eaf8d8fbce0ab5d12917b0183ab {\%}3CGo to }, type = { Book }, title = { Optimal kernel sizes for 4D image reconstruction using normalized convolution from sparse fast-rotating transesophageal 2D ultrasound images }, series = { 2012 Ieee International Ultrasonics Symposium }, publisher = { Ieee }, pages = { 703--706 }, keywords = { kernel optimization,normalized convolution,quasi newton }, issn = { 19485719 }, isbn = { 9781467345613 }, doi = { 10.1109/ULTSYM.2012.0175 }, booktitle = { IEEE International Ultrasonics Symposium, IUS }, author = { Haak and Klein and {Van Burken} and {De Jong} and {Van Der Steen} and Bosch and {Van Stralen} and Pluim }, address = { New York }, abstract = { A transesophageal echocardiography (TEE) micro-probe is suitable for monitoring long minimally invasive interventions in the heart, because it is well tolerated by patients. To visualize complex 3D structures of the beating heart, a 4D-image reconstruction derived from irregularly and sparsely sampled 2D images is needed. We previously showed that normalized convolution (NC) with optimized kernels performs better than nearest-neighbor or linear interpolation. In order to use NC for image reconstructions we need to be able to predict optimal kernel sizes. We therefore present an advanced optimization scheme, and estimate optimal NC kernel sizes for five different patient-data sets. 
From the optimization results we derive a model for estimating optimal NC kernel sizes. As ground truth (GT), we used five full-volume 4D TEE patient scans, acquired with the X7-2t matrix transducer. To simulate 2D data acquisition, the GT datasets were sliced at random rotation angles and at random normalized cardiac phases. Data sets containing 400, 600, 900, 1350, and 1800 2D images were created for all patients, producing a total of 25 data sets. A 2D Gaussian function was used as NC kernel, and optimal kernel sizes were obtained with a quasi-Newton optimizer. A power law model was fitted to the optimal kernels estimated. We conclude that optimal kernel sizes for NC can be successfully predicted by a model at the cost of a relatively small increase in the reconstruction error. {\textcopyright} 2012 IEEE. }, } |
2012 | In Conf. Proceedings | Gregory H. Chu, Pechin Lo, Hyun J. Kim, Peiyun Lu, Bharath Ramakrishna, David Gjertson, Cheryce Poon, Martin Auerbach, Jonathan Goldin, Matthew S. Brown (2012). Automated segmentation of tumors on bone scans using anatomy-specific thresholding. In Medical Imaging 2012: Computer-Aided Diagnosis, pp. 83150F, Bellingham. (link) (bib) x @inproceedings{Chu2012, year = { 2012 }, volume = { 8315 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874884929{\&}doi=10.1117{\%}2F12.911462{\&}partnerID=40{\&}md5=892c7bf1991afd54a3da43cf68fda6d8 }, type = { Conference Proceedings }, title = { Automated segmentation of tumors on bone scans using anatomy-specific thresholding }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 83150F }, issn = { 16057422 }, isbn = { 9780819489647 }, editor = { van Ginneken and Novak }, doi = { 10.1117/12.911462 }, booktitle = { Medical Imaging 2012: Computer-Aided Diagnosis }, author = { Chu and Lo and Kim and Lu and Ramakrishna and Gjertson and Poon and Auerbach and Goldin and Brown }, address = { Bellingham }, abstract = { Quantification of overall tumor area on bone scans may be a potential biomarker for treatment response assessment and has, to date, not been investigated. Segmentation of bone metastases on bone scans is a fundamental step for this response marker. In this paper, we propose a fully automated computerized method for the segmentation of bone metastases on bone scans, taking into account characteristics of different anatomic regions. A scan is first segmented into anatomic regions via an atlas-based segmentation procedure, which involves non-rigidly registering a labeled atlas scan to the patient scan. Next, an intensity normalization method is applied to account for varying levels of radiotracer dosing levels and scan timing. Lastly, lesions are segmented via anatomic regionspecific intensity thresholding. 
Thresholds are chosen by receiver operating characteristic (ROC) curve analysis against manual contouring by board certified nuclear medicine physicians. A leave-one-out cross validation of our method on a set of 39 bone scans with metastases marked by 2 board-certified nuclear medicine physicians yielded a median sensitivity of 95.5{\%}, and specificity of 93.9{\%}. Our method was compared with a global intensity thresholding method. The results show a comparable sensitivity and significantly improved overall specificity, with a p-value of 0.0069. }, } |
2011 | Book chapter | G N Stevenson, S L Collins, L Impey, J A Noble, IEEE (2011). NA in SURFACE PARAMETERISATION OF THE UTERO/PLACENTAL INTERFACE USING 3D POWER DOPPLER ULTRASOUND, IEEE, pp. 891–894, IEEE International Symposium on Biomedical Imaging. (link) (bib) x @inbook{Stevenson2011, year = { 2011 }, url = { {\%}3CGo to }, type = { Book Section }, title = { SURFACE PARAMETERISATION OF THE UTERO/PLACENTAL INTERFACE USING 3D POWER DOPPLER ULTRASOUND }, series = { IEEE International Symposium on Biomedical Imaging }, publisher = { IEEE }, pages = { 891--894 }, isbn = { 978-1-4244-4128-0 }, booktitle = { 2011 8th IEEE International Symposium on Biomedical Imaging: From Nano to Macro }, author = { Stevenson and Collins and Impey and Noble and {IEEE} }, address = { New York }, } |
2011 | Journal | Ronald K. Pierson, Hans J. Johnson, Gregory Harris, Helen Keefe, Jane S. Paulsen, Nancy C. Andreasen, Vincent A. Magnotta (2011). Fully automated analysis using BRAINS: AutoWorkup. NeuroImage, 54(1), pp. 328–336. (link) (bib) x @article{Pierson2011, year = { 2011 }, volume = { 54 }, url = { http://www.sciencedirect.com/science/article/pii/S1053811910009055 papers2://publication/uuid/35F9A8B8-DD4A-4857-903F-16D3F7CCAEF7 http://www.ncbi.nlm.nih.gov/pubmed/20600977 }, title = { Fully automated analysis using BRAINS: AutoWorkup }, publisher = { Elsevier Inc. }, pmid = { 20600977 }, pages = { 328--336 }, number = { 1 }, month = { jan }, keywords = { Automated image analysis,BRAINS,Morphometry,Pipeline,Segmentation,Volumetric analysis }, journal = { NeuroImage }, issn = { 10538119 }, isbn = { 1095-9572 (Electronic)$\backslash$n1053-8119 (Linking) }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Pierson et al/NeuroImage/Pierson et al. - 2011 - Fully automated analysis using BRAINS AutoWorkup.pdf:pdf }, doi = { 10.1016/j.neuroimage.2010.06.047 }, author = { Pierson and Johnson and Harris and Keefe and Paulsen and Andreasen and Magnotta }, annote = { {\#}{\#}CONTRIBUTIONS: I developed custom analysis software to achieve the desired interpretation of results. I had substantial contributions to the software methods development, interpretation of validation results for this work. I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} }, abstract = { The BRAINS (Brain Research: Analysis of Images, Networks, and Systems) image analysis software has been in use, and in constant development, for over 20 years. 
The original neuroimage analysis pipeline using BRAINS was designed as a semiautomated procedure to measure volumes of the cerebral lobes and subcortical structures, requiring manual intervention at several stages in the process. Through use of advanced image processing algorithms the need for manual intervention at stages of image realignment, tissue sampling, and mask editing have been eliminated. In addition, inhomogeneity correction, intensity normalization, and mask cleaning routines have been added to improve the accuracy and consistency of the results. The fully automated method, AutoWorkup, is shown in this study to be more reliable (ICC ≥ 0.96, Jaccard index ≥ 0.80, and Dice index ≥ 0.89 for all tissues in all regions) than the average of 18 manual raters. On a set of 1130 good quality scans, the failure rate for correct realignment was 1.1{\%}, and manual editing of the brain mask was required on 4{\%} of the scans. In other tests, AutoWorkup is shown to produce measures that are reliable for data acquired across scanners, scanner vendors, and across sequences. Application of AutoWorkup for the analysis of data from the 32-site, multivendor PREDICT-HD study yield estimates of reliability to be greater than or equal to 0.90 for all tissues and regions. {\textcopyright} 2010 Elsevier Inc. }, } |
2011 | Journal | Ashley Newton, Lucie Wright (2011). Teaching toolkit for medical students. Clinical Teacher, 8(4), pp. 254–257. (bib) x @article{Newton2011, year = { 2011 }, volume = { 8 }, title = { Teaching toolkit for medical students }, pages = { 254--257 }, number = { 4 }, journal = { Clinical Teacher }, issn = { 17434971 }, doi = { 10.1111/j.1743-498X.2011.00453.x }, author = { Newton and Wright }, abstract = { Background: From teaching juniors and peers to educating patients, it is imperative for all doctors to have basic core teaching skills. The Junior Association for the Study of Medical Education (JASME) felt that a short course in the fundamentals of teaching would be well received by students. Context: This article shares the lessons from a one-day teaching course aimed at senior medical students. Qualitative feedback helped decide which aspects of the course were most valued. Intervention: The course was piloted in London. It combined interactive plenary sessions on teaching theory with practical teaching sessions. Each student taught a small group of others a basic clinical skill, and the student teacher then received extensive feedback from their peers and an experienced clinician with a special interest in medical education. There was an opportunity to re-teach part of the skill after having taken the feedback on board. Implications: Students completed questionnaires at the start and end of the day to ascertain their expectations of the course and what they found most useful. Expectations can be grouped into three main areas: students wanted to improve their teaching skills; gain teaching experience; and receive feedback on their teaching. The most valuable part of the course was being able to practise teaching and receive feedback. Keywords used to describe the feedback included 'individual', 'valuable', 'constructive', 'instant' and 'in depth'. By continuing to run similar workshops we hope that we can further encourage the teachers of tomorrow. 
{\textcopyright} Blackwell Publishing Ltd 2011. }, } |
2011 | Journal | Ronald Pierson, Hans Johnson, Gregory Harris, Helen Keefe, Jane S. Paulsen, Nancy C. Andreasen, Vincent A. Magnotta (2011). Fully automated analysis using BRAINS: AutoWorkup. NeuroImage, 54(1), pp. 328–336. (link) (bib) x @article{RID:1020151228438-28, year = { 2011 }, volume = { 54 }, url = { http://www.sciencedirect.com/science/article/pii/S1053811910009055 papers2://publication/uuid/35F9A8B8-DD4A-4857-903F-16D3F7CCAEF7 http://www.ncbi.nlm.nih.gov/pubmed/20600977 }, title = { Fully automated analysis using BRAINS: AutoWorkup }, publisher = { Elsevier Inc. }, pmid = { 20600977 }, pages = { 328--336 }, number = { 1 }, month = { jan }, keywords = { Automated image analysis,BRAINS,Morphometry,Pipeline,Segmentation,Volumetric analysis }, journal = { NeuroImage }, issn = { 10538119 }, isbn = { 1095-9572 (Electronic)$\backslash$n1053-8119 (Linking) }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Pierson et al/NeuroImage/Pierson et al. - 2011 - Fully automated analysis using BRAINS AutoWorkup.pdf:pdf }, doi = { 10.1016/j.neuroimage.2010.06.047 }, author = { Pierson and Johnson and Harris and Keefe and Paulsen and Andreasen and Magnotta }, annote = { From Duplicate 1 (Fully automated analysis using BRAINS: AutoWorkup - Pierson, Ronald K.; Johnson, Hans J.; Harris, Gregory; Keefe, Helen; Paulsen, Jane S.; Andreasen, Nancy C.; Magnotta, Vincent A.) {\#}{\#}CONTRIBUTIONS: I developed custom analysis software to achieve the desired interpretation of results. I had substantial contributions to the software methods development, interpretation of validation results for this work. I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. 
:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (Fully automated analysis using BRAINS: AutoWorkup - Pierson, Ronald K.; Johnson, Hans J.; Harris, Gregory; Keefe, Helen; Paulsen, Jane S.; Andreasen, Nancy C.; Magnotta, Vincent A.) From Duplicate 2 (Fully automated analysis using BRAINS: AutoWorkup - Pierson, Ronald K.; Johnson, Hans J.; Harris, Gregory; Keefe, Helen; Paulsen, Jane S.; Andreasen, Nancy C.; Magnotta, Vincent A.) From Duplicate 1 (Fully automated analysis using BRAINS: AutoWorkup - Pierson, Ronald K.; Johnson, Hans J.; Harris, Gregory; Keefe, Helen; Paulsen, Jane S.; Andreasen, Nancy C.; Magnotta, Vincent A.) From Duplicate 1 (Fully automated analysis using BRAINS: AutoWorkup - Pierson, Ronald K.; Johnson, Hans J.; Harris, Gregory; Keefe, Helen; Paulsen, Jane S.; Andreasen, Nancy C.; Magnotta, Vincent A.) {\#}{\#}CONTRIBUTIONS: I developed custom analysis software to achieve the desired interpretation of results. I had substantial contributions to the software methods development, interpretation of validation results for this work. I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} }, abstract = { The BRAINS (Brain Research: Analysis of Images, Networks, and Systems) image analysis software has been in use, and in constant development, for over 20. years. The original neuroimage analysis pipeline using BRAINS was designed as a semiautomated procedure to measure volumes of the cerebral lobes and subcortical structures, requiring manual intervention at several stages in the process. Through use of advanced image processing algorithms the need for manual intervention at stages of image realignment, tissue sampling, and mask editing have been eliminated. 
In addition, inhomogeneity correction, intensity normalization, and mask cleaning routines have been added to improve the accuracy and consistency of the results. The fully automated method, AutoWorkup, is shown in this study to be more reliable (ICC ≥ 0.96, Jaccard index ≥ 0.80, and Dice index ≥ 0.89 for all tissues in all regions) than the average of 18 manual raters. On a set of 1130 good quality scans, the failure rate for correct realignment was 1.1{\%}, and manual editing of the brain mask was required on 4{\%} of the scans. In other tests, AutoWorkup is shown to produce measures that are reliable for data acquired across scanners, scanner vendors, and across sequences. Application of AutoWorkup for the analysis of data from the 32-site, multivendor PREDICT-HD study yield estimates of reliability to be greater than or equal to 0.90 for all tissues and regions. {\textcopyright} 2010 Elsevier Inc. }, } |
2011 | Journal | Tristan Whitmarsh, Ludovic Humbert, Mathieu De Craene, Luis M. Del Rio Barquero, Alejandro F. Frangi (2011). Reconstructing the 3D shape and bone mineral density distribution of the proximal femur from dual-energy x-ray absorptiometry. IEEE Transactions on Medical Imaging, 30(12), pp. 2101–2114. (link) (bib) x @article{Whitmarsh2011, year = { 2011 }, volume = { 30 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Reconstructing the 3D shape and bone mineral density distribution of the proximal femur from dual-energy x-ray absorptiometry }, pages = { 2101--2114 }, number = { 12 }, keywords = { Active appearance model,active shape model,deformable models,image reconstruction,image registration }, journal = { IEEE Transactions on Medical Imaging }, issn = { 02780062 }, doi = { 10.1109/TMI.2011.2163074 }, author = { Whitmarsh and Humbert and {De Craene} and {Del Rio Barquero} and Frangi }, abstract = { The accurate diagnosis of osteoporosis has gained increasing importance due to the aging of our society. Areal bone mineral density (BMD) measured by dual-energy X-ray absorptiometry (DXA) is an established criterion in the diagnosis of osteoporosis. This measure, however, is limited by its two-dimensionality. This work presents a method to reconstruct both the 3D bone shape and 3D BMD distribution of the proximal femur from a single DXA image used in clinical routine. A statistical model of the combined shape and BMD distribution is presented, together with a method for its construction from a set of quantitative computed tomography (QCT) scans. A reconstruction is acquired in an intensity based 3D-2D registration process whereby an instance of the model is found that maximizes the similarity between its projection and the DXA image. Reconstruction experiments were performed on the DXA images of 30 subjects, with a model constructed from a database of QCT scans of 85 subjects. 
The accuracy was evaluated by comparing the reconstructions with the same subject QCT scans. The method presented here can potentially improve the diagnosis of osteoporosis and fracture risk assessment from the low radiation dose and low cost DXA devices currently used in clinical routine. {\textcopyright} 2006 IEEE. }, } |
2011 | Journal | Daniela Wellein, Silvia Born, Matthias Pfeifle, Frank Duffner, Dirk Bartz (2011). A pipeline for interactive cortex segmentation. Computer Science - Research and Development, 26(1-2), pp. 87–96. (link) (bib) x @article{Wellein2011, year = { 2011 }, volume = { 26 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-79251618712{\&}doi=10.1007{\%}2Fs00450-010-0130-4{\&}partnerID=40{\&}md5=63eb5e1f357ccd1387341c04ab3f7522 }, type = { Journal Article }, title = { A pipeline for interactive cortex segmentation }, pages = { 87--96 }, number = { 1-2 }, keywords = { Cortex segmentation,Level set method,Neurosurgical intervention planning,Segmentation evaluation,User interaction,Watershed algorithm }, journal = { Computer Science - Research and Development }, issn = { 18652034 }, doi = { 10.1007/s00450-010-0130-4 }, author = { Wellein and Born and Pfeifle and Duffner and Bartz }, abstract = { In various clinical or research scenarios, such as neurosurgical intervention planning, diagnostics, or clinical studies concerning neurological diseases, cortex segmentation can be of great value. As, e.g., the visualization of the cortical surface along with target and risk structures enables conservative access planning and gives context information about the patient-specific anatomy. We present an interactive cortex segmentation pipeline (CSP) for T1-weighted MR images, utilizing watershed and level set methods. It is designed to allow the user to adjust the intermediate results at any stage of the segmentation process. Particular attention is paid to the appropriate visualization of the segmentation in the context of the original data for verification and to different interaction methods (manual editing, parameter tuning, morphological operations). Evaluation of the interactive CSP is performed with the Segmentation Validation Engine (SVE) by Shattuck et al. (NeuroImage 45(2):431-439, 2009). 
The segmentation quality of our method is comparable to the best results of three different established methods: the brain extraction tool (BET), brain surface extractor (BSE), and hybrid watershed algorithm (HWA). Being designed for interaction, the CSP integrates the users' expertise by allowing him to perform correction at any stage of the pipeline, enabling him to easily achieve a segmentation fulfilling his specific needs. {\textcopyright} 2010 Springer-Verlag. }, } |
2011 | Journal | Art Riddle, Justin Dean, Joshua R. Buser, Xi Gong, Jennifer Maire, Kevin Chen, Tahir Ahmad, Victor Cai, Thuan Nguyen, Christopher D. Kroenke, A. Roger Hohimer, Stephen A. Back (2011). Histopathological correlates of magnetic resonance imaging-defined chronic perinatal white matter injury. Annals of Neurology, 70(3), pp. 493–507. (link) (bib) x @article{Riddle2011, year = { 2011 }, volume = { 70 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Histopathological correlates of magnetic resonance imaging-defined chronic perinatal white matter injury }, pages = { 493--507 }, number = { 3 }, journal = { Annals of Neurology }, issn = { 03645134 }, doi = { 10.1002/ana.22501 }, author = { Riddle and Dean and Buser and Gong and Maire and Chen and Ahmad and Cai and Nguyen and Kroenke and Hohimer and Back }, abstract = { Objective: Although magnetic resonance imaging (MRI) is the optimal imaging modality to define cerebral white-matter injury (WMI) in preterm survivors, the histopathological features of MRI-defined chronic lesions are poorly defined. We hypothesized that chronic WMI is related to a combination of delayed oligodendrocyte (OL) lineage cell death and arrested maturation of preoligodendrocytes (preOLs). We determined whether ex vivo MRI can distinguish distinct microglial and astroglial responses related to WMI progression and arrested preOL differentiation. Methods: We employed a preterm fetal sheep model of global cerebral ischemia in which acute WMI results in selective preOL degeneration. We developed novel algorithms to register histopathologically- defined lesions with contrast-weighted and diffusion-weighted high-field ex vivo MRI data. Results: Despite mild delayed preOL degeneration, preOL density recovered to control levels by 7 days after ischemia and was {\^{a}}2 fold greater at 14 days. However, premyelinating OLs were significantly diminished at 7 and 14 days. 
WMI evolved to mostly gliotic lesions where arrested preOL differentiation was directly proportional to the magnitude of astrogliosis. A reduction in cerebral WM volume was accompanied by four classes of MRI-defined lesions. Each lesion type displayed unique astroglial and microglial responses that corresponded to distinct forms of necrotic or non-necrotic injury. High-field MRI defined 2 novel hypointense signal abnormalities on T 2-weighted images that coincided with microscopic necrosis or identified astrogliosis with high sensitivity and specificity. Interpretation: These studies support the potential of high-field MRI for early identification of microscopic necrosis and gliosis with preOL maturation arrest, a common form of WMI in preterm survivors. {\textcopyright} 2011 American Neurological Association. }, } |
2011 | Journal | J. Ramirez, E. Gibson, A. Quddus, N. J. Lobaugh, A. Feinstein, B. Levine, C. J.M. Scott, N. Levy-Cooperman, F. Q. Gao, S. E. Black (2011). Lesion Explorer: A comprehensive segmentation and parcellation package to obtain regional volumetrics for subcortical hyperintensities and intracranial tissue. NeuroImage, 54(2), pp. 963–973. (link) (bib) x @article{Ramirez2011, year = { 2011 }, volume = { 54 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Lesion Explorer: A comprehensive segmentation and parcellation package to obtain regional volumetrics for subcortical hyperintensities and intracranial tissue }, pages = { 963--973 }, number = { 2 }, keywords = { Aging,Alzheimer's disease,Brain volume,Lesion analysis,Leukoariosis,MRI,Segmentation,White matter hyperintensities }, journal = { NeuroImage }, issn = { 10538119 }, doi = { 10.1016/j.neuroimage.2010.09.013 }, author = { Ramirez and Gibson and Quddus and Lobaugh and Feinstein and Levine and Scott and Levy-Cooperman and Gao and Black }, abstract = { Subcortical hyperintensities (SH) are a commonly observed phenomenon on MRI of the aging brain (Kertesz et al., 1988). Conflicting behavioral, cognitive and pathological associations reported in the literature underline the need to develop an intracranial volumetric analysis technique to elucidate pathophysiological origins of SH in Alzheimer's disease (AD), vascular cognitive impairment (VCI) and normal aging (De Leeuw et al., 2001; Mayer and Kier, 1991; Pantoni and Garcia, 1997; Sachdev et al., 2008). The challenge is to develop processing tools that effectively and reliably quantify subcortical small vessel disease in the context of brain tissue compartments. Segmentation and brain region parcellation should account for SH subtypes which are often classified as: periventricular (pvSH) and deep white (dwSH), incidental white matter disease or lacunar infarcts and Virchow-Robin spaces. 
Lesion Explorer (LE) was developed as the final component of a comprehensive volumetric segmentation and parcellation image processing stream built upon previously published methods (Dade et al., 2004; Kovacevic et al., 2002). Inter-rater and inter-method reliability was accomplished both globally and regionally. Volumetric analysis showed high inter-rater reliability both globally (ICC = 99) and regionally (ICC = 98). Pixel-wise spatial congruence was also high (SI = 97). Whole brain pvSH volumes yielded high inter-rater reliability (ICC = 99). Volumetric analysis against an alternative kNN segmentation revealed high inter-method reliability (ICC = 97). Comparison with visual rating scales showed high significant correlations (ARWMC: r = 86; CHIPS: r = 87). The pipeline yields a comprehensive and reliable individualized volumetric profile for subcortical vasculopathy that includes regionalized (26 brain regions) measures for: GM, WM, sCSF, vCSF, lacunar and non-lacunar pvSH and dwSH. {\textcopyright} 2010 Elsevier Inc. }, } |
2011 | Journal | Thomas S. Pheiffer, Jao J. Ou, Rowena E. Ong, Michael I. Miga (2011). Automatic generation of boundary conditions using demons nonrigid image registration for use in 3-D modality-independent elastography. IEEE Transactions on Biomedical Engineering, 58(9), pp. 2607–2616. (link) (bib) x @article{Pheiffer2011, year = { 2011 }, volume = { 58 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Automatic generation of boundary conditions using demons nonrigid image registration for use in 3-D modality-independent elastography }, pages = { 2607--2616 }, number = { 9 }, keywords = { Boundary conditions,elastography,finite element methods,image registration }, journal = { IEEE Transactions on Biomedical Engineering }, issn = { 00189294 }, doi = { 10.1109/TBME.2011.2159791 }, author = { Pheiffer and Ou and Ong and Miga }, abstract = { Modality-independent elastography (MIE) is a method of elastography that reconstructs the elastic properties of tissue using images acquired under different loading conditions and a biomechanical model. Boundary conditions are a critical input to the algorithm and are often determined by time-consuming point correspondence methods requiring manual user input. This study presents a novel method of automatically generating boundary conditions by nonrigidly registering two image sets with a demons diffusion-based registration algorithm. The use of this method was successfully performed in silico using magnetic resonance and X-ray-computed tomography image data with known boundary conditions. These preliminary results produced boundary conditions with an accuracy of up to 80 compared to the known conditions. Demons-based boundary conditions were utilized within a 3-D MIE reconstruction to determine an elasticity contrast ratio between tumor and normal tissue. 
Two phantom experiments were then conducted to further test the accuracy of the demons boundary conditions and the MIE reconstruction arising from the use of these conditions. Preliminary results show a reasonable characterization of the material properties on this first attempt and a significant improvement in the automation level and viability of the method. {\textcopyright} 2011 IEEE. }, } |
2011 | Journal | Dário A.B. Oliveira, Raul Q. Feitosa, Mauro M. Correia (2011). Segmentation of liver, its vessels and lesions from CT images for surgical planning. BioMedical Engineering Online, 10, pp. NA (link) (bib) x @article{Oliveira2011, year = { 2011 }, volume = { 10 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-79954573225{\&}doi=10.1186{\%}2F1475-925X-10-30{\&}partnerID=40{\&}md5=c81b11df842390263ed95985f684c387 }, type = { Journal Article }, title = { Segmentation of liver, its vessels and lesions from CT images for surgical planning }, journal = { BioMedical Engineering Online }, issn = { 1475925X }, doi = { 10.1186/1475-925X-10-30 }, author = { Oliveira and Feitosa and Correia }, abstract = { Background: Cancer treatments are complex and involve different actions, which include many times a surgical procedure. Medical imaging provides important information for surgical planning, and it usually demands a proper segmentation, i.e., the identification of meaningful objects, such as organs and lesions. This study proposes a methodology to segment the liver, its vessels and nodules from computer tomography images for surgical planning.Methods: The proposed methodology consists of four steps executed sequentially: segmentation of liver, segmentation of vessels and nodules, identification of hepatic and portal veins, and segmentation of Couinaud anatomical segments. Firstly, the liver is segmented by a method based on a deformable model implemented through level sets, of which parameters are adjusted by using a supervised optimization procedure. Secondly, a mixture model is used to segment nodules and vessels through a region growing process. Then, the identification of hepatic and portal veins is performed using liver anatomical knowledge and a vein tracking algorithm. 
Finally, the Couinaud anatomical segments are identified according to the anatomical liver model proposed by Couinaud.Results: Experiments were conducted using data and metrics brought from the liver segmentation competition held in the Sliver07 conference. A subset of five exams was used for estimation of segmentation parameter values, while 15 exams were used for evaluation. The method attained a good performance in 17 of the 20 exams, being ranked as the 6thbest semi-automatic method when comparing to the methods described on the Sliver07 website (2008). It attained visual consistent results for nodules and veins segmentation, and we compiled the results, showing the best, worst, and mean results for all dataset.Conclusions: The method for liver segmentation performed well, according to the results of the numerical evaluation implemented, and the segmentation of liver internal structures were consistent with the anatomy of the liver, as confirmed by a specialist. The analysis provided evidences that the method to segment the liver may be applied to segment other organs, especially to those whose distribution of voxel intensities is nearly Gaussian shaped. {\textcopyright} 2011 Oliveira et al; licensee BioMed Central Ltd. }, } |
2011 | Journal | Matthew D. McGee, Darren Weber, Nicholas Day, Cathy Vitelli, Danielle Crippen, Laura A. Herndon, David H. Hall, Simon Melov (2011). Loss of intestinal nuclei and intestinal integrity in aging C. elegans. Aging Cell, 10(4), pp. 699–710. (link) (bib) x @article{McGee2011, year = { 2011 }, volume = { 10 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Loss of intestinal nuclei and intestinal integrity in aging C. elegans }, pages = { 699--710 }, number = { 4 }, keywords = { Aging,C. elegans,Intestine,Microvilli,Nucleus }, journal = { Aging Cell }, issn = { 14749718 }, doi = { 10.1111/j.1474-9726.2011.00713.x }, author = { McGee and Weber and Day and Vitelli and Crippen and Herndon and Hall and Melov }, abstract = { The roundworm C. elegans is widely used as an aging model, with hundreds of genes identified that modulate aging (Kaeberlein et al., 2002. Mech. Ageing Dev.123, 1115-1119). The development and bodyplan of the 959 cells comprising the adult have been well described and established for more than 25years (Sulston {\&} Horvitz, 1977. Dev. Biol.56, 110-156; Sulston et al., 1983. Dev. Biol.100, 64-119.). However, morphological changes with age in this optically transparent animal are less well understood, with only a handful of studies investigating the pathobiology of aging. Age-related changes in muscle (Herndon, 2002. Nature419, 808-814), neurons (Herndon, 2002), intestine and yolk granules (Garigan, 2002. Genetics161, 1101-1112; Herndon, 2002), nuclear architecture (Haithcock, 2005. Proc. Natl Acad. Sci. USA102, 16690-16695), tail nuclei (Golden, 2007. Aging Cell6, 179-188), and the germline (Golden, 2007) have been observed via a variety of traditional relatively low-throughput methods. We report here a number of novel approaches to study the pathobiology of aging C. elegans. 
We combined histological staining of serial-sectioned tissues, transmission electron microscopy, and confocal microscopy with 3D volumetric reconstructions and characterized age-related morphological changes in multiple wild-type individuals at different ages. This enabled us to identify several novel pathologies with age in the C. elegans intestine, including the loss of critical nuclei, the degradation of intestinal microvilli, changes in the size, shape, and cytoplasmic contents of the intestine, and altered morphologies caused by ingested bacteria. The three-dimensional models we have created of tissues and cellular components from multiple individuals of different ages represent a unique resource to demonstrate global heterogeneity of a multicellular organism. {\textcopyright} 2011 The Authors. Aging Cell {\textcopyright} 2011 Blackwell Publishing Ltd/Anatomical Society of Great Britain and Ireland. }, } |
2011 | Journal | Matthew McCormick, Nicholas Rubert, Tomy Varghese (2011). Bayesian regularization applied to ultrasound strain imaging. IEEE Transactions on Biomedical Engineering, 58(6), pp. 1612–1620. (link) (bib) x @article{McCormick2011, year = { 2011 }, volume = { 58 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-79956345107{\&}doi=10.1109{\%}2FTBME.2011.2106500{\&}partnerID=40{\&}md5=6d53735bfa77d8fa5f9b895224795479 }, type = { Journal Article }, title = { Bayesian regularization applied to ultrasound strain imaging }, pages = { 1612--1620 }, number = { 6 }, keywords = { Bayes procedures,biomedical acoustic imaging,biomedical imaging,displacement measurement,image motion analysis,strain measurement }, journal = { IEEE Transactions on Biomedical Engineering }, issn = { 00189294 }, doi = { 10.1109/TBME.2011.2106500 }, author = { McCormick and Rubert and Varghese }, abstract = { Noise artifacts due to signal decorrelation and reverberation are a considerable problem in ultrasound strain imaging. For block-matching methods, information from neighboring matching blocks has been utilized to regularize the estimated displacements. We apply a recursive Bayesian regularization algorithm developed by Hayton et al. [Artif. Intell., vol. 114, pp. 125-156, 1999] to phase-sensitive ultrasound RF signals to improve displacement estimation. The parameter of regularization is reformulated, and its meaning examined in the context of strain imaging. Tissue-mimicking experimental phantoms and RF data incorporating finite-element models for the tissue deformation and frequency-domain ultrasound simulations are used to compute the optimal parameter with respect to nominal strain and algorithmic iterations. The optimal strain regularization parameter was found to be twice the nominal strain and did not vary significantly with algorithmic iterations. 
The technique demonstrates superior performance over median filtering in noise reduction at strains 5 and higher for all quantitative experiments performed. For example, the strain SNR was 11 dB higher than that obtained using a median filter at 7 strain. It has to be noted that for applied deformations lower than 1, since signal decorrelation errors are minimal, using this approach may degrade the displacement image. {\textcopyright} 2010 IEEE. }, } |
2011 | Journal | Christian Loyek, Nasir M. Rajpoot, Michael Khan, Tim W. Nattkemper (2011). BioIMAX: A Web 2.0 approach for easy exploratory and collaborative access to multivariate bioimage data. BMC Bioinformatics, 12, pp. 11. (link) (bib) x @article{Loyek2011a, year = { 2011 }, volume = { 12 }, url = { https://doi.org/10.1186/1471-2105-12-297 }, type = { Journal Article }, title = { BioIMAX: A Web 2.0 approach for easy exploratory and collaborative access to multivariate bioimage data }, pages = { 11 }, journal = { BMC Bioinformatics }, issn = { 14712105 }, doi = { 10.1186/1471-2105-12-297 }, author = { Loyek, Christian and Rajpoot, Nasir M. and Khan, Michael and Nattkemper, Tim W. }, abstract = { Background: Innovations in biological and biomedical imaging produce complex high-content and multivariate image data. For decision-making and generation of hypotheses, scientists need novel information technology tools that enable them to visually explore and analyze the data and to discuss and communicate results or findings with collaborating experts from various places.Results: In this paper, we present a novel Web2.0 approach, BioIMAX, for the collaborative exploration and analysis of multivariate image data by combining the webs collaboration and distribution architecture with the interface interactivity and computation power of desktop applications, recently called rich internet application.Conclusions: BioIMAX allows scientists to discuss and share data or results with collaborating experts and to visualize, annotate, and explore multivariate image data within one web-based platform from any location via a standard web browser requiring only a username and a password. BioIMAX can be accessed at http://ani.cebitec.uni-bielefeld.de/BioIMAX with the username "test" and the password "test1" for testing purposes. {\textcopyright} 2011 Loyek et al; licensee BioMed Central Ltd. }, } |
2011 | Journal | Steve G. Langer, Todd French (2011). Virtual machine performance benchmarking. Journal of Digital Imaging, 24(5), pp. 883–889. (link) (bib) x @article{Langer2011, year = { 2011 }, volume = { 24 }, url = { https://doi.org/10.1007/s10278-010-9358-6 }, type = { Journal Article }, title = { Virtual machine performance benchmarking }, pages = { 883--889 }, number = { 5 }, keywords = { Computer hardware,Computer systems,Computers in medicine }, journal = { Journal of Digital Imaging }, issn = { 08971889 }, doi = { 10.1007/s10278-010-9358-6 }, author = { Langer, Steve G. and French, Todd }, abstract = { The attractions of virtual computing are many: reduced costs, reduced resources and simplified maintenance. Any one of these would be compelling for a medical imaging professional attempting to support a complex practice on limited resources in an era of ever tightened reimbursement. In particular, the ability to run multiple operating systems optimized for different tasks (computational image processing on Linux versus office tasks on Microsoft operating systems) on a single physical machine is compelling. However, there are also potential drawbacks. High performance requirements need to be carefully considered if they are to be executed in an environment where the running software has to execute through multiple layers of device drivers before reaching the real disk or network interface. Our lab has attempted to gain insight into the impact of virtualization on performance by benchmarking the following metrics on both physical and virtual platforms: local memory and disk bandwidth, network bandwidth, and integer and floating point performance. The virtual performance metrics are compared to baseline performance on "bare metal." The results are complex, and indeed somewhat surprising. {\textcopyright} Society for Imaging Informatics in Medicine 2010. }, } |
2011 | Journal | Thomas M. Hsieh, Yi Min Liu, Chun Chih Liao, Furen Xiao, I. Jen Chiang, Jau Min Wong (2011). Automatic segmentation of meningioma from non-contrasted brain MRI integrating fuzzy clustering and region growing. BMC Medical Informatics and Decision Making, 11(1), pp. 12. (link) (bib) x @article{Hsieh2011, year = { 2011 }, volume = { 11 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Automatic segmentation of meningioma from non-contrasted brain MRI integrating fuzzy clustering and region growing }, pages = { 12 }, number = { 1 }, journal = { BMC Medical Informatics and Decision Making }, issn = { 14726947 }, doi = { 10.1186/1472-6947-11-54 }, author = { Hsieh and Liu and Liao and Xiao and Chiang and Wong }, abstract = { Background: In recent years, magnetic resonance imaging (MRI) has become important in brain tumor diagnosis. Using this modality, physicians can locate specific pathologies by analyzing differences in tissue character presented in different types of MR images. This paper uses an algorithm integrating fuzzy-c-mean (FCM) and region growing techniques for automated tumor image segmentation from patients with menigioma. Only non-contrasted T1 and T2 -weighted MR images are included in the analysis. The study's aims are to correctly locate tumors in the images, and to detect those situated in the midline position of the brain. Methods. The study used non-contrasted T1- and T2-weighted MR images from 29 patients with menigioma. After FCM clustering, 32 groups of images from each patient group were put through the region-growing procedure for pixels aggregation. Later, using knowledge-based information, the system selected tumor-containing images from these groups and merged them into one tumor image. An alternative semi-supervised method was added at this stage for comparison with the automatic method. Finally, the tumor image was optimized by a morphology operator. 
Results from automatic segmentation were compared to the "ground truth" (GT) on a pixel level. Overall data were then evaluated using a quantified system. Results: The quantified parameters, including the "percent match" (PM) and "correlation ratio" (CR), suggested a high match between GT and the present study's system, as well as a fair level of correspondence. The results were compatible with those from other related studies. The system successfully detected all of the tumors situated at the midline of brain. Six cases failed in the automatic group. One also failed in the semi-supervised alternative. The remaining five cases presented noticeable edema inside the brain. In the 23 successful cases, the PM and CR values in the two groups were highly related. Conclusions: Results indicated that, even when using only two sets of non-contrasted MR images, the system is a reliable and efficient method of brain-tumor detection. With further development the system demonstrates high potential for practical clinical use. {\textcopyright} 2011 Hsieh et al; licensee BioMed Central Ltd. }, } |
2011 | Journal | Anders Garpebring, Ronnie Wirestam, Jun Yu, Thomas Asklund, Mikael Karlsson (2011). Phase-based arterial input functions in humans applied to dynamic contrast-enhanced MRI: Potential usefulness and limitations. Magnetic Resonance Materials in Physics, Biology and Medicine, 24(4), pp. 233–245. (link) (bib) x @article{Garpebring2011, year = { 2011 }, volume = { 24 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Phase-based arterial input functions in humans applied to dynamic contrast-enhanced MRI: Potential usefulness and limitations }, pages = { 233--245 }, number = { 4 }, keywords = { Arterial input function,Dynamic contrast-enhanced MRI,Phase quantification }, journal = { Magnetic Resonance Materials in Physics, Biology and Medicine }, issn = { 09685243 }, doi = { 10.1007/s10334-011-0257-8 }, author = { Garpebring and Wirestam and Yu and Asklund and Karlsson }, abstract = { Object: Phase-based arterial input functions (AIFs) provide a promising alternative to standard magnitude-based AIFs, for example, because inflow effects are avoided. The usefulness of phase-based AIFs in clinical dynamic contrast-enhanced MRI (DCE-MRI) was investigated, and relevant pitfalls and sources of uncertainty were identified. Materials and methods: AIFs were registered from eight human subjects on, in total, 21 occasions. AIF quality was evaluated by comparing AIFs from right and left internal carotid arteries and by assessing the reliability of blood plasma volume estimates. Results: Phase-based AIFs yielded an average bolus peak of 3.9 mM and a residual concentration of 0.37 mM after 3 min, (0.033 mmol/kg contrast agent injection). The average blood plasma volume was 2.7{\%} when using the AIF peak in the estimation, but was significantly different (p {\textless} 0.0001) and less physiologically reasonable when based on the AIF tail concentration. 
Motion-induced phase shifts and accumulation of contrast agent in background tissue regions were identified as main sources of uncertainty. Conclusion: Phase-based AIFs are a feasible alternative to magnitude AIFs, but sources of errors exist, making quantification difficult, especially of the AIF tail. Improvement of the technique is feasible and also required for the phase-based AIF approach to reach its full potential. {\textcopyright} 2011 ESMRMB. }, } |
2011 | Journal | M. Esposito, P. Bosco, L. Rei, M. Aiello (2011). Volumetric analysis on MRI and PET images for the early diagnosis of Alzheimer's disease. Nuovo Cimento della Societa Italiana di Fisica C, 34(1), pp. 175–185. (link) (bib) x @article{Esposito2011, year = { 2011 }, volume = { 34 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84870415031{\&}doi=10.1393{\%}2Fncc{\%}2Fi2011-10807-0{\&}partnerID=40{\&}md5=56add27b1e0232de26da65f2420fcbc5 }, type = { Journal Article }, title = { Volumetric analysis on MRI and PET images for the early diagnosis of Alzheimer's disease }, pages = { 175--185 }, number = { 1 }, journal = { Nuovo Cimento della Societa Italiana di Fisica C }, issn = { 18269885 }, doi = { 10.1393/ncc/i2011-10807-0 }, author = { Esposito, M. and Bosco, P. and Rei, L. and Aiello, M. }, abstract = { In this paper we present the development of a software for the extraction of the hippocampus and surrounding medial-temporal-lobe (MTL) regions from T1-weighted magnetic resonance (MR) and from Positron Emission Tomography (PET) images with no interactive input from the user. With this software we introduce a novel statistical index computed on the intensities in the automatically extracted MTL regions. This index is a measure of gray-matter (GM) atrophy and allows to: distinguish between (a) patients with Alzheimer's disease (AD), patients with amnestic mild cognitive impairment (aMCI), (b) patients with amnestic mild cognitive impairment who will later develop AD in a time frame of 2 years (aMCIconv), and (c) a set of age-matched elderly controls. Once refined, this method could be used to infer about the clinical outcome of aMCI patients. PACS 87.57.nj-Registration. PACS 87.57.nm-Segmentation. PACS 87.57.R-Computer-aided diagnosis. PACS 87.61.Tg-Clinical applications. {\textcopyright} Societ{\`{a}} Italiana di Fisica. }, } |
2011 | Journal | Markus Conzelmann, Sarah Lena Offenburger, Albina Asadulina, Timea Keller, Thomas A. Münch, Gáspár Jékely (2011). Neuropeptides regulate swimming depth of Platynereis larvae. Proceedings of the National Academy of Sciences of the United States of America, 108(46), pp. E1174–E1183. (link) (bib) x @article{Conzelmann2011, year = { 2011 }, volume = { 108 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Neuropeptides regulate swimming depth of Platynereis larvae }, pages = { E1174--E1183 }, number = { 46 }, keywords = { FMRFamide-related peptides,Neural circuit,Sensory-motor neuron,Zooplankton }, journal = { Proceedings of the National Academy of Sciences of the United States of America }, issn = { 00278424 }, doi = { 10.1073/pnas.1109085108 }, author = { Conzelmann and Offenburger and Asadulina and Keller and M{\"{u}}nch and J{\'{e}}kely }, abstract = { Cilia-based locomotion is the major form of locomotion for microscopic planktonic organisms in the ocean. Given their negative buoyancy, these organisms must control ciliary activity to maintain an appropriate depth. The neuronal bases of depth regulation in ciliary swimmers are unknown. To gain insights into depth regulation we studied ciliary locomotor control in the planktonic larva of the marine annelid, Platynereis. We found several neuropeptides expressed in distinct sensory neurons that innervate locomotor cilia. Neuropeptides altered ciliary beat frequency and the rate of calcium-evoked ciliary arrests. These changes influenced larval orientation, vertical swimming, and sinking, resulting in upward or downward shifts in the steady-state vertical distribution of larvae. Our findings indicate that Platynereis larvae have depth-regulating peptidergic neurons that directly translate sensory inputs into locomotor output on effector cilia. We propose that the simple circuitry found in these ciliated larvae represents an ancestral state in nervous system evolution. }, } |
2011 | Journal | Andrea Chincarini, Paolo Bosco, Piero Calvini, Gianluca Gemme, Mario Esposito, Chiara Olivieri, Luca Rei, Sandro Squarcia, Guido Rodriguez, Roberto Bellotti, Piergiorgio Cerello, Ivan De Mitri, Alessandra Retico, Flavio Nobili (2011). Local MRI analysis approach in the diagnosis of early and prodromal Alzheimer's disease. NeuroImage, 58(2), pp. 469–480. (link) (bib) x @article{Chincarini2011, year = { 2011 }, volume = { 58 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Local MRI analysis approach in the diagnosis of early and prodromal Alzheimer's disease }, pages = { 469--480 }, number = { 2 }, keywords = { Alzheimer's disease,Hippocampus,Image analysis,MRI,Medial temporal lobe }, journal = { NeuroImage }, issn = { 10538119 }, doi = { 10.1016/j.neuroimage.2011.05.083 }, author = { Chincarini and Bosco and Calvini and Gemme and Esposito and Olivieri and Rei and Squarcia and Rodriguez and Bellotti and Cerello and {De Mitri} and Retico and Nobili }, abstract = { Background: Medial temporal lobe (MTL) atrophy is one of the key biomarkers to detect early neurodegenerative changes in the course of Alzheimer's disease (AD). There is active research aimed at identifying automated methodologies able to extract accurate classification indexes from T1-weighted magnetic resonance images (MRI). Such indexes should be fit for identifying AD patients as early as possible. Subjects: A reference group composed of 144. AD patients and 189 age-matched controls was used to train and test the procedure. It was then applied on a study group composed of 302 MCI subjects, 136 having progressed to clinically probable AD (MCI-converters) and 166 having remained stable or recovered to normal condition after a 24. month follow-up (MCI-non converters). All subjects came from the ADNI database. Methods: We sampled the brain with 7 relatively small volumes, mainly centered on the MTL, and 2 control regions. 
These volumes were filtered to give intensity and textural MRI-based features. Each filtered region was analyzed with a Random Forest (RF) classifier to extract relevant features, which were subsequently processed with a Support Vector Machine (SVM) classifier. Once a prediction model was trained and tested on the reference group, it was used to compute a classification index (CI) on the MCI cohort and to assess its accuracy in predicting AD conversion in MCI patients. The performance of the classification based on the features extracted by the whole 9 volumes is compared with that derived from each single volume. All experiments were performed using a bootstrap sampling estimation, and classifier performance was cross-validated with a 20-fold paradigm. Results: We identified a restricted set of image features correlated with the conversion to AD. It is shown that most information originate from a small subset of the total available features, and that it is enough to give a reliable assessment. We found multiple, highly localized image-based features which alone are responsible for the overall clinical diagnosis and prognosis. The classification index is able to discriminate Controls from AD with an Area Under Curve (AUC) = 0.97 (sensitivity ≃ 89{\%} at specificity ≃ 94{\%}) and Controls from MCI-converters with an AUC = 0.92 (sensitivity ≃ 89{\%} at specificity ≃ 80{\%}). MCI-converters are separated from MCI-non converters with AUC = 0.74(sensitivity ≃ 72{\%} at specificity ≃ 65{\%}). Findings: The present automated MRI-based technique revealed a strong relationship between highly localized baseline-MRI features and the baseline clinical assessment. In addition, the classification index was also used to predict the probability of AD conversion within a time frame of two years. The definition of a single index combining local analysis of several regions can be useful to detect AD neurodegeneration in a typical MCI population. {\textcopyright} 2011 Elsevier Inc. 
}, } |
2011 | Journal | Yanling Chi, Jimin Liu, Sudhakar K. Venkatesh, Su Huang, Jiayin Zhou, Qi Tian, Wieslaw L. Nowinski (2011). Segmentation of liver vasculature from contrast enhanced CT images using context-based voting. IEEE Transactions on Biomedical Engineering, 58(8), pp. 2144–2153. (link) (bib) x @article{Chi2011, year = { 2011 }, volume = { 58 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Segmentation of liver vasculature from contrast enhanced CT images using context-based voting }, pages = { 2144--2153 }, number = { 8 }, keywords = { Liver vasculature segmentation,multiple feature point voting,vessel context,vessel junction measure }, journal = { IEEE Transactions on Biomedical Engineering }, issn = { 00189294 }, doi = { 10.1109/TBME.2010.2093523 }, author = { Chi and Liu and Venkatesh and Huang and Zhou and Tian and Nowinski }, abstract = { A novel vessel context-based voting is proposed for automatic liver vasculature segmentation in CT images. It is able to conduct full vessel segmentation and recognition of multiple vasculatures effectively. The vessel context describes context information of a voxel related to vessel properties, such as intensity, saliency, direction, and connectivity. Voxels are grouped to liver vasculatures hierarchically based on vessel context. They are first grouped locally into vessel branches with the advantage of a vessel junction measurement and then grouped globally into vasculatures, which is implemented using a multiple feature point voting mechanism. The proposed method has been evaluated on ten clinical CT datasets. Segmentation of third-order vessel trees from CT images (0.76×0.76 2.0 mm) of the portal venous phase takes less than 3 min on a PC with 2.0 GHz dual core processor and the average segmentation accuracy is up to 98. {\textcopyright} 2011 IEEE. }, } |
2011 | Journal | Ma Consuelo Bastida-Jumilla, Jorge Larrey-Ruiz, Rafael Verd\'u-Monedero, Juan Morales-Sánchez, José Luis Sancho-G\'omez (2011). DRR and portal image registration for automatic patient positioning in radiotherapy treatment. Journal of Digital Imaging, 24(6), pp. 999–1009. (link) (bib) x @article{BastidaJumilla2011, year = { 2011 }, volume = { 24 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { DRR and portal image registration for automatic patient positioning in radiotherapy treatment }, pages = { 999--1009 }, number = { 6 }, keywords = { Biomedical image analysis,Image feature enhancement,Image registration,Radiotherapy }, journal = { Journal of Digital Imaging }, issn = { 1618727X }, doi = { 10.1007/s10278-011-9376-z }, author = { Bastida-Jumilla and Larrey-Ruiz and Verd{\'{u}}-Monedero and Morales-S{\'{a}}nchez and Sancho-G{\'{o}}mez }, abstract = { Image processing turns out to be essential in the planning and verification of radiotherapy treatments. Before applying a radiotherapy treatment, a dosimetry planning must be performed. Usually, the planning is done by means of an X-ray volumetric analysis using computerized tomography, where the area to be radiated is marked out. During the treatment phase, it is necessary to place the patient under the particle accelerator exactly as considered in the dosimetry stage. Coarse alignment is achieved using fiduciary markers placed over the patient's skin as external references. Later, fine alignment is provided by comparing a digitally reconstructed radiography (DRR) from the planning stage and a portal image captured by the accelerator in the treatment stage. The preprocessing of DRR and portal images, as well as the minimization of the non-shared information between both kinds of images, is mandatory for the correct operation of the image registration algorithm. With this purpose, mathematical morphology and image processing techniques have been used. 
The present work describes a fully automatic method to calculate more accurately the necessary displacement of the couch to place the patient exactly at the planned position. The proposed method to achieve the correct positioning of the patient is based on advanced image registration techniques. Preliminary results show a perfect match with the displacement estimated by the physician. {\textcopyright} Society for Imaging Informatics in Medicine 2011. }, } |
2011 | Journal | Richard Beare, Daniel Micevski, Chris Share, Luke Parkinson, Phil Ward, Wojtek Goscinski, Mike Kuiper (2011). CITK-an architecture and examples of CUDA enabled ITK filters, Release 0.00. The Insight Journal, NA pp. NA (bib) x @article{beare2011citk, year = { 2011 }, title = { {CITK}-an architecture and examples of {CUDA} enabled {ITK} filters, Release 0.00 }, journal = { The Insight Journal }, author = { Beare, Richard and Micevski, Daniel and Share, Chris and Parkinson, Luke and Ward, Phil and Goscinski, Wojtek and Kuiper, Mike }, } |
2011 | In Collection | Christian Loyek, Jan Kölling, Daniel Langenkämper, Karsten Niehaus, Tim W. Nattkemper (2011). A Web2.0 strategy for the collaborative analysis of complex bioimages. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 258–269. (link) (bib) x @incollection{Loyek2011, year = { 2011 }, volume = { 7014 LNCS }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-80455129929{\&}doi=10.1007{\%}2F978-3-642-24800-9{\_}25{\&}partnerID=40{\&}md5=14d944bb2916a977fa416b20b12387e3 }, type = { Serial }, title = { A Web2.0 strategy for the collaborative analysis of complex bioimages }, pages = { 258--269 }, keywords = { Bioimage Informatics,Data Mining,Exploratory Data Analysis,High-content screening,Information Visualization,Life Science,Rich Internet Application,Semantic Annotation,Web2.0 }, issn = { 03029743 }, isbn = { 9783642247996 }, doi = { 10.1007/978-3-642-24800-9_25 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Loyek and K{\"{o}}lling and Langenk{\"{a}}mper and Niehaus and Nattkemper }, abstract = { Life science research aims at understanding the relationships in genomics, proteomics and metabolomics on all levels of biological self organization, dealing with data of increasing dimension and complexity. Bioimages represent a new data domain in this context, gaining growing attention since it closes important gaps left by the established molecular techniques. We present a new, web-based strategy that allows a new way of collaborative bioimage interpretaion through knowledge integration. 
We show, how this can be supported by combining data mining algorithms running on powerful compute servers and a next generation rich internet application (RIA) front-end offering database/project management and high-level tools for exploratory data analysis and annotation. We demonstrate our system BioIMAX using a bioimage dataset from High-Content Screening experiments to study bacterial infection in cell cultures. {\textcopyright} 2011 Springer-Verlag. }, } |
2011 | In Conf. Proceedings | A. Akbarzadeh, M. R. Ay, A. Ahmadian, N. Riahi Alam, H. Zaidi (2011). Impact of using different tissue classes on the accuracy of MR-based attenuation correction in PET-MRI. In IEEE Nuclear Science Symposium Conference Record, pp. 2524–2530, New York. (link) (bib) x @inproceedings{Akbarzadeh, year = { 2011 }, url = { {\%}3CGo to https://www.scopus.com/inward/record.uri?eid=2-s2.0-84858681408{\&}doi=10.1109{\%}2FNSSMIC.2011.6152682{\&}partnerID=40{\&}md5=cb5e411de14cb1afbdf27ca41e4b2746 }, type = { Book Section }, title = { Impact of using different tissue classes on the accuracy of MR-based attenuation correction in PET-MRI }, series = { IEEE Nuclear Science Symposium and Medical Imaging Conference }, publisher = { Ieee }, pages = { 2524--2530 }, issn = { 10957863 }, isbn = { 9781467301183 }, doi = { 10.1109/NSSMIC.2011.6152682 }, booktitle = { IEEE Nuclear Science Symposium Conference Record }, author = { Akbarzadeh and Ay and Ahmadian and {Riahi Alam} and Zaidi }, address = { New York }, abstract = { Diagnosis , staging and treatment of disease depends on the morphological and functional information obtained from multimodality molecular imaging systems. The combination of functional and morphological information is now routinely performed to overcome the limitations of each individual modality. Attenuation of photons in the object under study is one of the main limitations of quantitative PET imaging. Attenuation correction plays a pivotal role in PET imaging. However, the availability of CT data on hybrid PET/CT scanners made it possible to build an accurate attenuation map. One of the well-known methods for generation of the attenuation map on PE/MRI systems is MR-based attenuation correction (MRAC) where image segmentation is used to classify MRI into several classes corresponding to different attenuation factors. 
In this study we investigate the effect of using different numbers of classes for the generation of attenuation maps on the accuracy of attenuation correction of PET data. The study was carried out using simulations of the XCAT phantom and 10 clinical studies. For the later, CT and PET images of 10 patients were used with CT-based attenuation correction assumed as reference. MRI was classified into different classes to produce two, three and four-class attenuation maps using the ITK library. The relative error showed that the lower number of classes will increase the global error over 8{\%}. The elimination of bony structures from the attenuation map will cause a local error over 3{\%}. In clinical studies, SUV mean and SUVmax were calculated for each AC method. The results seem to indicate an underestimation of 11{\%} because of neglecting bone. {\textcopyright} 2011 IEEE. }, } |
2011 | In Conf. Proceedings | Gordon N. Stevenson, Sally L. Collins, Lawrence Impey, J. Alison Noble (2011). Surface parameterisation of the utero/placental interface using 3D power doppler ultrasound. In Proceedings - International Symposium on Biomedical Imaging, pp. 891–894, New York. (link) (bib) x @inproceedings{Stevenson2011, year = { 2011 }, url = { {\%}3CGo to https://www.scopus.com/inward/record.uri?eid=2-s2.0-80055033565{\&}doi=10.1109{\%}2FISBI.2011.5872547{\&}partnerID=40{\&}md5=906b8369dc14ae8e1bb4597aa31b1ec7 }, type = { Book Section }, title = { Surface parameterisation of the utero/placental interface using 3D power doppler ultrasound }, series = { IEEE International Symposium on Biomedical Imaging }, publisher = { Ieee }, pages = { 891--894 }, keywords = { Doppler measurements,Ultrasonography,anatomical structure,biomedical image processing,pregnancy }, issn = { 19457928 }, isbn = { 9781424441280 }, doi = { 10.1109/ISBI.2011.5872547 }, booktitle = { Proceedings - International Symposium on Biomedical Imaging }, author = { Stevenson and Collins and Impey and Noble }, address = { New York }, abstract = { Studying the surface of an internal organ such as the placenta using three-dimensional ultrasound (3D US) is difficult. Image data from the surrounding tissue makes accurate identification of the interface technically challenging. The placental/maternal interface (basal plate) is thought to be the location of significant vascular pathology causing major maternal and fetal morbidity. We propose a new method for identifying this interface which combined with parameterisation (flattening) offers a novel way to study the vasculature of the developing placenta. {\textcopyright} 2011 IEEE. }, } |
2011 | In Conf. Proceedings | Alexander Haak, Marijn Van Stralen, Gerard Van Burken, Stefan Klein, Josien P.W. Pluim, Nico De Jong, Antonius F.W. Van Der Steen, Johannes G. Bosch (2011). Spatiotemporal interpolation by normalized convolution for 4D transesophageal echocardiography. In IEEE International Ultrasonics Symposium, IUS, pp. 152–155, New York. (link) (bib) x @inproceedings{Haaka, year = { 2011 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84869068068{\&}doi=10.1109{\%}2FULTSYM.2011.0038{\&}partnerID=40{\&}md5=e5a5718535ae51415891100024bf51a5 {\%}3CGo to }, type = { Conference Proceedings }, title = { Spatiotemporal interpolation by normalized convolution for 4D transesophageal echocardiography }, series = { IEEE International Ultrasonics Symposium }, publisher = { Ieee }, pages = { 152--155 }, issn = { 19485719 }, isbn = { 9781457712531 }, doi = { 10.1109/ULTSYM.2011.0038 }, booktitle = { IEEE International Ultrasonics Symposium, IUS }, author = { Haak and {Van Stralen} and {Van Burken} and Klein and Pluim and {De Jong} and {Van Der Steen} and Bosch }, address = { New York }, abstract = { For interventional monitoring, we aim at 4D ultrasound reconstructions of structures in the beating heart from 2D transesophageal echo images by fast scan plane rotation, unsynchronized to the heart rate. For such sparsely and irregularly sampled 2D images, a special spatiotemporal interpolation approach is desired. We have previously shown the potential of spatiotemporal interpolation by normalized convolution (NC). In this work we optimized NC for our application and compared it to nearest neighbor interpolation (NN) and to temporal binning followed by linear spatial interpolation (LTB). The test datasets consisted of 600, 1350, and 1800 2D images and were derived by slicing a 4D echocardiography data sets at random rotation angle ($\theta$, range: 0-180°) and random normalized cardiac phase ($\tau$, range: 0-1). 
A Gaussian kernel was used for NC and optimal kernel sizes ($\sigma$ $\tau$ and $\sigma$ $\theta$) were found by performing an exhaustive search. The RMS gray value error (RMSE) of the reconstructed images was computed for all interpolation methods. The estimated optimal kernels were $\sigma$ $\theta$=3.24°/ $\sigma$ $\tau$=0.048, $\sigma$ $\theta$=2.34°/$\sigma$ $\tau$=0.026, and $\sigma$ $\theta$=1.89°/$\sigma$ $\tau$=0.023 for 600, 1350, and 1800 input images, respectively. The minimum RMSE for NC was 13.8, 10.4, and 9.4 for 600, 1350, and 1800 input images, respectively. The NN/LTB reconstruction had an RMSE of 17.8/16.4, 13.9/15.1, and 12.0/14.7 for 600, 1350, and 1800 2D input images, respectively. We showed that NC outperforms NN and LTB. For a small number of input images the advantage of NC is more pronounced. {\textcopyright} 2011 IEEE. }, } |
2011 | In Conf. Proceedings | Gregor Miller, Sidney Fels, Steve Oldridge (2011). A conceptual structure for computer vision. In Proceedings - 2011 Canadian Conference on Computer and Robot Vision, CRV 2011, pp. 168–174. (link) (bib) x @inproceedings{Miller, year = { 2011 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-80051813078{\&}doi=10.1109{\%}2FCRV.2011.29{\&}partnerID=40{\&}md5=dcf66af3aaedbee7cecd406172b38797 }, type = { Conference Proceedings }, title = { A conceptual structure for computer vision }, pages = { 168--174 }, keywords = { Computer Vision,Vision Development,Vision Systems }, isbn = { 9780769543628 }, doi = { 10.1109/CRV.2011.29 }, booktitle = { Proceedings - 2011 Canadian Conference on Computer and Robot Vision, CRV 2011 }, author = { Miller and Fels and Oldridge }, abstract = { The research presented in this paper represents several novel conceptual contributions to the computer vision literature. In this position paper, our goal is to define the scope of computer vision analysis and discuss a new categorisation of the computer vision problem. We first provide a novel decomposition of computer vision into base components which we term the axioms of vision. These are used to define researcher-level and developer-level access to vision algorithms, in a way which does not require expert knowledge of computer vision. We discuss a new line of thought for computer vision by basing analyses on descriptions of the problem instead of in terms of algorithms. From this an abstraction can be developed to provide a layer above algorithmic details. This is extended to the idea of a formal description language which may be automatically interpreted thus allowing those not familiar with computer vision techniques to utilise sophisticated methods. {\textcopyright} 2011 IEEE. }, } |
2011 | In Conf. Proceedings | Petr Maule, Jana Klečková, Vladimír Rohan (2011). Automated approach for whole brain infarction core delineation: Using non-contrast and computed tomography angiography. In KDIR 2011 - Proceedings of the International Conference on Knowledge Discovery and Information Retrieval, pp. 433–437. (link) (bib) x @inproceedings{Maule, year = { 2011 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84862235383{\&}partnerID=40{\&}md5=1d80a348add3d1bf8abea3228c5457a1 }, type = { Conference Proceedings }, title = { Automated approach for whole brain infarction core delineation: Using non-contrast and computed tomography angiography }, pages = { 433--437 }, keywords = { Acute stroke,Automated infarction core segmentation,Brain ischemia,Perfusion blood volume,Volumetric maps }, isbn = { 9789898425799 }, doi = { 10.5220/0003651704330437 }, booktitle = { KDIR 2011 - Proceedings of the International Conference on Knowledge Discovery and Information Retrieval }, author = { Maule and Kle{\v{c}}kov{\'{a}} and Rohan }, abstract = { This article proposes automated approach for whole brain infarction core delineation while using only non-contrast computed tomography and computed tomography angiography. The main aim is to provide additional information measuring infarction core volume while exceeding certain level is contraindication of early recanalization. Process of generation of Perfusion Blood Volume maps is described first followed by description of process of infarction core delineation. Verification of correctness is based on comparison against follow-up examinations. Discussion and future works summarizes weaknesses of the method and steps for improvement. }, } |
2011 | In Conf. Proceedings | Martino Marisaldi, Piera MacCagnani, Francesco Moscatelli, Claudio Labanti, Fabio Fuschino, Michela Prest, Alessandro Berra, Davide Bolognini, Massimo Ghioni, Ivan Rech, Angelo Gulinatti, Andrea Giudice, Georg Simmerle, Danilo Rubini, Andrea Candelori, Serena Mattiazzo (2011). Single Photon Avalanche Diodes for space applications. In IEEE Nuclear Science Symposium Conference Record, pp. 129–134. (link) (bib) x @inproceedings{Marisaldi, year = { 2011 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84858651704{\&}doi=10.1109{\%}2FNSSMIC.2011.6154465{\&}partnerID=40{\&}md5=9e403fad320eec958f118ac47b0d92e4 }, type = { Conference Proceedings }, title = { Single Photon Avalanche Diodes for space applications }, pages = { 129--134 }, issn = { 10957863 }, isbn = { 9781467301183 }, doi = { 10.1109/NSSMIC.2011.6154465 }, booktitle = { IEEE Nuclear Science Symposium Conference Record }, author = { Marisaldi and MacCagnani and Moscatelli and Labanti and Fuschino and Prest and Berra and Bolognini and Ghioni and Rech and Gulinatti and Giudice and Simmerle and Rubini and Candelori and Mattiazzo }, abstract = { We study the possibility to use Single Photon Avalanche Diodes (SPADs) optically coupled to scintillating fibers as a novel type of gamma-ray detector for space applications. SPADs are silicon devices operating under polarization conditions above the junction breakdown voltage (typical overvoltage of 5V), for which a single photon interacting in the active region is sufficient to trigger a self sustainable avalanche discharge. SPADs can thus be used for the detection of very low light levels with an absolute timing accuracy of about 30 ps for single photon detection, without spectroscopic capabilities. 
In this presentation we report the preliminary results on large area SPAD (actual results refers to SPADs having 200 $\mu$m diameter, with the aim to grow up to 500 $\mu$m SPADs) coupled to scintillating fibers as the basic module for a particle tracker for space application. Dark counts rate as low as few tens of kHz at room temperature, lowering down to few kHz at -10°C have been obtained for the 200 $\mu$m devices, in accordance with the basic requirements for the proposed application. Similar instruments based on silicon photomultiplier (SiPM) readout have already been studied, but none based on SPAD has been realized up to now. Moreover, since very few information is available on SPADs for the use in a space environment, we performed bulk damage and total dose radiation tests with protons and gamma-rays in order to evaluate their radiation hardness properties and their suitability for application in a Low Earth Orbit (LEO) space mission. With this aim the SPAD devices have be irradiated using up to 20 krad total dose with gamma-rays and 5 krad with protons. {\textcopyright} 2011 IEEE. }, } |
2011 | In Conf. Proceedings | Florence Kremer, Tom Dresselaers, Brecht Heyde, Vesselina Ferferieva, Ellen Caluwé, Hon Fai Choi, Wouter Oosterlinck, Stefan Janssens, Uwe Himmelreich, Jan D'hooge (2011). 2D myocardial strain in the mouse through spatial compounding: In-vivo feasibility study. In IEEE International Ultrasonics Symposium, IUS, pp. 939–942. (link) (bib) x @inproceedings{Kremer, year = { 2011 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84869030007{\&}doi=10.1109{\%}2FULTSYM.2011.0230{\&}partnerID=40{\&}md5=785bfbda4e1f62661f6a74a21c318fe6 }, type = { Conference Proceedings }, title = { 2D myocardial strain in the mouse through spatial compounding: In-vivo feasibility study }, pages = { 939--942 }, issn = { 19485719 }, isbn = { 9781457712531 }, doi = { 10.1109/ULTSYM.2011.0230 }, booktitle = { IEEE International Ultrasonics Symposium, IUS }, author = { Kremer and Dresselaers and Heyde and Ferferieva and Caluw{\'{e}} and Choi and Oosterlinck and Janssens and Himmelreich and D'hooge }, abstract = { Ultrasound assessment of myocardial strain can give valuable information on regional cardiac function. Speckle tracking is often used for this purpose as it can estimate the 2D myocardial strain tensor. However, in the mouse setting, speckle tracking remains challenging due to the high heart rate and the relatively thin wall compared to the typical size of the speckles. We have previously shown using simulated data sets that spatial compounding of axial velocities obtained at 3 steering angles can outperform 2D speckle tracking for 2D strain estimation in the mouse heart. In this study, beam steering was applied at -20°, 0° and 20° on short axis views of 5 control and 6 infarct mice. The lateral motion component was reconstructed through spatial compounding and results were compared to tagged $\mu$MRI. Circumferential estimates quantified by means of ultrasound and MRI could both detect regional dysfunction. 
Between echo and MRI, a good regression coefficient was obtained for circumferential strain estimates (r = 0.69), while radial strain estimates correlated only moderately (r = 0.37). {\textcopyright} 2011 IEEE. }, } |
2011 | Technical report | Regina E Y Kim, Hans J Johnson, Norman K Williams, Eun Young Regina Kim, Williams N K Kim EYR Johnson HJ (2011). Affine Transformation for Landmark Based Registration Initializer in ITK. NA NA NA (bib) x @techreport{Kim2011, year = { 2011 }, title = { Affine Transformation for Landmark Based Registration Initializer in ITK }, pages = { 1--8 }, mendeley-tags = { Insight Journal }, keywords = { Affine Transformation for Landmark Based Registrat,Insight Journal }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Kim et al/Engineering/Kim et al. - 2011 - Affine Transformation for Landmark Based Registration Initializer in ITK.pdf:pdf }, booktitle = { Engineering }, author = { Kim and Johnson and Williams and Kim and {Kim EYR Johnson HJ} }, annote = { From Duplicate 1 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young Regina; Johnson, Hans J.; Williams, Norman K; Kim EYR Johnson HJ, Williams N K) From Duplicate 2 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young Regina; Johnson, Hans J.; Williams, Norman K) From Duplicate 3 ( Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young; Johnson, Hans J.; Williams, Norman K ) From Duplicate 3 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Regina E Y; Johnson, Hans J.; Williams, Norman K) From Duplicate 1 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young Regina; Johnson, Hans J.; Williams, Norman K; Kim EYR Johnson HJ, Williams N K) From Duplicate 2 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young Regina; Johnson, Hans J.; Williams, Norman K) From Duplicate 3 ( Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young; Johnson, Hans J.; Williams, Norman K ) }, } |
2011 | Technical report | Regina EY Y Kim, Hans J. Johnson, Norman K Williams, Eun Young Regina Kim, Williams N K Kim EYR Johnson HJ (2011). Affine Transformation for Landmark Based Registration Initializer in ITK. NA NA NA (bib) x @techreport{Kim2011, year = { 2011 }, title = { Affine Transformation for Landmark Based Registration Initializer in ITK }, pages = { 1--8 }, mendeley-tags = { Insight Journal }, keywords = { Affine Transformation for Landmark Based Registrat,Insight Journal }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Kim et al/Engineering/Kim et al. - 2011 - Affine Transformation for Landmark Based Registration Initializer in ITK.pdf:pdf }, booktitle = { Engineering }, author = { Kim and Johnson and Williams and Kim and {Kim EYR Johnson HJ} }, annote = { From Duplicate 1 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Regina EY Y; Johnson, Hans J.; Williams, Norman K; Kim, Eun Young Regina; Kim EYR Johnson HJ, Williams N K) From Duplicate 1 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Regina E Y; Johnson, Hans J; Williams, Norman K; Kim, Eun Young Regina; Kim EYR Johnson HJ, Williams N K) From Duplicate 1 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young Regina; Johnson, Hans J.; Williams, Norman K; Kim EYR Johnson HJ, Williams N K) From Duplicate 2 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young Regina; Johnson, Hans J.; Williams, Norman K) From Duplicate 3 ( Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young; Johnson, Hans J.; Williams, Norman K ) From Duplicate 3 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Regina E Y; Johnson, Hans J.; Williams, Norman K) From Duplicate 1 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young Regina; Johnson, Hans J.; Williams, Norman K; Kim EYR 
Johnson HJ, Williams N K) From Duplicate 2 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young Regina; Johnson, Hans J.; Williams, Norman K) From Duplicate 3 ( Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young; Johnson, Hans J.; Williams, Norman K ) From Duplicate 2 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Regina EY; Johnson, Hans J.; Williams, Norman K) From Duplicate 1 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young Regina; Johnson, Hans J.; Williams, Norman K; Kim EYR Johnson HJ, Williams N K) From Duplicate 2 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young Regina; Johnson, Hans J.; Williams, Norman K) From Duplicate 3 ( Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young; Johnson, Hans J.; Williams, Norman K ) From Duplicate 3 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Regina E Y; Johnson, Hans J.; Williams, Norman K) From Duplicate 1 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young Regina; Johnson, Hans J.; Williams, Norman K; Kim EYR Johnson HJ, Williams N K) From Duplicate 2 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young Regina; Johnson, Hans J.; Williams, Norman K) From Duplicate 3 ( Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young; Johnson, Hans J.; Williams, Norman K ) From Duplicate 2 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Regina E Y; Johnson, Hans J; Williams, Norman K; Kim, Eun Young Regina; Kim EYR Johnson HJ, Williams N K) From Duplicate 1 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young Regina; Johnson, Hans J.; Williams, Norman K; Kim EYR Johnson HJ, Williams N K) 
From Duplicate 2 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young Regina; Johnson, Hans J.; Williams, Norman K) From Duplicate 3 ( Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young; Johnson, Hans J.; Williams, Norman K ) From Duplicate 3 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Regina E Y; Johnson, Hans J.; Williams, Norman K) From Duplicate 1 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young Regina; Johnson, Hans J.; Williams, Norman K; Kim EYR Johnson HJ, Williams N K) From Duplicate 2 (Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young Regina; Johnson, Hans J.; Williams, Norman K) From Duplicate 3 ( Affine Transformation for Landmark Based Registration Initializer in ITK - Kim, Eun Young; Johnson, Hans J.; Williams, Norman K ) }, abstract = { This document describes an affine transformation algorithm as an additional feature for landmark based registration in ITK www.itk.org. The algorithm is based on the paper by Sp{\"{a}}th, H [2]. The author derives a set of linear equations from paired landmarks and generates an affine transform from them. The method implemented here gives more freedom in the choice of registration and/or initialization method in ITK. The submission describes ITK implementation of the algorithm. }, } |
2010 | Book chapter | Jens Rittscher (2010). NA in Characterization of Biological Processes through Automated Image Analysis, Edited by M L Yarmush, J S Duncan, M L Gray, Annual Reviews, pp. 315–344, Annual Review of Biomedical Engineering, Vol. 12, ISBN: 1523-9829. (link) (bib) x @inbook{Rittscher2010, year = { 2010 }, volume = { 12 }, url = { https://doi.org/10.1146/annurev-bioeng-070909-105235 }, type = { Book Section }, title = { Characterization of Biological Processes through Automated Image Analysis }, series = { Annual Review of Biomedical Engineering }, publisher = { Annual Reviews }, pages = { 315--344 }, number = { 1 }, issn = { 1523-9829 }, isbn = { 978-0-8243-3512-0 }, editor = { Yarmush, M. L. and Duncan, J. S. and Gray, M. L. }, doi = { 10.1146/annurev-bioeng-070909-105235 }, booktitle = { Annual Review of Biomedical Engineering }, author = { Rittscher }, address = { Palo Alto }, abstract = { The systems-level analysis of complex biological processes requires methods that enable the quantification of a broad range of phenotypical alterations, the precise localization of signaling events, and the ability to correlate such signaling events in the context of the spatial organization of the biological specimen. The goal of this review is to illustrate that, when combined with modern imaging platforms and labeling techniques, automated image analysis methods can provide such quantitative information. The article attempts to review necessary image analysis techniques as well as applications that utilize these techniques to provide the data that will enable systems-level biology. The text includes a review of image registration and image segmentation methods, as well as algorithms that enable the analysis of cellular architecture, cell morphology, and tissue organization. Various methods that enable the analysis of dynamic events are also presented. }, } |
2010 | Book chapter | P. Fallavollita, Z. KarimAghaloo, E. C. Burdette, D. Y. Song, P. Abolmaesumi, G. Fichtinger (2010). NA in Localization of brachytherapy seeds in ultrasound by registration to fluoroscopy, Edited by K H Wong, M I Miga, Spie-Int Soc Optical Engineering, pp. 762519, Proceedings of SPIE, Vol. 7625, ISBN: 16057422. (link) (bib) x @inbook{Fallavollita2010b, year = { 2010 }, volume = { 7625 }, url = { https://doi.org/10.1117/12.844015 }, type = { Book Section }, title = { Localization of brachytherapy seeds in ultrasound by registration to fluoroscopy }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 762519 }, issn = { 16057422 }, isbn = { 9780819480262 }, editor = { Wong, K. H. and Miga, M. I. }, doi = { 10.1117/12.844015 }, booktitle = { Medical Imaging 2010: Visualization, Image-Guided Procedures, and Modeling }, author = { Fallavollita and KarimAghaloo and Burdette and Song and Abolmaesumi and Fichtinger }, address = { Bellingham }, } |
2010 | Book chapter | Lejla Alic, Joost C. Haeck, Stefan Klein, Karin Bol, Sandra T. van Tiel, Piotr A. Wielopolski, Magda Bijster, Wiro J. Niessen, Monique Bernsen, Jifke F. Veenland, Marion de Jong (2010). NA in Multi-modal image registration: matching MRI with histology, Edited by R C Molthen, J B Weaver, Spie-Int Soc Optical Engineering, pp. 762603, Proceedings of SPIE, Vol. 7626, ISBN: 16057422. (link) (bib) x @inbook{Alic2010, year = { 2010 }, volume = { 7626 }, url = { {\%}3CGo to }, type = { Book Section }, title = { Multi-modal image registration: matching MRI with histology }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 762603 }, issn = { 16057422 }, isbn = { 9780819480279 }, editor = { [object Object],[object Object] }, doi = { 10.1117/12.844123 }, booktitle = { Medical Imaging 2010: Biomedical Applications in Molecular, Structural, and Functional Imaging }, author = { Alic and Haeck and Klein and Bol and Tiel and Wielopolski and Bijster and Niessen and Bernsen and Veenland and Jong }, address = { Bellingham }, abstract = { Spatial correspondence between histology and multi sequence MRI can provide information about the capabilities of non-invasive imaging to characterize cancerous tissue. However, shrinkage and deformation occurring during the excision of the tumor and the histological processing complicate the co registration of MR images with histological sections. This work proposes a methodology to establish a detailed 3D relation between histology sections and in vivo MRI tumor data. The key features of the methodology are a very dense histological sampling (up to 100 histology slices per tumor), mutual information based non-rigid B-spline registration, the utilization of the whole 3D data sets, and the exploitation of an intermediate ex vivo MRI. In this proof of concept paper, the methodology was applied to one tumor. 
We found that, after registration, the visual alignment of tumor borders and internal structures was fairly accurate. Utilizing the intermediate ex vivo MRI, it was possible to account for changes caused by the excision of the tumor: we observed a tumor expansion of 20{\%}. Also the effects of fixation, dehydration and histological sectioning could be determined: 26{\%} shrinkage of the tumor was found. The annotation of viable tissue, performed in histology and transformed to the in vivo MRI, matched clearly with high intensity regions in MRI. With this methodology, histological annotation can be directly related to the corresponding in vivo MRI. This is a vital step for the evaluation of the feasibility of multi-spectral MRI to depict histological ground-truth. {\textcopyright} 2010 Copyright SPIE - The International Society for Optical Engineering. }, } |
2010 | Book chapter | I. Macía, M. Graña, C. Paloc (2010). NA in Towards a proposal for a vessel knowledge representation model, Edited by R Setchi, I Jordanov, R J Howlett, L C Jain, Springer-Verlag Berlin, pp. 80–87, Lecture Notes in Artificial Intelligence, Vol. 6279 LNAI, ISBN: 03029743. (link) (bib) x @inbook{Macia2010, year = { 2010 }, volume = { 6279 LNAI }, url = { https://doi.org/10.1007/978-3-642-15384-6_9 }, type = { Book Section }, title = { Towards a proposal for a vessel knowledge representation model }, series = { Lecture Notes in Artificial Intelligence }, publisher = { Springer-Verlag Berlin }, pages = { 80--87 }, number = { PART 4 }, issn = { 03029743 }, isbn = { 3642153836 }, editor = { Setchi, R. and Jordanov, I. and Howlett, R. J. and Jain, L. C. }, doi = { 10.1007/978-3-642-15384-6_9 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Mac{\'{i}}a and Gra{\~{n}}a and Paloc }, address = { Berlin }, abstract = { We propose the development of a knowledge representation model in the area of Blood Vessel analysis, whose need we feel for the future development of the field and for our own research efforts. It will allow easy reuse of software pieces through appropriate abstractions, facilitating the development of innovative methods, procedures and applications. In this paper we present some key ideas that will be fully developed elsewhere. {\textcopyright} Springer-Verlag 2010. }, } |
2010 | Journal | J Hawley, H Johnson, J Dowling, M Malaterre, P B Greer, O Salvado (2010). Introduction to ITK resample in-place image filter. Insight Journal, NA pp. NA (bib) x @article{hawley2010introduction, year = { 2010 }, title = { Introduction to ITK resample in-place image filter }, journal = { Insight Journal }, author = { Hawley and Johnson and Dowling and Malaterre and Greer and Salvado }, } |
2010 | Journal | Wei Lu, Hans Johnson, J Hawley, J Dowling, M Malaterre, P B Greer, O Salvado (2010). Introduction to ITK resample in-place image filter. Insight Journal, NA pp. 3–6. (link) (bib) x @article{lu2010introduction, year = { 2010 }, url = { http://ir.uiowa.edu/etd/851/ }, title = { Introduction to ITK resample in-place image filter }, pages = { 3--6 }, journal = { Insight Journal }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Lu et al/Insight Journal/Lu et al. - 2010 - Introduction to ITK resample in-place image filter.pdf:pdf }, author = { Lu and Johnson and Hawley and Dowling and Malaterre and Greer and Salvado }, } |
2010 | Journal | Gert Wollny, Maria J. Ledesma-Carbayo, Peter Kellman, Andres Santos (2010). Exploiting quasiperiodicity in motion correction of free-breathing myocardial perfusion MRI. IEEE Transactions on Medical Imaging, 29(8), pp. 1516–1527. (link) (bib) x @article{Wollny2010, year = { 2010 }, volume = { 29 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Exploiting quasiperiodicity in motion correction of free-breathing myocardial perfusion MRI }, pages = { 1516--1527 }, number = { 8 }, keywords = { Heart,image registration,myocardial perfusion }, journal = { IEEE Transactions on Medical Imaging }, issn = { 02780062 }, doi = { 10.1109/TMI.2010.2049270 }, author = { Wollny and Ledesma-Carbayo and Kellman and Santos }, abstract = { Free-breathing image acquisition is desirable in first-pass gadolinium-enhanced magnetic resonance imaging (MRI), but the breathing movements hinder the direct automatic analysis of the myocardial perfusion and qualitative readout by visual tracking. Nonrigid registration can be used to compensate for these movements but needs to deal with local contrast and intensity changes with time. We propose an automatic registration scheme that exploits the quasiperiodicity of free breathing to decouple movement from intensity change. First, we identify and register a subset of the images corresponding to the same phase of the breathing cycle. This registration step deals with small differences caused by movement but maintains the full range of intensity change. The remaining images are then registered to synthetic references that are created as a linear combination of images belonging to the already registered subset. Because of the quasiperiodic respiratory movement, the subset images are distributed evenly over time and, therefore, the synthetic references exhibit intensities similar to their corresponding unregistered images. Thus, this second registration step needs to account only for the movement. 
Validation experiments were performed on data obtained from six patients, three slices per patient, and the automatically obtained perfusion profiles were compared with profiles obtained by manually segmenting the myocardium. The results show that our automatic approach is well suited to compensate for the free-breathing movement and that it achieves a significant improvement in the average Pearson correlation coefficient between manually and automatically obtained perfusion profiles before (0.87±0.18) and after (0.96±0.09) registration. {\textcopyright} 2010 IEEE. }, } |
2010 | Journal | Christian von Falck, Simone Meier, Steffen Jördens, Benjamin King, Michael Galanski, Hoen oh Shin (2010). Semiautomated Segmentation of Pleural Effusions in MDCT Datasets. Academic Radiology, 17(7), pp. 841–848. (link) (bib) x @article{Falck2010, year = { 2010 }, volume = { 17 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-77952884852{\&}doi=10.1016{\%}2Fj.acra.2010.02.011{\&}partnerID=40{\&}md5=af8867fcfe3518e57c7b6076890f3032 }, type = { Journal Article }, title = { Semiautomated Segmentation of Pleural Effusions in MDCT Datasets }, pages = { 841--848 }, number = { 7 }, keywords = { MDCT,Pleural effusion,diaphragm,segmentation,volumetry }, journal = { Academic Radiology }, issn = { 10766332 }, doi = { 10.1016/j.acra.2010.02.011 }, author = { Falck and Meier and J{\"{o}}rdens and King and Galanski and Shin }, abstract = { Rationale and Objectives: To develop and evaluate a novel algorithm for semiautomated segmentation and volumetry of pleural effusions in multidetector computed tomography (MDCT) datasets. Materials and Methods: A seven-step algorithm for semiautomated segmentation of pleural effusions in MDCT datasets was developed, mainly using algorithms from the ITK image processing library. Semiautomated segmentation of pleural effusions was performed in 40 MDCT datasets of the chest (males = 22, females = 18, mean age: 56.7 ± 19.3 years). The accuracy of the semiautomated segmentation as compared with a manual segmentation approach was quantified based on the differences of the segmented volumes, the degree of over-/undersegmentation, and the Hausdorff distance. The time needed for the semiautomated and the manual segmentation process were recorded and compared. Results: The mean volume of the pleural effusions was 557.30 mL (± 477.27 mL) for the semiautomated and 553.19 (± 473.49 mL) for the manual segmentation. The difference was not statistically significant (Student t-test, P = .133). 
Regression analysis confirmed a strong relationship between the semiautomated algorithm and the gold standard (r2 = 0.998). Mean overlap of the segmented areas was 79{\%} (± 9.3{\%}) over all datasets with moderate oversegmentation (22{\%} ± 9.3{\%}) and undersegmentation (21{\%} ± 9.7{\%}). The mean Hausdorff distance was 17.2 mm (± 8.35 mm). The mean duration of the semiautomated segmentation process with user interaction was 8.4 minutes (± 2.6 minutes) as compared to 32.9 minutes (± 17.4 minutes) for manual segmentation. Conclusion: The semiautomated algorithm for segmentation and volumetry of pleural effusions in MDCT datasets shows a high diagnostic accuracy when compared with manual segmentation. {\textcopyright} 2010 AUR. }, } |
2010 | Journal | Siddharth Vikal, Paweena U-Thainual, John A. Carrino, Iulian Iordachita, Gregory S. Fischer, Gabor Fichtinger (2010). Perk Station-Percutaneous surgery training and performance measurement platform. Computerized Medical Imaging and Graphics, 34(1), pp. 19–32. (link) (bib) x @article{Vikal2010, year = { 2010 }, volume = { 34 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Perk Station-Percutaneous surgery training and performance measurement platform }, pages = { 19--32 }, number = { 1 }, keywords = { Augmented reality,Image guidance,Needle placement,Surgical training }, journal = { Computerized Medical Imaging and Graphics }, issn = { 08956111 }, doi = { 10.1016/j.compmedimag.2009.05.001 }, author = { Vikal and U-Thainual and Carrino and Iordachita and Fischer and Fichtinger }, abstract = { Motivation: Image-guided percutaneous (through the skin) needle-based surgery has become part of routine clinical practice in performing procedures such as biopsies, injections and therapeutic implants. A novice physician typically performs needle interventions under the supervision of a senior physician; a slow and inherently subjective training process that lacks objective, quantitative assessment of the surgical skill and performance. Shortening the learning curve and increasing procedural consistency are important factors in assuring high-quality medical care. Methods: This paper describes a laboratory validation system, called Perk Station, for standardized training and performance measurement under different assistance techniques for needle-based surgical guidance systems. The initial goal of the Perk Station is to assess and compare different techniques: 2D image overlay, biplane laser guide, laser protractor and conventional freehand. 
The main focus of this manuscript is the planning and guidance software system developed on the 3D Slicer platform, a free, open source software package designed for visualization and analysis of medical image data. Results: The prototype Perk Station has been successfully developed, the associated needle insertion phantoms were built, and the graphical user interface was fully implemented. The system was inaugurated in undergraduate teaching and a wide array of outreach activities. Initial results, experiences, ongoing activities and future plans are reported. {\textcopyright} 2009 Elsevier Ltd. All rights reserved. }, } |
2010 | Journal | Christian H. Riedel, Ulf Jensen, Axel Rohr, Marc Tietke, Karsten Alfke, Stephan Ulmer, Olav Jansen (2010). Assessment of thrombus in acute middle cerebral artery occlusion using thin-slice nonenhanced computed tomography reconstructions. Stroke, 41(8), pp. 1659–1664. (link) (bib) x @article{Riedel2010, year = { 2010 }, volume = { 41 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Assessment of thrombus in acute middle cerebral artery occlusion using thin-slice nonenhanced computed tomography reconstructions }, pages = { 1659--1664 }, number = { 8 }, keywords = { CT,acute care,acute stroke,embolic stroke,embolism,imaging,neuroradiology,stroke care,stroke management,thrombolysis }, journal = { Stroke }, issn = { 00392499 }, doi = { 10.1161/STROKEAHA.110.580662 }, author = { Riedel and Jensen and Rohr and Tietke and Alfke and Ulmer and Jansen }, abstract = { Background and Purpose-: We sought to evaluate how accurately length and volume of thrombotic clots occluding cerebral arteries of patients with acute ischemic stroke can be assessed from nonenhanced CT (NECT) scans reconstructed with different slice widths. Methods-: NECT image data of 58 patients with acute ischemic stroke with vascular occlusion proven by CT angiography were reconstructed with slice widths of 1.25 mm, 2.5 mm, 3.75 mm, and 5 mm. Thrombus lengths and volumes were quantified based on these NECT images by detecting and segmenting intra-arterial hyperdensities. The results were compared with reference values of thrombus length and volume obtained from CT angiography images using Bland-Altman analysis and predefined levels or tolerance to find NECT slice thicknesses that allow for sufficiently accurate thrombus quantification. Results-: Thrombus length can be measured with high accuracy using the hyperdense middle cerebral artery sign detected in NECT images with slice thicknesses of 1.25 mm and 2.5 mm. 
We found mean deviations from the reference values and limits of agreement of -0.1 mm±0.6 mm with slice widths of 1.25 mm and 0.1 mm±0.7 mm for slice widths of 2.5 mm. Thrombus length measurements in NECT images with higher slice width and all evaluated thrombus volume measurements exhibited severe dependence on the level and did not match the accuracy criteria. Conclusion-: The length of the hyperdense middle cerebral artery sign as detected on thin-slice NECT reconstructions in patients with acute ischemic stroke can be used to quantify thrombotic burden accurately. Thus, it might qualify as a new diagnostic parameter in acute stroke management that indicates and quantifies the extent of vascular obliteration. {\textcopyright} 2010 American Heart Association, Inc. }, } |
2010 | Journal | Patrik Raudaschl, Karl Fritscher, Tobias Roth, Christian Kammerlander, Rainer Schubert (2010). Analysis of the micro-migration of sliding hip screws by using point-based registration. International Journal of Computer Assisted Radiology and Surgery, 5(5), pp. 455–460. (link) (bib) x @article{Raudaschl2010, year = { 2010 }, volume = { 5 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Analysis of the micro-migration of sliding hip screws by using point-based registration }, pages = { 455--460 }, number = { 5 }, keywords = { Cut-out,Hip fractures,Micro-migration,Sliding hip screw }, journal = { International Journal of Computer Assisted Radiology and Surgery }, issn = { 18616429 }, doi = { 10.1007/s11548-010-0498-4 }, author = { Raudaschl and Fritscher and Roth and Kammerlander and Schubert }, abstract = { Purpose The favored treatment for many hip fractures is a sliding hip screw, and its usage is expected to increase in the future. Failures can be reduced, and complications detected earlier by semi-automated CT image analysis. The most frequent failure is due to the screw cut-out from the femoral head. Methods An image-based method was developed for early detection of complications and assessment of anchorage quality relative to implant model, bone quality or tip-apex distance (TAD). This method evaluates micro-migration using CT images acquired at different time points (immediately post-op and 3-month later). Serial CT image registration and transformation methods were applied, including point-based registration, to achieve semi-automated evaluations. Results Qualitative and quantitative validation of the image registration was performed with measurement mean error determination by different observers. The micro-migration evaluation by clinicians compared favorably with semiautomated image-based results. Conclusion Semi-automatic evaluation of hip screw micromigration using CT images is feasible and can aid observation of convalescence. The 
method may be amenable to full automation, a future goal for this work. {\textcopyright} CARS 2010. }, } |
2010 | Journal | Blake C. Lucas, John A. Bogovic, Aaron Carass, Pierre Louis Bazin, Jerry L. Prince, Dzung L. Pham, Bennett A. Landman (2010). The Java Image Science Toolkit (JIST) for rapid prototyping and publishing of neuroimaging software. Neuroinformatics, 8(1), pp. 5–17. (link) (bib) x @article{Lucas2010, year = { 2010 }, volume = { 8 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { The Java Image Science Toolkit (JIST) for rapid prototyping and publishing of neuroimaging software }, pages = { 5--17 }, number = { 1 }, keywords = { Image processing,MRI,Parallel processing,Pipeline,Rapid prototyping }, journal = { Neuroinformatics }, issn = { 15392791 }, doi = { 10.1007/s12021-009-9061-2 }, author = { Lucas and Bogovic and Carass and Bazin and Prince and Pham and Landman }, abstract = { Non-invasive neuroimaging techniques enable extraordinarily sensitive and specific in vivo study of the structure, functional response and connectivity of biological mechanisms. With these advanced methods comes a heavy reliance on computer-based processing, analysis and interpretation. While the neuroimaging community has produced many excellent academic and commercial tool packages, new tools are often required to interpret new modalities and paradigms. Developing custom tools and ensuring interoperability with existing tools is a significant hurdle. To address these limitations, we present a new framework for algorithm development, that implicitly ensures tool interoperability, generates graphical user interfaces, provides advanced batch processing tools, and, most importantly, requires minimal additional programming or computational overhead. Javabased rapid prototyping with this system is an efficient and practical approach to evaluate new algorithms since the proposed system ensures that rapidly constructed prototypes are actually fully-functional processing modules with support for multiple GUI's, a broad range of file formats, and distributed computation. 
Herein, we demonstrate MRI image processing with the proposed system for cortical surface extraction in large cross-sectional cohorts, provide a system for fully automated diffusion tensor image analysis, and illustrate how the system can be used as a simulation framework for the development of a new image analysis method. The system is released as open source under the Lesser GNU Public License (LGPL) through the Neuroimaging Informatics Tools and Resources Clearinghouse (NITRC). {\textcopyright} Springer Science+Business Media, LLC 2010. }, } |
2010 | Journal | P. Fallavollita, Z. Karim Aghaloo, E. C. Burdette, D. Y. Song, P. Abolmaesumi, G. Fichtinger (2010). Registration between ultrasound and fluoroscopy or CT in prostate brachytherapy. Medical Physics, 37(6), pp. 2749–2760. (link) (bib) x @article{Fallavollita2010a, year = { 2010 }, volume = { 37 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Registration between ultrasound and fluoroscopy or CT in prostate brachytherapy }, pages = { 2749--2760 }, number = { 6 }, keywords = { Fluoroscopy,Prostate brachytherapy,Registration,Ultrasound }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.3416937 }, author = { Fallavollita and {Karim Aghaloo} and Burdette and Song and Abolmaesumi and Fichtinger }, abstract = { Purpose: In prostate brachytherapy, transrectal ultrasound (TRUS) is used to visualize the anatomy, while implanted seeds can be visualized by fluoroscopy. Intraoperative dosimetry optimization is possible using a combination of TRUS and fluoroscopy, but requires localization of the fluoroscopy-derived seed cloud, relative to the anatomy as seen on TRUS. The authors propose to develop a method of registration of TRUS images and the implants reconstructed from fluoroscopy. Methods: A phantom was implanted with 48 seeds then imaged with TRUS and CT. Seeds were reconstructed from CT yielding a cloud of seeds. Fiducial-based ground-truth registration was established between the TRUS and CT. TRUS images are filtered, compounded, and registered to the reconstructed implants by using an intensity-based metric. The authors evaluated a volume-to-volume and point-to-volume registration scheme. In total, seven TRUS filtering techniques and three image similarity metrics were analyzed. The method was also tested on human subject data captured from a brachytherapy procedure. 
Results: For volume-to-volume registration, noise reduction filter and normalized correlation metrics yielded the best result: An average of 0.54±0.11 mm seed localization error relative to ground truth. For point-to-volume registration, noise reduction combined with beam profile filter and mean squares metrics yielded the best result: An average of 0.38±0.19 mm seed localization error relative to the ground truth. In human patient data, C-arm fluoroscopy images showed 81 radioactive seeds implanted inside the prostate. A qualitative analysis showed clinically correct agreement between the seeds visible in TRUS and reconstructed from intraoperative fluoroscopy imaging. The measured registration error compared to the manually selected seed locations by the clinician was 2.86±1.26 mm. Conclusions: Fully automated registration between TRUS and the reconstructed seeds performed well in ground-truth phantom experiments and qualitative observation showed adequate performance on early clinical patient data. {\textcopyright} 2010 American Association of Physicists in Medicine. }, } |
2010 | Journal | Pascal Fallavollita (2010). Acquiring multiview C-arm images to assist cardiac ablation procedures. Eurasip Journal on Image and Video Processing, 2010, pp. 10. (link) (bib) x @article{Fallavollita2010, year = { 2010 }, volume = { 2010 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Acquiring multiview C-arm images to assist cardiac ablation procedures }, pages = { 10 }, journal = { Eurasip Journal on Image and Video Processing }, issn = { 16875176 }, doi = { 10.1155/2010/871409 }, author = { Fallavollita }, abstract = { CARTO XP is an electroanatomical cardiac mapping system that provides 3D color-coded maps of the electrical activity of the heart; however it is expensive and it can only use a single costly magnetic catheter for each patient intervention. Our approach consists of integrating fluoroscopic and electrical data from the RF catheters into the same image so as to better guide RF ablation, shorten the duration of this procedure, increase its efficacy, and decrease hospital cost when compared to CARTO XP. We propose a method that relies on multi-view C-arm fluoroscopy image acquisition for (1) the 3D reconstruction of the anatomical structure of interest, (2) the robust temporal tracking of the tip-electrode of a mapping catheter between the diastolic and systolic phases and (3) the 2D/3D registration of color coded isochronal maps directly on the 2D fluoroscopy image that would help the clinician guide the ablation procedure much more effectively. The method has been tested on canine experimental data. {\textcopyright} 2010 Pascal Fallavollita. }, } |
2010 | Journal | Josephine Barnes, L. Anne Mitchell, Jonathan Kennedy, Antonio J. Bastos-Leite, Suzie Barker, Manja Lehmann, R. Chris Nordstrom, Chris Frost, Joseph R. Smith, Ellen Garde, Martin N. Rossor, Nick C. Fox (2010). Does registration of serial MRI improve diagnosis of dementia?. Neuroradiology, 52(11), pp. 987–995. (link) (bib) x @article{Barnes2010, year = { 2010 }, volume = { 52 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-78650210726{\&}doi=10.1007{\%}2Fs00234-010-0665-x{\&}partnerID=40{\&}md5=52c55fbcb14f73d705d6a72ebba15776 }, type = { Journal Article }, title = { Does registration of serial MRI improve diagnosis of dementia? }, pages = { 987--995 }, number = { 11 }, keywords = { Dementia,Diagnosis,Registration,Serial MRI,Visual assessment }, journal = { Neuroradiology }, issn = { 00283940 }, doi = { 10.1007/s00234-010-0665-x }, author = { Barnes and Mitchell and Kennedy and Bastos-Leite and Barker and Lehmann and Nordstrom and Frost and Smith and Garde and Rossor and Fox }, abstract = { Introduction We aimed to assess the value of a second MR scan in the radiological diagnosis of dementia. Methods One hundred twenty subjects with clinical followup of at least 1 year with two scans were selected from a cognitive disorders clinic. Scans were reviewed as a single first scan (method A), two unregistered scans presented sideby- side (method B) and a registered pair (method C). Scans were presented to two neuroradiologists and a clinician together with approximate scan interval (if applicable) and age. Raters decided on a main and subtype diagnosis. Results There was no evidence that differences between methods (expressed as relative odds of a correct response) differed between reviewers (p=0.17 for degenerative condition or not, p=0.5 for main diagnosis, p=0.16 for subtype). Accordingly, results were pooled over reviewers. 
For distinguishing normal/non-progressors from degenerative conditions, the proportions correctly diagnosed were higher with methods B and C than with A (p=0.001, both tests). The difference between method B and C was not statistically significant (p=0.18). For main diagnosis, the proportion of correct diagnoses were highest with method C for all three reviewers; however, this was not statistically significant comparing with method A (p=0.23) or with method B (p=0.16). For subtype diagnosis, there was some evidence that method C was better than method A (p=0.01) and B (p=0.048). Conclusions Serial MRI and registration may improve visual diagnosis in dementia. {\textcopyright} Springer-Verlag 2010. }, } |
2010 | Journal | Erik W. Anderson, Gilbert A. Preston, Claudio T. Silva (2010). Using python for signal processing and visualization. Computing in Science and Engineering, 12(4), pp. 90–95. (link) (bib) x @article{Anderson2010, year = { 2010 }, volume = { 12 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Using python for signal processing and visualization }, pages = { 90--95 }, number = { 4 }, keywords = { Brain,Data visualization,Electroencephalography,Libraries,Magnetic resonance imaging,Python,Sensors,Signal processing,Time frequency analysis,Visualization }, journal = { Computing in Science and Engineering }, issn = { 15219615 }, doi = { 10.1109/MCSE.2010.91 }, author = { Anderson and Preston and Silva }, abstract = { Applying Python to a neuroscience project let developers put complex data processing and advanced visualization techniques together in a coherent framework. {\textcopyright} 2006 IEEE. }, } |
2010 | In Collection | I. Mac\'ia, M. Gra\~na, C. Paloc (2010). Towards a proposal for a vessel knowledge representation model. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 80–87. (link) (bib) x @incollection{Macia2010a, year = { 2010 }, volume = { 6279 LNAI }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-78649239965{\&}doi=10.1007{\%}2F978-3-642-15384-6{\_}9{\&}partnerID=40{\&}md5=0484a83cf98f28846128f6c2d221cffd }, type = { Serial }, title = { Towards a proposal for a vessel knowledge representation model }, pages = { 80--87 }, number = { PART 4 }, issn = { 03029743 }, isbn = { 3642153836 }, doi = { 10.1007/978-3-642-15384-6_9 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Mac{\'{i}}a and Gra{\~{n}}a and Paloc }, abstract = { We propose the development of a knowledge representation model in the area of Blood Vessel analysis, whose need we feel for the future development of the field and for our own research efforts. It will allow easy reuse of software pieces through appropriate abstractions, facilitating the development of innovative methods, procedures and applications. In this paper we present some key ideas that will be fully developed elsewhere. {\textcopyright} Springer-Verlag 2010. }, } |
2010 | In Conf. Proceedings | André Bernardini, Christoph Wotzlaw, Hans-Gerd Lipinski, Joachim Fandrey (2010). An automated real-time microscopy system for analysis of fluorescence resonance energy transfer. In Optics, Photonics, and Digital Technologies for Multimedia Applications, pp. 772311. (link) (bib) x @inproceedings{Bernardini, year = { 2010 }, volume = { 7723 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-77954408709{\&}doi=10.1117{\%}2F12.854027{\&}partnerID=40{\&}md5=62af8ffb1680932adc4db6fa28a2b4f1 }, type = { Conference Proceedings }, title = { An automated real-time microscopy system for analysis of fluorescence resonance energy transfer }, pages = { 772311 }, issn = { 0277786X }, isbn = { 9780819481962 }, doi = { 10.1117/12.854027 }, booktitle = { Optics, Photonics, and Digital Technologies for Multimedia Applications }, author = { Bernardini and Wotzlaw and Lipinski and Fandrey }, abstract = { Molecular imaging based on Fluorescence Resonance Energy Transfer (FRET) is widely used in cellular physiology both for protein-protein interaction analysis and detecting conformational changes of single proteins, e.g. during activation of signaling cascades. However, getting reliable results from FRET measurements is still hampered by methodological problems such as spectral bleed through, chromatic aberration, focal plane shifts and false positive FRET. Particularly false positive FRET signals caused by random interaction of the fluorescent dyes can easily lead to misinterpretation of the data. This work introduces a Nipkow Disc based FRET microscopy system, that is easy to operate without expert knowledge of FRET. The system automatically accounts for all relevant sources of errors and provides various result presentations of two, three and four dimensional FRET data. Two examples are given to demonstrate the scope of application. 
An interaction analysis of the two subunits of the hypoxia-inducible transcription factor 1 demonstrates the use of the system as a tool for protein-protein interaction analysis. As an example for time lapse observations, the conformational change of the fluorophore labeled heat shock protein 33 in the presence of oxidant stress is shown. }, } |
2010 | In Conf. Proceedings | Luis Álvarez, Luis Baumela, Pedro Henr\'iquez, Pablo Márquez-Neila (2010). Morphological snakes. In Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, pp. 2197–2202. (link) (bib) x @inproceedings{Alvarez, year = { 2010 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-77955987357{\&}doi=10.1109{\%}2FCVPR.2010.5539900{\&}partnerID=40{\&}md5=15e7390806889a59ec5c0fa284c87850 }, type = { Conference Proceedings }, title = { Morphological snakes }, pages = { 2197--2202 }, issn = { 10636919 }, isbn = { 9781424469840 }, doi = { 10.1109/CVPR.2010.5539900 }, booktitle = { Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition }, author = { {\'{A}}lvarez and Baumela and Henr{\'{i}}quez and M{\'{a}}rquez-Neila }, abstract = { We introduce a morphological approach to curve evolution. The differential operators used in the standard PDE snake models can be approached using morphological operations on a binary level set. By combining the morphological operators associated to the PDE components we achieve a new snakes evolution algorithm. This new solution is based on numerical methods which are very simple, fast and stable. Moreover, since the level set is just a binary piecewise constant function, this approach does not require to estimate a contour distance function. To illustrate the results obtained we present some numerical experiments on real images. {\textcopyright}2010 IEEE. }, } |
2009 | Book | T Mandl, J Martinek, W Mayr, F Rattay, M Reichel, E Moser (2009). TOWARDS A NUMERICAL 3D MODEL OF FUNCTIONAL ELECTRICAL STIMULATION OF DENERVATED, DEGENERATED HUMAN SKELETAL MUSCLE, Univ De La Laguna, 2009. (link) (bib) x @book{Mandl2009, year = { 2009 }, url = { {\%}3CGo to }, type = { Book }, title = { TOWARDS A NUMERICAL 3D MODEL OF FUNCTIONAL ELECTRICAL STIMULATION OF DENERVATED, DEGENERATED HUMAN SKELETAL MUSCLE }, series = { Emss 2009: 21st European Modeling and Simulation Symposium, Vol Ii }, publisher = { Univ De La Laguna }, pages = { 209--+ }, isbn = { 978-84-692-5415-8 }, author = { Mandl and Martinek and Mayr and Rattay and Reichel and Moser }, address = { La Laguna }, } |
2009 | Book | Li Mei Song, Jing Luo, Yu Hua Wen (2009). Virtual surgery based on medical images, Ieee, 2009, ISBN: 10033289. (link) (bib) x @book{Song2009a, year = { 2009 }, volume = { 25 }, url = { {\%}3CGo to }, type = { Book }, title = { Virtual surgery based on medical images }, series = { 2009 3rd International Conference on Bioinformatics and Biomedical Engineering, Vols 1-11 }, publisher = { Ieee }, pages = { 1481--1484 }, number = { 8 }, keywords = { Image process,Virtual segmentation,Virtual surgery,Visualization }, issn = { 10033289 }, isbn = { 978-1-4244-2901-1 }, booktitle = { Chinese Journal of Medical Imaging Technology }, author = { Song and Luo and Wen }, address = { New York }, abstract = { Objective: To establish a virtual surgery system based on three-dimensional visualization technique which can help the doctors to plot the surgery. Methods: A curve was created on the 3D space, and some points added on the curve adjusted by controlling the points which could be added and deleted. The angle could be computed according to the final curves, thus to place the scalpel to give convenience to the real surgery. Results: The virtual surgery system was used in hospital, and the results were satisfied. Conclusion: The virtual surgery system added to medical image process system is helpful to decrease risk of surgical operation. }, } |
2009 | Book chapter | Luca Corradi, Gabriele Arnulfo, Andrea Schenone, Ivan Porro, Marco Fato (2009). NA in XTENS - An eXTensible Environment for NeuroScience, Edited by T Solomonides, M Hofmann-Apitius, M Freudigmann, S C Semler, Y Legre, M Kratz, Ios Press, pp. 127–136, Studies in Health Technology and Informatics, Vol. 147, ISBN: 18798365. (link) (bib) x @inbook{Corradi2009, year = { 2009 }, volume = { 147 }, url = { {\%}3CGo to }, type = { Book Section }, title = { XTENS - An eXTensible Environment for NeuroScience }, series = { Studies in Health Technology and Informatics }, publisher = { Ios Press }, pages = { 127--136 }, keywords = { Collaborative environment,Data integration,Epilepsy,Grid,Multimodal and multiscale analysis,Remote visualization,Surgical planning }, issn = { 18798365 }, isbn = { 9781607500278 }, editor = { Solomonides, T. and Hofmann-Apitius, M. and Freudigmann, M. and Semler, S. C. and Legre, Y. and Kratz, M. }, doi = { 10.3233/978-1-60750-027-8-127 }, booktitle = { Studies in Health Technology and Informatics }, author = { Corradi and Arnulfo and Schenone and Porro and Fato }, address = { Amsterdam }, abstract = { The XTENS (eXTensible Environment for NeuroScience) platform consists in an highly extensible environment for collaborative work that improve repeatability of experiment and provides data storage and analysis capabilities. The platform is divided in repository and application domains, branched in services with different purpose. The first domain is the central component of the platform and consists in a multimodal repository with a client-server architecture. The second one provides remote tools for image and signal visualization and analysis. The main issue for such a platform is not only to provide an extensible collaborative environment, but also to build a development platform for testing models and algorithms in neuroscience. For these reasons a Grid approach has been considered. 
Both computational and data Grids infrastructures can be exploited to analyze and share large datasets of distributed data. The architecture has been deployed to support surgical planning for patients affected by drug resistant epilepsy. In that scenario, a complex analysis for a fully multimodal dataset including different image modalities, EEG and video is required to localize the origin of the ictal discharge and critical brain areas. As first results, prototype versions of both repository and application domain components are presented. {\textcopyright} 2009 The authors and IOS Press. All rights reserved. }, } |
2009 | Book chapter | Tan Su Tung, Alwin Kumar Rathinam, Yuwaraj Kumar, Zainal Ariff Abdul Rahman (2009). NA in Additional cues derived from three dimensional image processing to aid customised reconstruction for medical applications, Edited by H B Zaman, P Robinson, M Petrou, P Olivier, H Schroder, T K Shih, Springer-Verlag Berlin, pp. 148–155, Lecture Notes in Computer Science, Vol. 5857 LNCS, ISBN: 03029743. (link) (bib) x @inbook{Tung2009, year = { 2009 }, volume = { 5857 LNCS }, url = { {\%}3CGo to }, type = { Book Section }, title = { Additional cues derived from three dimensional image processing to aid customised reconstruction for medical applications }, series = { Lecture Notes in Computer Science }, publisher = { Springer-Verlag Berlin }, pages = { 148--155 }, keywords = { 3D modelling,3D stereo visualisation,Medical image processing,VRC-UM,Virtual reality }, issn = { 03029743 }, isbn = { 3642050352 }, editor = { Zaman, H. B. and Robinson, P. and Petrou, M. and Olivier, P. and Schr{\"{o}}der, H. and Shih, T. K. }, doi = { 10.1007/978-3-642-05036-7_15 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Tung and Rathinam and Kumar and Rahman }, address = { Berlin }, abstract = { Three dimensional (3D) image processing and visualisation methods were applied in craniomaxillofacial surgery for preoperative surgical procedures and surgery planning. Each patient differed in their formation of cranium and facial bones, hence requiring customised reconstruction to identify the defect area and to plan procedural steps. This paper explores the processing and visualisation of patients' data into 3D form, constructed from flat two dimensional (2D) Computed Tomography (CT) images. Depth perception has been useful to identify certain regions of Interest (ROI) elusive in 2D CT slices. 
We have noted that the 3D models have exemplified the depth perception with the provision of additional cues of perspective, motion, texture and steropsis. This has led to the improvement of treatment design and implementation for patients in this study. {\textcopyright} 2009 Springer-Verlag. }, } |
2009 | Journal | Li Mei Song, Jing Luo, Yu Hua Wen (2009). Virtual surgery based on medical images. Chinese Journal of Medical Imaging Technology, 25(8), pp. 1481–1484. (link) (bib) x @article{Song2009, year = { 2009 }, volume = { 25 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-78650586605{\&}partnerID=40{\&}md5=047142ba89c1419e7f5474c726cfdc20 }, type = { Journal Article }, title = { Virtual surgery based on medical images }, pages = { 1481--1484 }, number = { 8 }, keywords = { Image process,Virtual segmentation,Virtual surgery,Visualization }, journal = { Chinese Journal of Medical Imaging Technology }, issn = { 10033289 }, author = { Song and Luo and Wen }, abstract = { Objective: To establish a virtual surgery system based on three-dimensional visualization technique which can help the doctors to plot the surgery. Methods: A curve was created on the 3D space, and some points added on the curve adjusted by controlling the points which could be added and deleted. The angle could be computed according to the final curves, thus to place the scalpel to give convenience to the real surgery. Results: The virtual surgery system was used in hospital, and the results were satisfied. Conclusion: The virtual surgery system added to medical image process system is helpful to decrease risk of surgical operation. }, } |
2009 | Journal | Jouke Dijkstra, Alize E.H. Scheenstra, Rob C.G. Van De Ven, Louise Der Van Weerd, Arn M.J.M. Van Den Maagdenberg, Johan H.C. Reiber (2009). Automated segmentation of in vivo and ex vivo mouse brain magnetic resonance images. Molecular Imaging, 8(1), pp. 35–44. (link) (bib) x @article{Scheenstra2009a, year = { 2009 }, volume = { 8 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Automated segmentation of in vivo and ex vivo mouse brain magnetic resonance images }, pages = { 35--44 }, number = { 1 }, journal = { Molecular Imaging }, issn = { 15353508 }, doi = { 10.2310/7290.2009.00004 }, author = { Dijkstra and Scheenstra and {Van De Ven} and {Van Weerd} and {Van Den Maagdenberg} and Reiber }, abstract = { Segmentation of magnetic resonance imaging (MRI) data is required for many applications, such as the comparison of different structures or time points, and for annotation purposes. Currently, the gold standard for automated image segmentation is nonlinear atlas-based segmentation. However, these methods are either not sufficient or highly time consuming for mouse brains, owing to the low signal to noise ratio and low contrast between structures compared with other applications. We present a novel generic approach to reduce processing time for segmentation of various structures of mouse brains, in vivo and ex vivo. The segmentation consists of a rough affine registration to a template followed by a clustering approach to refine the rough segmentation near the edges. Compared with manual segmentations, the presented segmentation method has an average kappa index of 0.7 for 7 of 12 structures in in vivo MRI and 11 of 12 structures in ex vivo MRI. Furthermore, we found that these results were equal to the performance of a nonlinear segmentation method, but with the advantage of being 8 times faster. 
The presented automatic segmentation method is quick and intuitive and can be used for image registration, volume quantification of structures, and annotation. {\textcopyright} 2009 BC Decker Inc. }, } |
2009 | Journal | Kenneth L. Roach, Kevin R. King, Basak E. Uygun, Isaac S. Kohane, Martin L. Yarmush, Mehmet Toner (2009). High throughput single cell bioinformatics. Biotechnology Progress, 25(6), pp. 1772–1779. (link) (bib) x @article{Roach2009, year = { 2009 }, volume = { 25 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { High throughput single cell bioinformatics }, pages = { 1772--1779 }, number = { 6 }, keywords = { Cytometry,Free radicals,Hepatocytes,Membrane potential,Microfabrication,Microwells,Mitochondria }, journal = { Biotechnology Progress }, issn = { 87567938 }, doi = { 10.1002/btpr.289 }, author = { Roach and King and Uygun and Kohane and Yarmush and Toner }, abstract = { Advances in systems biology and bioinformatics have highlighted that no cell population is truly uniform and that stochastic behavior is an inherent property of many biological systems. As a result, bulk measurements can be misleading even when particular care has been taken to isolate a single cell type, and measurements averaged over multiple cell populations in a tissue can be as misleading as the average height at an elementary school. There is a growing need for experimental techniques that can provide a combination of single cell resolution, large cell populations, and the ability to track cells over multiple time points. In this article, a microwell array cytometry platform was developed to meet this need and investigate the heterogeneity and stochasticity of cell behavior on a single cell basis. The platform consisted of a microfabricated device with high-density arrays of cell-sized microwells and custom software for automated image processing and data analysis. As a model experimental system, we used primary hepatocytes labeled with fluorescent probes sensitive to mitochondrial membrane potential and free radical generation. The cells were exposed to oxidative stress and the responses were dynamically monitored for each cell. 
The resulting data was then analyzed using bioinformatics techniques such as hierarchical and k-means clustering to visualize the data and identify interesting features. The results showed that clustering of the dynamic data not only enhanced comparisons between the treatment groups but also revealed a number of distinct response patterns within each treatment group. Heatmaps with hierarchical clustering also provided a data-rich complement to survival curves in a dose response experiment. The microwell array cytometry platform was shown to be powerful, easy to use, and able to provide a detailed picture of the heterogeneity present in cell responses to oxidative stress. We believe that our microwell array cytometry platform will have general utility for a wide range of questions related to cell population heterogeneity, biological stochasticity, and cell behavior under stress conditions. {\textcopyright} 2009 American Institute of Chemical Engineers. }, } |
2009 | Journal | D. Ryan C. Rivest, Terence A. Riauka, Albert D. Murtha, B. Gino Fallone (2009). Prostate positioning errors associated with two automatic registration based image guidance strategies. Journal of Applied Clinical Medical Physics, 10(4), pp. 165–176. (link) (bib) x @article{Rivest2009, year = { 2009 }, volume = { 10 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Prostate positioning errors associated with two automatic registration based image guidance strategies }, pages = { 165--176 }, number = { 4 }, keywords = { Helical tomotherapy,Image registration,Prostate motion }, journal = { Journal of Applied Clinical Medical Physics }, issn = { 15269914 }, doi = { 10.1120/jacmp.v10i4.3071 }, author = { Rivest and Riauka and Murtha and Fallone }, abstract = { Daily image guidance for helical tomotherapy prostate patients is based on the registration of pretreatment megavoltage CT (MVCT) images and the original planning CT. The goal of registration, whether manual or automatic, is the overlap of the prostate; otherwise prostate misplacement may compromise the efficacy of treatment or lead to increased toxicity. A previous study demonstrated that without the aid of implanted fiducials, manual registration results in inaccurate prostate positioning. The objective of this work is to quantify prostate misplacement that results from automatic bone matching (BM) and image matching (IM) registration algorithms. 204 MVCT images from eight high-risk tomotherapy prostate patients were incorporated into this retrospective study. BM and IM registration algorithms - based on maximization of mutual information of bony anatomy only and the entire image, respectively - were used to independently register MVCT images to their respective planning images. A correlation coefficient based algorithm that uses known planning CT contour information was used for automatic prostate localization in each MVCT image. 
Daily prostate misplacement was determined by repositioning as calculated from the BM and the IM algorithms. Mean (± SD) and maximum 3D prostate positioning errors were 3.7 ± 2.1 mm and 11.8 mm for bone matching, and 4.6 ± 2.3 mm and 11.5 mm for image matching. In terms of translational directions, IM would lead to prostate positioning error ≥ 3 mm in any of the LR, AP or SI directions in 62{\%} of treatment fractions. The corresponding value for BM is 51{\%}. The values for positioning errors ≥ 5 mm were 29{\%} and 17{\%} for IM and BM, respectively. This data suggests automatic daily image guidance for tomotherapy prostate patients should be based on bone matching instead of image matching. }, } |
2009 | Journal | Kishore Mosaliganti, Firdaus Janoos, Okan Irfanoglu, Randall Ridgway, Raghu Machiraju, Kun Huang, Joel Saltz, Gustavo Leone, Michael Ostrowski (2009). Tensor classification of N-point correlation function features for histology tissue segmentation. Medical Image Analysis, 13(1), pp. 156–166. (link) (bib) x @article{Mosaliganti2009, year = { 2009 }, volume = { 13 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Tensor classification of N-point correlation function features for histology tissue segmentation }, pages = { 156--166 }, number = { 1 }, keywords = { Image segmentation,Microstructure,N-point correlation functions,Phenotyping }, journal = { Medical Image Analysis }, issn = { 13618415 }, doi = { 10.1016/j.media.2008.06.020 }, author = { Mosaliganti and Janoos and Irfanoglu and Ridgway and Machiraju and Huang and Saltz and Leone and Ostrowski }, abstract = { In this paper, we utilize the N-point correlation functions (N-pcfs) to construct an appropriate feature space for achieving tissue segmentation in histology-stained microscopic images. The N-pcfs estimate microstructural constituent packing densities and their spatial distribution in a tissue sample. We represent the multi-phase properties estimated by the N-pcfs in a tensor structure. Using a variant of higher-order singular value decomposition (HOSVD) algorithm, we realize a robust classifier that provides a multi-linear description of the tensor feature space. Validated results of the segmentation are presented in a case-study that focuses on understanding the genetic phenotyping differences in mouse placentae. {\textcopyright} 2008 Elsevier B.V. All rights reserved. }, } |
2009 | Journal | Andrew Godley, Ergun Ahunbay, Cheng Peng, X. Allen Li (2009). Automated registration of large deformations for adaptive radiation therapy of prostate cancer. Medical Physics, 36(4), pp. 1433–1441. (link) (bib) x @article{Godley2009, year = { 2009 }, volume = { 36 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-63849158774{\&}doi=10.1118{\%}2F1.3095777{\&}partnerID=40{\&}md5=54c366a7760aa8527e47d27e9d97aaa7 }, type = { Journal Article }, title = { Automated registration of large deformations for adaptive radiation therapy of prostate cancer }, pages = { 1433--1441 }, number = { 4 }, keywords = { Deformable registration,cumulative dose,dose deformation,prostate }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.3095777 }, author = { Godley and Ahunbay and Peng and Li }, abstract = { Available deformable registration methods are often inaccurate over large organ variation encountered, for example, in the rectum and bladder. The authors developed a novel approach to accurately and effectively register large deformations in the prostate region for adaptive radiation therapy. A software tool combining a fast symmetric demons algorithm and the use of masks was developed in C++ based on ITK libraries to register CT images acquired at planning and before treatment fractions. The deformation field determined was subsequently used to deform the delivered dose to match the anatomy of the planning CT. The large deformations involved required that the bladder and rectum volume be masked with uniform intensities of -1000 and 1000 HU, respectively, in both the planning and treatment CTs. The tool was tested for five prostate IGRT patients. The average rectum planning to treatment contour overlap improved from 67{\%} to 93{\%}, the lowest initial overlap is 43{\%}. The average bladder overlap improved from 83{\%} to 98{\%}, with a lowest initial overlap of 60{\%}. 
Registration regions were set to include a volume receiving 4{\%} of the maximum dose. The average region was 320×210×63, taking approximately 9 min to register on a dual 2.8 GHz Linux system. The prostate and seminal vesicles were correctly placed even though they are not masked. The accumulated doses for multiple fractions with large deformation were computed and verified. The tool developed can effectively supply the previously delivered dose for adaptive planning to correct for interfractional changes. {\textcopyright} 2009 American Association of Physicists in Medicine. }, } |
2009 | Journal | Qianqian Fang, Stefan A. Carp, Juliette Selb, Greg Boverman, Quan Zhang, Daniel B. Kopans, Richard H. Moore, Eric L. Miller, Dana H. Brooks, David A. Boas (2009). Combined optical imaging and mammography of the healthy breast: Optical contrast derived from breast structure and compression. IEEE Transactions on Medical Imaging, 28(1), pp. 30–42. (link) (bib) x @article{Fang2009, year = { 2009 }, volume = { 28 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-58149166606{\&}doi=10.1109{\%}2FTMI.2008.925082{\&}partnerID=40{\&}md5=0adeb1e35532e1ca200be1b4fda2df76 }, type = { Journal Article }, title = { Combined optical imaging and mammography of the healthy breast: Optical contrast derived from breast structure and compression }, pmid = { 19116186 }, pages = { 30--42 }, number = { 1 }, keywords = { Breast imaging,Multimodality imaging,Tomography }, journal = { IEEE Transactions on Medical Imaging }, issn = { 02780062 }, doi = { 10.1109/TMI.2008.925082 }, author = { Fang and Carp and Selb and Boverman and Zhang and Kopans and Moore and Miller and Brooks and Boas }, abstract = { In this paper, we report new progress in developing the instrument and software platform of a combined X-ray mammography/diffuse optical breast imaging system. Particularly, we focus on system validation using a series of balloon phantom experiments and the optical image analysis of 49 healthy patients. Using the finite-element method for forward modeling and a regularized Gauss-Newton method for parameter reconstruction, we recovered the inclusions inside the phantom and the hemoglobin images of the human breasts. An enhanced coupling coefficient estimation scheme was also incorporated to improve the accuracy and robustness of the reconstructions. The recovered average total hemoglobin concentration (HbT) and oxygen saturation (SO2) from 68 breast measurements are 16.2 $\mu$M and 71{\%}, respectively, where the HbT presents a linear trend with breast density. 
The low HbT value compared to literature is likely due to the associated mammographic compression. From the spatially co-registered optical/X-ray images, we can identify the chest-wall muscle, fatty tissue, and fibroglandular regions with an average HbT of 20.1 ± 6.1 $\mu$M for fibroglandular tissue, 15.4 ± 5.0 $\mu$M for adipose, and 22.2 ± 7.3 $\mu$M for muscle tissue. The differences between fibroglandular tissue and the corresponding adipose tissue are significant (p≤ 0.0001). At the same time, we recognize that the optical images are influenced, to a certain extent, by mammographical compression. The optical images from a subset of patients show composite features from both tissue structure and pressure distribution. We present mechanical simulations which further confirm this hypothesis. {\textcopyright} 2006 IEEE. }, } |
2009 | Journal | M. del Fresno, M. Vénere, A. Clausse (2009). A combined region growing and deformable model method for extraction of closed surfaces in 3D CT and MRI scans. Computerized Medical Imaging and Graphics, 33(5), pp. 369–376. (link) (bib) x @article{Fresno2009, year = { 2009 }, volume = { 33 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-67349195754{\&}doi=10.1016{\%}2Fj.compmedimag.2009.03.002{\&}partnerID=40{\&}md5=93b30aa2fbeb65fa285de2c2c00f07fc }, type = { Journal Article }, title = { A combined region growing and deformable model method for extraction of closed surfaces in 3D CT and MRI scans }, pages = { 369--376 }, number = { 5 }, keywords = { Deformable surface models,Hybrid methods,Image segmentation,MRI,Region growing }, journal = { Computerized Medical Imaging and Graphics }, issn = { 08956111 }, doi = { 10.1016/j.compmedimag.2009.03.002 }, author = { Fresno and V{\'{e}}nere and Clausse }, abstract = { Image segmentation of 3D medical images is a challenging problem with several still not totally solved practical issues, such as noise interference, variable object structures and image artifacts. This paper describes a hybrid 3D image segmentation method which combines region growing and deformable models to obtain accurate and topologically preserving surface structures of anatomical objects of interest. The proposed strategy starts by determining a rough but robust approximation of the objects using a region-growing algorithm. Then, the closed surface mesh that encloses the region is constructed and used as the initial geometry of a deformable model for the final refinement. This integrated strategy provides an alternative solution to one of the flaws of traditional deformable models, achieving good refinements of internal surfaces in few steps. 
Experimental segmentation results of complex anatomical structures on both simulated and real data from MRI scans are presented, and the method is assessed by comparing with standard reference segmentations of head MRI. The evaluation was mainly based on the average overlap measure, which was tested on the segmentation of white matter, corresponding to a simulated brain data set, showing excellent performance exceeding 90{\%} accuracy. In addition, the algorithm was applied to the detection of anatomical head structures on two real MRI and one CT data set. The final reconstructions resulting from the deformable models produce high quality meshes suitable for 3D visualization and further numerical analysis. The obtained results show that the approach achieves high quality segmentations with low computational complexity. {\textcopyright} 2009 Elsevier Ltd. All rights reserved. }, } |
2009 | In Collection | Alize E.H. Scheenstra, Michael Muskulus, Marius Staring, Arn M.J.V. Van Den Maagdenberg, Sjoerd Verduyn Lunel, J. Hans C. Reiber, Louise Van Der Weerd, Jouke Dijkstra (2009). The 3D moore-rayleigh test for the quantitative groupwise comparison of MR brain images. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 564–575. (link) (bib) x @incollection{Scheenstra2009, year = { 2009 }, volume = { 5636 LNCS }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-70349313285{\&}doi=10.1007{\%}2F978-3-642-02498-6{\_}47{\&}partnerID=40{\&}md5=6667d3c02eda486716d2cfb5e09e139d }, type = { Serial }, title = { The 3D moore-rayleigh test for the quantitative groupwise comparison of MR brain images }, pages = { 564--575 }, issn = { 03029743 }, isbn = { 3642024971 }, doi = { 10.1007/978-3-642-02498-6_47 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Scheenstra and Muskulus and Staring and {Van Den Maagdenberg} and {Verduyn Lunel} and Reiber and {Van Der Weerd} and Dijkstra }, abstract = { Non-rigid registration of MR images to a common reference image results in deformation fields, from which anatomical differences can be statistically assessed, within and between populations. Without further assumptions, nonparametric tests are required and currently the analysis of deformation fields is performed by permutation tests. For deformation fields, often the vector magnitude is chosen as test statistic, resulting in a loss of information. In this paper, we consider the three dimensional Moore-Rayleigh test as an alternative for permutation tests. This nonparametric test offers two novel features: first, it incorporates both the directions and magnitude of the deformation vectors. 
Second, as its distribution function is available in closed form, this test statistic can be used in a clinical setting. Using synthetic data that represents variations as commonly encountered in clinical data, we show that the Moore-Rayleigh test outperforms the classical permutation test. {\textcopyright} 2009 Springer Berlin Heidelberg. }, } |
2009 | In Conf. Proceedings | Ronald Pierson, Gregory Harris, Hans J. Johnson, Steve Dunn, Vincent A. Magnotta (2009). Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation. In Medical Imaging 2009: Image Processing, pp. 72593N. (link) (bib) x @inproceedings{pierson2009maximize, year = { 2009 }, volume = { 7259 }, url = { http://link.aip.org/link/PSISDG/v7259/i1/p72593N/s1{\&}Agg=doi }, title = { Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation }, publisher = { Spie }, pages = { 72593N }, organization = { International Society for Optics and Photonics }, keywords = { brain extraction,magnetic resonance imaging,optimization }, issn = { 16057422 }, isbn = { 9780819475107 }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Pierson et al/Medical Imaging 2009 Image Processing/Pierson et al. - 2009 - Maximize uniformity summation heuristic (MUSH) a highly accurate simple method for intracranial delineation.pdf:pdf }, doi = { 10.1117/12.812322 }, booktitle = { Medical Imaging 2009: Image Processing }, author = { Pierson and Harris and Johnson and Dunn and Magnotta }, annote = { From Duplicate 1 (Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent a.) From Duplicate 1 (Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent a.) From Duplicate 1 (Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent a.) 
{\#}{\#}CONTRIBUTIONS: As the directory of medical imaging for the department of Psychiatry I work closely with neuroscience student and junior faculty to mentor on appropriate medical imaging analysis techniques. I developed custom analysis software to achieve the desired interpretation of results. I had substantial contributions to the software methods development, interpretation of validation results for this work. I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K; Harris, Gregory; Johnson, Hans J; Dunn, Steve; Magnotta, Vincent a.) From Duplicate 1 (Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent a.) From Duplicate 2 (Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent a.) From Duplicate 1 (Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent a.) From Duplicate 3 ( Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent A. ) From Duplicate 2 (Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent a.) 
From Duplicate 1 (Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent a.) From Duplicate 3 ( Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent A. ) From Duplicate 2 (Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K; Harris, Gregory; Johnson, Hans J; Dunn, Steve; Magnotta, Vincent a.) From Duplicate 1 (Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent a.) From Duplicate 2 (Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent a.) From Duplicate 1 (Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent a.) From Duplicate 3 ( Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent A. ) From Duplicate 2 (Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent a.) From Duplicate 1 (Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent a.) 
From Duplicate 3 ( Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent A. ) From Duplicate 2 (Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation - Pierson, Ronald K.; Harris, Gregory; Johnson, Hans J.; Dunn, Steve; Magnotta, Vincent a.) {\#}{\#}CONTRIBUTIONS: As the directory of medical imaging for the department of Psychiatry I work closely with neuroscience student and junior faculty to mentor on appropriate medical imaging analysis techniques. I developed custom analysis software to achieve the desired interpretation of results. I had substantial contributions to the software methods development, interpretation of validation results for this work. I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} }, abstract = { A common procedure performed by many groups in the analysis of neuroimaging data is separating the brain from other tissues. This procedure is often utilized both by volumetric studies as well as functional imaging studies. Regardless of the intent, an accurate, robust method of identifying the brain or cranial vault is imperative. While this is a common requirement, there are relatively few tools to perform this task. Most of these tools require a T1 weighted image and are therefore not able to accurately define a region that includes surface CSF. In this paper, we have developed a novel brain extraction technique termed Maximize Uniformity by Summation Heuristic (MUSH) optimization. The algorithm was designed for extraction of the brain and surface CSF from a multi-modal magnetic resonance (MR) imaging study. 
The method forms a linear combination of multi-modal MR imaging data to make the signal intensity within the brain as uniform as possible. The resulting image is thresholded and simple morphological operators are utilized to generate the resulting representation of the brain. The resulting method was applied to a sample of 20 MR brain scans and compared to the results generated by 3dSkullStrip, 3dIntracranial, BET, and BET2. The average Jaccard metrics for the twenty subjects was 0.66 (BET), 0.61 (BET2), 0.88 (3dIntracranial), 0.91 (3dSkullStrip), and 0.94 (MUSH). }, } |
2009 | In Conf. Proceedings | Thomas Mandl, Johannes Martinek, Winfried Mayr, Frank Rattay, Martin Reichel, Ewald Moser (2009). Towards a numerical 3D model of functional electrical stimulation of denervated, degenerated human skeletal muscle. In 21st European Modeling and Simulation Symposium, EMSS 2009, pp. 209–+, La Laguna. (link) (bib) x @inproceedings{Mandl, year = { 2009 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874185461{\&}partnerID=40{\&}md5=a9627e50e42db43cbfa2f5ca618bdad4 }, type = { Conference Proceedings }, title = { Towards a numerical 3D model of functional electrical stimulation of denervated, degenerated human skeletal muscle }, series = { Emss 2009: 21st European Modeling and Simulation Symposium, Vol Ii }, publisher = { Univ De La Laguna }, pages = { 209--+ }, keywords = { Finite difference method,Finite element method,Functional electrical stimulation,Patient specific model }, isbn = { 978-84-692-5415-8 }, booktitle = { 21st European Modeling and Simulation Symposium, EMSS 2009 }, author = { Mandl, Thomas and Martinek, Johannes and Mayr, Winfried and Rattay, Frank and Reichel, Martin and Moser, Ewald }, address = { La Laguna }, abstract = { Functional electrical stimulation (FES) of longterm denervated, degenerated human skeletal muscle has proven to be an effective method for improving a number of physiological parameters. In order to derive suitable stimulation configurations (electrode position and size) for certain muscle specific training tasks, the activation pattern induced by a given configuration must be known. The probability of activation can be estimated by activating functions which depend on the distribution of the externally applied electrical field. We thus chose to create both, a finite element (FE) and a finite difference (FD) model of the field distribution to simulate and study activation patterns and to compare their efficiency and feasibility. First preliminary results show good agreement between the two modeling approaches. }, } |
2009 | In Conf. Proceedings | Hendrik Rohn, Christian Klukas, Falk Schreiber (2009). Integration and visualisation of multimodal biological data. In GCB 2009 - German Conference on Bioinformatics 2009, pp. 105–115. (link) (bib) x @inproceedings{Rohn, year = { 2009 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-79960202320{\&}partnerID=40{\&}md5=c0b665d0351eb138a112ad74c961d17d }, type = { Conference Proceedings }, title = { Integration and visualisation of multimodal biological data }, pages = { 105--115 }, isbn = { 9783885792512 }, booktitle = { GCB 2009 - German Conference on Bioinformatics 2009 }, author = { Rohn, Hendrik and Klukas, Christian and Schreiber, Falk }, abstract = { Understanding complex biological systems requires data from manifold biological levels. Often this data is analysed in some meaningful context, for example, by integrating it into biological networks. However, spatial data given as 2D images or 3D volumes is commonly not taken into consideration and analysed separately. Here we present a new approach to integrate and analyse complex multimodal biological data in space and time. We present a data structure to manage this kind of data and discuss application examples for different data integration scenarios. }, } |
2009 | In Conf. Proceedings | Dário A.B. Oliveira, Raul Q. Feitosa, Mauro M. Correia (2009). Liver segmentation using level sets and genetic algorithms. In VISAPP 2009 - Proceedings of the 4th International Conference on Computer Vision Theory and Applications, pp. 154–159. (link) (bib) x @inproceedings{Oliveira, year = { 2009 }, volume = { 2 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-70349298510{\&}partnerID=40{\&}md5=921f6aa504ccbbbf03902b5d0fbb632b }, type = { Conference Proceedings }, title = { Liver segmentation using level sets and genetic algorithms }, pages = { 154--159 }, keywords = { Computer tomography,Genetic algorithms,Level sets,Liver segmentation,Medical imaging }, isbn = { 9789898111692 }, doi = { 10.5220/0001787401540159 }, booktitle = { VISAPP 2009 - Proceedings of the 4th International Conference on Computer Vision Theory and Applications }, author = { Oliveira and Feitosa and Correia }, abstract = { This paper presents a method based on level sets to segment the liver using Computer Tomography (CT) images. Initially, the liver boundary is manually set in one slice as an initial solution, and then the method automatically segments the liver in all other slices, sequentially. In each step of iteration it fits a Gaussian curve to the liver histogram to model the speed image in which the level sets propagates. The parameters of our method were estimated using Genetic Algorithms (GA) and a database of reference segmentations. The method was tested using 20 different exams and five different measures of performance, and the results obtained confirm the potential of the method. The cases in which the method presented a poor performance are also discussed in order to instigate further research. }, } |
2009 | In Conf. Proceedings | Brian Nett, Jie Tang, Beverly Aagaard-Kienitz, Howard Rowley, Guang-Hong Chen (2009). Low radiation dose C-arm cone-beam CT based on prior image constrained compressed sensing (PICCS): including compensation for image volume mismatch between multiple data acquisitions. In Medical Imaging 2009: Physics of Medical Imaging, pp. 725803. (link) (bib) x @inproceedings{Nett, year = { 2009 }, volume = { 7258 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-66749095565{\&}doi=10.1117{\%}2F12.813800{\&}partnerID=40{\&}md5=7427199cbd63c3c0e9c00bf6c4c4ff1e }, type = { Conference Proceedings }, title = { Low radiation dose C-arm cone-beam CT based on prior image constrained compressed sensing (PICCS): including compensation for image volume mismatch between multiple data acquisitions }, pages = { 725803 }, issn = { 16057422 }, isbn = { 9780819475091 }, doi = { 10.1117/12.813800 }, booktitle = { Medical Imaging 2009: Physics of Medical Imaging }, author = { Nett and Tang and Aagaard-Kienitz and Rowley and Chen }, abstract = { C-arm based cone-beam CT (CBCT) has evolved into a routine clinical imaging modality to provide threedimensional tomographic image guidance before, during, and after an interventional procedure. It is often used to update the clinician to the state of the patient anatomy and interventional tool placement. Due to the repeatedly use of CBCT, the accumulated radiation dose in an interventional procedure has become a concern. There is a strong desire from both patients and health care providers to reduce the radiation exposure required for these exams. The overall objective of this work is to propose and validate a method to significantly reduce the total radiation dose used during a CBCT image guided intervention. The basic concept is that the first cone-beam CT scan acquired at the full dose will be used to constrain the reconstruction of the later CBCT scans acquired at a much lower radiation dose. 
A recently developed new image reconstruction algorithm, Prior Image Constrained Compressed Sensing (PICCS), was used to reconstruct subsequent CBCT images with lower dose. This application differs from other applications of the PICCS algorithm, such as time-resolved CT or fourdimensional CBCT (4DCBCT), because the patient position may be frequently changed from one CBCT scan to another during the procedure. Thus, an image registration step to account for the change in patient position is indispensable for use of the PICCS image reconstruction algorithm. In this paper, the image registration step is combined with the PICCS algorithm to enable radiation dose reduction in CBCT image guided interventions. Experimental results acquired from a clinical C-arm system using a human cadaver were used to validate the PICCS algorithm based radiation dose reduction scheme. Using the proposed method in this paper, it has been demonstrated that, instead of 300 view angles, this technique requires about 20 cone-beam view angles to reconstruct CBCT angiograms. This signals a radiation dose reduction by a factor of approximately fifteen for subsequent acquisitions. }, } |
2008 | Journal | Stephanie Powell, Vincent A. Magnotta, Hans Johnson, Vamsi K. Jammalamadaka, Ronald Pierson, Nancy C. Andreasen (2008). Registration and machine learning-based automated segmentation of subcortical and cerebellar brain structures. NeuroImage, 39(1), pp. 238–247. (link) (bib) x @article{powell2008registration, year = { 2008 }, volume = { 39 }, url = { http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2253948{\&}tool=pmcentrez{\&}rendertype=abstract http://www.sciencedirect.com/science?{\_}ob=ArticleURL{\&}{\_}udi=B6WNP-4PGGP30-3{\&}{\_}user=440026{\&}{\_}rdoc=1{\&}{\_}fmt={\&}{\_}orig=search{\&}{\_}sort=d{\&}view=c{\&}{\_}acct=C000020939{\&}{\_}version= }, title = { Registration and machine learning-based automated segmentation of subcortical and cerebellar brain structures }, publisher = { Academic Press }, pmid = { 17904870 }, pages = { 238--247 }, number = { 1 }, month = { jan }, keywords = { Artificial neural networks,Brain segmentation,MRI,Registration-based segmentation,Support vector machine }, journal = { NeuroImage }, issn = { 10538119 }, isbn = { 1053-8119 (Print) }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Powell et al/NeuroImage/Powell et al. - 2008 - Registration and machine learning-based automated segmentation of subcortical and cerebellar brain structures.pdf:pdf }, eprint = { NIHMS150003 }, doi = { 10.1016/j.neuroimage.2007.05.063 }, author = { Powell and Magnotta and Johnson and Jammalamadaka and Pierson and Andreasen }, arxivid = { NIHMS150003 }, archiveprefix = { arXiv }, annote = { From Duplicate 1 (Registration and machine learning-based automated segmentation of subcortical and cerebellar brain structures - Powell, Stephanie; Magnotta, Vincent A.; Johnson, Hans J.; Jammalamadaka, Vamsi K.; Pierson, Ronald K.; Andreasen, Nancy C.) 
From Duplicate 2 (Registration and machine learning-based automated segmentation of subcortical and cerebellar brain structures - Powell, Stephanie; Magnotta, Vincent A.; Johnson, Hans J.; Jammalamadaka, Vamsi K.; Pierson, Ronald K.; Andreasen, Nancy C.) {\#}{\#}CONTRIBUTIONS: As the directory of medical imaging for the department of Psychiatry I work closely with neuroscience student and junior faculty to mentor on appropriate medical imaging analysis techniques. I developed custom analysis software to achieve the desired interpretation of results. I had substantial contributions to the software methods development, interpretation of validation results for this work. I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} From Duplicate 2 (Registration and machine learning-based automated segmentation of subcortical and cerebellar brain structures - Powell, Stephanie; Magnotta, Vincent A.; Johnson, Hans J.; Jammalamadaka, Vamsi K.; Pierson, Ronald K.; Andreasen, Nancy C.) {\#}{\#}CONTRIBUTIONS: As the directory of medical imaging for the department of Psychiatry I work closely with neuroscience student and junior faculty to mentor on appropriate medical imaging analysis techniques. I developed custom analysis software to achieve the desired interpretation of results. I had substantial contributions to the software methods development, interpretation of validation results for this work. I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. 
:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#} }, abstract = { The large amount of imaging data collected in several ongoing multi-center studies requires automated methods to delineate brain structures of interest. We have previously reported on using artificial neural networks (ANN) to define subcortical brain structures. Here we present several automated segmentation methods using multidimensional registration. A direct comparison between template, probability, artificial neural network (ANN) and support vector machine (SVM)-based automated segmentation methods is presented. Three metrics for each segmentation method are reported in the delineation of subcortical and cerebellar brain regions. Results show that the machine learning methods outperform the template and probability-based methods. Utilization of these automated segmentation methods may be as reliable as manual raters and require no rater intervention. {\textcopyright} 2007 Elsevier Inc. All rights reserved. }, } |
2008 | Journal | Christian Von Falck, Alexander Hartung, Frank Berndzen, Benjamin King, Michael Galanski, Hoen Oh Shin (2008). Optimization of low-contrast detectability in thin-collimated modern multidetector CT using an interactive sliding-thin-slab averaging algorithm. Investigative Radiology, 43(4), pp. 229–235. (link) (bib) x @article{VonFalck2008, year = { 2008 }, volume = { 43 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-40949127029{\&}doi=10.1097{\%}2FRLI.0b013e3181614f2d{\&}partnerID=40{\&}md5=787eeed3860e71b1c776f64c81144821 }, type = { Journal Article }, title = { Optimization of low-contrast detectability in thin-collimated modern multidetector CT using an interactive sliding-thin-slab averaging algorithm }, pages = { 229--235 }, number = { 4 }, keywords = { Contrast,Imaging phantom,Spiral CT }, journal = { Investigative Radiology }, issn = { 00209996 }, doi = { 10.1097/RLI.0b013e3181614f2d }, author = { {Von Falck} and Hartung and Berndzen and King and Galanski and Shin }, abstract = { OBJECTIVES: To analyze the effects of the sliding-thin-slab averaging algorithm on low-contrast performance in MDCT imaging and to find reasonable parameters for clinical routine work. MATERIALS AND METHODS: A low-contrast phantom simulating hypodense lesions (20 HU object contrast) was scanned with a 16-slice spiral CT scanner using different mAs-settings of 25, 50, 100, and 195 mAs. Other scan parameters were as follows: tube voltage = 120 kVp, slice collimation = 0.625 mm, pitch = 1.375 (high speed), reconstruction interval = 0.5 mm. Images were reconstructed with soft, standard, and bone algorithms, resulting in a total of 12 datasets. A sliding-thin-slab averaging algorithm was applied to these primary datasets, systematically varying the slab thickness between 0.5 and 5.0 mm. 
The low-contrast performance of the resulting datasets was semi-automatically analyzed using a statistical reader-independent approach: A size-dependent analysis of the image noise within the phantom was used to empirically generate a contrast discrimination function (CDF). The ratio between the actual contrast and the minimum contrast necessary for the detection (as given by the CDF) was calculated for all lesions in each dataset and used to evaluate the low-contrast detectability of the different lesions at increasing slab thickness. The results were compared with the original datasets to calculate the improvement in low-contrast detectability. RESULTS: Using the sliding-thin-slab algorithm, low-contrast performance was increased by a factor between 1.1 and 1.7 when compared with the primary dataset. The improvement of the visibility index at optimal slab thickness when compared with the original slice thickness (0.625 mm) was statistically significant (P {\textless} 0.05, Student t test) for the following datasets: 8 mm: all datasets; 6 mm: 25 mAs/soft, 195 mAs/bone, 25 mAs/bone; 5 mm: 25 mAs/soft, 25 mAs/bone. The ideal slab thickness over all datasets was 43{\%} (±3{\%}) of the diameter of the lesion to be detected. CONCLUSIONS: The use of an interactive sliding-thin-slab averaging algorithm can be readily applied to optimize low-contrast detectability in thin-collimated CT datasets. As a general rule for daily routine, a slice thickness of approximately 2.5 to 3.0 mm can be regarded as a reasonable preset, resulting in an optimized detectability of lesions with a diameter of 5 mm and above. {\textcopyright} 2008 Lippincott Williams {\&} Wilkins, Inc. }, } |
2008 | Journal | Bodo Reitz, Olivier Gayou, David S. Parda, Moyed Miften (2008). Monitoring tumor motion with on-line mega-voltage cone-beam computed tomography imaging in a cine mode. Physics in Medicine and Biology, 53(4), pp. 823–836. (link) (bib) x @article{Reitz2008, year = { 2008 }, volume = { 53 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-39049165539{\&}doi=10.1088{\%}2F0031-9155{\%}2F53{\%}2F4{\%}2F001{\&}partnerID=40{\&}md5=1053838180560cce8431007aee909ef5 }, type = { Journal Article }, title = { Monitoring tumor motion with on-line mega-voltage cone-beam computed tomography imaging in a cine mode }, pages = { 823--836 }, number = { 4 }, journal = { Physics in Medicine and Biology }, issn = { 00319155 }, doi = { 10.1088/0031-9155/53/4/001 }, author = { Reitz and Gayou and Parda and Miften }, abstract = { Accurate daily patient localization is becoming increasingly important in external-beam radiotherapy (RT). Mega-voltage cone-beam computed tomography (MV-CBCT) utilizing a therapy beam and an on-board electronic portal imager can be used to localize tumor volumes and verify the patient's position prior to treatment. MV-CBCT produces a static volumetric image and therefore can only account for inter-fractional changes. In this work, the feasibility of using the MV-CBCT raw data as a fluoroscopic series of portal images to monitor tumor changes due to e.g. respiratory motion was investigated. A method was developed to read and convert the CB raw data into a cine. To improve the contrast-to-noise ratio on the MV-CB projection data, image post-processing with filtering techniques was investigated. Volumes of interest from the planning CT were projected onto the MV-cine. Because of the small exposure and the varying thickness of the patient depending on the projection angle, soft-tissue contrast was limited. Tumor visibility as a function of tumor size and projection angle was studied. 
The method was well suited in the upper chest, where motion of the tumor as well as of the diaphragm could be clearly seen. In the cases of patients with non-small cell lung cancer with medium or large tumor masses, we verified that the tumor mass was always located within the PTV despite respiratory motion. However for small tumors the method is less applicable, because the visibility of those targets becomes marginal. Evaluation of motion in non-superior-inferior directions might also be limited for small tumor masses. Viewing MV-CBCT data in a cine mode adds to the utility of MV-CBCT for verification of tumor motion and for deriving individualized treatment margins. {\textcopyright} 2008 Institute of Physics and Engineering in Medicine. }, } |
2008 | Journal | Andre B. Phillion, P. D. Lee, E. Maire, S. L. Cockcroft (2008). Quantitative assessment of deformation-induced damage in a Semisolid aluminum alloy via X-ray microtomography. Metallurgical and Materials Transactions A: Physical Metallurgy and Materials Science, 39(10), pp. 2459–2469. (link) (bib) x @article{Phillion2008, year = { 2008 }, volume = { 39 }, type = { Journal Article }, title = { Quantitative assessment of deformation-induced damage in a Semisolid aluminum alloy via X-ray microtomography }, pages = { 2459--2469 }, number = { 10 }, journal = { Metallurgical and Materials Transactions A: Physical Metallurgy and Materials Science }, issn = { 10735623 }, doi = { 10.1007/s11661-008-9584-4 }, author = { Phillion, Andre B. and Lee, P. D. and Maire, E. and Cockcroft, S. L. }, abstract = { Semisolid tensile testing combined with X-ray microtomography (XMT) was used to characterize the development of internal damage as a function of strain in an aluminum-magnesium alloy, AA5182. Novel techniques were developed to allow the quantification of both the size evolution and orientation of the damage to determine mechanisms controlling the early stage growth and localization. During the initial stages of semisolid deformation, it was observed that strain was accommodated by both the growth of as-cast porosity and the detection of new damage-based voids. As the volume fraction of damage increases, the growth of voids occurs in an orientation perpendicular to the loading direction, both through expansion within the grain boundary liquid and void coalescence. The damage then localizes, causing failure. {\textcopyright} The Minerals, Metals {\&} Materials Society and ASM International 2008. }, } |
2008 | Journal | Hanchuan Peng (2008). Bioimage informatics: A new area of engineering biology. Bioinformatics, 24(17), pp. 1827–1836. (link) (bib) x @article{Peng2008, year = { 2008 }, volume = { 24 }, type = { Journal Article }, title = { Bioimage informatics: A new area of engineering biology }, pages = { 1827--1836 }, number = { 17 }, journal = { Bioinformatics }, issn = { 13674803 }, doi = { 10.1093/bioinformatics/btn346 }, author = { Peng, Hanchuan }, abstract = { In recent years, the deluge of complicated molecular and cellular microscopic images creates compelling challenges for the image computing community. There has been an increasing focus on developing novel image processing, data mining, database and visualization techniques to extract, compare, search and manage the biological knowledge in these data-intensive problems. This emerging new area of bioinformatics can be called 'bioimage informatics'. This article reviews the advances of this field from several aspects, including applications, key techniques, available tools and resources. Application examples such as high-throughput/high-content phenotyping and atlas building for model organisms demonstrate the importance of bioimage informatics. The essential techniques to the success of these applications, such as bioimage feature identification, segmentation and tracking, registration, annotation, mining, image data management and visualization, are further summarized, along with a brief overview of the available bioimage databases, analysis tools and other resources. {\textcopyright} 2008 The Author(s). }, } |
2008 | In Conf. Proceedings | Rui Shen, Pierre Boulanger, Michelle Noga (2008). Med vis: A real-time immersive visualization environment for the exploration of medical volumetric data. In Proceedings - 5th International Conference BioMedical Visualization, Information Visualization in Medical and Biomedical Informatics, MediVis 2008, pp. 63–70. (link) (bib) x @inproceedings{Shen, year = { 2008 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-57849137509{\&}doi=10.1109{\%}2FMediVis.2008.10{\&}partnerID=40{\&}md5=11410f98c18a5d2bb0c6538a8af8ee5e }, type = { Conference Proceedings }, title = { Med vis: A real-time immersive visualization environment for the exploration of medical volumetric data }, pages = { 63--70 }, isbn = { 9780769532844 }, doi = { 10.1109/MediVis.2008.10 }, booktitle = { Proceedings - 5th International Conference BioMedical Visualization, Information Visualization in Medical and Biomedical Informatics, MediVis 2008 }, author = { Shen and Boulanger and Noga }, abstract = { This paper describes the Medical Visualizer, a real-time visualization system for analyzing medical volumetric data in various virtual environments, such as autostereoscopic displays, dual-projector screens and immersive environments such as the CAVE. Direct volume rendering is used for visualizing the details of medical volumetric data sets without intermediate geometric representations. By interactively manipulating the color and transparency functions through the friendly user interface, radiologists can either inspect the data set as a whole or focus on a specific region. In our system, 3D texture hardware is employed to accelerate the rendering process. The system is designed to be platform independent, as all virtual reality functions are separated from kernel functions. Due to its modular design, our system can be easily extended to other virtual environments, and new functions can be incorporated rapidly. {\textcopyright} 2008 IEEE. }, } |
2008 | In Conf. Proceedings | Q. Fang, S. A. Carp, J. Selb, R. Moore, D. B. Kopans, E. L. Miller, D. H. Brooks, D. A. Boas (2008). Spectrally constrained optical breast imaging with coregistered x-ray tomosynthesis. In Biomedical Optics, BIOMED 2008, pp. NA (link) (bib) x @inproceedings{Fanga, year = { 2008 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84884324617{\&}partnerID=40{\&}md5=4341cad05ad6059b2b7866085d7c9037 }, type = { Conference Proceedings }, title = { Spectrally constrained optical breast imaging with coregistered x-ray tomosynthesis }, doi = { 10.1364/biomed.2008.bsub2 }, booktitle = { Biomedical Optics, BIOMED 2008 }, author = { Fang and Carp and Selb and Moore and Kopans and Miller and Brooks and Boas }, abstract = { We imaged 65 patients with a combined optical and tomosynthesis imaging system. The bulk optical properties from 72 healthy breasts and the reconstructed images using a spectrally-constrained algorithm for healthy and tumor breasts are reported. {\textcopyright} 2007 Optical Society of America. }, } |
2008 | In Conf. Proceedings | Q. Fang, S. A. Carp, J. Selb, R. Moore, D. B. Kopans, E. L. Miller, D. H. Brooks, D. A. Boas (2008). A multi-modality image reconstruction platform for diffuse optical tomography. In Biomedical Optics, BIOMED 2008, pp. NA (link) (bib) x @inproceedings{Fang, year = { 2008 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84884342384{\&}partnerID=40{\&}md5=0a660aea258182c137661a84afe6ec4e }, type = { Conference Proceedings }, title = { A multi-modality image reconstruction platform for diffuse optical tomography }, doi = { 10.1364/biomed.2008.bmd24 }, booktitle = { Biomedical Optics, BIOMED 2008 }, author = { Fang and Carp and Selb and Moore and Kopans and Miller and Brooks and Boas }, abstract = { We present a software platform for image reconstruction and data analysis for diffuse optical tomography. The structure, algorithm and functionalities of the platform are reported together with the sample results produced by the platform. {\textcopyright} 2007 Optical Society of America. }, } |
2008 | In Conf. Proceedings | Piero Calvini, Andrea Chincarini, Stefania Donadio, Gianluca Gemme, Sandro Squarcia, Flavio Nobili, Guido Rodriguez, Roberto Bellotti, Ezio Catanzariti, Piergiorgio Cerello, Ivan De Mitri, Maria Evelina Fantacci (2008). Automatic localization of the hippocampal region in MR images to asses early diagnosis of Alzheimer's disease in MCI patients. In IEEE Nuclear Science Symposium Conference Record, pp. 4348–4354. (link) (bib) x @inproceedings{Calvini, year = { 2008 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-67649172535{\&}doi=10.1109{\%}2FNSSMIC.2008.4774245{\&}partnerID=40{\&}md5=0876425a595d10f8641904244780ec81 }, type = { Conference Proceedings }, title = { Automatic localization of the hippocampal region in MR images to asses early diagnosis of Alzheimer's disease in MCI patients }, pages = { 4348--4354 }, issn = { 10957863 }, isbn = { 9781424427154 }, doi = { 10.1109/NSSMIC.2008.4774245 }, booktitle = { IEEE Nuclear Science Symposium Conference Record }, author = { Calvini and Chincarini and Donadio and Gemme and Squarcia and Nobili and Rodriguez and Bellotti and Catanzariti and Cerello and {De Mitri} and Fantacci }, abstract = { Atrophy and other brain changes, which are typical of aging, generate wide inter-individual variability of morphology in the medial temporal lobe (MTL), including the hippocampal formation. Starting from a sample population of 133 MR images we developed a procedure that extracts from each MR two sub images, containing the hippocampal formations plus a portion of the adjacent tissues and cavities. Then, a small number of templates is selected among the previously obtained sub images, able to describe the morphological variability present in the whole population. Finally an automatic procedure is prepared which, on the basis of the given set of templates, is able to find both hippocampal formations in any new MR image. 
MR images ranging from normalcy to extreme atrophy can be successfully processed. The proposed approach, besides being a preliminary step towards the unsupervised segmentation of the hippocampus, extracts from the MR image information useful for diagnostic purposes and, in particular, could give the possibility of performing morphometric studies on the medial temporal lobe in an automated way. The automated analysis of MTL atrophy in the segmented volume is readily applied to the early assessment of Alzheimer Disease (AD), leading to discriminating converters from Mild Cognitive Impairment (MCI) to AD with an average three years follow-up. This procedure can quickly and reliably provide additional information in early diagnosis of AD. {\textcopyright} 2008 IEEE. }, } |
2007 | Book chapter | Alize E. H. Scheenstra, Jouke Dijkstra, Rob C. G. van de Ven, Louise van der Weerd, Johan H. C. Reiber (2007). Automated segmentation of the ex vivo mouse brain. In Medical Imaging 2007: Physiology, Function, and Structure from Medical Images, Edited by A Manduca, X P Hu, Spie-Int Soc Optical Engineering, pp. 651106, Proceedings of SPIE, Vol. 6511, ISSN: 16057422. (link) (bib) x @inbook{Scheenstra2007, year = { 2007 }, volume = { 6511 }, type = { Book Section }, title = { Automated segmentation of the ex vivo mouse brain }, series = { Proceedings of SPIE }, publisher = { Spie-Int Soc Optical Engineering }, pages = { 651106 }, issn = { 16057422 }, isbn = { 0819466298 }, editor = { Manduca, A. and Hu, X. P. }, doi = { 10.1117/12.708867 }, booktitle = { Medical Imaging 2007: Physiology, Function, and Structure from Medical Images }, author = { Scheenstra, Alize E. H. and Dijkstra, Jouke and van de Ven, Rob C. G. and van der Weerd, Louise and Reiber, Johan H. C. }, address = { Bellingham }, abstract = { In biological image processing the segmentation of a volume is, although tedious, required for many applications, like the comparison of structures and annotation purposes. To automate this process, we present a segmentation method for various structures of the mouse brain. The segmentation consists of two parts; first a rough affine atlas-based registration was performed and second, the edges between structures were refined by an adapted Markov random field clustering approach. The segmentations results were compared to manual segmentations from two experts. The presented automatic segmentation method is quick, intuitive and suitable for registration purposes, but also for biological objectives, like comparison and annotation. }, } |
2007 | Journal | I. A. Rasmussen, F. Lindseth, O. M. Rygh, E. M. Berntsen, T. Selbekk, J. Xu, T. A. Nagelhus Hernes, E. Harg, A. Håberg, G. Unsgaard (2007). Functional neuronavigation combined with intra-operative 3D ultrasound: Initial experiences during surgical resections close to eloquent brain areas and future directions in automatic brain shift compensation of preoperative data. Acta Neurochirurgica, 149(4), pp. 365–378. (link) (bib) x @article{Rasmussen2007, year = { 2007 }, volume = { 149 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Functional neuronavigation combined with intra-operative 3D ultrasound: Initial experiences during surgical resections close to eloquent brain areas and future directions in automatic brain shift compensation of preoperative data }, pages = { 365--378 }, number = { 4 }, keywords = { Brain shift,Diffusion tensor imaging,Functional magnetic resonance imaging,Image guidance,Image registration,Intra-operative 3D ultrasound,Minimally invasive surgery,Multimodal imaging,Neuronavigation }, journal = { Acta Neurochirurgica }, issn = { 00016268 }, doi = { 10.1007/s00701-006-1110-0 }, author = { Rasmussen and Lindseth and Rygh and Berntsen and Selbekk and Xu and {Nagelhus Hernes} and Harg and H{\aa}berg and Unsgaard }, abstract = { Objective. The aims of this study were: 1) To develop protocols for, integration and assessment of the usefulness of high quality fMRI (functional magnetic resonance imaging) and DTI (diffusion tensor imaging) data in an ultrasound-based neuronavigation system. 2) To develop and demonstrate a co-registration method for automatic brain-shift correction of pre-operative MR data using intra-operative 3D ultrasound. Methods. Twelve patients undergoing brain surgery were scanned to obtain structural and fMRI data before the operation. In six of these patients, DTI data was also obtained. 
The preoperative data was imported into a commercial ultrasound-based navigation system and used for surgical planning and guidance. Intra-operative ultrasound volumes were acquired when needed during surgery and the multimodal data was used for guidance and resection control. The use of the available image information during planning and surgery was recorded. An automatic voxel-based registration method between preoperative MRA and intra-operative 3D ultrasound angiography (Power Doppler) was developed and tested postoperatively. Results. The study showed that it is possible to implement robust, high-quality protocols for fMRI and DTI and that the acquired data could be seamlessly integrated in an ultrasound-based neuronavigation system. Navigation based on fMRI data was found to be important for pre-operative planning in all twelve procedures. In five out of eleven cases the data was also found useful during the resection. DTI data was found to be useful for planning in all five cases where these data were imported into the navigation system. In two out of four cases DTI data was also considered important during the resection (in one case DTI data were acquired but not imported and in another case fMRI and DTI data could only be used for planning). Information regarding the location of important functional areas (fMRI) was more beneficial during the planning phase while DTI data was more helpful during the resection. Furthermore, the surgeon found it more user-friendly and efficient to interpret fMRI and DTI information when shown in a navigation system as compared to the traditional display on a light board or monitor. Updating MRI data for brain-shift using automatic co-registration of preoperative MRI with intra-operative ultrasound was feasible. Conclusion. In the present study we have demonstrated how both fMRI and DTI data can be acquired and integrated into a neuronavigation system for improved surgical planning and guidance. 
The surgeons reported that the integration of fMRI and DTI data in the navigation system represented valuable additional information presented in a user-friendly way and functional neuronavigation is now in routine use at our hospital. Furthermore, the present study showed that automatic ultrasound-based updates of important pre-operative MRI data are feasible and hence can be used to compensate for brain shift. {\textcopyright} 2007 Springer-Verlag. }, } |
2007 | Journal | Feng Qiao, Tinsu Pan, John W. Clark, Osama R. Mawlawi (2007). Region of interest motion compensation for PET image reconstruction. Physics in Medicine and Biology, 52(10), pp. NA (link) (bib) x @article{Qiao2007, year = { 2007 }, volume = { 52 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-34248204627{\&}doi=10.1088{\%}2F0031-9155{\%}2F52{\%}2F10{\%}2F003{\&}partnerID=40{\&}md5=deb8145b579395e0ef985aab49663666 }, type = { Journal Article }, title = { Region of interest motion compensation for PET image reconstruction }, number = { 10 }, journal = { Physics in Medicine and Biology }, issn = { 00319155 }, doi = { 10.1088/0031-9155/52/10/003 }, author = { Qiao and Pan and Clark and Mawlawi }, abstract = { A motion-incorporated reconstruction (MIR) method for gated PET imaging has recently been developed by several authors to correct for respiratory motion artifacts in PET imaging. This method however relies on a motion map derived from images (4D PET or 4D CT) of the entire field of view (FOV). In this study we present a region of interest (ROI)-based extension to this method, whereby only the motion map of a user-defined ROI is required and motion incorporation during image reconstruction is solely performed within the ROI. A phantom study and an NCAT computer simulation study were performed to test the feasibility of this method. The phantom study showed that the ROI-based MIR produced results that are within 1.26{\%} of those obtained by the full image-based MIR approach when using the same accurate motion information. The NCAT phantom study on the other hand, further verified that motion of features of interest in an image can be estimated more efficiently and potentially more accurately using the ROI-based approach. A reduction of motion estimation time from 450 s to 30 and 73 s was achieved for two different ROIs respectively. 
In addition, the ROI-based approach showed a reduction in registration error of 43{\%} for one ROI, which effectively reduced quantification bias by 44{\%} and 32{\%} using mean and maximum voxel values, respectively. {\textcopyright} 2007 IOP Publishing Ltd. }, } |
2007 | Journal | Aloys du Bois d'Aische, Mathieu De Craene, Xavier Geets, Vincent Grégoire, Benoit Macq, Simon K. Warfield (2007). Estimation of the deformations induced by articulated bodies: Registration of the spinal column. Biomedical Signal Processing and Control, 2(1), pp. 16–24. (link) (bib) x @article{BoisdAische2007, year = { 2007 }, volume = { 2 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-34248594102{\&}doi=10.1016{\%}2Fj.bspc.2007.03.002{\&}partnerID=40{\&}md5=845c913387e22bc8438d1efee07c729f }, type = { Journal Article }, title = { Estimation of the deformations induced by articulated bodies: Registration of the spinal column }, pages = { 16--24 }, number = { 1 }, keywords = { Articulation,Registration,Vertebrae }, journal = { Biomedical Signal Processing and Control }, issn = { 17468094 }, doi = { 10.1016/j.bspc.2007.03.002 }, author = { Bois d'Aische and {De Craene} and Geets and Gr{\'{e}}goire and Macq and Warfield }, abstract = { We present a new non-rigid registration algorithm estimating the displacement field generated by articulated bodies. Indeed the bony structures between different patient images may rigidly move while other tissues may deform in a more complex way. Our algorithm tracks the displacement induced in the column by a movement of the patient between two acquisitions. The volumetric deformation field in the whole body is then inferred from those displacements using a linear elastic biomechanical finite element model. We demonstrate in this paper that this method provides accurate results on 3D sets of computed tomography (CT), MR and positron emission tomography (PET) images and that the results of the registration algorithm show significant decreases in the mean, min and max errors. {\textcopyright} 2007 Elsevier Ltd. All rights reserved. }, } |
2007 | Journal | Ganesh Adluru, Suyash P. Awate, Tolga Tasdizen, Ross T. Whitaker, Edward V.R. DiBella (2007). Temporally constrained reconstruction of dynamic cardiac perfusion MRI. Magnetic Resonance in Medicine, 57(6), pp. 1027–1036. (link) (bib) x @article{Adluru2007, year = { 2007 }, volume = { 57 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Temporally constrained reconstruction of dynamic cardiac perfusion MRI }, pages = { 1027--1036 }, number = { 6 }, keywords = { Cardiac perfusion,Dynamic contrast-enhanced MR,L-curve,Regularization,Regularization parameter }, journal = { Magnetic Resonance in Medicine }, issn = { 07403194 }, doi = { 10.1002/mrm.21248 }, author = { Adluru and Awate and Tasdizen and Whitaker and DiBella }, abstract = { Dynamic contrast-enhanced (DCE) MRI is a powerful technique to probe an area of interest in the body. Here a temporally constrained reconstruction (TCR) technique that requires less k-space data over time to obtain good-quality reconstructed images is proposed. This approach can be used to improve the spatial or temporal resolution, or increase the coverage of the object of interest. The method jointly reconstructs the space-time data iteratively with a temporal constraint in order to resolve aliasing. The method was implemented and its feasibility tested on DCE myocardial perfusion data with little or no motion. The results obtained from sparse k-space data using the TCR method were compared with results obtained with a sliding-window (SW) method and from full data using the standard inverse Fourier transform (IFT) reconstruction. Acceleration factors of 5 (R = 5) were achieved without a significant loss in image quality. 
Mean improvements of 28 ± 4{\%} in the signal-to-noise ratio (SNR) and 14 ± 4{\%} in the contrast-to-noise ratio (CNR) were observed in the images reconstructed using the TCR method on sparse data (R = 5) compared to the standard IFT reconstructions from full data for the perfusion datasets. The method has the potential to improve dynamic myocardial perfusion imaging and also to reconstruct other sparse dynamic MR acquisitions. {\textcopyright} 2007 Wiley-Liss, Inc. }, } |
2007 | In Conf. Proceedings | Rehan Ali, Mark Gooding, Martin Christlieb, Michael Brady (2007). Phase-based segmentation of cells from brightfield microscopy. In 2007 4th IEEE International Symposium on Biomedical Imaging: From Nano to Macro - Proceedings, pp. 57–60, New York. (link) (bib) x @inproceedings{Ali2007, year = { 2007 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-36348985016{\&}doi=10.1109{\%}2FISBI.2007.356787{\&}partnerID=40{\&}md5=dd565221c35ae17147c212afbaded4f7 {\%}3CGo to }, type = { Conference Proceedings }, title = { Phase-based segmentation of cells from brightfield microscopy }, series = { IEEE International Symposium on Biomedical Imaging }, publisher = { Ieee }, pages = { 57--60 }, keywords = { Biomedical image processing,Image segmentation,Microscopy }, isbn = { 1424406722 }, doi = { 10.1109/ISBI.2007.356787 }, booktitle = { 2007 4th IEEE International Symposium on Biomedical Imaging: From Nano to Macro - Proceedings }, author = { Ali and Gooding and Christlieb and Brady }, address = { New York }, abstract = { Segmentation of transparent cells in brightfield microscopy images could facilitate the quantitative analysis of corresponding fluorescence images. However, this presents a challenge due to irregular morphology and weak intensity variation, particularly in ultra-thin regions. A boundary detection technique is applied to a series of variable focus images whereby a level set contour is initialised on a defocused image with improved intensity contrast, and subsequently evolved towards the correct boundary using images of improving focus. Local phase coherence is used to identify features within the images, driving contour evolution particularly in near-focus images which lack intensity contrast. Preliminary results demonstrate the effectiveness of this approach in segmenting the main cell body regions. {\textcopyright} 2007 IEEE. }, } |
2007 | Technical report | Hans Johnson, Greg Harris, Kent Williams (2007). BRAINSFit: mutual information rigid registrations of whole-brain 3D images, using the insight toolkit. NA 1. NA (link) (bib) x @techreport{Johnson2007, year = { 2007 }, volume = { 57 }, url = { http://hdl.handle.net/1926/1291 papers2://publication/uuid/2965C56B-40A5-4111-B9E3-02B28C019DBF papers2://publication/uuid/858AE055-D019-47FA-BD17-7B6115C6221E papers2://publication/uuid/096DA036-956F-4AB9-8C4C-B0F9CCB9F7EE papers2://publication/uuid/1BA0 }, title = { BRAINSFit: mutual information rigid registrations of whole-brain 3D images, using the insight toolkit }, pages = { 1--10 }, number = { 1 }, keywords = { Mutual Information,Registration }, file = { :Users/johnsonhj/Documents/Mendeley Desktop/Johnson, Harris, Williams/Insight J/Johnson, Harris, Williams - 2007 - BRAINSFit mutual information rigid registrations of whole-brain 3D images, using the insight toolkit.pdf:pdf }, booktitle = { Insight J }, author = { Johnson and Harris and Williams }, annote = { From Duplicate 1 (BRAINSFit: Mutual Information Rigid Registrations of Whole-Brain 3D Images, Using the Insight Toolkit - Johnson, Hans J.; Harris, Gregory; Williams, Kent; Williams, Norman K; Williams, Kent) From Duplicate 1 (BRAINSFit: Mutual Information Rigid Registrations of Whole-Brain 3D Images, Using the Insight Toolkit - Johnson, Hans J.; Harris, Gregory; Williams, Kent) From Duplicate 1 (BRAINSFit: Mutual Information Rigid Registrations of Whole-Brain 3D Images, Using the Insight Toolkit - Johnson, Hans J.; Harris, Gregory; Williams, Kent; Williams, Norman K; Williams, Kent) From Duplicate 1 (BRAINSFit: Mutual Information Rigid Registrations of Whole-Brain 3D Images, Using the Insight Toolkit - Johnson, Hans J.; Harris, Gregory; Williams, Norman K; Williams, Kent) From Duplicate 2 (BRAINSFit - Johnson, Hans J.; Harris, Gregory; Williams, Norman K) From Duplicate 1 (BRAINSFit: Mutual Information Rigid Registrations of Whole-Brain 
3D Images, Using the Insight Toolkit - Johnson, Hans J.; Harris, Gregory; Williams, Norman K) From Duplicate 2 ( BRAINSFit: Mutual Information Rigid Registrations of Whole-Brain 3D Images, Using the Insight Toolkit - Johnson, Hans J. (University of Iowa); Harris, Greg; Williams, Kent ) From Duplicate 2 (BRAINSFit: Mutual Information Rigid Registrations of Whole-Brain 3D Images, Using the Insight Toolkit - Johnson, Hans J; Harris, Gregory; Williams, Kent; Williams, Norman K; Williams, Kent) From Duplicate 1 (BRAINSFit: Mutual Information Rigid Registrations of Whole-Brain 3D Images, Using the Insight Toolkit - Johnson, Hans J.; Harris, Gregory; Williams, Kent; Williams, Norman K; Williams, Kent) From Duplicate 1 (BRAINSFit: Mutual Information Rigid Registrations of Whole-Brain 3D Images, Using the Insight Toolkit - Johnson, Hans J.; Harris, Gregory; Williams, Norman K; Williams, Kent) From Duplicate 2 (BRAINSFit - Johnson, Hans J.; Harris, Gregory; Williams, Norman K) From Duplicate 1 (BRAINSFit: Mutual Information Rigid Registrations of Whole-Brain 3D Images, Using the Insight Toolkit - Johnson, Hans J.; Harris, Gregory; Williams, Norman K) From Duplicate 2 ( BRAINSFit: Mutual Information Rigid Registrations of Whole-Brain 3D Images, Using the Insight Toolkit - Johnson, Hans J. 
(University of Iowa); Harris, Greg; Williams, Kent ) From Duplicate 2 (BRAINSFit: Mutual Information Rigid Registrations of Whole-Brain 3D Images, Using the Insight Toolkit - Johnson, Hans J.; Harris, Gregory; Williams, Kent) From Duplicate 1 (BRAINSFit: Mutual Information Rigid Registrations of Whole-Brain 3D Images, Using the Insight Toolkit - Johnson, Hans J.; Harris, Gregory; Williams, Kent; Williams, Norman K; Williams, Kent) From Duplicate 1 (BRAINSFit: Mutual Information Rigid Registrations of Whole-Brain 3D Images, Using the Insight Toolkit - Johnson, Hans J.; Harris, Gregory; Williams, Norman K; Williams, Kent) From Duplicate 2 (BRAINSFit - Johnson, Hans J.; Harris, Gregory; Williams, Norman K) From Duplicate 1 (BRAINSFit: Mutual Information Rigid Registrations of Whole-Brain 3D Images, Using the Insight Toolkit - Johnson, Hans J.; Harris, Gregory; Williams, Norman K) From Duplicate 2 ( BRAINSFit: Mutual Information Rigid Registrations of Whole-Brain 3D Images, Using the Insight Toolkit - Johnson, Hans J. (University of Iowa); Harris, Greg; Williams, Kent ) }, abstract = { The University of Iowa's Psychiatric Iowa Neuroimaging Consortium (PINC) has developed a program for mutual information registration of BRAINS2 [2] data using ITK [1] classes, called BRAINSFit. We have written a helper class, itk::MultiModal3DMutualRegistrationHelper to simplify im- plementation and testing of different transform representations and optimizers. We have added a trans- form meeting the ITK standard, itk::ScaleVersor3DTransform. BRAINSFit is based on the regis- tration examples from ITK, but adds new features, including the ability to employ different transform representations and optimization functions. Our goal was to determine best practices for registering 3D rigid multimodal MRI of the human brain. A version of the current program is employed here at PINC daily for automated processing of acquired brain images. }, } |
2006 | Journal | Xenophon Papademetris, Alark Joshi (2006). An Introduction to Programming for medical image Analysis with the visualization Toolkit. Yale University, NA pp. 283. (link) (bib) x @article{Papademetris2006, year = { 2006 }, url = { http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.122.7477{\&}rep=rep1{\&}type=pdf }, title = { An Introduction to Programming for medical image Analysis with the visualization Toolkit }, pages = { 283 }, journal = { Yale University }, author = { Papademetris and Joshi }, abstract = { This book is an edited collection of class handouts that was written for the graduate seminar “Programming for Medical Image Analysis” (ENAS 920a). This class was taught at Yale University, Department of Biomedical Engineering, in the Fall of 2006 and again in the Spring 2009 semester. Some the comments in this draft version of the book reflect this fact. For example, see comments beginning “at Yale”. Furthermore, many of the references that will appear in the final version are still omitted. It is made available at this stage in the hope that it will be useful. }, } |
2006 | Journal | Feng Zhuge, Geoffrey D. Rubin, Shaohua Sun, Sandy Napel (2006). An abdominal aortic aneurysm segmentation method: Level set with region and statistical information. Medical Physics, 33(5), pp. 1440–1453. (link) (bib) x @article{Zhuge2006, year = { 2006 }, volume = { 33 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-33646440020{\&}doi=10.1118{\%}2F1.2193247{\&}partnerID=40{\&}md5=b99c7457d5ae6ae677679f84b3f61548 }, type = { Journal Article }, title = { An abdominal aortic aneurysm segmentation method: Level set with region and statistical information }, pages = { 1440--1453 }, number = { 5 }, keywords = { Abdominal aortic aneurysm,CT angiography,Deformable model }, journal = { Medical Physics }, issn = { 00942405 }, doi = { 10.1118/1.2193247 }, author = { Zhuge and Rubin and Sun and Napel }, abstract = { We present a system for segmenting the human aortic aneurysm in CT angiograms (CTA), which, in turn, allows measurements of volume and morphological aspects useful for treatment planning. The system estimates a rough "initial surface," and then refines it using a level set segmentation scheme augmented with two external analyzers: The global region analyzer, which incorporates a priori knowledge of the intensity, volume, and shape of the aorta and other structures, and the local feature analyzer, which uses voxel location, intensity, and texture features to train and drive a support vector machine classifier. Each analyzer outputs a value that corresponds to the likelihood that a given voxel is part of the aneurysm, which is used during level set iteration to control the evolution of the surface. We tested our system using a database of 20 CTA scans of patients with aortic aneurysms. 
The mean and worst case values of volume overlap, volume error, mean distance error, and maximum distance error relative to human tracing were 95.3{\%}±1.4{\%} (s.d.); worst case=92.9{\%}, 3.5{\%}±2.5{\%} (s.d.); worst case=7.0{\%}, 0.6±0.2 mm (s.d.); worst case=1.0 mm, and 5.2±2.3 mm (s.d.); worst case=9.6 mm, respectively. When implemented on a 2.8 GHz Pentium IV personal computer, the mean time required for segmentation was 7.4±3.6 min (s.d.). We also performed experiments that suggest that our method is insensitive to parameter changes within 10{\%} of their experimentally determined values. This preliminary study proves feasibility for an accurate, precise, and robust system for segmentation of the abdominal aneurysm from CTA data, and may be of benefit to patients with aortic aneurysms. {\textcopyright} 2006 American Association of Physicists in Medicine. }, } |
2006 | Journal | Fang Fang Yin, Shiva Das, John Kirkpatrick, Mark Oldham, Zhiheng Wang, Su Min Zhou (2006). Physics and imaging for targeting of oligometastases. Seminars in Radiation Oncology, 16(2), pp. 85–101. (link) (bib) x @article{Yin2006, year = { 2006 }, volume = { 16 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-33645088409{\&}doi=10.1016{\%}2Fj.semradonc.2005.12.004{\&}partnerID=40{\&}md5=1274c24db6658fb341f2b0b1f475f7ce }, type = { Journal Article }, title = { Physics and imaging for targeting of oligometastases }, pages = { 85--101 }, number = { 2 }, journal = { Seminars in Radiation Oncology }, issn = { 10534296 }, doi = { 10.1016/j.semradonc.2005.12.004 }, author = { Yin and Das and Kirkpatrick and Oldham and Wang and Zhou }, abstract = { Oligometastases refer to metastases that are limited in number and location and are amenable to regional treatment. The majority of these metastases appear in the brain, lung, liver, and bone. Although the focus of interest in the past within radiation oncology has been on the treatment of intracranial metastases, there has been growing interest in extracranial sites such as the liver and lung. This is largely because of the rapid development of targeting techniques for oligometastases such as intensity-modulated and image-guided radiation therapy, which has made it possible to deliver single or a few fractions of high-dose radiation treatments, highly conformal to the target. The clinical decision to use radiation to treat oligometastases is based on both radiobiological and physics considerations. The radiobiological considerations involve improvement of treatment schema for time, dose, and volume. Areas of interests are hypofractionation, tumor and normal tissue tolerance, and hypoxia. The physics considerations for oligometastases treatment are focused mainly on ensuring treatment accuracy and precision. 
This article discusses the physics and imaging aspects involved in each step of the radiation treatment process for oligometastases, including target definition, treatment simulation, treatment planning, pretreatment target localization, radiation delivery, treatment verification, and treatment evaluation. {\textcopyright} 2006 Elsevier Inc. All rights reserved. }, } |
2006 | Journal | Stephan Rupp, Volker Daum (2006). Design and implementation aspects for plugin-base software frameworks. WSEAS Transactions on Computers, 5(2), pp. 425–432. (link) (bib) x @article{Rupp2006, year = { 2006 }, volume = { 5 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-33645140919{\&}partnerID=40{\&}md5=8bdab8bf2d3ec049f4709850ec61d0c4 }, type = { Journal Article }, title = { Design and implementation aspects for plugin-base software frameworks }, pages = { 425--432 }, number = { 2 }, keywords = { Plugin-based software framework,Reflexion mechanism,Software components }, journal = { WSEAS Transactions on Computers }, issn = { 11092750 }, author = { Rupp and Daum }, abstract = { Applications of computer vision and signal processing are often based on a set of basic and commonly accepted ideas and algorithms. Thus, when developing new approaches, reuse plays a decisive role. Unfortunately, scientists are rarely familiar with the powerful concepts that the software engineering community provides in order to develop reusable software nor do they have the appropriate experience to apply these existing techniques right. For this reason, we present fundamental design and implementation aspects of component-based software frameworks, which should help to develop component-based software frameworks for a given application field. }, } |
2006 | Journal | Feng Qiao, Tinsu Pan, John W. Clark, Osama R. Mawlawi (2006). A motion-incorporated reconstruction method for gated PET studies. Physics in Medicine and Biology, 51(15), pp. 3769–3783. (link) (bib) x @article{Qiao2006, year = { 2006 }, volume = { 51 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-33746512521{\&}doi=10.1088{\%}2F0031-9155{\%}2F51{\%}2F15{\%}2F012{\&}partnerID=40{\&}md5=85744da7c87fdff0c273cb687cee3723 }, type = { Journal Article }, title = { A motion-incorporated reconstruction method for gated PET studies }, pages = { 3769--3783 }, number = { 15 }, journal = { Physics in Medicine and Biology }, issn = { 00319155 }, doi = { 10.1088/0031-9155/51/15/012 }, author = { Qiao and Pan and Clark and Mawlawi }, abstract = { Cardiac and respiratory motion artefacts in PET imaging have been traditionally resolved by acquiring the data in gated mode. However, gated PET images are usually characterized by high noise content due to their low photon statistics. In this paper, we present a novel 4D model for the PET imaging system, which can incorporate motion information to generate a motion-free image with all acquired data. A computer simulation and a phantom study were conducted to test the performance of this approach. The computer simulation was based on a digital phantom that was continuously scaled during data acquisition. The phantom study, on the other hand, used two spheres in a tank of water, all of which were filled with 18F water. One of the spheres was stationary while the other moved in a sinusoidal fashion to simulate tumour motion in the thorax. Data were acquired using both 4D CT and gated PET. Motion information was derived from the 4D CT images and then used in the 4D PET model. Both studies showed that this 4D PET model had a good motion-compensating capability. 
In the phantom study, this approach reduced quantification error of the radioactivity concentration by 95{\%} when compared to a corresponding static acquisition, while signal-to-noise ratio was improved by 210{\%} when compared to a corresponding gated image. {\textcopyright} 2006 IOP Publishing Ltd. }, } |
2006 | Journal | Tao Ju, Joe Warren, James Carson, Musodiq Bello, Ioannis Kakadiaris, Wah Chiu, Christina Thaller, Gregor Eichele (2006). 3D volume reconstruction of a mouse brain from histological sections using warp filtering. Journal of Neuroscience Methods, 156(1-2), pp. 84–100. (link) (bib) x @article{Ju2006, year = { 2006 }, volume = { 156 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-33747181686{\&}doi=10.1016{\%}2Fj.jneumeth.2006.02.020{\&}partnerID=40{\&}md5=a4e90f7b3d3b71adcc36d27a722e9843 }, type = { Journal Article }, title = { 3D volume reconstruction of a mouse brain from histological sections using warp filtering }, pages = { 84--100 }, number = { 1-2 }, keywords = { 3D reconstruction,Dynamic programming,Filtering,Histology,Image warping }, journal = { Journal of Neuroscience Methods }, issn = { 01650270 }, doi = { 10.1016/j.jneumeth.2006.02.020 }, author = { Ju and Warren and Carson and Bello and Kakadiaris and Chiu and Thaller and Eichele }, abstract = { Sectioning tissues for optical microscopy often introduces upon the resulting sections distortions that make 3D reconstruction difficult. Here we present an automatic method for producing a smooth 3D volume from distorted 2D sections in the absence of any undistorted references. The method is based on pairwise elastic image warps between successive tissue sections, which can be computed by 2D image registration. Using a Gaussian filter, an average warp is computed for each section from the pairwise warps in a group of its neighboring sections. The average warps deform each section to match its neighboring sections, thus creating a smooth volume where corresponding features on successive sections lie close to each other. The proposed method can be used with any existing 2D image registration method for 3D reconstruction. 
In particular, we present a novel image warping algorithm based on dynamic programming that extends Dynamic Time Warping in 1D speech recognition to compute pairwise warps between high-resolution 2D images. The warping algorithm efficiently computes a restricted class of 2D local deformations that are characteristic between successive tissue sections. Finally, a validation framework is proposed and applied to evaluate the quality of reconstruction using both real sections and a synthetic volume. {\textcopyright} 2006 Elsevier B.V. All rights reserved. }, } |
2006 | Journal | András Hajdu, János Kormos, Zsolt Lencse, Lajos Trón, Miklós Emri (2006). The "MEDIP - Platform independent software system for medical image processing" project. Journal of Universal Computer Science, 12(9), pp. 1229–1239. (link) (bib) x @article{Hajdu2006, year = { 2006 }, volume = { 12 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { The "MEDIP - Platform independent software system for medical image processing" project }, pages = { 1229--1239 }, number = { 9 }, keywords = { Medical image processing,Multimodal image analysis,Surface rendering,Virtual surgery,Visualization,Volume rendering }, journal = { Journal of Universal Computer Science }, issn = { 0958695X }, doi = { 10.3217/jucs-012-09-1229 }, author = { Hajdu and Kormos and Lencse and Tr{\'{o}}n and Emri }, abstract = { In this paper we present the structure and the achieved results of the R{\&}D project IKTA-4, 6/2001 "MEDIP - Platform independent software system for medical image processing" supported by the Hungarian Ministry of Education. The aim of the project was to develop a software background for our basic and applied research in the field of medical imaging that can be used in clinical routine, as well. Realization was based on the experience of information technology and medical imaging research university teams and a company specialized on software and hardware developing for nuclear medicine. The aims also reflect some former research and development activities of the participants. Thus some of them are well experienced in registration, segmentation and image fusion techniques. These experiences were also considered in the determination of the main purposes. The capabilities of the provided software library were demonstrated through test applications from the fields of orthopedics, oncology and nuclear medicine. {\textcopyright} J.UCS. }, } |
2006 | Journal | Steven K. Boyd, Stephan Moser, Michael Kuhn, Robert J. Klinck, Peter L. Krauze, Ralph Müller, Jürg A. Gasser (2006). Evaluation of three-dimensional image registration methodologies for in vivo micro-computed tomography. Annals of Biomedical Engineering, 34(10), pp. 1587–1599. (link) (bib) x @article{Boyd2006, year = { 2006 }, volume = { 34 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Evaluation of three-dimensional image registration methodologies for in vivo micro-computed tomography }, pages = { 1587--1599 }, number = { 10 }, keywords = { Image registration,Micro-computed tomography,Osteoporosis,Rat models,Tibial bone micro-structure }, journal = { Annals of Biomedical Engineering }, issn = { 00906964 }, doi = { 10.1007/s10439-006-9168-7 }, author = { Boyd and Moser and Kuhn and Klinck and Krauze and M{\"{u}}ller and Gasser }, abstract = { The advent of in vivo micro-computed tomography (micro-CT) provides a novel approach to measure the temporal adaptation of bone micro-architecture within an individual. Spatial alignment in the scanner between serial scans is challenging, but three-dimensional image registration can be used to superimpose the resulting image data, thus ensuring consistent regions of interest (ROI) for analysis. There have been several approaches to image registration developed, yet little is known about their application to high resolution micro-CT data. The purpose of this study was to explore combinations of three image registration similarity measures and three image interpolators, in addition to multiresolution registration configurations, for assessment of computational efficiency and accuracy on both in vitro and in vivo micro-CT data. Accuracy measures were assessed by comparison with a gold-standard reference transform based on attached fiducial markers. 
It was concluded that a mutual information registration similarity measure with a linear image interpolator, applied at steps of increasing image resolution, provided the best compromise between accurate and efficient results. In vivo registration of tibial bone microstructure measured in an ovariectomized rat model provided consistent ROI thus demonstrating the usefulness of three-dimensional image registration for in vivo experimental and clinical micro-CT research. It is a technique that is poised to become commonly utilized for analysis of micro-CT data to diagnose and monitor efficacy of therapy in bone diseases. {\textcopyright} 2006 Biomedical Engineering Society. }, } |
2006 | Journal | Minjie Wu, Owen Carmichael, Pilar Lopez-Garcia, Cameron S. Carter, Howard J. Aizenstein (2006). Quantitative comparison of AIR, SPM, and the fully deformable model for atlas-based segmentation of functional and structural MR images. Human Brain Mapping, 27(9), pp. 747–754. (link) (bib) x @article{RN784, year = { 2006 }, volume = { 27 }, url = { https://onlinelibrary.wiley.com/doi/abs/10.1002/hbm.20216 }, type = { Journal Article }, title = { Quantitative comparison of AIR, SPM, and the fully deformable model for atlas-based segmentation of functional and structural MR images }, pages = { 747--754 }, number = { 9 }, keywords = { Atlas-based segmentation,Deformable model,Image registration,fMRI }, journal = { Human Brain Mapping }, issn = { 10659471 }, doi = { 10.1002/hbm.20216 }, author = { Wu and Carmichael and Lopez-Garcia and Carter and Aizenstein }, abstract = { Typical packages used for coregistration in functional image analyses include automated image registration (AIR) and statistical parametric mapping (SPM). However, both methods have limited-dimension deformation models. A fully deformable model, which combines the piecewise linear registration for coarse alignment with demons algorithm for voxel-level refinement, allows a higher degree of spatial deformation. This leads to a more accurate colocalization of the functional signal from different subjects and therefore can produce a more reliable group average signal. We quantitatively compared the performance of the three different registration approaches through a series of experiments and we found that the fully deformable model consistently produces a more accurate structural segmentation and a more reliable functional signal colocalization than does AIR or SPM. {\textcopyright} 2006 Wiley-Liss, Inc. }, } |
2006 | In Conf. Proceedings | Chia Ling Tsai, Charles V. Stewart, Amitha Perera, Ying Lin Lee, Gehua Yang, Michal Sofka (2006). A correspondence-based software toolkit for image registration. In Conference Proceedings - IEEE International Conference on Systems, Man and Cybernetics, pp. 3972–3977. (bib) x @inproceedings{Tsai2006, year = { 2006 }, volume = { 5 }, title = { A correspondence-based software toolkit for image registration }, pages = { 3972--3977 }, issn = { 1062922X }, isbn = { 1424401003 }, doi = { 10.1109/ICSMC.2006.384753 }, booktitle = { Conference Proceedings - IEEE International Conference on Systems, Man and Cybernetics }, author = { Tsai and Stewart and Perera and Lee and Yang and Sofka }, abstract = { This paper presents a correspondence-based toolkit for image registration. Written in C++, the toolkit complements the capabilities of the Insight Toolkit (ITK). Major components include features, feature sets, match generators, error scale estimators, robust transformation estimators, and convergence testers, all combined and controlled by several different registration engines. Correspondence-based algorithms which can be implemented using the toolkit extend from ICP to hybrids of intensity-based and feature-based registration. The toolkit is being used both as an education tool and the foundation for developing new algorithms. {\textcopyright} 2006 IEEE. }, } |
2006 | In Conf. Proceedings | Henning Müller, Joris Heuberger, Adrien Depeursinge, Antoine Geissbühler (2006). Automated object extraction for medical image retrieval using the Insight Toolkit (ITK). In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 476–488. (bib) x @inproceedings{Muller2006, year = { 2006 }, volume = { 4182 LNCS }, title = { Automated object extraction for medical image retrieval using the Insight Toolkit (ITK) }, pages = { 476--488 }, issn = { 16113349 }, isbn = { 3540457801 }, doi = { 10.1007/11880592_36 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { M{\"{u}}ller and Heuberger and Depeursinge and Geissb{\"{u}}hler }, abstract = { Visual information retrieval is an emerging domain in the medical field as it has been in computer vision for more than ten years. It has the potential to help better managing the rising amount of visual medical data. One of the most frequent application fields for content-based medical image retrieval (CBIR) is diagnostic aid. By submitting an image showing a certain pathology to a CBIR system, the medical expert can easily find similar cases. A major problem is the background surrounding the object in many medical images. System parameters of the imaging modalities are stored around the images in text as well as patient name or a logo of the institution. With such noisy input data, image retrieval often rather finds images where the object appears in the same area and is surrounded by similar structures. Whereas in specialised application domains, segmentation can focus the research on a particular area, PACS-like (Picture Archiving and Communication System) databases containing a large variety of images need a more general approach. 
This article describes an algorithm to extract the important object of the image to reduce the amount of data to be analysed for CBIR and focuses analysis to the important object. Most current solutions index the entire image without making a difference between object and background when using varied PACS-like databases or radiology teaching files. Our requirement is to have a fully automatic algorithm for object extraction. Medical images have the advantage to normally have one particular object more or less in the centre of the image. The database used for evaluating this task is taken from a radiology teaching file called casimage and the retrieval component is an open source retrieval engine called medGIFT. {\textcopyright} Springer-Verlag Berlin Heidelberg 2006. }, } |
2006 | In Conf. Proceedings | Ganesh Adluru, Edward V.R. Dibella, Ross T. Whitaker (2006). Automatic segmentation of cardiac short axis slices in perfusion MRI. In 2006 3rd IEEE International Symposium on Biomedical Imaging: From Nano to Macro - Proceedings, pp. 133–136, New York. (link) (bib) x @inproceedings{Adluru2006, year = { 2006 }, volume = { 2006 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-33750957116{\&}partnerID=40{\&}md5=94b184992f39bcb89fce66f4291f1585 {\%}3CGo to }, type = { Conference Proceedings }, title = { Automatic segmentation of cardiac short axis slices in perfusion MRI }, series = { IEEE International Symposium on Biomedical Imaging }, publisher = { IEEE }, pages = { 133--136 }, isbn = { 0780395778 }, doi = { 10.1109/isbi.2006.1624870 }, booktitle = { 2006 3rd IEEE International Symposium on Biomedical Imaging: From Nano to Macro - Proceedings }, author = { Adluru and Dibella and Whitaker }, address = { New York }, abstract = { Segmentation of the myocardium in dynamic contrast enhanced MR short axis images is an important step towards the estimation of semi-quantitative or quantitative parameters to determine the perfusion to the tissue regions. The perfusion indices of the tissue are obtained by dividing the tissue into regions of interest and estimating perfusion to each region. A fast automatic segmentation method based on level sets has been developed that makes use of the spatial and temporal information available in the dynamic images. The algorithm is validated on cardiac data qualitatively and quantitatively by comparing against regional flow indices from manually segmented tissue regions. {\textcopyright} 2006 IEEE. }, } |
2006 | In Conf. Proceedings | Kevin Stevenson, Mark Schweitzer, Ghassan Hamarneh (2006). Multi-angle deformation analysis of Hoffa's fat pad. In Medical Imaging 2006: Physiology, Function, and Structure from Medical Images, pp. 614329. (link) (bib) x @inproceedings{Stevensona, year = { 2006 }, volume = { 6143 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-33745353456{\&}doi=10.1117{\%}2F12.654301{\&}partnerID=40{\&}md5=595f4e12f1f8758a3702a5ddb70cd256 }, type = { Conference Proceedings }, title = { Multi-angle deformation analysis of Hoffa's fat pad }, pages = { 614329 }, issn = { 16057422 }, isbn = { 0819461865 }, doi = { 10.1117/12.654301 }, booktitle = { Medical Imaging 2006: Physiology, Function, and Structure from Medical Images }, author = { Stevenson and Schweitzer and Hamarneh }, abstract = { Recent advances in medical research hypothesize that certain body fat, in addition to having a classical role of energy storage, may also have mechanical function. In particular, we analyzed the infrapatellar fat pad of Hoffa using 3D CT images of the knee at multiple angles to determine how the fat pad changes shape as the knee bends and whether the fat pad provides cushioning in the knee joint. The images were initially processed using a median filter then segmented using a region growing technique to isolate the fat pad from the rest of the knee. Next, rigid registration was performed to align the series of images to match the reference image. Finally, multi-resolution FEM registration was completed between the aligned images. The resulting displacements fields were used to determine the local volume change of the fat pad as the knee bends from extension to flexion through different angles. This multi-angle analysis provides a finer description of the intermediate deformations compared to earlier work, where only a pair of images (full extension and flexion) was analyzed. }, } |
2006 | In Conf. Proceedings | Jan Schreiber, Rainer Schubert, Volker Kuhn (2006). Femur detection in radiographs using template-based registration. In Informatik aktuell, pp. 111–115. (link) (bib) x @inproceedings{Schreiber, year = { 2006 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-77949583057{\&}partnerID=40{\&}md5=71c5eb7ea5d62e5a2e9b0450a8d52b56 }, type = { Conference Proceedings }, title = { Femur detection in radiographs using template-based registration }, pages = { 111--115 }, issn = { 1431472X }, isbn = { 9783540321361 }, doi = { 10.1007/3-540-32137-3_23 }, booktitle = { Informatik aktuell }, author = { Schreiber and Schubert and Kuhn }, abstract = { This article describes a method for the automatic detection of the proximal femur in radiographs using a template-based mutual information registration method. It will be part of a planned, larger system for automated estimation of osteoporosis in the femoral neck. Our multi-step optimization process achieves a successful registration rate of 70{\%} to 95{\%}. }, } |
2006 | In Conf. Proceedings | Chris R. Johnson, David M. Weinstein (2006). Biomedical computing and visualization. In Conferences in Research and Practice in Information Technology Series, pp. 3–10. (link) (bib) x @inproceedings{Johnson, year = { 2006 }, volume = { 48 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-84868696757{\&}partnerID=40{\&}md5=4571d3f07f7e58d431d9e91206610690 }, type = { Conference Proceedings }, title = { Biomedical computing and visualization }, pages = { 3--10 }, keywords = { Biomedical computing,Imaging,Problem solving environment,Visualization }, issn = { 14451336 }, isbn = { 1920682309 }, doi = { 10.1109/pact.2003.1238017 }, booktitle = { Conferences in Research and Practice in Information Technology Series }, author = { Johnson and Weinstein }, abstract = { Computers have changed the way we live, work, and even recreate. Now, they are transforming how we think about and treat human disease. In particular, advanced techniques in biomedical computing, imaging, and visualization are changing the face of biology and medicine in both research and clinical practice. The goals of biomedical computing, imaging and visualization are multifaceted. While some images and visualizations facilitate diagnosis, others help physicians plan surgery. Biomedical simulations can help to acquire a better understanding of human physiology. Still other biomedical computing and visualization techniques are used for medical training. Within biomedical research, new computational technologies allow us to "see" into and understand our bodies with unprecedented depth and detail. As a result of these advances, biomedical computing and visualization will help produce exciting new biomedical scientific discoveries and clinical treatments. In this paper, we give an overview of the computational science pipeline for an application in neuroscience and present associated research results in medical imaging, modeling, simulation, and visualization. 
Copyright {\textcopyright} 2006, Australian Computer Society, Inc. }, } |
2005 | Book | Luis Ibanez, Will Schroeder, Lydia Ng, Josh Cates (2005). The ITK Software Guide, 2nd Edition, NA 2005, ISBN: 00145793. (bib) x @book{Ibanez2005a, year = { 2005 }, title = { The ITK Software Guide, 2nd Edition }, keywords = { ITK,Software }, issn = { 00145793 }, doi = { 10.1016/S0014-5793(02)03066-1 }, booktitle = { Insight Software Consortium }, author = { Ibanez and Schroeder and Ng and Cates }, abstract = { The Insight Toolkit (ITK) is an open-source software toolkit for performing registration and segmentation. Segmentation is the process of identifying and classifying data found in a digi- tally sampled representation. Typically the sampled representation is an image acquired from such medical instrumentation as CT or MRI scanners. Registration is the task ofaligning or de- veloping correspondences between data. For example, in the medical environment, a CT scan may be aligned with a MRI scan in order to combine the information contained in both. ITK is implemented in C++. It is cross-platform, using a build environment known as CMake to manage the compilation process in a platform-independent way. In addition, an automated wrapping process (Cable) generates interfaces between C++ and interpreted programming lan- guages such as Tcl, Java, and Python. This enables developers to create software using a variety of programming languages. ITK's C++ implementation style is referred to as generic program- ming, which is to say that it uses templates so that the same code can be applied generically to any class or type that happens to support the operations used. Such C++ templating means that the code is highly efficient, and that many software problems are discovered at compile-time, rather than at run-time during program execution. Because ITK is an open-source project, developers from around the world can use, debug, main- tain, and extend the software. ITK uses a model ofsoftware development referred to as Extreme Programming. 
Extreme Programming collapses the usual software creation methodology into a simultaneous and iterative process of design-implement-test-release. The key features ofEx- treme Programming are communication and testing. Communication among the members of the ITK community is what helps manage the rapid evolution of the software. Testing is what keeps the software stable. In ITK, an extensive testing process (using a system known as Dart) is in place that measures the quality on a daily basis. The ITK Testing Dashboard is posted continuously, reflecting the quality of the software at any moment. This book is a guide to using and developing with ITK. The sample code in the directory pro- vides a companion to the material presented here. The most recent version of this document is available online at http://www.itk.org/ItkSoftwareGuide.pdf. }, } |
2005 | Journal | Damion Shelton, George Stetten, Stephen Aylward, Luis Ibáñez, Aaron Cois, Charles Stewart (2005). Teaching medical image analysis with the Insight Toolkit. Medical Image Analysis, 9(6), pp. 605–611. (bib) x @article{Shelton2005, year = { 2005 }, volume = { 9 }, title = { Teaching medical image analysis with the Insight Toolkit }, pages = { 605--611 }, number = { 6 }, keywords = { Image analysis,Insight Toolkit,Teaching }, journal = { Medical Image Analysis }, issn = { 13618415 }, doi = { 10.1016/j.media.2005.04.011 }, author = { Shelton and Stetten and Aylward and Ib{\'{a}}{\~{n}}ez and Cois and Stewart }, abstract = { We present several case studies which examine the role that the Insight Toolkit (ITK) played in three medical image analysis courses and several conference tutorials. These courses represent the first use of ITK in a teaching environment, and we believe that a discussion of the teaching approach in each case and the benefits and challenges of ITK will be useful to future medical image analysis course development. ITK was found to provide significant value in a classroom setting since it provides both working "canned" algorithms, including some recently developed methods that are unavailable elsewhere, as well as a framework for developing new techniques and applications. Several areas of difficulty, particularly in regards to code complexity and advanced object-oriented design techniques, have been identified which may make the learning curve of ITK somewhat more complex than a language such as Matlab™. {\textcopyright} 2005 Elsevier B.V. All rights reserved. }, } |
2005 | Journal | David Wang, Wilson Chang, George Stetten (2005). Real-time ultrasound image analysis for the insight toolkit. Proc MICCAI, NA pp. 1–5. (link) (bib) x @article{Wang2005a, year = { 2005 }, url = { http://insight-journal.org/dspace/handle/1926/43 }, title = { Real-time ultrasound image analysis for the insight toolkit }, pages = { 1--5 }, journal = { Proc MICCAI }, author = { Wang and Chang and Stetten }, abstract = { We have successfully created a software environment in which ultrasound data can be manipulated by, ITK (the Insight Tool-Kit), in real-time. We were able to access each frame generated within the resident computer of a TerasonTM Ultrasound Machine, convert it into the ITK image format, and demonstrate the concurrent operation of ITK on the same computer by writing the images to an external hard drive. At a rate of 10 frames per second, 512 by 512 pixel grayscale frames were written by ITK methods to the external hard drive through USB 2.0 while the ultrasound scan was occurring without thrashing or delay in system performance. This simple exercise demonstrates the potential of ITK in processing ultrasound images in real-time in addition to the more traditional off-line processing. }, } |
2005 | Journal | David Wang, Wilson Chang, George Stetten (2005). Real-time ultrasound image analysis for the insight toolkit. Proc MICCAI, NA pp. 1–5. (link) (bib) x @article{Wang2005, year = { 2005 }, url = { http://insight-journal.org/dspace/handle/1926/43 }, title = { Real-time ultrasound image analysis for the insight toolkit }, pages = { 1--5 }, journal = { Proc MICCAI }, author = { Wang and Chang and Stetten }, abstract = { We have successfully created a software environment in which ultrasound data can be manipulated by, ITK (the Insight Tool-Kit), in real-time. We were able to access each frame generated within the resident computer of a TerasonTM Ultrasound Machine, convert it into the ITK image format, and demonstrate the concurrent operation of ITK on the same computer by writing the images to an external hard drive. At a rate of 10 frames per second, 512 by 512 pixel grayscale frames were written by ITK methods to the external hard drive through USB 2.0 while the ultrasound scan was occurring without thrashing or delay in system performance. This simple exercise demonstrates the potential of ITK in processing ultrasound images in real-time in addition to the more traditional off-line processing. }, } |
2005 | Journal | Jiro Nagatomi, K. Khashayar Toosi, Jonathan S. Grashow, Michael B. Chancellor, Michael S. Sacks (2005). Quantification of bladder smooth muscle orientation in normal and spinal cord injured rats. Annals of Biomedical Engineering, 33(8), pp. 1078–1089. (link) (bib) x @article{Nagatomi2005, year = { 2005 }, volume = { 33 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-23844453003{\&}doi=10.1007{\%}2Fs10439-005-5776-x{\&}partnerID=40{\&}md5=c94d2c670513b414f8e81daf45d810d7 }, type = { Journal Article }, title = { Quantification of bladder smooth muscle orientation in normal and spinal cord injured rats }, pages = { 1078--1089 }, number = { 8 }, keywords = { Histomorphometery,Image analysis,Mechanical anisotropy,Smooth muscle }, journal = { Annals of Biomedical Engineering }, issn = { 00906964 }, doi = { 10.1007/s10439-005-5776-x }, author = { Nagatomi and Toosi and Grashow and Chancellor and Sacks }, abstract = { Spinal cord injuries (SCI) often lead to severe bladder dysfunctions. Our previous studies have demonstrated that following SCI, rat bladder wall tissue became hypertrophied, significantly more compliant, and changed its mechanical behavior from orthotropic to isotropic. In order to elucidate the link between the tissue microstructure and mechanical properties of the wall, we have developed a novel semi-automated image analysis method to quantify smooth muscle bundle orientation and mass fraction in the bladder wall tissues from normal and 10 day-post-SCI rats. Results of the present study revealed that there were significant (p {\textless} 0.05) increases in smooth muscle area fractions as well as significantly (p {\textless} 0.001) fewer cell nuclei per muscle area in the SCI groups compared to the normal groups. 
Furthermore, while the normal rat bladders exhibited predominant smooth muscle orientation only in the longitudinal direction, the SCI rat bladders exhibited smooth muscles oriented in both the circumferential and longitudinal directions. These results provide first evidence that bladder smooth muscle cells exhibit hypertrophy rather than hyperplasia and developed a second, orthogonal orientation of smooth muscle bundles following SCI. The results of the present study corroborate our previous mechanical anisotropy data and provide the basis for development of structure-based constitutive models for urinary bladder wall tissue. {\textcopyright} 2005 Biomedical Engineering Society. }, } |
2005 | Journal | Aloys du Bois d'Aische, Mathieu De Craene, Xavier Geets, Vincent Gregoire, Benoit Macq, Simon K. Warfield (2005). Efficient multi-modal dense field non-rigid registration: Alignment of histological and section images. Medical Image Analysis, 9(6), pp. 538–546. (link) (bib) x @article{2005, year = { 2005 }, volume = { 9 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Efficient multi-modal dense field non-rigid registration: Alignment of histological and section images }, pages = { 538--546 }, number = { 6 }, keywords = { Elastic regularization,ITK,Laryngectomy,Mutual information,Non-rigid registration }, journal = { Medical Image Analysis }, issn = { 13618415 }, doi = { 10.1016/j.media.2005.04.003 }, author = { Bois d'Aische and Craene and Geets and Gregoire and Macq and Warfield }, abstract = { We describe a new algorithm for non-rigid registration capable of estimating a constrained dense displacement field from multi-modal image data. We applied this algorithm to capture non-rigid deformation between digital images of histological slides and digital flat-bed scanned images of cryotomed sections of the larynx, and carried out validation experiments to measure the effectiveness of the algorithm. The implementation was carried out by extending the open-source Insight ToolKit software. In diagnostic imaging of cancer of the larynx, imaging modalities sensitive to both anatomy (such as MRI and CT) and function (PET) are valuable. However, these modalities differ in their capability to discriminate the margins of tumor. Gold standard tumor margins can be obtained from histological images from cryotomed sections of the larynx. Unfortunately, the process of freezing, fixation, cryotoming and staining the tissue to create histological images introduces non-rigid deformations and significantcontrast changes. 
We demonstrate that the non-rigid registration algorithm we present is able to capture these deformations and the algorithm allows us to align histological images with scanned images of the larynx. Our non-rigid registration algorithm constructs a deformation field to warp one image onto another. The algorithm measures image similarity using a mutual information similarity criterion, and avoids spurious deformations due to noise by constraining the estimated deformation field with a linear elastic regularization term. The finite element method is used to represent the deformation field, and our implementation enables us to assign inhomogeneous material characteristics so that hard regions resist internal deformation whereas soft regions are more pliant. A gradient descent optimization strategy is used and this has enabled rapid and accurate convergence to the desired estimate of the deformation field. A further acceleration in speed without cost of accuracy is achieved by using an adaptive mesh refinement strategy. {\textcopyright} 2005 Elsevier B.V. All rights reserved. }, } |
2005 | Journal | J. Chabriais (2005). Informatique et imagerie médicale: Les tendances décelables à Chicago en 2004. Journal de Radiologie, 86(7-8), pp. 864–867. (link) (bib) x @article{Chabriais2005, year = { 2005 }, volume = { 86 }, url = { {\%}3CGo to }, type = { Journal Article }, title = { Informatique et imagerie m{\'{e}}dicale: Les tendances d{\'{e}}celables {\`{a}} Chicago en 2004 }, pages = { 864--867 }, number = { 7-8 }, journal = { Journal de Radiologie }, issn = { 02210363 }, doi = { 10.1016/s0221-0363(05)81460-x }, author = { Chabriais }, } |
2005 | In Collection | Marcel Jackowski, Ardeshir Goshtasby (2005). A computer-aided design system for revision of segmentation errors. In Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), pp. 717–724. (link) (bib) x @incollection{Jackowski2005, year = { 2005 }, volume = { 3750 LNCS }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-33744812176{\&}doi=10.1007{\%}2F11566489{\_}88{\&}partnerID=40{\&}md5=1a20beb704d367c3bb74ae1934b0a84d }, type = { Serial }, title = { A computer-aided design system for revision of segmentation errors }, pages = { 717--724 }, issn = { 03029743 }, isbn = { 3540293264 }, doi = { 10.1007/11566489_88 }, booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, author = { Jackowski and Goshtasby }, abstract = { Automatic image segmentation methods often involve errors, requiring the assistance of the user to correct them. In this paper, a computer-aided design system is introduced for correcting such errors. The proposed system approximates each 3-D region by a parametric surface. Region voxels are first parametrized spherically using a coarseto-fine subdivision method. By using the voxel positions and their parameter coordinates, control points of a rational Gaussian surface are determined through a least-squares method to approximate the region. Finally, this surface is overlaid with the volumetric image and by locally pulling or pushing it with the mouse while viewing image information, the surface is revised as needed. Typically, a few minutes are sufficient to correct errors in a region. {\textcopyright} Springer-Verlag Berlin Heidelberg 2005. }, } |
2005 | In Conf. Proceedings | Sumit K. Shah, Michael F. McNitt-Gray, Iva Petkovska, Hyun Jun Kim, Kheshini R. DeZoysa, Jonathan G. Goldin, Robert D. Suh, Denise R. Aberle (2005). Solitary pulmonary nodule characterization on CT by use of contrast enhancement maps. In Medical Imaging 2005: Image Processing, pp. 1950. (link) (bib) x @inproceedings{Shah, year = { 2005 }, volume = { 5747 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-23844470662{\&}doi=10.1117{\%}2F12.595874{\&}partnerID=40{\&}md5=cb9ec8d5c13ffb11b3fdaa3d4b5bb75d }, type = { Conference Proceedings }, title = { Solitary pulmonary nodule characterization on CT by use of contrast enhancement maps }, pages = { 1950 }, issn = { 16057422 }, doi = { 10.1117/12.595874 }, booktitle = { Medical Imaging 2005: Image Processing }, author = { Shah and McNitt-Gray and Petkovska and Kim and DeZoysa and Goldin and Suh and Aberle }, abstract = { Studies have shown that vascular structure of a solitary pulmonary nodule (SPN) can give insight into the diagnosis of the nodule. The purpose of this study is to investigate the utility of texture analysis as a quantitative measure of the vascular structure of a nodule. A contrast CT study was conducted for 29 patients with an indeterminate SPN. For each patient, the post-contrast series at maximum enhancement was volumetrically registered to the pre-contrast series. The two registered series were subtracted to form difference images of the nodule and each voxel was color-coded into 7 bins. Initially, a representative image of each nodule was subjectively rated on a five-point by a radiologist as to the magnitude, extent, and heterogeneity of the enhancement. From the initial analysis the heterogeneity of the nodule was found to be significantly different for benign versus malignant nodules (p{\textless}0.01), while the other two ratings were found not to be significant. 
We then attempted to quantify this subjective rating of heterogeneity by calculating 14 textural features based on co-occurrence matrices. These features included various measures of contrast, entropy, energy, etc. Dimension reduction techniques such as principal component and factor analysis were applied to the features to reduce the 14 variables to one factor. The mean of this factor was significantly different for malignant versus benign nodules (p=0.010). Texture analysis of contrast enhancement maps appears to be useful tool to characterize SPNs. }, } |
2005 | In Conf. Proceedings | Daniel Mueller, Anthony Maeder, Peter O'Shea (2005). Improved direct volume visualisation of the coronary arteries using fused segmented regions. In Proceedings of the Digital Imaging Computing: Techniques and Applications, DICTA 2005, pp. 110–117. (link) (bib) x @inproceedings{Mueller, year = { 2005 }, volume = { 2005 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-33846990428{\&}doi=10.1109{\%}2FDICTA.2005.1578115{\&}partnerID=40{\&}md5=d567baa5b241ec2cbf2f4ffd4a343b46 }, type = { Conference Proceedings }, title = { Improved direct volume visualisation of the coronary arteries using fused segmented regions }, pages = { 110--117 }, isbn = { 0769524672 }, doi = { 10.1109/DICTA.2005.1578115 }, booktitle = { Proceedings of the Digital Imaging Computing: Techniques and Applications, DICTA 2005 }, author = { Mueller and Maeder and O'Shea }, abstract = { Coronary heart disease was the single largest cause of sudden death in Australia in 2002. Computed tomography angiography (CTA) provides high resolution, high contrast images of the thoracic cavity, and as such has emerged as the imaging modality of choice for diagnosing and planning treatment for coronary heart disease. However, radiologists and cardiac surgeons require tools to easily identify possible stenosis (narrowed or constricted coronary vessels) in such CTA datasets. We present a method which allows users to interactively visualise a specific three-dimensional region of interest (ROI). In our example, segmentation methods are applied to isolate the coronary vessels, which in turn are visually enhanced using various perceptual cues. The segmentation is achieved using a combination of thresholding, region-growing, and morphological operations. The perceptual enhancement is realized by fusing direct volume rendered images using weighting factors determined by the segmented regions. 
The user can allow for the easy dissemination of relevant information by adjusting 'transfer functions' to control the degree of ROI enhancement. This approach requires only roughly segmented regions of interest, and allows for the 3D visualisation of calcifications within vessels. This proposed method has significant potential for helping to facilitate the efficient treatment for coronary heart disease. Furthermore, it can be implemented at interactive framerates on comparatively cheap, desktop computing hardware making it readily accessible. {\textcopyright} 2005 IEEE. }, } |
2005 | In Conf. Proceedings | Ghassan Hamarneh, Vincent Chu, Marcelo Bordalo-Rodrigues, Mark Schweitzer (2005). Deformation analysis of Hoffa's fat pad from CT images of knee flexion and extension. In Medical Imaging 2005: Physiology, Function, and Structure from Medical Images, pp. 527. (link) (bib) x @inproceedings{Hamarneh, year = { 2005 }, volume = { 5746 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-23844492895{\&}doi=10.1117{\%}2F12.594803{\&}partnerID=40{\&}md5=5a48120f6f2668def17638f909192147 }, type = { Conference Proceedings }, title = { Deformation analysis of Hoffa's fat pad from CT images of knee flexion and extension }, pages = { 527 }, issn = { 16057422 }, doi = { 10.1117/12.594803 }, booktitle = { Medical Imaging 2005: Physiology, Function, and Structure from Medical Images }, author = { Hamarneh and Chu and Bordalo-Rodrigues and Schweitzer }, abstract = { Recent advances in medicine conjecture that certain body fat may have mechanical function in addition to its classical role of energy storage. In particular we aim to analyze if the intra-articular fat pad of Hoffa is merely a space holder or if it changes shape to provide cushioning for the knee bones. Towards this goal, 3D CT images of real knees, as well as a skeletal knee model with fat simulating Hoffa's pad, were acquired in both extension and flexion. Image segmentation was performed to automatically extract the real and simulated fat regions from the extension and flexion images. Utilizing the segmentation results as binary masks, we performed automatic multi-resolution image registration of the fat pad between flexed and extended knee positions. The resulting displacement fields from flexion-extension registration are examined and used to calculate local fat volume changes thus providing insight into shape changes that may have a mechanical component. }, } |
2005 | Misc | L Ibanez, W Schroeder, L Ng, J Cates (2005). The ITK Software Guide. (pdf) (bib) x @misc{Ibanez2005, year = { 2005 }, volume = { Second }, url = { http://www.itk.org/ItkSoftwareGuide.pdf }, title = { The ITK Software Guide }, pmid = { 1000070720 }, pages = { 804 }, number = { May }, issn = { 10445323 }, isbn = { 1930934157 }, doi = { 1-930934-15-7 }, booktitle = { The ITK Software Guide }, author = { Ibanez and Schroeder and Ng and Cates }, abstract = { Everything you need to install, use, and extend the Insight Segmentation and Registration Toolkit ITK. Includes detailed examples, installation procedures, and system overview for ITK version 2.4. (The included examples are taken directly from the ITK source code repository and are designed to demonstrate the essential features of the software.) The book comes with a CD-ROM that contains a complete hyperlinked version of the book plus ITK source code, data, Windows binaries, and extensive class documentation. Also includes CMake binaries for managing the ITK build process on a variety of compiler and operating system configurations. }, } |
2003 | Book | Luis Ibanez, Will Schroeder, Lydia Ng, Josh Cates (2003). The ITK Software Guide: The Insight Segmentation and Registration Toolkit, NA 2003, ISBN: 1089-7771. (link) (bib) x @book{Ibanez2003a, year = { 2003 }, volume = { 5 }, url = { http://www.amazon.com/dp/1930934106 }, title = { The ITK Software Guide: The Insight Segmentation and Registration Toolkit }, pages = { 539 }, number = { 4 }, issn = { 1089-7771 }, isbn = { 1930934106 }, doi = { 10.1109/4233.966107 }, booktitle = { IEEE Transactions on Information Technology in Biomedicine }, author = { Ibanez and Schroeder and Ng and Cates }, abstract = { Digital watermarking is a technique of hiding specific identification data for copyright authentication. This technique is adapted here for interleaving patient information with medical images to reduce storage and transmission overheads. The text data are encrypted before interleaving with images to ensure greater security. The graphical signals are compressed and subsequently interleaved with the image. Differential pulse-code-modulation and adaptive-delta-modulation techniques are employed for data compression, and encryption and results are tabulated for a specific example. }, } |
2003 | Book | Luis Ibanez, Will Schroeder, Lydia Ng, Josh Cates (2003). The ITK Software Guide: The Insight Segmentation and Registration Toolkit, NA 2003, ISBN: 1089-7771. (link) (bib) x @book{Ibanez2003, year = { 2003 }, volume = { 5 }, url = { http://www.amazon.com/dp/1930934106 }, title = { The ITK Software Guide: The Insight Segmentation and Registration Toolkit }, pages = { 539 }, number = { 4 }, issn = { 1089-7771 }, isbn = { 1930934106 }, doi = { 10.1109/4233.966107 }, booktitle = { IEEE Transactions on Information Technology in Biomedicine }, author = { Ibanez and Schroeder and Ng and Cates }, abstract = { Digital watermarking is a technique of hiding specific identification data for copyright authentication. This technique is adapted here for interleaving patient information with medical images to reduce storage and transmission overheads. The text data are encrypted before interleaving with images to ensure greater security. The graphical signals are compressed and subsequently interleaved with the image. Differential pulse-code-modulation and adaptive-delta-modulation techniques are employed for data compression, and encryption and results are tabulated for a specific example. }, } |
2003 | Journal | Lydia Ng, Luis Ibáñez (2003). Narrow band to image registration in the insight toolkit. Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 2717, pp. 271–280. (bib) x @article{Ng2003, year = { 2003 }, volume = { 2717 }, title = { Narrow band to image registration in the insight toolkit }, pages = { 271--280 }, journal = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics) }, issn = { 03029743 }, doi = { 10.1007/978-3-540-39701-4_29 }, author = { Ng and Ib{\'{a}}{\~{n}}ez }, abstract = { This paper introduces the new concept of narrow-band to image registration. Narrow-banding is a common technique used in the solution of level set approaches to image processing. For our application, the narrow-band describes the shape of an object by using a data structure containing the signed distance values at a small band of neighboring pixels. This compact representation of an object is well suited for performing registration against a standard image as well as against another narrow-band. The novel technique was implemented in the registration framework of the NLM Insight Toolkit (ITK). This implementation illustrates the great advantage of a modular framework structure that allows researchers to concentrate in the interesting aspects of a new algorithm by building on an existing set of predefined components for providing the rest of standard functionalities that are required. {\textcopyright} Springer-Verlag Berlin Heidelberg 2003. }, } |
2003 | In Collection | D. Stredney, J. Bryan, D. Sessanna, T. Kerwin (2003). Facilitating real-time volume interaction. In Studies in Health Technology and Informatics, pp. 329–335. (link) (bib) x @incollection{Stredney2003, year = { 2003 }, volume = { 94 }, url = { https://www.scopus.com/inward/record.uri?eid=2-s2.0-6344254574{\&}doi=10.3233{\%}2F978-1-60750-938-7-329{\&}partnerID=40{\&}md5=a00d000d033135abebb47756881fd5ff }, type = { Serial }, title = { Facilitating real-time volume interaction }, pages = { 329--335 }, issn = { 18798365 }, isbn = { 1586033204 }, doi = { 10.3233/978-1-60750-938-7-329 }, booktitle = { Studies in Health Technology and Informatics }, author = { Stredney and Bryan and Sessanna and Kerwin }, abstract = { We report on efforts to provide high-level intuitive tools that exploit commodity-based computing to facilitate real-time and distributed interactions with volumetric data. These efforts include an open source volume-rendering library, a portable volume visualization application framework, and parallel volume-rendering exploiting commodity-based hardware. We present our design and implementations, as well as examples of some of the various groups currently utilizing these tools, and discuss the tradeoffs of our developments versus existing techniques. }, } |
2002 | In Collection | Nils Hanssen, Bartosz von Rymon-Lipinski, Thomas Jansen, Marc Liévin, Erwin Keeve (2002). Integrating the Insight Toolkit itk into a medical software framework. In CARS 2002 Computer Assisted Radiology and Surgery, pp. 445–449. (bib) x @incollection{Hanssen2002, year = { 2002 }, title = { Integrating the Insight Toolkit itk into a medical software framework }, pages = { 445--449 }, doi = { 10.1007/978-3-642-56168-9_74 }, booktitle = { CARS 2002 Computer Assisted Radiology and Surgery }, author = { Hanssen and von Rymon-Lipinski and Jansen and Li{\'{e}}vin and Keeve }, } |
2002 | In Conf. Proceedings | L. Ibáñez, L. Ng, J. Gee, S. Aylward (2002). Registration patterns: The generic framework for image registration of the insight toolkit. In Proceedings - International Symposium on Biomedical Imaging, pp. 345–348. (bib) x @inproceedings{Ibanez2002, year = { 2002 }, volume = { 2002-Janua }, title = { Registration patterns: The generic framework for image registration of the insight toolkit }, pages = { 345--348 }, issn = { 19458452 }, isbn = { 078037584X }, doi = { 10.1109/ISBI.2002.1029264 }, booktitle = { Proceedings - International Symposium on Biomedical Imaging }, author = { Ib{\'{a}}{\~{n}}ez and Ng and Gee and Aylward }, abstract = { This paper describes the design and implementation of the generic framework for image registration contained in the National Library of Medicine NLM/NIH Segmentation and Registration Toolkit (ITK). The problem of image registration has been modeled here as a structure of pluggable components that can be easily interchanged. The rationale behind the framework is presented in this paper both from the image processing and software engineering points of view. ITK is an open source project that provides a platform for developing image processing and analysis applications. State of the art practices of software engineering have been used for the design, implementation and testing of the toolkit The source code can be downloaded free of charge and used in academic and commercial applications. }, } |