{"id":12053,"date":"2023-07-06T15:24:46","date_gmt":"2023-07-06T13:24:46","guid":{"rendered":"https:\/\/new.sano.science\/?post_type=people&#038;p=12053"},"modified":"2026-01-31T19:49:53","modified_gmt":"2026-01-31T18:49:53","slug":"szymon-plotka","status":"publish","type":"people","link":"https:\/\/sano.science\/people\/szymon-plotka\/","title":{"rendered":"Szymon P\u0142otka"},"excerpt":{"rendered":"<p>PostDoc in Medical Imaging and Robotics<\/p>\n","protected":false},"featured_media":18503,"template":"","people_teams":[24],"class_list":["post-12053","people","type-people","status-publish","has-post-thumbnail","hentry","people_teams-alumni"],"yoast_head":"<!-- This site is optimized with the Yoast SEO Premium plugin v27.5 (Yoast SEO v27.5) - https:\/\/yoast.com\/product\/yoast-seo-premium-wordpress\/ -->\n<title>Szymon P\u0142otka - Centre for Computational Personalized Medicine<\/title>\n<meta name=\"robots\" content=\"index, follow, max-snippet:-1, max-image-preview:large, max-video-preview:-1\" \/>\n<link rel=\"canonical\" href=\"https:\/\/sano.science\/people\/szymon-plotka\/\" \/>\n<meta property=\"og:locale\" content=\"en_US\" \/>\n<meta property=\"og:type\" content=\"article\" \/>\n<meta property=\"og:title\" content=\"Szymon P\u0142otka\" \/>\n<meta property=\"og:description\" content=\"PostDoc in Medical Imaging and Robotics\" \/>\n<meta property=\"og:url\" content=\"https:\/\/sano.science\/people\/szymon-plotka\/\" \/>\n<meta property=\"og:site_name\" content=\"Centre for Computational Personalized Medicine\" \/>\n<meta property=\"article:publisher\" content=\"https:\/\/www.facebook.com\/sano.science\/\" \/>\n<meta property=\"article:modified_time\" content=\"2026-01-31T18:49:53+00:00\" \/>\n<meta property=\"og:image\" content=\"https:\/\/sano.science\/wp-content\/uploads\/2023\/07\/Sano-Szymon-Plotka.png\" \/>\n\t<meta property=\"og:image:width\" content=\"1000\" \/>\n\t<meta property=\"og:image:height\" content=\"1000\" \/>\n\t<meta 
property=\"og:image:type\" content=\"image\/png\" \/>\n<meta name=\"twitter:card\" content=\"summary_large_image\" \/>\n<meta name=\"twitter:site\" content=\"@sanoscience\" \/>\n<meta name=\"twitter:label1\" content=\"Est. reading time\" \/>\n\t<meta name=\"twitter:data1\" content=\"1 minute\" \/>\n<script type=\"application\/ld+json\" class=\"yoast-schema-graph\">{\"@context\":\"https:\\\/\\\/schema.org\",\"@graph\":[{\"@type\":\"WebPage\",\"@id\":\"https:\\\/\\\/sano.science\\\/people\\\/szymon-plotka\\\/\",\"url\":\"https:\\\/\\\/sano.science\\\/people\\\/szymon-plotka\\\/\",\"name\":\"Szymon P\u0142otka - Centre for Computational Personalized Medicine\",\"isPartOf\":{\"@id\":\"https:\\\/\\\/sano.science\\\/#website\"},\"primaryImageOfPage\":{\"@id\":\"https:\\\/\\\/sano.science\\\/people\\\/szymon-plotka\\\/#primaryimage\"},\"image\":{\"@id\":\"https:\\\/\\\/sano.science\\\/people\\\/szymon-plotka\\\/#primaryimage\"},\"thumbnailUrl\":\"https:\\\/\\\/sano.science\\\/wp-content\\\/uploads\\\/2023\\\/07\\\/Sano-Szymon-Plotka.png\",\"datePublished\":\"2023-07-06T13:24:46+00:00\",\"dateModified\":\"2026-01-31T18:49:53+00:00\",\"breadcrumb\":{\"@id\":\"https:\\\/\\\/sano.science\\\/people\\\/szymon-plotka\\\/#breadcrumb\"},\"inLanguage\":\"en-US\",\"potentialAction\":[{\"@type\":\"ReadAction\",\"target\":[\"https:\\\/\\\/sano.science\\\/people\\\/szymon-plotka\\\/\"]}]},{\"@type\":\"ImageObject\",\"inLanguage\":\"en-US\",\"@id\":\"https:\\\/\\\/sano.science\\\/people\\\/szymon-plotka\\\/#primaryimage\",\"url\":\"https:\\\/\\\/sano.science\\\/wp-content\\\/uploads\\\/2023\\\/07\\\/Sano-Szymon-Plotka.png\",\"contentUrl\":\"https:\\\/\\\/sano.science\\\/wp-content\\\/uploads\\\/2023\\\/07\\\/Sano-Szymon-Plotka.png\",\"width\":1000,\"height\":1000,\"caption\":\"Sano P\u0142otka 
Szymon\"},{\"@type\":\"BreadcrumbList\",\"@id\":\"https:\\\/\\\/sano.science\\\/people\\\/szymon-plotka\\\/#breadcrumb\",\"itemListElement\":[{\"@type\":\"ListItem\",\"position\":1,\"name\":\"Home\",\"item\":\"https:\\\/\\\/sano.science\\\/\"},{\"@type\":\"ListItem\",\"position\":2,\"name\":\"People\",\"item\":\"https:\\\/\\\/sano.science\\\/people\\\/\"},{\"@type\":\"ListItem\",\"position\":3,\"name\":\"Alumni\",\"item\":\"https:\\\/\\\/sano.science\\\/people-teams\\\/alumni\\\/\"},{\"@type\":\"ListItem\",\"position\":4,\"name\":\"Szymon P\u0142otka\"}]},{\"@type\":\"WebSite\",\"@id\":\"https:\\\/\\\/sano.science\\\/#website\",\"url\":\"https:\\\/\\\/sano.science\\\/\",\"name\":\"Centre for Computational Personalized Medicine\",\"description\":\"Sano \u2013 Centre for Computational Medicine\",\"publisher\":{\"@id\":\"https:\\\/\\\/sano.science\\\/#organization\"},\"potentialAction\":[{\"@type\":\"SearchAction\",\"target\":{\"@type\":\"EntryPoint\",\"urlTemplate\":\"https:\\\/\\\/sano.science\\\/?s={search_term_string}\"},\"query-input\":{\"@type\":\"PropertyValueSpecification\",\"valueRequired\":true,\"valueName\":\"search_term_string\"}}],\"inLanguage\":\"en-US\"},{\"@type\":\"Organization\",\"@id\":\"https:\\\/\\\/sano.science\\\/#organization\",\"name\":\"Sano \u2013 Centre for Computational Medicine\",\"alternateName\":\"Sano\",\"url\":\"https:\\\/\\\/sano.science\\\/\",\"logo\":{\"@type\":\"ImageObject\",\"inLanguage\":\"en-US\",\"@id\":\"https:\\\/\\\/sano.science\\\/#\\\/schema\\\/logo\\\/image\\\/\",\"url\":\"https:\\\/\\\/sano.science\\\/wp-content\\\/uploads\\\/2024\\\/05\\\/logo_sano_podstawowe.png\",\"contentUrl\":\"https:\\\/\\\/sano.science\\\/wp-content\\\/uploads\\\/2024\\\/05\\\/logo_sano_podstawowe.png\",\"width\":700,\"height\":265,\"caption\":\"Sano \u2013 Centre for Computational 
Medicine\"},\"image\":{\"@id\":\"https:\\\/\\\/sano.science\\\/#\\\/schema\\\/logo\\\/image\\\/\"},\"sameAs\":[\"https:\\\/\\\/www.facebook.com\\\/sano.science\\\/\",\"https:\\\/\\\/x.com\\\/sanoscience\",\"https:\\\/\\\/www.linkedin.com\\\/company\\\/sanoscience\\\/\",\"https:\\\/\\\/www.youtube.com\\\/channel\\\/UCDZ_8TcjMWUG2ZcgKKgfpwQ\",\"https:\\\/\\\/bsky.app\\\/profile\\\/sanoscience.bsky.social\"]}]}<\/script>\n<!-- \/ Yoast SEO Premium plugin. -->","yoast_head_json":{"title":"Szymon P\u0142otka - Centre for Computational Personalized Medicine","robots":{"index":"index","follow":"follow","max-snippet":"max-snippet:-1","max-image-preview":"max-image-preview:large","max-video-preview":"max-video-preview:-1"},"canonical":"https:\/\/sano.science\/people\/szymon-plotka\/","og_locale":"en_US","og_type":"article","og_title":"Szymon P\u0142otka","og_description":"PostDoc in Medical Imaging and Robotics","og_url":"https:\/\/sano.science\/people\/szymon-plotka\/","og_site_name":"Centre for Computational Personalized Medicine","article_publisher":"https:\/\/www.facebook.com\/sano.science\/","article_modified_time":"2026-01-31T18:49:53+00:00","og_image":[{"width":1000,"height":1000,"url":"https:\/\/sano.science\/wp-content\/uploads\/2023\/07\/Sano-Szymon-Plotka.png","type":"image\/png"}],"twitter_card":"summary_large_image","twitter_site":"@sanoscience","twitter_misc":{"Est. 
reading time":"1 minute"},"schema":{"@context":"https:\/\/schema.org","@graph":[{"@type":"WebPage","@id":"https:\/\/sano.science\/people\/szymon-plotka\/","url":"https:\/\/sano.science\/people\/szymon-plotka\/","name":"Szymon P\u0142otka - Centre for Computational Personalized Medicine","isPartOf":{"@id":"https:\/\/sano.science\/#website"},"primaryImageOfPage":{"@id":"https:\/\/sano.science\/people\/szymon-plotka\/#primaryimage"},"image":{"@id":"https:\/\/sano.science\/people\/szymon-plotka\/#primaryimage"},"thumbnailUrl":"https:\/\/sano.science\/wp-content\/uploads\/2023\/07\/Sano-Szymon-Plotka.png","datePublished":"2023-07-06T13:24:46+00:00","dateModified":"2026-01-31T18:49:53+00:00","breadcrumb":{"@id":"https:\/\/sano.science\/people\/szymon-plotka\/#breadcrumb"},"inLanguage":"en-US","potentialAction":[{"@type":"ReadAction","target":["https:\/\/sano.science\/people\/szymon-plotka\/"]}]},{"@type":"ImageObject","inLanguage":"en-US","@id":"https:\/\/sano.science\/people\/szymon-plotka\/#primaryimage","url":"https:\/\/sano.science\/wp-content\/uploads\/2023\/07\/Sano-Szymon-Plotka.png","contentUrl":"https:\/\/sano.science\/wp-content\/uploads\/2023\/07\/Sano-Szymon-Plotka.png","width":1000,"height":1000,"caption":"Sano P\u0142otka Szymon"},{"@type":"BreadcrumbList","@id":"https:\/\/sano.science\/people\/szymon-plotka\/#breadcrumb","itemListElement":[{"@type":"ListItem","position":1,"name":"Home","item":"https:\/\/sano.science\/"},{"@type":"ListItem","position":2,"name":"People","item":"https:\/\/sano.science\/people\/"},{"@type":"ListItem","position":3,"name":"Alumni","item":"https:\/\/sano.science\/people-teams\/alumni\/"},{"@type":"ListItem","position":4,"name":"Szymon P\u0142otka"}]},{"@type":"WebSite","@id":"https:\/\/sano.science\/#website","url":"https:\/\/sano.science\/","name":"Centre for Computational Personalized Medicine","description":"Sano \u2013 Centre for Computational 
Medicine","publisher":{"@id":"https:\/\/sano.science\/#organization"},"potentialAction":[{"@type":"SearchAction","target":{"@type":"EntryPoint","urlTemplate":"https:\/\/sano.science\/?s={search_term_string}"},"query-input":{"@type":"PropertyValueSpecification","valueRequired":true,"valueName":"search_term_string"}}],"inLanguage":"en-US"},{"@type":"Organization","@id":"https:\/\/sano.science\/#organization","name":"Sano \u2013 Centre for Computational Medicine","alternateName":"Sano","url":"https:\/\/sano.science\/","logo":{"@type":"ImageObject","inLanguage":"en-US","@id":"https:\/\/sano.science\/#\/schema\/logo\/image\/","url":"https:\/\/sano.science\/wp-content\/uploads\/2024\/05\/logo_sano_podstawowe.png","contentUrl":"https:\/\/sano.science\/wp-content\/uploads\/2024\/05\/logo_sano_podstawowe.png","width":700,"height":265,"caption":"Sano \u2013 Centre for Computational Medicine"},"image":{"@id":"https:\/\/sano.science\/#\/schema\/logo\/image\/"},"sameAs":["https:\/\/www.facebook.com\/sano.science\/","https:\/\/x.com\/sanoscience","https:\/\/www.linkedin.com\/company\/sanoscience\/","https:\/\/www.youtube.com\/channel\/UCDZ_8TcjMWUG2ZcgKKgfpwQ","https:\/\/bsky.app\/profile\/sanoscience.bsky.social"]}]}},"acf":[],"meta_data":{"description":"<p>Szymon P\u0142otka obtained his PhD in Computer Science in 2024 from the Informatics Institute at the University of Amsterdam, where his research focused on applying deep learning to enhance prenatal care. His doctoral thesis, \u201c<strong>Enhancing Prenatal Care Through Deep Learning<\/strong>,\u201d explored advanced machine learning algorithms for medical image analysis, with a particular emphasis on applications in fetal video ultrasound imaging.<\/p>\n<p>Szymon&#8217;s research interests lie at the intersection of computer vision, machine learning, and deep learning-based algorithms for medical image analysis. 
He is particularly interested in developing innovative AI-driven solutions for improving diagnostic accuracy, integrating multimodal data sources, and optimizing healthcare workflows. His work aims to bridge the gap between cutting-edge artificial intelligence and real-world clinical applications, contributing to more efficient and accessible medical imaging technologies.<\/p>\n","email":"","social_media":[{"icon":{"ID":11984,"id":11984,"title":"github","filename":"github.svg","filesize":14876,"url":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/github.svg","link":"https:\/\/sano.science\/people\/maciej-malawski\/github\/","alt":"","author":"5","description":"","caption":"","name":"github","status":"inherit","uploaded_to":531,"date":"2023-07-06 10:56:34","modified":"2023-07-06 10:56:34","menu_order":0,"mime_type":"image\/svg+xml","type":"image","subtype":"svg+xml","icon":"https:\/\/sano.science\/wp-includes\/images\/media\/default.png","width":1,"height":1,"sizes":{"thumbnail":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/github.svg","thumbnail-width":150,"thumbnail-height":150,"medium":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/github.svg","medium-width":300,"medium-height":300,"medium_large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/github.svg","medium_large-width":768,"medium_large-height":1,"large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/github.svg","large-width":1024,"large-height":1024,"1536x1536":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/github.svg","1536x1536-width":1,"1536x1536-height":1,"2048x2048":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/github.svg","2048x2048-width":1,"2048x2048-height":1}},"link":"https:\/\/github.com\/simongeek","name":"GitHub"},{"icon":{"ID":11986,"id":11986,"title":"google","filename":"google.svg","filesize":14070,"url":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/google.svg","link":"https:\/\/sano.science\/people\/maciej-malawski\/g
oogle\/","alt":"","author":"5","description":"","caption":"","name":"google","status":"inherit","uploaded_to":531,"date":"2023-07-06 10:59:21","modified":"2023-07-06 10:59:21","menu_order":0,"mime_type":"image\/svg+xml","type":"image","subtype":"svg+xml","icon":"https:\/\/sano.science\/wp-includes\/images\/media\/default.png","width":1,"height":1,"sizes":{"thumbnail":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/google.svg","thumbnail-width":150,"thumbnail-height":150,"medium":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/google.svg","medium-width":300,"medium-height":300,"medium_large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/google.svg","medium_large-width":768,"medium_large-height":1,"large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/google.svg","large-width":1024,"large-height":1024,"1536x1536":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/google.svg","1536x1536-width":1,"1536x1536-height":1,"2048x2048":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/google.svg","2048x2048-width":1,"2048x2048-height":1}},"link":"https:\/\/scholar.google.com\/citations?user=g9sWRN0AAAAJ&hl=pl&oi=ao","name":"Google Scholar"},{"icon":{"ID":11990,"id":11990,"title":"research gate","filename":"research-gate.svg","filesize":14281,"url":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/research-gate.svg","link":"https:\/\/sano.science\/people\/maciej-malawski\/research-gate\/","alt":"","author":"5","description":"","caption":"","name":"research-gate","status":"inherit","uploaded_to":531,"date":"2023-07-06 11:18:50","modified":"2023-07-06 
11:18:50","menu_order":0,"mime_type":"image\/svg+xml","type":"image","subtype":"svg+xml","icon":"https:\/\/sano.science\/wp-includes\/images\/media\/default.png","width":1,"height":1,"sizes":{"thumbnail":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/research-gate.svg","thumbnail-width":150,"thumbnail-height":150,"medium":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/research-gate.svg","medium-width":300,"medium-height":300,"medium_large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/research-gate.svg","medium_large-width":768,"medium_large-height":1,"large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/research-gate.svg","large-width":1024,"large-height":1024,"1536x1536":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/research-gate.svg","1536x1536-width":1,"1536x1536-height":1,"2048x2048":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/research-gate.svg","2048x2048-width":1,"2048x2048-height":1}},"link":"https:\/\/www.researchgate.net\/profile\/Szymon-Plotka","name":"ResearchGate"},{"icon":{"ID":11992,"id":11992,"title":"orcid","filename":"orcid.svg","filesize":15590,"url":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/orcid.svg","link":"https:\/\/sano.science\/people\/maciej-malawski\/orcid\/","alt":"","author":"5","description":"","caption":"","name":"orcid","status":"inherit","uploaded_to":531,"date":"2023-07-06 11:21:29","modified":"2023-07-06 
11:21:29","menu_order":0,"mime_type":"image\/svg+xml","type":"image","subtype":"svg+xml","icon":"https:\/\/sano.science\/wp-includes\/images\/media\/default.png","width":1,"height":1,"sizes":{"thumbnail":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/orcid.svg","thumbnail-width":150,"thumbnail-height":150,"medium":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/orcid.svg","medium-width":300,"medium-height":300,"medium_large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/orcid.svg","medium_large-width":768,"medium_large-height":1,"large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/orcid.svg","large-width":1024,"large-height":1024,"1536x1536":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/orcid.svg","1536x1536-width":1,"1536x1536-height":1,"2048x2048":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/orcid.svg","2048x2048-width":1,"2048x2048-height":1}},"link":"https:\/\/orcid.org\/0000-0001-9411-820X","name":"ORCID"},{"icon":{"ID":11994,"id":11994,"title":"linkedin","filename":"linkedin.svg","filesize":914,"url":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/linkedin.svg","link":"https:\/\/sano.science\/people\/maciej-malawski\/linkedin-2\/","alt":"","author":"5","description":"","caption":"","name":"linkedin-2","status":"inherit","uploaded_to":531,"date":"2023-07-06 11:24:13","modified":"2023-07-06 
11:24:13","menu_order":0,"mime_type":"image\/svg+xml","type":"image","subtype":"svg+xml","icon":"https:\/\/sano.science\/wp-includes\/images\/media\/default.png","width":1,"height":1,"sizes":{"thumbnail":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/linkedin.svg","thumbnail-width":150,"thumbnail-height":150,"medium":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/linkedin.svg","medium-width":300,"medium-height":300,"medium_large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/linkedin.svg","medium_large-width":768,"medium_large-height":1,"large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/linkedin.svg","large-width":1024,"large-height":1024,"1536x1536":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/linkedin.svg","1536x1536-width":1,"1536x1536-height":1,"2048x2048":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/linkedin.svg","2048x2048-width":1,"2048x2048-height":1}},"link":"https:\/\/www.linkedin.com\/in\/szymonplotka\/","name":"LinkedIn"}],"tabs":false,"quote":"","position_with_team":{"text_before_link":"PostDoc in","link_text":"Medical Imaging and Robotics","text_after_link":"","link":"https:\/\/sano.science\/research-teams\/health-informatics-group-higs\/"},"publications":[{"ID":23729,"post_author":"8","post_date":"2025-05-15 13:40:55","post_date_gmt":"2025-05-15 11:40:55","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-AiXlYn\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\" id=\"h-szymon-plotka-karol-pustelnik-paula-szenejko-kinga-zebrowska-iga-rzucidlo-szymanska-natalia-szymecka-samaha-tomasz-legowik-katarzyna-kosinska-kaczynska-przemyslaw-korzeniowski-piotr-bilinski-asma-khalil-robert-brawura-biskupski-samaha-ivana-isgum-clara-i-sanchez-arkadiusz-sitek\">Szymon P\u0142otka, Karol Pustelnik, Paula Szenejko, Kinga \u017bebrowska, Iga Rzucid\u0142o-Szyma\u0144ska, Natalia Szymecka-Samaha, Tomasz \u0141\u0119gowik, Katarzyna 
Kosi\u0144ska-Kaczy\u0144ska, Przemys\u0142aw Korzeniowski, Piotr Bili\u0144ski, Asma Khalil, Robert Brawura-Biskupski-Samaha, Ivana I\u0161gum, Clara I S\u00e1nchez, Arkadiusz Sitek\u00a0<\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-NS0cQ3\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-YA57he\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">In this collaborative study, the authors propose a deep learning-based method aimed at improving the precision and efficiency of fetal biometry in prenatal ultrasound.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-YA57he\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">The study, titled \"Direct estimation of fetal biometry measurements from ultrasound video scans through deep learning,\" tackles a long-standing challenge in obstetric imaging: the reliance on skilled professionals to manually identify specific standard anatomical planes\u2014such as those of the fetal head, abdomen, and femur\u2014for measurement. This traditional approach demands expertise, is time-intensive, and often results in inconsistencies due to intra- and interobserver variation.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-KwIbo8\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">To address these limitations, the authors developed an end-to-end deep learning framework capable of automatically identifying the required standard planes and directly extracting key biometric measurements from entire ultrasound video sequences. 
Unlike previous methods that require manual frame selection or partial automation, this approach performs both detection and measurement in a unified pipeline.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-L0zWmC\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Importantly, it is\u2014according to the authors\u2014the first method to estimate the full set of fetal biometry parameters directly from ultrasound video data without human intervention. This advancement has the potential to improve diagnostic consistency and reduce operator dependency, particularly in resource-constrained settings, marking a significant step forward for AI-assisted prenatal diagnostics.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-NS0cQ3\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_6825d36429d0f\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/www.ajogmfm.org\/article\/S2589-9333(25)00024-2\/abstract\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_self\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-NS0cQ3\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-j8TR4a\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" 
eplus-wrapper\"><strong>Authors<\/strong>: <a href=\"https:\/\/sano.science\/people\/szymon-plotka\/\">Szymon P\u0142otka<\/a>, Karol Pustelnik, Paula Szenejko, Kinga \u017bebrowska, Iga Rzucid\u0142o-Szyma\u0144ska, Natalia Szymecka-Samaha, Tomasz \u0141\u0119gowik, Katarzyna Kosi\u0144ska-Kaczy\u0144ska, <a href=\"https:\/\/sano.science\/people\/przemyslaw-korzeniowski\/\">Przemys\u0142aw Korzeniowski<\/a>, Piotr Bili\u0144ski, Asma Khalil, Robert Brawura-Biskupski-Samaha, Ivana I\u0161gum, Clara I S\u00e1nchez, Arkadiusz Sitek\u00a0<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-j8TR4a\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><strong>DOI<\/strong>: 10.1016\/j.ajogmf.2025.101623&nbsp;<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-mJrtPh\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><strong>Keywords:<\/strong> deep learning in medicine, fetal biometry, ultrasound video analysis, prenatal ultrasound<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-AQ3sIC\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><\/p>\n<!-- \/wp:paragraph -->","post_title":"Direct estimation of fetal biometry measurements from ultrasound video scans through deep learning","post_excerpt":"Article in journal: American Journal of Obstetrics & Gynecology MFM, 2025","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"direct-estimation-of-fetal-biometry-measurements-from-ultrasound-video-scans-through-deep-learning-2","to_ping":"","pinged":"","post_modified":"2025-05-15 13:48:51","post_modified_gmt":"2025-05-15 
11:48:51","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=23729","menu_order":0,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":20967,"post_author":"8","post_date":"2025-01-21 16:51:16","post_date_gmt":"2025-01-21 15:51:16","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-6CqjM2\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\" id=\"h-sabina-martyniak-joanna-kaleta-diego-dall-alba-michal-naskret-szymon-plotka-and-przemyslaw-korzeniowski\">Sabina Martyniak, Joanna Kaleta, Diego Dall'Alba, Micha\u0142 Naskr\u0119t, Szymon P\u0142otka, and Przemys\u0142aw Korzeniowski<br><\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"40px\",\"epAnimationGeneratedClass\":\"edplus_anim-tI7h1k\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:40px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-aU2f7H\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">This work presents an innovative approach to advancing computer-assisted surgical (CAS) systems by addressing challenges in training data quality and realism.&nbsp;<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-A5sGbH\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">In this study, the authors introduce a comprehensive pipeline for creating high-quality synthetic data tailored for modern CAS systems. This pipeline integrates an advanced surgical simulator capable of generating complex annotations that surpass those found in existing public datasets. 
The simulator also models intricate surgical interactions, including the dynamics between instruments and deformable anatomical structures, ensuring a more realistic simulation environment.&nbsp;<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-qQq76G\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">To further enhance data realism, the researchers developed a novel image-to-image translation method based on Stable Diffusion (SD) and Low-Rank Adaptation (LoRA). This technique minimizes the visual gap between synthetic and real-world images while preserving the simulator's detailed annotations. By leveraging only a small amount of real-world data, the method enables efficient training and generalizes well to practical applications, thereby improving CAS training and guidance.&nbsp;<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-g7BCRx\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">This innovative pipeline has been validated experimentally and is a significant step forward in bridging the gap between synthetic and real-world datasets for surgical applications. 
The dataset and code are available for the research community<a href=\"https:\/\/github.com\/SanoScience\/SimuScope\" target=\"_blank\" rel=\"noreferrer noopener nofollow\"> github.com\/SanoScience\/SimuScope<\/a><\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"40px\",\"epAnimationGeneratedClass\":\"edplus_anim-tI7h1k\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:40px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-H3zZ17\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><strong>Authors<\/strong>: Sabina Martyniak, Joanna Kaleta, Diego Dall'Alba, Micha\u0142 Naskr\u0119t, Szymon P\u0142otka, and Przemys\u0142aw Korzeniowski<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-aU2f7H\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><strong>DOI<\/strong>: 10.48550\/arXiv.2412.02332&nbsp;<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"40px\",\"epAnimationGeneratedClass\":\"edplus_anim-tI7h1k\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:40px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_678fc2ae54c84\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/arxiv.org\/abs\/2412.02332\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_self\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"SimuScope: Realistic Endoscopic Synthetic Dataset Generation through Surgical Simulation and Diffusion 
Models","post_excerpt":"Conference manuscript in IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV), 2024","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"simuscope-realistic-endoscopic-synthetic-dataset-generation-through-surgical-simulation-and-diffusion-models","to_ping":"","pinged":"","post_modified":"2025-05-19 21:17:07","post_modified_gmt":"2025-05-19 19:17:07","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=20967","menu_order":0,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":14853,"post_author":"5","post_date":"2024-01-10 20:52:21","post_date_gmt":"2024-01-10 19:52:21","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-wEgzdH\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\"><strong>Plotka, Grzeszczyk<\/strong>, Brawura-Biskupski-Samaha, Gutaj, Lipa, Trzcinski, Isgum, Sanchez,<strong> Sitek<\/strong><\/h2>\n<!-- \/wp:heading -->","post_title":"BabyNet++: Fetal Birth Weight Prediction using Biometry Multimodal Data Acquired Less Than 24 Hours Before Delivery","post_excerpt":"In: Computers in Biology & Medicine, 2023.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"babynet-fetal-birth-weight-prediction-using-biometry-multimodal-data-acquired-less-than-24-hours-before-delivery","to_ping":"","pinged":"","post_modified":"2024-01-26 14:17:39","post_modified_gmt":"2024-01-26 13:17:39","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=14853","menu_order":29,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":15049,"post_author":"5","post_date":"2024-01-18 10:38:44","post_date_gmt":"2024-01-18 09:38:44","post_content":"<!-- wp:heading 
{\"epAnimationGeneratedClass\":\"edplus_anim-RAzE0W\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\">Matthias Eisenmann,\u00a0Annika Reinke,\u00a0Vivienn Weru,\u00a0Minu Dietlinde Tizabi,\u00a0Fabian Isensee,\u00a0Tim J. Adler,\u00a0Sharib Ali,\u00a0Vincent Andrearczyk,\u00a0Marc Aubreville,\u00a0Ujjwal Baid,\u00a0Spyridon Bakas,\u00a0Niranjan Balu,\u00a0Sophia Bano,\u00a0Jorge Bernal,\u00a0Sebastian Bodenstedt,\u00a0Alessandro Casella,\u00a0Veronika Cheplygina,\u00a0Marie Daum,\u00a0Marleen de Bruijne,\u00a0Adrien Depeursinge,\u00a0Reuben Dorent,\u00a0Jan Egger,\u00a0David G. Ellis,\u00a0Sandy Engelhardt,\u00a0Melanie Ganz,\u00a0Noha Ghatwary,\u00a0Gabriel Girard,\u00a0Patrick Godau,\u00a0Anubha Gupta,\u00a0Lasse Hansen,\u00a0Kanako Harada,\u00a0Mattias Heinrich,\u00a0Nicholas Heller,\u00a0Alessa Hering,\u00a0Arnaud Huaulm\u00e9,\u00a0Pierre Jannin,\u00a0Ali Emre Kavur,\u00a0Old\u0159ich Kodym,\u00a0Michal Kozubek,\u00a0Jianning Li,\u00a0Hongwei Li,\u00a0Jun Ma,\u00a0Carlos Mart\u00edn-Isla,\u00a0Bjoern Menze,\u00a0Alison Noble,\u00a0Valentin Oreiller,\u00a0Nicolas Padoy,\u00a0Sarthak Pati,\u00a0Kelly Payette,\u00a0Tim R\u00e4dsch,\u00a0Jonathan Rafael-Pati\u00f1o,\u00a0Vivek Singh Bawa,\u00a0Stefanie Speidel,\u00a0Carole H. Sudre,\u00a0Kimberlin van Wijnen,\u00a0Martin Wagner,\u00a0Donglai Wei,\u00a0Amine Yamlahi,\u00a0Moi Hoon Yap,\u00a0Chun Yuan,\u00a0Maximilian Zenk,\u00a0Aneeq Zia,\u00a0David Zimmerer,\u00a0Dogu Baran Aydogan,\u00a0Binod Bhattarai,\u00a0Louise Bloch,\u00a0Raphael Br\u00fcngel,\u00a0Jihoon Cho,\u00a0Chanyeol Choi,\u00a0Qi Dou,\u00a0Ivan Ezhov,\u00a0Christoph M. 
Friedrich,\u00a0Clifton Fuller,\u00a0Rebati Raman Gaire,\u00a0Adrian Galdran,\u00a0\u00c1lvaro Garc\u00eda Faura,\u00a0Maria Grammatikopoulou,\u00a0SeulGi Hong,\u00a0Mostafa Jahanifar,\u00a0Ikbeom Jang,\u00a0Abdolrahim Kadkhodamohammadi,\u00a0Inha Kang,\u00a0Florian Kofler,\u00a0Satoshi Kondo,\u00a0Hugo Kuijf,\u00a0Mingxing Li,\u00a0Minh Huan Luu,\u00a0Toma\u017e Martin\u010di\u010d,\u00a0Pedro Morais,\u00a0Mohamed A. Naser,\u00a0Bruno Oliveira,\u00a0David Owen,\u00a0Subeen Pang,\u00a0Jinah Park,\u00a0Sung-Hong Park,\u00a0Szymon P\u0142otka,\u00a0Elodie Puybareau,\u00a0Nasir Rajpoot,\u00a0Kanghyun Ryu,\u00a0Numan Saeed\u00a0et al. (25 additional authors not shown)<\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-koslDw\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-SRn8tU\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">International benchmarking competitions have become fundamental for the comparative performance assessment of image analysis methods. However, little attention has been given to investigating what can be learnt from these competitions. Do they really generate scientific progress? What are common and successful participation strategies? What makes a solution superior to a competing method? To address this gap in the literature, we performed a multi-center study with all 80 competitions that were conducted in the scope of IEEE ISBI 2021 and MICCAI 2021. Statistical analyses performed based on comprehensive descriptions of the submitted algorithms linked to their rank as well as the underlying participation strategies revealed common characteristics of winning solutions. 
These typically include the use of multi-task learning (63%) and\/or multi-stage pipelines (61%), and a focus on augmentation (100%), image preprocessing (97%), data curation (79%), and post-processing (66%). The \u201ctypical\u201d lead of a winning team is a computer scientist with a doctoral degree, five years of experience in biomedical image analysis, and four years of experience in deep learning. Two core general development strategies stood out for highly-ranked teams: the reflection of the metrics in the method design and the focus on analyzing and handling failure cases. According to the organizers, 43% of the winning algorithms exceeded the state of the art but only 11% completely solved the respective domain problem. The insights of our study could help researchers (1) improve algorithm development strategies when approaching new problems, and (2) focus on open research questions revealed by this work.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-koslDw\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_65a8f193fa877\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/openaccess.thecvf.com\/content\/CVPR2023\/papers\/Eisenmann_Why_Is_the_Winner_the_Best_CVPR_2023_paper.pdf\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_blank\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"Why is the winner the best?","post_excerpt":"In: 
2023.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"why-is-the-winner-the-best","to_ping":"","pinged":"","post_modified":"2024-01-18 10:38:44","post_modified_gmt":"2024-01-18 09:38:44","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=15049","menu_order":31,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":15038,"post_author":"5","post_date":"2024-01-18 10:24:56","post_date_gmt":"2024-01-18 09:24:56","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-G8lzyX\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\">Joanna Kaleta, Diego Dall\u2019Alba, Szymon P\u0142otka, Przemys\u0142aw Korzeniowski<\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-MENX2Y\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-nsQHUU\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Computer-assisted surgical systems provide support information to the surgeon, which can improve the execution and overall outcome of the procedure. These systems are based on deep learning models that are trained on complex and challenging-to-annotate data. 
Generating synthetic data can overcome these limitations, but it is necessary to reduce the domain gap between real and synthetic data.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-MENX2Y\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_65a8ee413e9c1\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/link.springer.com\/article\/10.1007\/s11548-023-03030-w?utm_source=rct_congratemailt\\u0026utm_medium=email\\u0026utm_campaign=oa_20231107\\u0026utm_content=10.1007\/s11548-023-03030-w\\u0026fbclid=IwAR2GsWKZwJLqcP5wjhvqyHlHnHeughXGMz0l4xLmub2QrXHHr6VxxQxiJ-o\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_blank\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"Minimal data requirement for realistic endoscopic image generation with Stable Diffusion","post_excerpt":"In: International Journal of Computer Assisted Radiology and Surgery, 2023.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"minimal-data-requirement-for-realistic-endoscopic-image-generation-with-stable-diffusion","to_ping":"","pinged":"","post_modified":"2024-01-18 10:24:57","post_modified_gmt":"2024-01-18 09:24:57","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=15038","menu_order":32,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":14968,"post_author":"5","post_date":"2024-01-16 13:59:08","post_date_gmt":"2024-01-16 
12:59:08","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-Ctao5l\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\">Michal K. Grzeszczyk, Szymon P\u0142otka, Beata Rebizant, Katarzyna Kosi\u0144ska-Kaczy\u0144ska, Micha\u0142 Lipa, Robert Brawura-Biskupski-Samaha, Przemys\u0142aw Korzeniowski, Tomasz Trzci\u0144ski, Arkadiusz Sitek<\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-cx4IQr\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-KXYa3n\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Medical data analysis often combines both imaging and tabular data processing using machine learning algorithms. While previous studies have investigated the impact of attention mechanisms on deep learning models, few have explored integrating attention modules and tabular data. In this paper, we introduce TabAttention, a novel module that enhances the performance of Convolutional Neural Networks (CNNs) with an attention mechanism that is trained conditionally on tabular data. Specifically, we extend the Convolutional Block Attention Module to 3D by adding a Temporal Attention Module that uses multi-head self-attention to learn attention maps. Furthermore, we enhance all attention modules by integrating tabular data embeddings. Our approach is demonstrated on the fetal birth weight (FBW) estimation task, using 92 fetal abdominal ultrasound video scans and fetal biometry measurements. Our results indicate that TabAttention outperforms clinicians and existing methods that rely on tabular and\/or imaging data for FBW prediction. 
This novel approach has the potential to improve computer-aided diagnosis in various clinical workflows where imaging and tabular data are combined. We provide a source code for integrating TabAttention in CNNs at\u00a0<a href=\"https:\/\/github.com\/SanoScience\/Tab-Attention\">https:\/\/github.com\/SanoScience\/Tab-Attention<\/a>.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-cx4IQr\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_65a67d89e2323\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/link.springer.com\/chapter\/10.1007\/978-3-031-43990-2_33\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_blank\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"TabAttention: Learning Attention Conditionally on Tabular Data","post_excerpt":"In: MICCAI, 2023.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"tabattention-learning-attention-conditionally-on-tabular-data","to_ping":"","pinged":"","post_modified":"2024-01-16 13:59:08","post_modified_gmt":"2024-01-16 12:59:08","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=14968","menu_order":33,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":14924,"post_author":"5","post_date":"2024-01-16 12:59:29","post_date_gmt":"2024-01-16 11:59:29","post_content":"<!-- wp:heading 
{\"epAnimationGeneratedClass\":\"edplus_anim-W4Gebu\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\">Michal K. Grzeszczyk, Szymon P\u0142otka, Arkadiusz Sitek<\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-DaNRf4\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-xVc82I\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Cardiac Magnetic Resonance Imaging is commonly used for the assessment of the cardiac anatomy and function. The delineations of left and right ventricle blood pools and left ventricular myocardium are important for the diagnosis of cardiac diseases. Unfortunately, the movement of a patient during the CMR acquisition procedure may result in motion artifacts appearing in the final image. Such artifacts decrease the diagnostic quality of CMR images and force redoing of the procedure. In this paper, we present a Multi-task Swin UNEt TRansformer network for simultaneous solving of two tasks in the CMRxMotion challenge: CMR segmentation and motion artifacts classification. We utilize both segmentation and classification as a multi-task learning approach which allows us to determine the diagnostic quality of CMR and generate masks at the same time. CMR images are classified into three diagnostic quality classes, whereas, all samples with non-severe motion artifacts are being segmented. 
Ensemble of five networks trained using 5-Fold Cross-validation achieves segmentation performance of DICE coefficient of 0.871 and classification accuracy of 0.595.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-DaNRf4\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_65a66f8b91c7e\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/arxiv.org\/abs\/2209.02470\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_blank\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"Multi-task Swin Transformer for Motion Artifacts Classification and Cardiac Magnetic Resonance Image Segmentation","post_excerpt":"In: Statistical Atlases and Computational Modelling of the Heart Workshop (MICCAI 2022), 2022.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"multi-task-swin-transformer-for-motion-artifacts-classification-and-cardiac-magnetic-resonance-image-segmentation","to_ping":"","pinged":"","post_modified":"2024-01-16 12:59:29","post_modified_gmt":"2024-01-16 11:59:29","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=14924","menu_order":57,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":14873,"post_author":"5","post_date":"2024-01-12 16:52:49","post_date_gmt":"2024-01-12 15:52:49","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-gryrhF\",\"epGeneratedClass\":\"eplus-wrapper\"} 
-->\n<h2 class=\"wp-block-heading eplus-wrapper\">Arkadiusz Sitek, Joanna Seliga-Siwecka, Szymon P\u0142otka, Michal K. Grzeszczyk, Szymon Seliga, Krzysztof W\u0142odarczyk, Renata Bokiniec<\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-N2hnSy\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-1Sq5vk\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Necrotising enterocolitis (NEC) is one of the most common diseases in neonates and predominantly affects premature or very-low-birth-weight infants. Diagnosis is difficult and needed in hours since the first symptom onset for the best therapeutic effects. Artificial intelligence (AI) may play a significant role in NEC diagnosis. A literature search on the use of AI in the diagnosis of NEC was performed. Four databases (PubMed, Embase, arXiv, and IEEE Xplore) were searched with the appropriate MeSH terms. The search yielded 118 publications that were reduced to 8 after screening and checking for eligibility. Of the eight, five used classic machine learning (ML), and three were on the topic of deep ML. Most publications showed promising results. However, no publications with evident clinical benefits were found. Datasets used for training and testing AI systems were small and typically came from a single institution. The potential of AI to improve the diagnosis of NEC is evident. The body of literature on this topic is scarce, and more research in this area is needed, especially with a focus on clinical utility. 
Cross-institutional data for the training and testing of AI algorithms are required to make progress in this area.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-N2hnSy\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_65a160289ac79\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/www.nature.com\/articles\/s41390-022-02322-2\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_blank\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"Artificial intelligence in the diagnosis of necrotising enterocolitis in newborns","post_excerpt":"In: (Nature) Pediatric Research, 2022.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"artificial-intelligence-in-the-diagnosis-of-necrotising-enterocolitis-in-newborns","to_ping":"","pinged":"","post_modified":"2024-01-12 16:52:49","post_modified_gmt":"2024-01-12 15:52:49","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=14873","menu_order":59,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":14823,"post_author":"5","post_date":"2024-01-10 19:50:49","post_date_gmt":"2024-01-10 18:50:49","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-JIr9n4\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\"><strong>P\u0142otka S, Grzeszczyk MK<\/strong>, Lipa M, Trzci\u0144ski T, and <strong>Sitek 
A.<\/strong><\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-FJZV8o\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-oLqJsd\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Predicting fetal weight at birth is an important aspect of perinatal care, particularly in the context of antenatal management, which includes the planned timing and the mode of delivery. Accurate prediction of weight using prenatal ultrasound is challenging as it requires images of specific fetal body parts during advanced pregnancy which is difficult to capture due to poor quality of images caused by the lack of amniotic fluid. As a consequence, predictions which rely on standard methods often suffer from significant errors. In this paper we propose the Residual Transformer Module which extends a 3D ResNet-based network for analysis of\u00a02D+t spatio-temporal ultrasound video scans. Our end-to-end method, called BabyNet, automatically predicts fetal birth weight based on fetal ultrasound video scans. We evaluate BabyNet using a dedicated clinical set comprising 225 2D fetal ultrasound videos of pregnancies from 75 patients performed one day prior to delivery. Experimental results show that BabyNet outperforms several state-of-the-art methods and estimates the weight at birth with accuracy comparable to human experts. Furthermore, combining estimates provided by human experts with those computed by BabyNet yields the best results, outperforming either of other methods by a significant margin. 
The source code of BabyNet is available at\u00a0<a href=\"https:\/\/github.com\/SanoScience\/BabyNet\">https:\/\/github.com\/SanoScience\/BabyNet<\/a>.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-FJZV8o\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_659ee6f317c70\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/link.springer.com\/chapter\/10.1007\/978-3-031-16440-8_34\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_blank\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"BabyNet: Residual Transformer Module for Birth Weight Prediction on Fetal Ultrasound Video","post_excerpt":"In: 25th International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI), 2022.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"babynet-residual-transformer-module-for-birth-weight-prediction-on-fetal-ultrasound-video","to_ping":"","pinged":"","post_modified":"2024-01-10 19:50:49","post_modified_gmt":"2024-01-10 18:50:49","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=14823","menu_order":61,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":12676,"post_author":"5","post_date":"2023-07-13 13:17:33","post_date_gmt":"2023-07-13 11:17:33","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-4J1DxL\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 
class=\"wp-block-heading eplus-wrapper\">Szczepa\u0144ski T,<strong> Sitek A<\/strong>, Trzci\u0144ski T, <strong>P\u0142otka S<\/strong><\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-ypkope\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_64c02cf3f5e7b\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/arxiv.org\/pdf\/2201.09360.pdf\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_blank\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"POTHER: Patch-Voted Deep Learning-based Chest X-ray Bias Analysis for COVID-19 Detection\u00a0","post_excerpt":"In: 22nd International Conference on Computational Science, 2022.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"pother-patch-voted-deep-learning-based-chest-x-ray-bias-analysis-for-covid-19-detection-2","to_ping":"","pinged":"","post_modified":"2024-01-09 18:29:57","post_modified_gmt":"2024-01-09 17:29:57","post_content_filtered":"","post_parent":0,"guid":"https:\/\/new.sano.science\/?post_type=research&#038;p=12676","menu_order":63,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":12666,"post_author":"5","post_date":"2023-07-13 13:11:29","post_date_gmt":"2023-07-13 11:11:29","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-i2nvwe\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\"><strong>\u015alazyk F, Jab\u0142ecki P, 
Malawski M, P\u0142otka P., A. Lisowska<\/strong><\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-l8oSS5\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-Khjkm0\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Federated learning enables building a shared model from multicentre data while storing the training data locally for privacy. In this paper, we present an evaluation (called CXR-FL) of deep learning-based models for chest X-ray image analysis using the federated learning method. We examine the impact of federated learning parameters on the performance of central models. Additionally, we show that classification models perform worse if trained on a region of interest reduced to segmentation of the lung compared to the full image. However, focusing training of the classification model on the lung area may result in improved pathology interpretability during inference. We also find that federated learning helps maintain model generalizability. 
The pre-trained weights and code are publicly available at (<a href=\"https:\/\/github.com\/SanoScience\/CXR-FL\">https:\/\/github.com\/SanoScience\/CXR-FL<\/a>).<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-l8oSS5\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_64c02d6b6c034\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/link.springer.com\/chapter\/10.1007\/978-3-031-08754-7_50\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_blank\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"CXR-FL: Deep Learning-based Chest X-ray Image Analysis Using Federated Learning\u00a0","post_excerpt":"In: 22nd International Conference on Computational Science Lecture Notes in Computer Science, 2022.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"cxr-fl-deep-learning-based-chest-x-ray-image-analysis-using-federated-learning-2","to_ping":"","pinged":"","post_modified":"2024-01-09 18:24:36","post_modified_gmt":"2024-01-09 17:24:36","post_content_filtered":"","post_parent":0,"guid":"https:\/\/new.sano.science\/?post_type=research&#038;p=12666","menu_order":55,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":12651,"post_author":"5","post_date":"2023-07-13 12:42:24","post_date_gmt":"2023-07-13 10:42:24","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-Ie3pig\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 
class=\"wp-block-heading eplus-wrapper\">P\u0142otka, Szymon; W\u0142odarczyk, Tomasz; Klasa, Adam; Lipa, Micha\u0142; Sitek, Arkadiusz; Trzci\u0144ski, Tomasz<\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-xUotvU\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-40P5RJ\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">In this paper, we propose an end-to-end multi-task neural network called FetalNet with an attention mechanism and stacked module for spatio-temporal fetal ultrasound scan video analysis. Fetal bio-metric measurement is a standard examination during pregnancy used for the fetus growth monitoring and estimation of gestational age and fetal weight. The main goal in fetal ultrasound scan video analysis is to find proper standard planes to measure the fetal head, abdomen and femur. Due to natural high speckle noise and shadows in ultrasound data, medical expertise and sonographic experience are required to find the appropriate acquisition plane and perform accurate measurements of the fetus. In addition, existing computer-aided methods for fetal US biometric measurement address only one single image frame without considering temporal features. To address these shortcomings, we propose an end-to-end multi-task neural network for spatio-temporal ultrasound scan video analysis to simultaneously localize, classify and measure the fetal body parts. We propose a new encoder-decoder segmentation architecture that incorporates a classification branch. Additionally, we employ an attention mechanism with a stacked module to learn salient maps to suppress irrelevant US regions and efficient scan plane localization. 
We trained on fetal ultrasound videos that come from routine examinations of 700 different patients. Our method called FetalNet outperforms existing state-of-the-art methods in both classification and segmentation in fetal ultrasound video recordings.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-xUotvU\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_64afd4fa01477\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/www.researchgate.net\/publication\/353260219_FetalNet_Multi-task_Deep_Learning_Framework_for_Fetal_Ultrasound_Biometric_Measurements\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_blank\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"FetalNet: Multi-Task Deep Learning Framework for Fetal Ultrasound Biometric Measurements\u00a0","post_excerpt":"In: Conference on Neural Information Processing, 2021.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"fetalnet-multi-task-deep-learning-framework-for-fetal-ultrasound-biometric-measurements","to_ping":"","pinged":"","post_modified":"2024-01-05 13:47:41","post_modified_gmt":"2024-01-05 12:47:41","post_content_filtered":"","post_parent":0,"guid":"https:\/\/new.sano.science\/?post_type=research&#038;p=12651","menu_order":83,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":12639,"post_author":"5","post_date":"2023-07-13 12:36:12","post_date_gmt":"2023-07-13 
10:36:12","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-SZh5no\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\">P\u0142otka, Szymon; Klasa, Adam; Lisowska, Aneta; Seliga-Siwecka, Joanna; Lipa, Micha\u0142; Trzci\u0144ski, Tomasz; Sitek, Arkadiusz<\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-iBvqOp\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-JovYc3\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Objective. This work investigates the use of deep convolutional neural networks (CNN) to automatically perform measurements of fetal body parts, including head circumference, biparietal diameter, abdominal circumference and femur length, and to estimate gestational age and fetal weight using fetal ultrasound videos. Approach. We developed a novel multi-task CNN-based spatio-temporal fetal US feature extraction and standard plane detection algorithm (called FUVAI) and evaluated the method on 50 freehand fetal US video scans. We compared FUVAI fetal biometric measurements with measurements made by five experienced sonographers at two time points separated by at least two weeks. Intra- and inter-observer variabilities were estimated. Main results. We found that automated fetal biometric measurements obtained by FUVAI were comparable to the measurements performed by experienced sonographers. The observed differences in measurement values were within the range of inter- and intra-observer variability. Moreover, analysis has shown that these differences were not statistically significant when comparing any individual medical expert to our model. Significance. 
We argue that FUVAI has the potential to assist sonographers who perform fetal biometric measurements in clinical settings by providing them with suggestions regarding the best measuring frames, along with automated measurements. Moreover, FUVAI is able to perform these tasks in just a few seconds, which is a huge difference compared to the average of six minutes taken by sonographers. This is significant, given the shortage of medical experts capable of interpreting fetal ultrasound images in numerous countries.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-hOhI3D\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_64afd38695946\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/iopscience.iop.org\/article\/10.1088\/1361-6560\/ac4d85\/pdf\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_blank\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"Deep learning fetal ultrasound video model match human observers in biometric measurements\u00a0","post_excerpt":"In: Physics in Medicine & Biology, 2022.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"deep-learning-fetal-ultrasound-video-model-match-human-observers-in-biometric-measurements","to_ping":"","pinged":"","post_modified":"2024-01-05 13:49:17","post_modified_gmt":"2024-01-05 
12:49:17","post_content_filtered":"","post_parent":0,"guid":"https:\/\/new.sano.science\/?post_type=research&#038;p=12639","menu_order":68,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":12628,"post_author":"5","post_date":"2023-07-13 11:53:41","post_date_gmt":"2023-07-13 09:53:41","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-ni8pgc\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\"><strong>Korzeniowski P, Plotka S<\/strong>, Brawura-Biskupski-Samaha R, <strong>Sitek A<\/strong><\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-Z7xpPa\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-vQfTCA\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Spina Bifida (SB) is a birth defect developed during the early stage of pregnancy in which there is incomplete closing of the spine around the spinal cord. The growing interest in fetoscopic Spina-Bifida repair, which is performed in fetuses who are still in the pregnant uterus, prompts the need for appropriate training. The learning curve for such procedures is steep and requires excellent procedural skills. Computer-based virtual reality (VR) simulation systems offer a safe, cost-effective, and configurable training environment free from ethical and patient safety issues. However, to the best of our knowledge, there are currently no commercial or experimental VR training simulation systems available for fetoscopic SB-repair procedures. In this paper, we propose a novel VR simulator for core manual skills training for SB-repair. 
An initial simulation realism validation study was carried out by obtaining subjective feedback (face and content validity) from 14 clinicians. The overall simulation realism was on average marked 4.07 on a 5-point Likert scale (1 - very unrealistic, 5 - very realistic). Its usefulness as a training tool for SB-repair as well as in learning fundamental laparoscopic skills was marked 4.63 and 4.80, respectively. These results indicate that VR simulation of fetoscopic procedures may contribute to surgical training without putting fetuses and their mothers at risk. It could also facilitate wider adoption of fetoscopic procedures in place of much more invasive open fetal surgeries.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-NG4WG4\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_64afc99327015\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/ieeexplore.ieee.org\/document\/9981920\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_blank\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"Virtual Reality Simulator for Fetoscopic Spina Bifida Repair Surgery\u00a0","post_excerpt":"In: IEEE international conference on intelligent robots and systems IROS, 2022.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"virtual-reality-simulator-for-fetoscopic-spina-bifida-repair-surgery","to_ping":"","pinged":"","post_modified":"2024-01-10 19:46:46","post_modified_gmt":"2024-01-10 
18:46:46","post_content_filtered":"","post_parent":0,"guid":"https:\/\/new.sano.science\/?post_type=research&#038;p=12628","menu_order":71,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":28554,"post_author":"8","post_date":"2026-01-31 19:47:50","post_date_gmt":"2026-01-31 18:47:50","post_content":"<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-7IiDS7\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Mateusz W\u00f3jcikowski, Diego Dall'Alba, Sabina Martyniak, Szymon P\u0142otka, Renata Szydlak, Piotr Walecki, Andrzej A Kononowicz, Przemyslaw Korzeniowski<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"20px\",\"epAnimationGeneratedClass\":\"edplus_anim-eOcEsc\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:20px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-7IiDS7\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><strong>VR Simulator for Robotic-Assisted Surgery Training<br><\/strong>This study explored the use of a <strong>VR-based robotic surgery simulator<\/strong> designed for medical education. The simulator, compatible with Meta Quest 2\/3\/Pro headsets, was tested by 67 international medical students to evaluate its usability and educational value.<br>The system reproduces the cholecystectomy procedure using the Extended Position-Based Dynamics (XPBD) method to simulate soft tissue behavior in real time (1 kHz physics, 90 Hz graphics). Most participants rated the simulator as a valuable learning tool, with over 80% reporting improved understanding of anatomy and surgical workflows.<br>Students appreciated the safe, immersive practice environment and the opportunity for repeated training. 
Suggested improvements included enhancing tissue realism, diversifying the range of surgical scenarios, and optimizing the user interface for VR interaction. Overall, the project highlights the strong potential of VR simulators to transform surgical training.<br>Read more in the full article at the link below.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"20px\",\"epAnimationGeneratedClass\":\"edplus_anim-eOcEsc\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:20px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-7IiDS7\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Conference: Hamlyn Symposium on Medical Robotics 2025<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"20px\",\"epAnimationGeneratedClass\":\"edplus_anim-bNjJc0\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:20px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_697e4d14f1bac\",\"name\":\"acf\/button\",\"data\":{\"title\":\"Read\/Download\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/events.plgrid.pl\/event\/89\/contributions\/745\/attachments\/195\/440\/VRSR_for_Marian_2.pdf\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_self\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"Affordable Virtual Reality Simulation for Robotic Surgery: Preliminary Usability and Educational Evaluation","post_excerpt":"Conference abstract:  Hamlyn Symposium on Medical Robotics 2025, 
2025","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"affordable-virtual-reality-simulation-for-robotic-surgery-preliminary-usability-and-educational-evaluation","to_ping":"","pinged":"","post_modified":"2026-01-31 19:47:51","post_modified_gmt":"2026-01-31 18:47:51","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=28554","menu_order":0,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"}]},"_links":{"self":[{"href":"https:\/\/sano.science\/index.php\/wp-json\/wp\/v2\/people\/12053","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/sano.science\/index.php\/wp-json\/wp\/v2\/people"}],"about":[{"href":"https:\/\/sano.science\/index.php\/wp-json\/wp\/v2\/types\/people"}],"version-history":[{"count":12,"href":"https:\/\/sano.science\/index.php\/wp-json\/wp\/v2\/people\/12053\/revisions"}],"predecessor-version":[{"id":28568,"href":"https:\/\/sano.science\/index.php\/wp-json\/wp\/v2\/people\/12053\/revisions\/28568"}],"wp:featuredmedia":[{"embeddable":true,"href":"https:\/\/sano.science\/index.php\/wp-json\/wp\/v2\/media\/18503"}],"wp:attachment":[{"href":"https:\/\/sano.science\/index.php\/wp-json\/wp\/v2\/media?parent=12053"}],"wp:term":[{"taxonomy":"people_teams","embeddable":true,"href":"https:\/\/sano.science\/index.php\/wp-json\/wp\/v2\/people_teams?post=12053"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}