{"id":554,"date":"2023-05-14T12:44:15","date_gmt":"2023-05-14T10:44:15","guid":{"rendered":"https:\/\/sano.empressia.dev\/?post_type=people&#038;p=554"},"modified":"2026-01-31T19:51:00","modified_gmt":"2026-01-31T18:51:00","slug":"przemyslaw-korzeniowski","status":"publish","type":"people","link":"https:\/\/sano.science\/people\/przemyslaw-korzeniowski\/","title":{"rendered":"Przemys\u0142aw Korzeniowski"},"excerpt":{"rendered":"<p>Research Team Leader of Medical Imaging and Robotics and Head of VR &#038; Robotics Lab<\/p>\n","protected":false},"featured_media":18499,"template":"","people_teams":[24],"class_list":["post-554","people","type-people","status-publish","has-post-thumbnail","hentry","people_teams-alumni"],"yoast_head":"<!-- This site is optimized with the Yoast SEO Premium plugin v27.3 (Yoast SEO v27.3) - https:\/\/yoast.com\/product\/yoast-seo-premium-wordpress\/ -->\n<title>Przemys\u0142aw Korzeniowski - Centre for Computational Personalized Medicine<\/title>\n<meta name=\"robots\" content=\"index, follow, max-snippet:-1, max-image-preview:large, max-video-preview:-1\" \/>\n<link rel=\"canonical\" href=\"https:\/\/sano.science\/people\/przemyslaw-korzeniowski\/\" \/>\n<meta property=\"og:locale\" content=\"en_US\" \/>\n<meta property=\"og:type\" content=\"article\" \/>\n<meta property=\"og:title\" content=\"Przemys\u0142aw Korzeniowski\" \/>\n<meta property=\"og:description\" content=\"Research Team Leader of Medical Imaging and Robotics and Head of VR &amp; Robotics Lab\" \/>\n<meta property=\"og:url\" content=\"https:\/\/sano.science\/people\/przemyslaw-korzeniowski\/\" \/>\n<meta property=\"og:site_name\" content=\"Centre for Computational Personalized Medicine\" \/>\n<meta property=\"article:publisher\" content=\"https:\/\/www.facebook.com\/sano.science\/\" \/>\n<meta property=\"article:modified_time\" content=\"2026-01-31T18:51:00+00:00\" \/>\n<meta property=\"og:image\" 
content=\"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/Przemyslaw-Korzeniowski-Sano.png\" \/>\n\t<meta property=\"og:image:width\" content=\"1000\" \/>\n\t<meta property=\"og:image:height\" content=\"1000\" \/>\n\t<meta property=\"og:image:type\" content=\"image\/png\" \/>\n<meta name=\"twitter:card\" content=\"summary_large_image\" \/>\n<meta name=\"twitter:site\" content=\"@sanoscience\" \/>\n<meta name=\"twitter:label1\" content=\"Est. reading time\" \/>\n\t<meta name=\"twitter:data1\" content=\"1 minute\" \/>\n<script type=\"application\/ld+json\" class=\"yoast-schema-graph\">{\"@context\":\"https:\\\/\\\/schema.org\",\"@graph\":[{\"@type\":\"WebPage\",\"@id\":\"https:\\\/\\\/sano.science\\\/people\\\/przemyslaw-korzeniowski\\\/\",\"url\":\"https:\\\/\\\/sano.science\\\/people\\\/przemyslaw-korzeniowski\\\/\",\"name\":\"Przemys\u0142aw Korzeniowski - Centre for Computational Personalized Medicine\",\"isPartOf\":{\"@id\":\"https:\\\/\\\/sano.science\\\/#website\"},\"primaryImageOfPage\":{\"@id\":\"https:\\\/\\\/sano.science\\\/people\\\/przemyslaw-korzeniowski\\\/#primaryimage\"},\"image\":{\"@id\":\"https:\\\/\\\/sano.science\\\/people\\\/przemyslaw-korzeniowski\\\/#primaryimage\"},\"thumbnailUrl\":\"https:\\\/\\\/sano.science\\\/wp-content\\\/uploads\\\/2023\\\/05\\\/Przemyslaw-Korzeniowski-Sano.png\",\"datePublished\":\"2023-05-14T10:44:15+00:00\",\"dateModified\":\"2026-01-31T18:51:00+00:00\",\"breadcrumb\":{\"@id\":\"https:\\\/\\\/sano.science\\\/people\\\/przemyslaw-korzeniowski\\\/#breadcrumb\"},\"inLanguage\":\"en-US\",\"potentialAction\":[{\"@type\":\"ReadAction\",\"target\":[\"https:\\\/\\\/sano.science\\\/people\\\/przemyslaw-korzeniowski\\\/\"]}]},{\"@type\":\"ImageObject\",\"inLanguage\":\"en-US\",\"@id\":\"https:\\\/\\\/sano.science\\\/people\\\/przemyslaw-korzeniowski\\\/#primaryimage\",\"url\":\"https:\\\/\\\/sano.science\\\/wp-content\\\/uploads\\\/2023\\\/05\\\/Przemyslaw-Korzeniowski-Sano.png\",\"contentUrl\":\"https:\\\/\\\/sano.scie
nce\\\/wp-content\\\/uploads\\\/2023\\\/05\\\/Przemyslaw-Korzeniowski-Sano.png\",\"width\":1000,\"height\":1000},{\"@type\":\"BreadcrumbList\",\"@id\":\"https:\\\/\\\/sano.science\\\/people\\\/przemyslaw-korzeniowski\\\/#breadcrumb\",\"itemListElement\":[{\"@type\":\"ListItem\",\"position\":1,\"name\":\"Home\",\"item\":\"https:\\\/\\\/sano.science\\\/\"},{\"@type\":\"ListItem\",\"position\":2,\"name\":\"People\",\"item\":\"https:\\\/\\\/sano.science\\\/people\\\/\"},{\"@type\":\"ListItem\",\"position\":3,\"name\":\"Alumni\",\"item\":\"https:\\\/\\\/sano.science\\\/people-teams\\\/alumni\\\/\"},{\"@type\":\"ListItem\",\"position\":4,\"name\":\"Przemys\u0142aw Korzeniowski\"}]},{\"@type\":\"WebSite\",\"@id\":\"https:\\\/\\\/sano.science\\\/#website\",\"url\":\"https:\\\/\\\/sano.science\\\/\",\"name\":\"Centre for Computational Personalized Medicine\",\"description\":\"Sano \u2013 Centre for Computational Medicine\",\"publisher\":{\"@id\":\"https:\\\/\\\/sano.science\\\/#organization\"},\"potentialAction\":[{\"@type\":\"SearchAction\",\"target\":{\"@type\":\"EntryPoint\",\"urlTemplate\":\"https:\\\/\\\/sano.science\\\/?s={search_term_string}\"},\"query-input\":{\"@type\":\"PropertyValueSpecification\",\"valueRequired\":true,\"valueName\":\"search_term_string\"}}],\"inLanguage\":\"en-US\"},{\"@type\":\"Organization\",\"@id\":\"https:\\\/\\\/sano.science\\\/#organization\",\"name\":\"Sano \u2013 Centre for Computational Medicine\",\"alternateName\":\"Sano\",\"url\":\"https:\\\/\\\/sano.science\\\/\",\"logo\":{\"@type\":\"ImageObject\",\"inLanguage\":\"en-US\",\"@id\":\"https:\\\/\\\/sano.science\\\/#\\\/schema\\\/logo\\\/image\\\/\",\"url\":\"https:\\\/\\\/sano.science\\\/wp-content\\\/uploads\\\/2024\\\/05\\\/logo_sano_podstawowe.png\",\"contentUrl\":\"https:\\\/\\\/sano.science\\\/wp-content\\\/uploads\\\/2024\\\/05\\\/logo_sano_podstawowe.png\",\"width\":700,\"height\":265,\"caption\":\"Sano \u2013 Centre for Computational 
Medicine\"},\"image\":{\"@id\":\"https:\\\/\\\/sano.science\\\/#\\\/schema\\\/logo\\\/image\\\/\"},\"sameAs\":[\"https:\\\/\\\/www.facebook.com\\\/sano.science\\\/\",\"https:\\\/\\\/x.com\\\/sanoscience\",\"https:\\\/\\\/www.linkedin.com\\\/company\\\/sanoscience\\\/\",\"https:\\\/\\\/www.youtube.com\\\/channel\\\/UCDZ_8TcjMWUG2ZcgKKgfpwQ\",\"https:\\\/\\\/bsky.app\\\/profile\\\/sanoscience.bsky.social\"]}]}<\/script>\n<!-- \/ Yoast SEO Premium plugin. -->","yoast_head_json":{"title":"Przemys\u0142aw Korzeniowski - Centre for Computational Personalized Medicine","robots":{"index":"index","follow":"follow","max-snippet":"max-snippet:-1","max-image-preview":"max-image-preview:large","max-video-preview":"max-video-preview:-1"},"canonical":"https:\/\/sano.science\/people\/przemyslaw-korzeniowski\/","og_locale":"en_US","og_type":"article","og_title":"Przemys\u0142aw Korzeniowski","og_description":"Research Team Leader of Medical Imaging and Robotics and Head of VR & Robotics Lab","og_url":"https:\/\/sano.science\/people\/przemyslaw-korzeniowski\/","og_site_name":"Centre for Computational Personalized Medicine","article_publisher":"https:\/\/www.facebook.com\/sano.science\/","article_modified_time":"2026-01-31T18:51:00+00:00","og_image":[{"width":1000,"height":1000,"url":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/Przemyslaw-Korzeniowski-Sano.png","type":"image\/png"}],"twitter_card":"summary_large_image","twitter_site":"@sanoscience","twitter_misc":{"Est. 
reading time":"1 minute"},"schema":{"@context":"https:\/\/schema.org","@graph":[{"@type":"WebPage","@id":"https:\/\/sano.science\/people\/przemyslaw-korzeniowski\/","url":"https:\/\/sano.science\/people\/przemyslaw-korzeniowski\/","name":"Przemys\u0142aw Korzeniowski - Centre for Computational Personalized Medicine","isPartOf":{"@id":"https:\/\/sano.science\/#website"},"primaryImageOfPage":{"@id":"https:\/\/sano.science\/people\/przemyslaw-korzeniowski\/#primaryimage"},"image":{"@id":"https:\/\/sano.science\/people\/przemyslaw-korzeniowski\/#primaryimage"},"thumbnailUrl":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/Przemyslaw-Korzeniowski-Sano.png","datePublished":"2023-05-14T10:44:15+00:00","dateModified":"2026-01-31T18:51:00+00:00","breadcrumb":{"@id":"https:\/\/sano.science\/people\/przemyslaw-korzeniowski\/#breadcrumb"},"inLanguage":"en-US","potentialAction":[{"@type":"ReadAction","target":["https:\/\/sano.science\/people\/przemyslaw-korzeniowski\/"]}]},{"@type":"ImageObject","inLanguage":"en-US","@id":"https:\/\/sano.science\/people\/przemyslaw-korzeniowski\/#primaryimage","url":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/Przemyslaw-Korzeniowski-Sano.png","contentUrl":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/Przemyslaw-Korzeniowski-Sano.png","width":1000,"height":1000},{"@type":"BreadcrumbList","@id":"https:\/\/sano.science\/people\/przemyslaw-korzeniowski\/#breadcrumb","itemListElement":[{"@type":"ListItem","position":1,"name":"Home","item":"https:\/\/sano.science\/"},{"@type":"ListItem","position":2,"name":"People","item":"https:\/\/sano.science\/people\/"},{"@type":"ListItem","position":3,"name":"Alumni","item":"https:\/\/sano.science\/people-teams\/alumni\/"},{"@type":"ListItem","position":4,"name":"Przemys\u0142aw Korzeniowski"}]},{"@type":"WebSite","@id":"https:\/\/sano.science\/#website","url":"https:\/\/sano.science\/","name":"Centre for Computational Personalized Medicine","description":"Sano \u2013 Centre for 
Computational Medicine","publisher":{"@id":"https:\/\/sano.science\/#organization"},"potentialAction":[{"@type":"SearchAction","target":{"@type":"EntryPoint","urlTemplate":"https:\/\/sano.science\/?s={search_term_string}"},"query-input":{"@type":"PropertyValueSpecification","valueRequired":true,"valueName":"search_term_string"}}],"inLanguage":"en-US"},{"@type":"Organization","@id":"https:\/\/sano.science\/#organization","name":"Sano \u2013 Centre for Computational Medicine","alternateName":"Sano","url":"https:\/\/sano.science\/","logo":{"@type":"ImageObject","inLanguage":"en-US","@id":"https:\/\/sano.science\/#\/schema\/logo\/image\/","url":"https:\/\/sano.science\/wp-content\/uploads\/2024\/05\/logo_sano_podstawowe.png","contentUrl":"https:\/\/sano.science\/wp-content\/uploads\/2024\/05\/logo_sano_podstawowe.png","width":700,"height":265,"caption":"Sano \u2013 Centre for Computational Medicine"},"image":{"@id":"https:\/\/sano.science\/#\/schema\/logo\/image\/"},"sameAs":["https:\/\/www.facebook.com\/sano.science\/","https:\/\/x.com\/sanoscience","https:\/\/www.linkedin.com\/company\/sanoscience\/","https:\/\/www.youtube.com\/channel\/UCDZ_8TcjMWUG2ZcgKKgfpwQ","https:\/\/bsky.app\/profile\/sanoscience.bsky.social"]}]}},"acf":[],"meta_data":{"quote":"","description":"<p>He obtained M.Sc. in Advanced Computing in 2010 and Ph.D. in Modelling and Simulation in 2016 from Imperial College London. His work and research at Modelling in Medicine and Surgery Research Group focused on the development and validation of virtual reality simulators. He gained practical experience in the industry at the R&amp;D Department of Volkswagen Group, where he was a key team-member of a newly established Virtual Engineering Lab, the forefront of digital transformation of the whole company. 
His main research interest are virtual and augmented reality, real-time physically-based simulation, massively-parallel computing, haptic interfaces as well as aspects of software engineering and architecture of simulation software.<\/p>\n","social_media":[{"icon":{"ID":11984,"id":11984,"title":"github","filename":"github.svg","filesize":14876,"url":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/github.svg","link":"https:\/\/sano.science\/people\/maciej-malawski\/github\/","alt":"","author":"5","description":"","caption":"","name":"github","status":"inherit","uploaded_to":531,"date":"2023-07-06 10:56:34","modified":"2023-07-06 10:56:34","menu_order":0,"mime_type":"image\/svg+xml","type":"image","subtype":"svg+xml","icon":"https:\/\/sano.science\/wp-includes\/images\/media\/default.png","width":1,"height":1,"sizes":{"thumbnail":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/github.svg","thumbnail-width":150,"thumbnail-height":150,"medium":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/github.svg","medium-width":300,"medium-height":300,"medium_large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/github.svg","medium_large-width":768,"medium_large-height":1,"large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/github.svg","large-width":1024,"large-height":1024,"1536x1536":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/github.svg","1536x1536-width":1,"1536x1536-height":1,"2048x2048":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/github.svg","2048x2048-width":1,"2048x2048-height":1}},"link":"https:\/\/github.com\/korzen","name":"GitHub"},{"icon":{"ID":11986,"id":11986,"title":"google","filename":"google.svg","filesize":14070,"url":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/google.svg","link":"https:\/\/sano.science\/people\/maciej-malawski\/google\/","alt":"","author":"5","description":"","caption":"","name":"google","status":"inherit","uploaded_to":531,"date":"2023-07-06 
10:59:21","modified":"2023-07-06 10:59:21","menu_order":0,"mime_type":"image\/svg+xml","type":"image","subtype":"svg+xml","icon":"https:\/\/sano.science\/wp-includes\/images\/media\/default.png","width":1,"height":1,"sizes":{"thumbnail":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/google.svg","thumbnail-width":150,"thumbnail-height":150,"medium":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/google.svg","medium-width":300,"medium-height":300,"medium_large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/google.svg","medium_large-width":768,"medium_large-height":1,"large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/google.svg","large-width":1024,"large-height":1024,"1536x1536":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/google.svg","1536x1536-width":1,"1536x1536-height":1,"2048x2048":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/google.svg","2048x2048-width":1,"2048x2048-height":1}},"link":"https:\/\/scholar.google.com\/citations?hl=en&user=lNOtdSwAAAAJ","name":"Google Scholar"},{"icon":{"ID":11992,"id":11992,"title":"orcid","filename":"orcid.svg","filesize":15590,"url":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/orcid.svg","link":"https:\/\/sano.science\/people\/maciej-malawski\/orcid\/","alt":"","author":"5","description":"","caption":"","name":"orcid","status":"inherit","uploaded_to":531,"date":"2023-07-06 11:21:29","modified":"2023-07-06 
11:21:29","menu_order":0,"mime_type":"image\/svg+xml","type":"image","subtype":"svg+xml","icon":"https:\/\/sano.science\/wp-includes\/images\/media\/default.png","width":1,"height":1,"sizes":{"thumbnail":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/orcid.svg","thumbnail-width":150,"thumbnail-height":150,"medium":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/orcid.svg","medium-width":300,"medium-height":300,"medium_large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/orcid.svg","medium_large-width":768,"medium_large-height":1,"large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/orcid.svg","large-width":1024,"large-height":1024,"1536x1536":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/orcid.svg","1536x1536-width":1,"1536x1536-height":1,"2048x2048":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/orcid.svg","2048x2048-width":1,"2048x2048-height":1}},"link":"https:\/\/orcid.org\/0000-0001-5391-1295","name":"ORCID"},{"icon":{"ID":11994,"id":11994,"title":"linkedin","filename":"linkedin.svg","filesize":914,"url":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/linkedin.svg","link":"https:\/\/sano.science\/people\/maciej-malawski\/linkedin-2\/","alt":"","author":"5","description":"","caption":"","name":"linkedin-2","status":"inherit","uploaded_to":531,"date":"2023-07-06 11:24:13","modified":"2023-07-06 
11:24:13","menu_order":0,"mime_type":"image\/svg+xml","type":"image","subtype":"svg+xml","icon":"https:\/\/sano.science\/wp-includes\/images\/media\/default.png","width":1,"height":1,"sizes":{"thumbnail":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/linkedin.svg","thumbnail-width":150,"thumbnail-height":150,"medium":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/linkedin.svg","medium-width":300,"medium-height":300,"medium_large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/linkedin.svg","medium_large-width":768,"medium_large-height":1,"large":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/linkedin.svg","large-width":1024,"large-height":1024,"1536x1536":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/linkedin.svg","1536x1536-width":1,"1536x1536-height":1,"2048x2048":"https:\/\/sano.science\/wp-content\/uploads\/2023\/05\/linkedin.svg","2048x2048-width":1,"2048x2048-height":1}},"link":"https:\/\/www.linkedin.com\/in\/korzeniowski-phd\/","name":"LinkedIn"}],"tabs":[{"title":"Contact","content":"<p><strong>Sano Centre for Computational Medicine<\/strong><\/p>\n<p>Czarnowiejska 36, 33-332, Cracow, Poland<\/p>\n<p><strong>Email:<\/strong> <a href=\"mailto:p.korzeniowski@sanoscience.org\"><span id=\"eeb-948650-948990\">p.korzeniowski@sanoscience.org<\/span><\/a><\/p>\n<p><strong>Tel:<\/strong> <a href=\"tel:0048123072737\">+48 12\u00a0307 27 37<\/a><\/p>\n"}],"email":"","position_with_team":{"text_before_link":"Research Team Leader of","link_text":"Medical Imaging and Robotics","text_after_link":"and Head of VR & Robotics Lab","link":"https:\/\/sano.science\/research-teams\/health-informatics-group-higs\/"},"publications":[{"ID":23781,"post_author":"8","post_date":"2025-05-20 17:20:11","post_date_gmt":"2025-05-20 15:20:11","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-sHGyzO\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\" 
id=\"h-diego-dall-alba-nbsp-michal-nasket-nbsp-sabina-kaminska-nbsp-przemyslaw-korzeniowski\">Diego Dall'Alba,&nbsp;Micha\u0142 Nasket,&nbsp;Sabina Kaminska,&nbsp;Przemys\u0142aw Korzeniowski<\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"30px\",\"epAnimationGeneratedClass\":\"edplus_anim-UIbTWv\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:30px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-mmx73R\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Robotic-assisted surgery is evolving at a fast pace and holds significant potential for improvements through automation. Yet, methods like Reinforcement Learning (RL), which require extensive task repetition, are challenging to apply directly in real surgical scenarios due to safety and feasibility concerns. This highlights the importance of using simulated environments that combine realism with computational efficiency and scalability.<br>In response to this need, we present FF-SRL (Fast and Flexible Surgical Reinforcement Learning) \u2014 a high-speed, GPU-based simulation platform tailored for robotic surgery. Unlike traditional setups, FF-SRL runs both the physics-based simulation and the RL training process entirely on a single GPU. This design eliminates common performance limitations caused by data exchange between the CPU and GPU, significantly boosting learning speed.<br>Experimental results demonstrate that FF-SRL can decrease the training duration for intricate tasks like tissue manipulation by approximately tenfold \u2014 achieving performance in just a few minutes compared to conventional hybrid simulators. This level of efficiency opens new possibilities for testing and refining RL algorithms in surgical contexts. 
To support further research and collaboration, we have made the FF-SRL framework freely accessible to the research community.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"30px\",\"epAnimationGeneratedClass\":\"edplus_anim-UIbTWv\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:30px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-jsuB1V\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><strong>Authors<\/strong>: <a href=\"https:\/\/sano.science\/people\/diego-dallalba\/\">Diego Dall'Alba<\/a>,&nbsp;<a href=\"https:\/\/sano.science\/people\/michal-naskret\/\">Micha\u0142 Naskret<\/a>,&nbsp;<a href=\"https:\/\/sano.science\/wp-content\/uploads\/2023\/07\/Sabina-Kaminska_Sano.png\">Sabina Kaminska<\/a>,&nbsp;<a href=\"https:\/\/sano.science\/people\/przemyslaw-korzeniowski\/\">Przemys\u0142aw Korzeniowski<\/a><\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-NEuOqB\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><strong>DOI<\/strong>: 10.1109\/IROS58592.2024.10801658<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-NEuOqB\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><strong>Keywords<\/strong>: Robotic-assisted surgery, Reinforcement Learning (RL), Surgical simulation, GPU-based simulation, Fast and Flexible Surgical Reinforcement Learning (FF-SRL), Real-time physics simulation, Computational efficiency, Simulation platform<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"30px\",\"epAnimationGeneratedClass\":\"edplus_anim-UIbTWv\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:30px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button 
{\"id\":\"block_682c9edf5049a\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/arxiv.org\/abs\/2503.18616\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_blank\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"FF-SRL: High Performance GPU-Based Surgical Simulation For Robot Learning","post_excerpt":"In: https:\/\/arxiv.org, 2025","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"ff-srl-high-performance-gpu-based-surgical-simulation-for-robot-learning","to_ping":"","pinged":"","post_modified":"2025-05-20 17:30:07","post_modified_gmt":"2025-05-20 15:30:07","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=23781","menu_order":0,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":23729,"post_author":"8","post_date":"2025-05-15 13:40:55","post_date_gmt":"2025-05-15 11:40:55","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-AiXlYn\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\" id=\"h-szymon-plotka-karol-pustelnik-paula-szenejko-kinga-zebrowska-iga-rzucidlo-szymanska-natalia-szymecka-samaha-tomasz-legowik-katarzyna-kosinska-kaczynska-przemyslaw-korzeniowski-piotr-bilinski-asma-khalil-robert-brawura-biskupski-samaha-ivana-isgum-clara-i-sanchez-arkadiusz-sitek\">Szymon P\u0142otka, Karol Pustelnik, Paula Szenejko, Kinga \u017bebrowska, Iga Rzucid\u0142o-Szyma\u0144ska, Natalia Szymecka-Samaha, Tomasz \u0141\u0119gowik, Katarzyna Kosi\u0144ska-Kaczy\u0144ska, Przemys\u0142aw Korzeniowski, Piotr Bili\u0144ski, Asma Khalil, Robert 
Brawura-Biskupski-Samaha, Ivana I\u0161gum, Clara I S\u00e1nchez, Arkadiusz Sitek\u00a0<\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-NS0cQ3\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-YA57he\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">In this collaborative study, the authors propose a deep learning-based method aimed at improving the precision and efficiency of fetal biometry in prenatal ultrasound.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-YA57he\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">The study, titled \"Direct estimation of fetal biometry measurements from ultrasound video scans through deep learning,\" tackles a long-standing challenge in obstetric imaging: the reliance on skilled professionals to manually identify specific standard anatomical planes\u2014such as those of the fetal head, abdomen, and femur\u2014for measurement. This traditional approach demands expertise, is time-intensive, and often results in inconsistencies due to intra- and interobserver variation.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-KwIbo8\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">To address these limitations, the authors developed an end-to-end deep learning framework capable of automatically identifying the required standard planes and directly extracting key biometric measurements from entire ultrasound video sequences. 
Unlike previous methods that require manual frame selection or partial automation, this approach performs both detection and measurement in a unified pipeline.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-L0zWmC\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Importantly, it is\u2014according to the authors\u2014the first method to estimate the full set of fetal biometry parameters directly from ultrasound video data without human intervention. This advancement has the potential to improve diagnostic consistency and reduce operator dependency, particularly in resource-constrained settings, marking a significant step forward for AI-assisted prenatal diagnostics.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-NS0cQ3\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_6825d36429d0f\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/www.ajogmfm.org\/article\/S2589-9333(25)00024-2\/abstract\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_self\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-NS0cQ3\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-j8TR4a\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" 
eplus-wrapper\"><strong>Authors<\/strong>: <a href=\"https:\/\/sano.science\/people\/szymon-plotka\/\">Szymon P\u0142otka<\/a>, Karol Pustelnik, Paula Szenejko, Kinga \u017bebrowska, Iga Rzucid\u0142o-Szyma\u0144ska, Natalia Szymecka-Samaha, Tomasz \u0141\u0119gowik, Katarzyna Kosi\u0144ska-Kaczy\u0144ska, <a href=\"https:\/\/sano.science\/people\/przemyslaw-korzeniowski\/\">Przemys\u0142aw Korzeniowski<\/a>, Piotr Bili\u0144ski, Asma Khalil, Robert Brawura-Biskupski-Samaha, Ivana I\u0161gum, Clara I S\u00e1nchez, Arkadiusz Sitek\u00a0<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-j8TR4a\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><strong>DOI<\/strong>: 10.1016\/j.ajogmf.2025.101623&nbsp;<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-mJrtPh\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><strong>Keywords:<\/strong> deep learning in medicine, fetal biometry, ultrasound video analysis, prenatal ultrasound<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-AQ3sIC\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><\/p>\n<!-- \/wp:paragraph -->","post_title":"Direct estimation of fetal biometry measurements from ultrasound video scans through deep learning","post_excerpt":"Article in journal: American Journal of Obstetrics & Gynecology MFM, 2025","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"direct-estimation-of-fetal-biometry-measurements-from-ultrasound-video-scans-through-deep-learning-2","to_ping":"","pinged":"","post_modified":"2025-05-15 13:48:51","post_modified_gmt":"2025-05-15 
11:48:51","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=23729","menu_order":0,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":20967,"post_author":"8","post_date":"2025-01-21 16:51:16","post_date_gmt":"2025-01-21 15:51:16","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-6CqjM2\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\" id=\"h-sabina-martyniak-joanna-kaleta-diego-dall-alba-michal-naskret-szymon-plotka-and-przemyslaw-korzeniowski\">Sabina Martyniak, Joanna Kaleta, Diego Dall'Alba, Micha\u0142 Naskr\u0119t, Szymon P\u0142otka, and Przemys\u0142aw Korzeniowski<br><\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"40px\",\"epAnimationGeneratedClass\":\"edplus_anim-tI7h1k\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:40px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-aU2f7H\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">This work presents an innovative approach to advancing computer-assisted surgical (CAS) systems by addressing challenges in training data quality and realism.&nbsp;<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-A5sGbH\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">In this study, the authors introduce a comprehensive pipeline for creating high-quality synthetic data tailored for modern CAS systems. This pipeline integrates an advanced surgical simulator capable of generating complex annotations that surpass those found in existing public datasets. 
The simulator also models intricate surgical interactions, including the dynamics between instruments and deformable anatomical structures, ensuring a more realistic simulation environment.&nbsp;<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-qQq76G\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">To further enhance data realism, the researchers developed a novel image-to-image translation method based on Stable Diffusion (SD) and Low-Rank Adaptation (LoRA). This technique minimizes the visual gap between synthetic and real-world images while preserving the simulator's detailed annotations. By leveraging only a small amount of real-world data, the method enables efficient training and generalizes well to practical applications, thereby improving CAS training and guidance.&nbsp;<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-g7BCRx\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">This innovative pipeline has been validated experimentally and is a significant step forward in bridging the gap between synthetic and real-world datasets for surgical applications. 
The dataset and code are available for the research community at<a href=\"https:\/\/github.com\/SanoScience\/SimuScope\" target=\"_blank\" rel=\"noreferrer noopener nofollow\"> github.com\/SanoScience\/SimuScope<\/a><\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"40px\",\"epAnimationGeneratedClass\":\"edplus_anim-tI7h1k\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:40px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-H3zZ17\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><strong>Authors<\/strong>: Sabina Martyniak, Joanna Kaleta, Diego Dall'Alba, Micha\u0142 Naskr\u0119t, Szymon P\u0142otka, and Przemys\u0142aw Korzeniowski<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-aU2f7H\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><strong>DOI<\/strong>: 10.48550\/arXiv.2412.02332&nbsp;<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"40px\",\"epAnimationGeneratedClass\":\"edplus_anim-tI7h1k\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:40px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_678fc2ae54c84\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/arxiv.org\/abs\/2412.02332\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_self\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"SimuScope: Realistic Endoscopic Synthetic Dataset Generation through Surgical Simulation and Diffusion 
Models","post_excerpt":"Conference manuscript in IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV), 2024","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"simuscope-realistic-endoscopic-synthetic-dataset-generation-through-surgical-simulation-and-diffusion-models","to_ping":"","pinged":"","post_modified":"2025-05-19 21:17:07","post_modified_gmt":"2025-05-19 19:17:07","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=20967","menu_order":0,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":21132,"post_author":"8","post_date":"2025-02-04 20:31:00","post_date_gmt":"2025-02-04 19:31:00","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-Or1mHV\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\" id=\"h-amanuel-ergogo-nbsp-diego-dall-alba-nbsp-przemyslaw-korzeniowski\">Amanuel Ergogo,&nbsp;Diego Dall'Alba,&nbsp;Przemyslaw Korzeniowski<\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"40px\",\"epAnimationGeneratedClass\":\"edplus_anim-juDPze\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:40px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-2mxAaw\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">HiLoTEL is a flexible virtual-reality framework for executing and learning tasks in virtual and physical environments. It enables human experts to collaborate with learning agents and intervene when necessary through human-in-the-loop imitation learning. HiLoTEL reduces the need to carry out repetitive tasks, providing the user with an intuitive supervision interface. The system is tested on a Pick-and-Place task, considering both teleoperated and passthrough interaction modalities. 
The results show that HiLoTEL improves success rates while maintaining human-level completion time and providing users with 71% hands-free supervision time, thus enabling effective human-robot collaboration.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"40px\",\"epAnimationGeneratedClass\":\"edplus_anim-juDPze\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:40px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-Qwi9b1\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><strong>Authors<\/strong>:\u00a0<a href=\"https:\/\/www.computer.org\/csdl\/search\/default?type=author&amp;givenName=Amanuel&amp;surname=Ergogo\" target=\"_blank\" rel=\"noreferrer noopener\">Amanuel Ergogo<\/a>,\u00a0<a href=\"https:\/\/www.computer.org\/csdl\/search\/default?type=author&amp;givenName=Diego&amp;surname=Dall%27Alba\" target=\"_blank\" rel=\"noreferrer noopener\">Diego Dall'Alba<\/a>,\u00a0\u00a0<a href=\"https:\/\/www.computer.org\/csdl\/search\/default?type=author&amp;givenName=Przemyslaw&amp;surname=Korzeniowski\" target=\"_blank\" rel=\"noreferrer noopener\">Przemyslaw Korzeniowski<\/a><\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-2RsxZw\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><strong>DOI<\/strong>: <a href=\"https:\/\/doi.org\/10.1109\/VRW62533.2024.00137\" target=\"_blank\" rel=\"noreferrer noopener\">10.1109\/VRW62533.2024.00137<\/a><\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"40px\",\"epAnimationGeneratedClass\":\"edplus_anim-juDPze\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:40px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button 
{\"id\":\"block_67a21da58a7e9\",\"name\":\"acf\/button\",\"data\":{\"title\":\"\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/www.computer.org\/csdl\/proceedings-article\/vrw\/2024\/744900a681\/1XlcE8sI6LC\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_self\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"HiLoTEL: Virtual Reality Robot Interface-Based Human-in-the-Loop Task Execution and Learning in the Physical World Through Its Digital Twin","post_excerpt":"Conference manuscript in IEEE VR, 2024","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"hilotel-virtual-reality-robot-interface-based-human-in-the-loop-task-execution-and-learning-in-the-physical-world-through-its-digital-twin","to_ping":"","pinged":"","post_modified":"2025-07-08 16:30:33","post_modified_gmt":"2025-07-08 14:30:33","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=21132","menu_order":0,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":14968,"post_author":"5","post_date":"2024-01-16 13:59:08","post_date_gmt":"2024-01-16 12:59:08","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-Ctao5l\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\">Michal K. 
Grzeszczyk, Szymon P\u0142otka, Beata Rebizant, Katarzyna Kosi\u0144ska-Kaczy\u0144ska, Micha\u0142 Lipa, Robert Brawura-Biskupski-Samaha, Przemys\u0142aw Korzeniowski, Tomasz Trzci\u0144ski, Arkadiusz Sitek<\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-cx4IQr\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-KXYa3n\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Medical data analysis often combines both imaging and tabular data processing using machine learning algorithms. While previous studies have investigated the impact of attention mechanisms on deep learning models, few have explored integrating attention modules and tabular data. In this paper, we introduce TabAttention, a novel module that enhances the performance of Convolutional Neural Networks (CNNs) with an attention mechanism that is trained conditionally on tabular data. Specifically, we extend the Convolutional Block Attention Module to 3D by adding a Temporal Attention Module that uses multi-head self-attention to learn attention maps. Furthermore, we enhance all attention modules by integrating tabular data embeddings. Our approach is demonstrated on the fetal birth weight (FBW) estimation task, using 92 fetal abdominal ultrasound video scans and fetal biometry measurements. Our results indicate that TabAttention outperforms clinicians and existing methods that rely on tabular and\/or imaging data for FBW prediction. This novel approach has the potential to improve computer-aided diagnosis in various clinical workflows where imaging and tabular data are combined. 
We provide a source code for integrating TabAttention in CNNs at\u00a0<a href=\"https:\/\/github.com\/SanoScience\/Tab-Attention\">https:\/\/github.com\/SanoScience\/Tab-Attention<\/a>.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-cx4IQr\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_65a67d89e2323\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/link.springer.com\/chapter\/10.1007\/978-3-031-43990-2_33\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_blank\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"TabAttention: Learning Attention Conditionally on Tabular Data","post_excerpt":"In: MICCAI, 2023.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"tabattention-learning-attention-conditionally-on-tabular-data","to_ping":"","pinged":"","post_modified":"2024-01-16 13:59:08","post_modified_gmt":"2024-01-16 12:59:08","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=14968","menu_order":33,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":15030,"post_author":"5","post_date":"2024-01-18 10:17:58","post_date_gmt":"2024-01-18 09:17:58","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-szoDDG\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\">Natalia Lipp, Pawe\u0142 Strojny, Agnieszka Strojny, S\u0142awomir \u015apiewak, 
Jan Argasi\u0144ski, Przemys\u0142aw Korzeniowski<\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-pOH2qF\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-UpwHJ3\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Simulation realism is a crucial factor for task performance in virtual reality. The issue is that it does not simply result from a simulation's graphical quality and is often connected to users' expectations and the capacity of the cognitive system. It is assumed that perceived realism may be affected by physiological arousal which is the basis of emotional reactions and of attention prioritizing. The main aim of the presented study is to verify whether perceptual characteristics of a virtual scene \u2013 complexity and vividness of virtual objects \u2013 affect perceived realism. The secondary aim is to test whether realism assessment changes because of arousal. An experimental study was conducted with 100 participants in total. Four experimental groups differ in terms of the complexity of a virtual scene (i.e., number of objects in the field of view) and vividness of virtual objects (i.e., figure-ground contrast). Participants were asked to perform a task, that was taking on the role of a marshaller and positioning the plane on the airport apron in the virtual reality simulation. During the task, electrodermal activity was measured. After the virtual session, participants assessed perceived realism. Results indicate that the complexity and vividness of virtual scene do not affect perceived realism directly. Physiological arousal (i.e., skin conductance level) is a moderator of the relationship between the vividness of the virtual scene and perceived realism. 
A high level of arousal increases realism assessment in vivid simulations.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-pOH2qF\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_65a8ecb3de7de\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/ieeexplore.ieee.org\/document\/10322268\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_blank\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"Are You Aroused Enough to See the Difference? The Role of Physiological Arousal in Perceiving Realism of Virtual Scene","post_excerpt":"In: IEEE ISMAR, 2023.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"are-you-aroused-enough-to-see-the-difference-the-role-of-physiological-arousal-in-perceiving-realism-of-virtual-scene","to_ping":"","pinged":"","post_modified":"2024-01-18 10:17:58","post_modified_gmt":"2024-01-18 09:17:58","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=15030","menu_order":8,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":15038,"post_author":"5","post_date":"2024-01-18 10:24:56","post_date_gmt":"2024-01-18 09:24:56","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-G8lzyX\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\">Joanna Kaleta, Diego Dall\u2019Alba, Szymon P\u0142otka, Przemys\u0142aw 
Korzeniowski<\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-MENX2Y\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-nsQHUU\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Computer-assisted surgical systems provide support information to the surgeon, which can improve the execution and overall outcome of the procedure. These systems are based on deep learning models that are trained on complex and challenging-to-annotate data. Generating synthetic data can overcome these limitations, but it is necessary to reduce the domain gap between real and synthetic data.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-MENX2Y\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_65a8ee413e9c1\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/link.springer.com\/article\/10.1007\/s11548-023-03030-w?utm_source=rct_congratemailt\\u0026utm_medium=email\\u0026utm_campaign=oa_20231107\\u0026utm_content=10.1007\/s11548-023-03030-w\\u0026fbclid=IwAR2GsWKZwJLqcP5wjhvqyHlHnHeughXGMz0l4xLmub2QrXHHr6VxxQxiJ-o\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_blank\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"Minimal data requirement for realistic endoscopic image generation with Stable 
Diffusion","post_excerpt":"In: International Journal of Computer Assisted Radiology and Surgery, 2023.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"minimal-data-requirement-for-realistic-endoscopic-image-generation-with-stable-diffusion","to_ping":"","pinged":"","post_modified":"2024-01-18 10:24:57","post_modified_gmt":"2024-01-18 09:24:57","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=15038","menu_order":32,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":15080,"post_author":"5","post_date":"2024-01-18 20:46:29","post_date_gmt":"2024-01-18 19:46:29","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-v86eth\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\">Natalia Lipp, Pawe\u0142 Strojny, Agnieszka Strojny, S\u0142awomir \u015apiewak, Jan Argasi\u0144ski, Przemys\u0142aw Korzeniowski<\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-mw314Z\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-Wmf9PZ\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">The main aims of the presented study are to verify whether the amount of textures in a virtual scene affects task performance and to test whether visual imagery changes the relationship between realism and task performance. An experimental study with three groups differed in visual realism was conducted (n=100). Participants were asked to perform a task: taking on the role of a marshaller and positioning the plane on the airport apron. Results indicate that texturing does not affect task performance. 
Visual imagery is a moderator of the relationship between perceived realism and task performance. A high level of imagery interferes with a high realism assessment decreasing task performance.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-mw314Z\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_65a97ff331c86\",\"name\":\"acf\/button\",\"data\":{\"title\":\"\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_self\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"Performing Tasks in Virtual Reality. 
Interplay between Realism and Visual Imagery","post_excerpt":"In: ACM VRST (The 29th ACM Symposium on Virtual Reality Software and Technology), 2023.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"performing-tasks-in-virtual-reality-interplay-between-realism-and-visual-imagery","to_ping":"","pinged":"","post_modified":"2024-01-18 20:46:29","post_modified_gmt":"2024-01-18 19:46:29","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=15080","menu_order":11,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":12628,"post_author":"5","post_date":"2023-07-13 11:53:41","post_date_gmt":"2023-07-13 09:53:41","post_content":"<!-- wp:heading {\"epAnimationGeneratedClass\":\"edplus_anim-ni8pgc\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<h2 class=\"wp-block-heading eplus-wrapper\"><strong>Korzeniowski P, Plotka S<\/strong>, Brawura-Biskupski-Samaha R, <strong>Sitek A<\/strong><\/h2>\n<!-- \/wp:heading -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-Z7xpPa\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-vQfTCA\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Spina Bifida (SB) is a birth defect developed during the early stage of pregnancy in which there is incomplete closing of the spine around the spinal cord. The growing interest in fetoscopic Spina-Bifida repair, which is performed in fetuses who are still in the pregnant uterus, prompts the need for appropriate training. The learning curve for such procedures is steep and requires excellent procedural skills. 
Computer-based virtual reality (VR) simulation systems offer a safe, cost-effective, and configurable training environment free from ethical and patient safety issues. However, to the best of our knowledge, there are currently no commercial or experimental VR training simulation systems available for fetoscopic SB-repair procedures. In this paper, we propose a novel VR simulator for core manual skills training for SB-repair. An initial simulation realism validation study was carried out by obtaining subjective feedback (face and content validity) from 14 clinicians. The overall simulation realism was on average marked 4.07 on a 5-point Likert scale (1 - very unrealistic, 5 - very realistic). Its usefulness as a training tool for SB-repair as well as in learning fundamental laparoscopic skills was marked 4.63 and 4.80, respectively. These results indicate that VR simulation of fetoscopic procedures may contribute to surgical training without putting fetuses and their mothers at risk. It could also facilitate wider adaptation of fetoscopic procedures in place of much more invasive open fetal surgeries.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"50px\",\"epAnimationGeneratedClass\":\"edplus_anim-NG4WG4\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:50px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button {\"id\":\"block_64afc99327015\",\"name\":\"acf\/button\",\"data\":{\"title\":\"READ HERE\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/ieeexplore.ieee.org\/document\/9981920\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_blank\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"Virtual Reality 
Simulator for Fetoscopic Spina Bifida Repair Surgery\u00a0","post_excerpt":"In: IEEE international conference on intelligent robots and systems IROS, 2022.","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"virtual-reality-simulator-for-fetoscopic-spina-bifida-repair-surgery","to_ping":"","pinged":"","post_modified":"2024-01-10 19:46:46","post_modified_gmt":"2024-01-10 18:46:46","post_content_filtered":"","post_parent":0,"guid":"https:\/\/new.sano.science\/?post_type=research&#038;p=12628","menu_order":71,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"},{"ID":28554,"post_author":"8","post_date":"2026-01-31 19:47:50","post_date_gmt":"2026-01-31 18:47:50","post_content":"<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-7IiDS7\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Mateusz W\u00f3jcikowski, Diego Dall'Alba, Sabina Martyniak, Szymon P\u0142otka, Renata Szydlak, Piotr Walecki, Andrzej A Kononowicz, Przemyslaw Korzeniowski<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"20px\",\"epAnimationGeneratedClass\":\"edplus_anim-eOcEsc\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:20px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-7IiDS7\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\"><strong>VR Simulator for Robotic-Assisted Surgery Training<br><\/strong>This study explored the use of a <strong>VR-based robotic surgery simulator<\/strong> designed for medical education. 
The simulator, compatible with Meta Quest 2\/3\/Pro headsets, was tested by 67 international medical students to evaluate its usability and educational value.<br>The system reproduces the cholecystectomy procedure using the Extended Position-Based Dynamics (XPBD) method to simulate soft tissue behavior in real time (1 kHz physics, 90 Hz graphics). Most participants rated the simulator as a valuable learning tool, with over 80% reporting improved understanding of anatomy and surgical workflows.<br>Students appreciated the safe, immersive practice environment and the opportunity for repeated training. Suggested improvements included enhancing tissue realism, diversifying the range of surgical scenarios, and optimizing the user interface for VR interaction. Overall, the project highlights the strong potential of VR simulators to transform surgical training.<br>Read more in the full article at the link below.<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"20px\",\"epAnimationGeneratedClass\":\"edplus_anim-eOcEsc\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:20px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:paragraph {\"epAnimationGeneratedClass\":\"edplus_anim-7IiDS7\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<p class=\" eplus-wrapper\">Conference: Hamlyn Symposium on Medical Robotics 2025<\/p>\n<!-- \/wp:paragraph -->\n\n<!-- wp:spacer {\"height\":\"20px\",\"epAnimationGeneratedClass\":\"edplus_anim-bNjJc0\",\"epGeneratedClass\":\"eplus-wrapper\"} -->\n<div style=\"height:20px\" aria-hidden=\"true\" class=\"wp-block-spacer eplus-wrapper\"><\/div>\n<!-- \/wp:spacer -->\n\n<!-- wp:acf\/button 
{\"id\":\"block_697e4d14f1bac\",\"name\":\"acf\/button\",\"data\":{\"title\":\"Read\/Download\",\"_title\":\"field_61d40397c2f0a\",\"button_type\":\"link\",\"_button_type\":\"field_63bbde3b8f0d0\",\"url\":\"https:\/\/events.plgrid.pl\/event\/89\/contributions\/745\/attachments\/195\/440\/VRSR_for_Marian_2.pdf\",\"_url\":\"field_61d4039bc2f0b\",\"button_style\":\"primary\",\"_button_style\":\"field_63872d045d0f0\",\"target\":\"_self\",\"_target\":\"field_63872c705d0ef\",\"button_extra_classes\":\"\",\"_button_extra_classes\":\"field_642beab6a97de\"},\"align\":\"\",\"mode\":\"edit\"} \/-->","post_title":"Affordable Virtual Reality Simulation for Robotic Surgery: Preliminary Usability and Educational Evaluation","post_excerpt":"Conference abstract:  Hamlyn Symposium on Medical Robotics 2025, 2025","post_status":"publish","comment_status":"closed","ping_status":"closed","post_password":"","post_name":"affordable-virtual-reality-simulation-for-robotic-surgery-preliminary-usability-and-educational-evaluation","to_ping":"","pinged":"","post_modified":"2026-01-31 19:47:51","post_modified_gmt":"2026-01-31 
18:47:51","post_content_filtered":"","post_parent":0,"guid":"https:\/\/sano.science\/?post_type=research&#038;p=28554","menu_order":0,"post_type":"research","post_mime_type":"","comment_count":"0","filter":"raw"}]},"_links":{"self":[{"href":"https:\/\/sano.science\/index.php\/wp-json\/wp\/v2\/people\/554","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/sano.science\/index.php\/wp-json\/wp\/v2\/people"}],"about":[{"href":"https:\/\/sano.science\/index.php\/wp-json\/wp\/v2\/types\/people"}],"version-history":[{"count":16,"href":"https:\/\/sano.science\/index.php\/wp-json\/wp\/v2\/people\/554\/revisions"}],"predecessor-version":[{"id":28569,"href":"https:\/\/sano.science\/index.php\/wp-json\/wp\/v2\/people\/554\/revisions\/28569"}],"wp:featuredmedia":[{"embeddable":true,"href":"https:\/\/sano.science\/index.php\/wp-json\/wp\/v2\/media\/18499"}],"wp:attachment":[{"href":"https:\/\/sano.science\/index.php\/wp-json\/wp\/v2\/media?parent=554"}],"wp:term":[{"taxonomy":"people_teams","embeddable":true,"href":"https:\/\/sano.science\/index.php\/wp-json\/wp\/v2\/people_teams?post=554"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}