Raw study record (Python dict representation of ClinicalTrials.gov JSON — single quotes and True/False literals; not parseable as strict JSON)
{'hasResults': False, 'derivedSection': {'miscInfoModule': {'versionHolder': '2025-12-24'}}, 'protocolSection': {'designModule': {'phases': ['NA'], 'studyType': 'INTERVENTIONAL', 'designInfo': {'allocation': 'NA', 'maskingInfo': {'masking': 'NONE'}, 'primaryPurpose': 'TREATMENT', 'interventionModel': 'SINGLE_GROUP'}, 'enrollmentInfo': {'type': 'ESTIMATED', 'count': 220}}, 'statusModule': {'overallStatus': 'ENROLLING_BY_INVITATION', 'startDateStruct': {'date': '2023-12-20', 'type': 'ACTUAL'}, 'expandedAccessInfo': {'hasExpandedAccess': False}, 'statusVerifiedDate': '2024-11', 'completionDateStruct': {'date': '2025-05-15', 'type': 'ESTIMATED'}, 'lastUpdateSubmitDate': '2025-03-06', 'studyFirstSubmitDate': '2025-03-04', 'studyFirstSubmitQcDate': '2025-03-06', 'lastUpdatePostDateStruct': {'date': '2025-03-07', 'type': 'ACTUAL'}, 'studyFirstPostDateStruct': {'date': '2025-03-07', 'type': 'ACTUAL'}, 'primaryCompletionDateStruct': {'date': '2025-03-20', 'type': 'ESTIMATED'}}, 'outcomesModule': {'otherOutcomes': [{'measure': 'Precision', 'timeFrame': 'Day 1', 'description': 'In the accuracy of medical image segmentation, TP represents a case predicted as positive, and the true label is positive; FN represents a case predicted as negative, but the true label is positive; FP represents a case predicted as positive, but the true label is negative; TN represents a case predicted as negative, and the true label is also negative. 
The calculation of precision is given by Precision = TP/(TP+FP), which refers to the proportion of correctly identified positive samples among all predicted positive samples.'}, {'measure': 'Recall', 'timeFrame': 'Day 1', 'description': 'In the accuracy of medical image segmentation, TP represents a case predicted as positive, and the true label is positive; FN represents a case predicted as negative, but the true label is positive; FP represents a case predicted as positive, but the true label is negative; TN represents a case predicted as negative, and the true label is also negative. The calculation of recall is given by Recall = TP/(TP+FN), which refers to the proportion of correctly predicted positive samples among all actual positive samples.'}, {'measure': 'Accuracy', 'timeFrame': 'Day 1', 'description': 'In the accuracy of medical image segmentation, TP represents a case predicted as positive, and the true label is positive; FN represents a case predicted as negative, but the true label is positive; FP represents a case predicted as positive, but the true label is negative; TN represents a case predicted as negative, and the true label is also negative. Thus, the accuracy of the artificial intelligence model is calculated as Accuracy=(TP+TN)/(TP+FP+FN+TN), which refers to the ratio of the number of correctly predicted samples to the total number of samples in all experiments.'}], 'primaryOutcomes': [{'measure': 'F1 score', 'timeFrame': 'Day 1', 'description': "In the accuracy of medical image segmentation, TP represents a case predicted as positive, and the true label is positive; FN represents a case predicted as negative, but the true label is positive; FP represents a case predicted as positive, but the true label is negative; TN represents a case predicted as negative, and the true label is also negative. 
The F1 score is calculated as F1 = 2 × \\[Precision × Recall / (Precision + Recall)\\], and it takes into account both the precision and recall of the classification model, defined as the harmonic mean of the model's precision and recall."}], 'secondaryOutcomes': [{'measure': 'IoU', 'timeFrame': 'Day 1', 'description': 'In the semantic segmentation task, let the manually annotated region be X and the region predicted by the model be Y. The accuracy of the artificial intelligence model is calculated using IoU=(X∩Y)/(X∪Y) and Dice=(2\\|X∩Y\\|)/(\\|X\\|+\\|Y\\|).'}]}, 'oversightModule': {'oversightHasDmc': False, 'isFdaRegulatedDrug': False, 'isFdaRegulatedDevice': False}, 'conditionsModule': {'keywords': ['artificial intelligence (AI)', 'deep learning', 'laparoscopic surgery'], 'conditions': ['Artificial Intelligence (AI)', 'Deep Learning', 'Laparoscopic Surgery']}, 'referencesModule': {'references': [{'pmid': '33655738', 'type': 'BACKGROUND', 'citation': 'Beyersdorffer P, Kunert W, Jansen K, Miller J, Wilhelm P, Burgert O, Kirschniak A, Rolinger J. Detection of adverse events leading to inadvertent injury during laparoscopic cholecystectomy using convolutional neural networks. Biomed Tech (Berl). 2021 Mar 1;66(4):413-421. doi: 10.1515/bmt-2020-0106. Print 2021 Aug 26.'}, {'pmid': '35941306', 'type': 'BACKGROUND', 'citation': 'Golany T, Aides A, Freedman D, Rabani N, Liu Y, Rivlin E, Corrado GS, Matias Y, Khoury W, Kashtan H, Reissman P. Artificial intelligence for phase recognition in complex laparoscopic cholecystectomy. Surg Endosc. 2022 Dec;36(12):9215-9223. doi: 10.1007/s00464-022-09405-5. Epub 2022 Aug 8.'}, {'pmid': '31712899', 'type': 'BACKGROUND', 'citation': 'Kim JH, Kim H. Modified liver hanging maneuver in laparoscopic major hepatectomy: the learning curve and evolution of indications. Surg Endosc. 2020 Jun;34(6):2742-2748. doi: 10.1007/s00464-019-07248-1. 
Epub 2019 Nov 11.'}, {'pmid': '29512045', 'type': 'BACKGROUND', 'citation': 'Gaitanidis A, Simopoulos C, Pitiakoudis M. What to consider when designing a laparoscopic colorectal training curriculum: a review of the literature. Tech Coloproctol. 2018 Mar;22(3):151-160. doi: 10.1007/s10151-018-1760-y. Epub 2018 Mar 6.'}, {'pmid': '31156556', 'type': 'BACKGROUND', 'citation': 'Vaz RM, Bordenali G, Bibancos M. Testicular Cancer-Surgical Treatment. Front Endocrinol (Lausanne). 2019 May 15;10:308. doi: 10.3389/fendo.2019.00308. eCollection 2019.'}, {'pmid': '28746157', 'type': 'BACKGROUND', 'citation': 'Moris D, Vernadakis S. Laparoscopic Hepatectomy for Hepatocellular Carcinoma: The Opportunities, the Challenges, and the Limitations. Ann Surg. 2018 Jul;268(1):e16. doi: 10.1097/SLA.0000000000002458. No abstract available.'}, {'pmid': '27849661', 'type': 'BACKGROUND', 'citation': 'Yoon YI, Kim KH, Kang SH, Kim WJ, Shin MH, Lee SK, Jung DH, Park GC, Ahn CS, Moon DB, Ha TY, Song GW, Hwang S, Lee SG. Pure Laparoscopic Versus Open Right Hepatectomy for Hepatocellular Carcinoma in Patients With Cirrhosis: A Propensity Score Matched Analysis. Ann Surg. 2017 May;265(5):856-863. doi: 10.1097/SLA.0000000000002072.'}, {'pmid': '25872167', 'type': 'BACKGROUND', 'citation': 'Han HS, Shehta A, Ahn S, Yoon YS, Cho JY, Choi Y. Laparoscopic versus open liver resection for hepatocellular carcinoma: Case-matched study with propensity score matching. J Hepatol. 2015 Sep;63(3):643-50. doi: 10.1016/j.jhep.2015.04.005. Epub 2015 Apr 12.'}, {'pmid': '31797047', 'type': 'BACKGROUND', 'citation': 'Kitaguchi D, Takeshita N, Matsuzaki H, Takano H, Owada Y, Enomoto T, Oda T, Miura H, Yamanashi T, Watanabe M, Sato D, Sugomori Y, Hara S, Ito M. Real-time automatic surgical phase recognition in laparoscopic sigmoidectomy using the convolutional neural network-based deep learning approach. Surg Endosc. 2020 Nov;34(11):4924-4931. doi: 10.1007/s00464-019-07281-0. 
Epub 2019 Dec 3.'}, {'pmid': '29359029', 'type': 'BACKGROUND', 'citation': 'Ziogas IA, Tsoulfas G. Advances and challenges in laparoscopic surgery in the management of hepatocellular carcinoma. World J Gastrointest Surg. 2017 Dec 27;9(12):233-245. doi: 10.4240/wjgs.v9.i12.233.'}, {'pmid': '23601993', 'type': 'BACKGROUND', 'citation': 'Rivas H, Diaz-Calderon D. Present and future advanced laparoscopic surgery. Asian J Endosc Surg. 2013 May;6(2):59-67. doi: 10.1111/ases.12028.'}]}, 'descriptionModule': {'briefSummary': "Hepatocellular Carcinoma(HCC) is a common disease in China, ranking as the fourth most prevalent malignant tumor and the third leading cause of cancer-related deaths in the country. Along with other liver, biliary, pancreatic, and splenic diseases, it poses a serious threat to the lives and health of the Chinese population. Precise organ resection techniques, centered around accurate preoperative imaging and functional assessment as well as meticulous surgical operations, have become the mainstream in hepatobiliary surgery in the 21st century. These techniques require precise dissection of intrahepatic blood vessels, the biliary system, and the pancreatic-splenic duct system to achieve an optimal balance between eradicating lesions and preserving the normal function of the organs while minimizing trauma to the body.\n\nPrecise tissue resection via laparoscopy is a prerequisite for successful hepatobiliary surgery. Addressing how to assist surgeons in performing surgeries more safely and effectively, as well as how to enhance learning outcomes during training, are pressing issues that need to be resolved. Efficient learning and analysis of surgical videos may help improve surgeons' intraoperative performance.\n\nIn recent years, advancements in artificial intelligence (AI) have led to a surge in the application of computer vision (CV) in medical image analysis, including surgical videos. 
Laparoscopic surgery generates a large amount of surgical video data, providing a new opportunity for the enhancement of laparoscopic surgical CV technology. AI-based CV technology can utilize these surgical video data to develop real-time automated decision support tools and surgical training systems, offering new directions for addressing the shortcomings of laparoscopic surgery.\n\nHowever, the application of deep learning models in surgical procedures still has some shortcomings. Based on this, the present study aims to conduct a retrospective analysis of cases involving laparoscopic hepatobiliary and pancreatic surgeries performed at Zhujiang Hospital, Southern Medical University, between 2017 and 2024. The goal is to investigate the recognition and validation of deep learning models for classifying surgical phase images in medical imaging, as well as for semantic segmentation of anatomical structures, surgical instruments, and surgical gestures, including abdominal CT and MRI.", 'detailedDescription': "Hepatocellular Carcinoma(HCC) is a common disease in China. It ranks as the fourth most common malignant tumor and the third leading cause of cancer-related deaths in the country. Along with other liver, biliary, pancreatic, and splenic diseases, it poses a serious threat to the lives and health of the Chinese population. Precise organ resection techniques, centered around accurate preoperative imaging and functional assessment as well as meticulous surgical operations, have become the mainstream in hepatobiliary surgery in the 21st century. These techniques require precise dissection of intrahepatic blood vessels, the biliary system, and the pancreatic-splenic duct system to achieve an optimal balance between eradicating the lesions and preserving the normal function of the organs while minimizing trauma to the body. 
Therefore, key questions that must be carefully designed in preoperative planning include: How to maximize the removal of lesions while preserving normal tissues and organs? How to ensure safe surgical margins? How to control damage during surgery to ensure the rapid recovery of residual tissue function postoperatively? With the development of medical imaging and the advent of the information age, the field of liver surgery is entering the era of digital precision hepatobiliary surgery.\n\nPrecise tissue resection via laparoscopy is a prerequisite for successful hepatobiliary surgery. Compared to open surgery, laparoscopic surgery offers advantages such as less trauma, reduced postoperative pain, and faster recovery. However, laparoscopic surgery also has drawbacks, including insufficient tactile feedback, limited surgical space, narrow field of vision, and restricted maneuverability. The visibility and angles of laparoscopic instruments are limited. When encountering adhesions or complex situations, laparoscopic surgery can increase operational difficulty and prolong surgical time. Therefore, complex laparoscopic procedures require extensive experience and have a steep learning curve. How to assist surgeons in performing surgeries more safely and effectively, as well as how to enhance learning outcomes during training, are pressing issues that need to be addressed. Efficient learning and analysis of surgical videos may help improve surgeons' intraoperative performance.\n\nIn recent years, advancements in artificial intelligence (AI) have led to a surge in the application of computer vision (CV) in medical image analysis, including surgical videos. Laparoscopic surgery generates a large amount of surgical video data, providing a new opportunity for the enhancement of laparoscopic surgical CV technology. 
AI-based CV technology can utilize these surgical video data to develop real-time automated decision support tools and surgical training systems, offering new directions for addressing the shortcomings of laparoscopic surgery. AI systems can identify surgical phases for various important tasks, such as recording and analyzing adverse events, education, statistics, and assessing surgical performance. Currently, these tasks are performed manually by specialized surgeons, which is very time-consuming. AI systems can also perform scene segmentation, including identifying anatomical structures and areas to avoid damage to critical structures, as well as recognizing surgical instruments to prevent accidents. Uncontrolled instrument movements can cause damage to adjacent structures, and identifying surgical actions (e.g., suturing techniques) can help assess skill proficiency. Using such systems during surgery can facilitate real-time monitoring and assist decision-making, potentially enhancing safety and improving patient outcomes. For example, real-time assistance systems can alert surgeons to incorrect anatomical planes, erroneous maneuvers, or impending complications.\n\nHowever, the application of deep learning models in surgical procedures still has some shortcomings. First, surgical phase recognition can independently estimate the remaining surgical time, effectively arranging logistics in the operating room and providing surgical interns with informative and targeted educational materials. However, current research on surgical phase recognition is still difficult to apply clinically. One reason is that the video duration required for AI models to recognize surgical phases is often too long. Limited training data and the optimization of overall accuracy make it challenging for AI to recognize shorter-duration steps. 
With improved model design and new annotation standards, AI can achieve high-precision step recognition, providing detailed information for the further development of valuable computer-assisted surgical systems.\n\nBased on this, the present study aims to conduct a retrospective analysis of cases involving laparoscopic hepatobiliary and pancreatic surgeries performed at Zhujiang Hospital, Southern Medical University, between 2017 and 2024. The goal is to investigate the recognition and validation of deep learning models for classifying surgical phase images in medical imaging, as well as for semantic segmentation of anatomical structures, surgical instruments, and surgical gestures, including abdominal CT and MRI."}, 'eligibilityModule': {'sex': 'ALL', 'stdAges': ['ADULT', 'OLDER_ADULT'], 'maximumAge': '85 Years', 'minimumAge': '18 Years', 'healthyVolunteers': True, 'eligibilityCriteria': 'Inclusion Criteria:\n\n* Patients who underwent laparoscopic hepatobiliary and pancreatic surgery at Zhujiang Hospital, Southern Medical University, from January 1, 2017, to October 31, 2023.\n* Liver function classified as Child-Pugh grade A or B.\n* Age 18 to 85 years.\n* Complete clinical medical records.\n\nExclusion Criteria:\n\n* Presence of underlying diseases that cannot tolerate surgery (such as severe heart, lung, brain, or kidney dysfunction).\n* Preoperative imaging examinations and intraoperative findings of cancer thrombus in the main and branch of the portal vein, common hepatic duct and its branches, hepatic vein main and branch, and inferior vena cava.\n* Intraoperative findings of extrahepatic invasion and metastasis.\n* Planned pregnancy, unplanned pregnancy, and pregnant individuals.\n* Preoperative liver function classified as Child-Pugh grade C.\n* Previous history of treatments such as radiofrequency or microwave ablation, radiotherapy, liver transplantation, etc.'}, 'identificationModule': {'nctId': 'NCT06864702', 'briefTitle': 'The Construction and Effect 
Verification of a Deep Learning-based Automated Semantic Segmentation Model for Medical Imaging', 'organization': {'class': 'OTHER', 'fullName': 'Zhujiang Hospital'}, 'officialTitle': 'The Construction and Effect Verification of a Deep Learning-based Automated Semantic Segmentation Model for Medical Imaging: A Retrospective Case Analysis Clinical Study', 'orgStudyIdInfo': {'id': '2024-KY-152-02'}}, 'armsInterventionsModule': {'armGroups': [{'type': 'EXPERIMENTAL', 'label': 'Experimental group', 'description': '220 participants were allocated to this group. The intervention is whether the patient received diagnosis and treatment at Zhujiang Hospital of Southern Medical University and retained medical images such as abdominal. The construction of a deep learning model for semantic segmentation of images during laparoscopic hepatobiliary and pancreatic surgery in this arm, focusing on classifying surgical stages, anatomical structures, surgical instruments, and surgical gestures, along with the validation of model performance (Intersection over Union, Dice score, accuracy, precision, recall, and F1 score)', 'interventionNames': ['Behavioral: Whether the patient received diagnosis and treatment at Zhujiang Hospital of Southern Medical University and retained medical images such as abdominal']}], 'interventions': [{'name': 'Whether the patient received diagnosis and treatment at Zhujiang Hospital of Southern Medical University and retained medical images such as abdominal', 'type': 'BEHAVIORAL', 'description': 'The construction of a deep learning model for semantic segmentation of images during laparoscopic hepatobiliary and pancreatic surgery, focusing on classifying surgical stages, anatomical structures, surgical instruments, and surgical gestures, along with the validation of model performance (Intersection over Union, Dice score, accuracy, precision, recall, and F1 score).', 'armGroupLabels': ['Experimental group']}]}, 'contactsLocationsModule': {'locations': 
[{'zip': '510280', 'city': 'Guangzhou', 'state': 'Guangdong', 'country': 'China', 'facility': 'Zhujiang Hospital of Southern Medical University', 'geoPoint': {'lat': 23.11667, 'lon': 113.25}}]}, 'ipdSharingStatementModule': {'ipdSharing': 'UNDECIDED'}, 'sponsorCollaboratorsModule': {'leadSponsor': {'name': 'Zhujiang Hospital', 'class': 'OTHER'}, 'responsibleParty': {'type': 'SPONSOR'}}}}