Raw record (Python dict repr of a ClinicalTrials.gov study — note: not strict JSON; uses single quotes and capitalized False)
{'hasResults': False, 'derivedSection': {'miscInfoModule': {'versionHolder': '2025-12-24'}, 'conditionBrowseModule': {'meshes': [{'id': 'D001039', 'term': 'Aphasia, Broca'}, {'id': 'D020521', 'term': 'Stroke'}, {'id': 'D001037', 'term': 'Aphasia'}], 'ancestors': [{'id': 'D013064', 'term': 'Speech Disorders'}, {'id': 'D007806', 'term': 'Language Disorders'}, {'id': 'D003147', 'term': 'Communication Disorders'}, {'id': 'D019954', 'term': 'Neurobehavioral Manifestations'}, {'id': 'D009461', 'term': 'Neurologic Manifestations'}, {'id': 'D009422', 'term': 'Nervous System Diseases'}, {'id': 'D012816', 'term': 'Signs and Symptoms'}, {'id': 'D013568', 'term': 'Pathological Conditions, Signs and Symptoms'}, {'id': 'D002561', 'term': 'Cerebrovascular Disorders'}, {'id': 'D001927', 'term': 'Brain Diseases'}, {'id': 'D002493', 'term': 'Central Nervous System Diseases'}, {'id': 'D014652', 'term': 'Vascular Diseases'}, {'id': 'D002318', 'term': 'Cardiovascular Diseases'}]}}, 'protocolSection': {'designModule': {'phases': ['NA'], 'studyType': 'INTERVENTIONAL', 'designInfo': {'allocation': 'RANDOMIZED', 'maskingInfo': {'masking': 'SINGLE', 'whoMasked': ['OUTCOMES_ASSESSOR']}, 'primaryPurpose': 'TREATMENT', 'interventionModel': 'PARALLEL'}, 'enrollmentInfo': {'type': 'ESTIMATED', 'count': 20}}, 'statusModule': {'overallStatus': 'NOT_YET_RECRUITING', 'startDateStruct': {'date': '2025-05-01', 'type': 'ESTIMATED'}, 'expandedAccessInfo': {'hasExpandedAccess': False}, 'statusVerifiedDate': '2025-04', 'completionDateStruct': {'date': '2026-03-01', 'type': 'ESTIMATED'}, 'lastUpdateSubmitDate': '2025-04-14', 'studyFirstSubmitDate': '2025-02-11', 'studyFirstSubmitQcDate': '2025-02-11', 'lastUpdatePostDateStruct': {'date': '2025-04-16', 'type': 'ACTUAL'}, 'studyFirstPostDateStruct': {'date': '2025-02-17', 'type': 'ACTUAL'}, 'primaryCompletionDateStruct': {'date': '2026-03-01', 'type': 'ESTIMATED'}}, 'outcomesModule': {'primaryOutcomes': [{'measure': 'Speaking rate', 'timeFrame': 'From 
enrollment to the end of treatment at 6 weeks', 'description': 'Language production as assessed by number of correct words produced per minute'}]}, 'oversightModule': {'isUsExport': False, 'oversightHasDmc': False, 'isFdaRegulatedDrug': False, 'isFdaRegulatedDevice': False}, 'conditionsModule': {'keywords': ['Aphasia', 'Nonfluent aphasia', 'Speech entrainment', 'Speech synchronization'], 'conditions': ['Aphasia Non Fluent', 'Stroke', 'Aphasia']}, 'descriptionModule': {'briefSummary': "The goal of this clinical trial is to test the use of voice personalization through artificial intelligence (AI) voice cloning on speech entrainment tasks to improve language production of persons with aphasia (PWA). The main question the study aims to answer is:\n\n\\- What is the impact of personalized voice on speech entrainment in PWA compared to traditional speech entrainment?\n\nSpeech entrainment is a technique used by speech-language pathologists to improve the speech production of PWA. Traditionally, speech therapists act as the model for participants to speak along with to improve their speech production. 
This study proposes the use of one's own voice (digitally altered) to improve speech production.\n\nThe study uses a mobile health approach to administer speech entrainment treatment through a mobile app.\n\n* Smartphones with the mobile app pre-installed will be mailed to participants at no cost.\n* Participants will complete treatment in the comfort of their homes.\n* The experimental treatments involve: mirror speech entrainment (speaking along to one's own voice) and traditional speech entrainment (speaking along to someone else's voice)."}, 'eligibilityModule': {'sex': 'ALL', 'stdAges': ['ADULT', 'OLDER_ADULT'], 'maximumAge': '70 Years', 'minimumAge': '18 Years', 'healthyVolunteers': False, 'eligibilityCriteria': "Inclusion Criteria:\n\n* Adults between the ages of 18-70 years old\n* Dominant English-speaking\n* Aphasia due to left hemisphere stroke\n\nExclusion Criteria:\n\n* Individuals with aphasia due to other causes such as dementia, Alzheimer's disease and Parkinsonism\n* Non-dominant English adult speakers\n* Persons with significant difficulties with language comprehension\n* Persons with concomitant cognitive disability\n* Persons with uncorrected sensory deficits."}, 'identificationModule': {'nctId': 'NCT06829420', 'acronym': 'MSE', 'briefTitle': 'Mirror Speech Entrainment: A Novel Technique for Voice Personalized Speech Entrainment for Nonfluent Aphasia', 'organization': {'class': 'OTHER', 'fullName': 'University of South Florida'}, 'officialTitle': 'Mirror Speech Entrainment: A Novel Technique for Voice Personalized Speech Entrainment for Nonfluent Aphasia', 'orgStudyIdInfo': {'id': '007481'}}, 'armsInterventionsModule': {'armGroups': [{'type': 'EXPERIMENTAL', 'label': 'Experiment 1: Feasibility Study', 'description': 'Participants will receive two treatments sequentially: mirror speech entrainment using the auditory-only modality followed by mirror speech entrainment using the auditory-visual modality.', 'interventionNames': ['Behavioral: Mirror 
speech entrainment']}, {'type': 'EXPERIMENTAL', 'label': 'Experiment 2: Mirror SE versus Traditional SE (Auditory-only)', 'description': 'This arm investigates the use of mirror speech entrainment using auditory only feedback and traditional speech entrainment using auditory only feedback.', 'interventionNames': ['Behavioral: Mirror speech entrainment', 'Behavioral: Traditional speech entrainment: auditory-only']}, {'type': 'EXPERIMENTAL', 'label': 'Experiment 3: Mirror SE versus Traditional SE (Auditory-visual)', 'description': 'This arm will investigate the effect of mirror speech entrainment in the auditory-visual modality versus traditional speech entrainment in the auditory-visual modality.', 'interventionNames': ['Behavioral: Mirror speech entrainment', 'Behavioral: Traditional speech entrainment: auditory-visual']}], 'interventions': [{'name': 'Mirror speech entrainment', 'type': 'BEHAVIORAL', 'description': "Speech entrainment with the user's own voice using auditory-only and auditory-visual modalities.", 'armGroupLabels': ['Experiment 1: Feasibility Study', 'Experiment 2: Mirror SE versus Traditional SE (Auditory-only)', 'Experiment 3: Mirror SE versus Traditional SE (Auditory-visual)']}, {'name': 'Traditional speech entrainment: auditory-only', 'type': 'BEHAVIORAL', 'description': "Traditional speech entrainment (speech entrainment using an external agent's voice) using the auditory-only modality (users only listen and speak along to auditory stimuli)", 'armGroupLabels': ['Experiment 2: Mirror SE versus Traditional SE (Auditory-only)']}, {'name': 'Traditional speech entrainment: auditory-visual', 'type': 'BEHAVIORAL', 'description': "Traditional speech entrainment (speech entrainment using an external agent's voice) using the auditory-visual modality (users listen and speak along to both auditory and visual stimuli (mouth movements))", 'armGroupLabels': ['Experiment 3: Mirror SE versus Traditional SE (Auditory-visual)']}]}, 'contactsLocationsModule': 
{'locations': [{'zip': '33612', 'city': 'Tampa', 'state': 'Florida', 'country': 'United States', 'contacts': [{'name': 'Celine Davis, M.S. Speech-language Pathology', 'role': 'CONTACT', 'email': 'celinedavis@usf.edu', 'phone': '8139742006'}, {'name': 'Gerald C Imaezue, Ph.D', 'role': 'CONTACT', 'email': 'gimaezue@usf.edu', 'phone': '8139742006'}, {'name': 'Celine Davis, M.S. Speech-language Pathology', 'role': 'PRINCIPAL_INVESTIGATOR'}, {'name': 'Gerald C Imaezue, Ph.D', 'role': 'SUB_INVESTIGATOR'}], 'facility': 'University of South Florida', 'geoPoint': {'lat': 27.94752, 'lon': -82.45843}}]}, 'ipdSharingStatementModule': {'ipdSharing': 'NO', 'description': 'Individual participant data will only be shared with approved members of the research team.'}, 'sponsorCollaboratorsModule': {'leadSponsor': {'name': 'University of South Florida', 'class': 'OTHER'}, 'responsibleParty': {'type': 'PRINCIPAL_INVESTIGATOR', 'investigatorTitle': 'Speech-language Pathologist', 'investigatorFullName': 'Celine Davis', 'investigatorAffiliation': 'University of South Florida'}}}}