Raw record (Python dict repr of ClinicalTrials.gov study NCT06266403 — not valid JSON as-is: single-quoted strings, True/False literals, raw newlines inside string values)
{'hasResults': False, 'derivedSection': {'miscInfoModule': {'versionHolder': '2025-12-24'}, 'conditionBrowseModule': {'meshes': [{'id': 'D000690', 'term': 'Amyotrophic Lateral Sclerosis'}], 'ancestors': [{'id': 'D013118', 'term': 'Spinal Cord Diseases'}, {'id': 'D002493', 'term': 'Central Nervous System Diseases'}, {'id': 'D009422', 'term': 'Nervous System Diseases'}, {'id': 'D016472', 'term': 'Motor Neuron Disease'}, {'id': 'D019636', 'term': 'Neurodegenerative Diseases'}, {'id': 'D057177', 'term': 'TDP-43 Proteinopathies'}, {'id': 'D009468', 'term': 'Neuromuscular Diseases'}, {'id': 'D057165', 'term': 'Proteostasis Deficiencies'}, {'id': 'D008659', 'term': 'Metabolic Diseases'}, {'id': 'D009750', 'term': 'Nutritional and Metabolic Diseases'}]}}, 'protocolSection': {'designModule': {'phases': ['NA'], 'studyType': 'INTERVENTIONAL', 'designInfo': {'allocation': 'NA', 'maskingInfo': {'masking': 'NONE'}, 'primaryPurpose': 'BASIC_SCIENCE', 'interventionModel': 'SINGLE_GROUP', 'interventionModelDescription': 'PALS and age-matched adults will participate in one solo speech production task (clear speech) and three interactive tasks (structured communicative interaction, unstructured communicative interaction, and clear speech structured communicative interaction) in which they work with an unfamiliar, naive interlocutor. This study is designed to examine the differences in speech produced in the four tasks. 
Comparisons of speech produced by PALS and age-matched adults will clarify whether differences in speech observed across the four tasks are a function of the speech difficulties experienced by PALS.\n\nPlans for Assignment - This is a single group study in which all participants will engage in the same tasks.'}, 'enrollmentInfo': {'type': 'ESTIMATED', 'count': 300}}, 'statusModule': {'overallStatus': 'RECRUITING', 'startDateStruct': {'date': '2024-11-05', 'type': 'ACTUAL'}, 'expandedAccessInfo': {'hasExpandedAccess': False}, 'statusVerifiedDate': '2025-01', 'completionDateStruct': {'date': '2029-02-28', 'type': 'ESTIMATED'}, 'lastUpdateSubmitDate': '2025-01-09', 'studyFirstSubmitDate': '2024-01-29', 'studyFirstSubmitQcDate': '2024-02-15', 'lastUpdatePostDateStruct': {'date': '2025-01-13', 'type': 'ACTUAL'}, 'studyFirstPostDateStruct': {'date': '2024-02-20', 'type': 'ACTUAL'}, 'primaryCompletionDateStruct': {'date': '2029-02-28', 'type': 'ESTIMATED'}}, 'outcomesModule': {'primaryOutcomes': [{'measure': 'Formant frequencies of speech sounds', 'timeFrame': 'two 60 minute sessions', 'description': 'Formant frequencies that characterize speech sounds will be made on speech recorded in the intervention task.'}, {'measure': 'Intelligibility of recorded speech', 'timeFrame': 'two 60 minute sessions', 'description': 'Perceptual judgments will be provided by solo, naïve listeners who did not participate in the interactions. Listeners will hear recorded speech of PALS and age-matched speakers recorded across the different tasks and indicate what they heard. The score will be expressed in percent. The possible range is between 0-100%. The higher scores mean a better outcome.'}, {'measure': 'Syntactic properties', 'timeFrame': 'One 60 minute session', 'description': 'Syntactic complexity in the unstructured communication task will be measured through mean length of grammatical units, clausal density, and clause type. 
Each variable will be assessed at both the dyadic level (e.g., clausal density for both interlocutors together) and at the level of the individual speaker (e.g., clausal density of each speaker). A composite of these measures will provide an index of the syntactic complexity of the conversation.'}, {'measure': 'Pragmatic Properties', 'timeFrame': 'One 60 minute session', 'description': "The investigators will count the number and duration of silent portions of speech, filled pauses, linguistic mazes, speaking turns, and interruptions in the unstructured communication task.\n\nA composite measure of the individual measures will provide an index of an individual's contribution to the conversation."}, {'measure': 'Duration of speech sounds', 'timeFrame': 'two 60 minute sessions', 'description': 'Durations that characterize speech sounds will be made on speech recorded in the intervention task.'}]}, 'oversightModule': {'oversightHasDmc': False, 'isFdaRegulatedDrug': False, 'isFdaRegulatedDevice': False}, 'conditionsModule': {'conditions': ['Amyotrophic Lateral Sclerosis']}, 'referencesModule': {'references': [{'pmid': '32302251', 'type': 'BACKGROUND', 'citation': 'Olmstead AJ, Lee J, Viswanathan N. The Role of the Speaker, the Listener, and Their Joint Contributions During Communicative Interactions: A Tripartite View of Intelligibility in Individuals With Dysarthria. J Speech Lang Hear Res. 2020 Apr 27;63(4):1106-1114. doi: 10.1044/2020_JSLHR-19-00233. Epub 2020 Apr 17.'}, {'type': 'BACKGROUND', 'citation': 'Olmstead, A. J., Viswanathan, N., Cowan, T., & Yang, K. (2021). Phonetic adaptation in interlocutors with mismatched language backgrounds: A case for a phonetic synergy account. 
Journal of Phonetics, 87, 101054.'}]}, 'descriptionModule': {'briefSummary': 'The goal of this clinical trial is to learn about the effect of communicative interaction on verbal communication in people with amyotrophic lateral sclerosis (ALS) and age-matched speakers.\n\nThe question is, What are the effects of communicative interaction on verbal communication in people with ALS?\n\nParticipants will read words and sentences while they are in a solo setting and interactive setting.', 'detailedDescription': "PALS and age-matched adults will participate in one solo speech production task (clear speech) and three interactive tasks (structured communicative interaction, unstructured communicative interaction, and clear speech structured communicative interaction) in which they work with an unfamiliar, naive interlocutor. This study is designed to examine the differences in speech produced in the four tasks. Comparisons of speech produced by PALS and age-matched adults will clarify whether differences in speech observed across the four tasks are a function of the speech difficulties experienced by PALS.\n\nPlans for Assignment - This is a single group study in which all participants will engage in the same tasks.\n\nDelivery of Intervention: Using tablets and audio recording devices provided to them, participants will complete this task in the comfort of their home. Study protocols will be explained via videoconferencing by experimenters. Produced speech will be recorded using solid-state audio recorders as well as remotely through the video conferencing software.\n\nAdequacy of Sample size. Assuming medium effect sizes (Cohen's f = 0.3) based on our pilot data, for 80% power at an alpha of .05, the investigators will require 76 speakers. The investigators propose n = 100 PALS in order to account for speech variability that is common for PALS. The investigators plan to recruit 50 age-matched speakers. 
The investigators anticipate that this sample size will be sufficient to make appropriate comparison to the PALS group because there will be considerably less variability in these speakers.\n\nAdequacy of Analyses. The proposed statistical analyses (Generalized mixed effects regressions) are standard and will be used to analyze the effect of the intervention on the outcome measures described below. Severity of condition (for PALS) will be included in the analyses and by-subject slopes and intercepts will be used to account for variability across participants."}, 'eligibilityModule': {'sex': 'ALL', 'stdAges': ['ADULT', 'OLDER_ADULT'], 'maximumAge': '90 Years', 'minimumAge': '18 Years', 'healthyVolunteers': True, 'eligibilityCriteria': 'Inclusion Criteria:\n\nSpeakers with amyotrophic lateral sclerosis (ALS) (PALS-people with ALS)\n\n* diagnosis of ALS following the revised EL Escorial criteria\n* no history of other neurological conditions (e.g., stroke)\n* no cognitive impairment assessed by Telephone Montreal Cognitive Assessment (mini MoCA)\n* detectable speech disturbance according to the ALS Functional Rating Scale-Revised (ALSFRS-R)\n* the ability to produce single words\n* being a native speaker of American English (AE).\n\nAge-matched Speakers\n\n* passing the remote hearing screening\n* having no known speech, language, or neurological disorders per self-report\n* no cognitive impairment assessed by Telephone Montreal Cognitive Assessment (mini MoCA)\n* being a functionally native monolingual speaker of American English.\n\nUnfamiliar Interlocutors\n\n* passing the remote hearing screening\n* having no known speech, language or neurological disorders per self-report\n* being a native monolingual speaker of American English\n* having no experience communicating with people with dysarthria\n* being between the ages of 18 and 40.\n\nExclusion Criteria:\n\n* None - if volunteer meets the inclusion criteria, then they will be enrolled'}, 'identificationModule': 
{'nctId': 'NCT06266403', 'briefTitle': 'Evaluating Verbal Communication in Structured Interactions: Theoretical and Clinical Implications', 'organization': {'class': 'OTHER', 'fullName': 'Penn State University'}, 'officialTitle': 'Evaluating Verbal Communication in Structured Interactions: Theoretical and Clinical Implications', 'orgStudyIdInfo': {'id': 'STUDY00024154'}}, 'armsInterventionsModule': {'armGroups': [{'type': 'EXPERIMENTAL', 'label': 'People with amyotrophic lateral sclerosis, age-matched speakers', 'description': 'People with ALS and age-matched speakers will participate in structured communicative interaction.', 'interventionNames': ['Behavioral: Structured Communicative Interaction', 'Behavioral: Clear Speech', 'Behavioral: Unstructured communicative interaction', 'Behavioral: Clear Speech Structured Communicative Interaction']}], 'interventions': [{'name': 'Structured Communicative Interaction', 'type': 'BEHAVIORAL', 'description': 'Two interlocutors, one with ALS and a typical, unfamiliar interlocutor or an age-matched speaker and a typical interlocutor, will work together. On each trial, one of the interlocutors will be randomly chosen to be the "speaker" and the other will be the "listener". Each participant in the pair will view the same set of words on their screens. After one second, one of the words will be highlighted on the speaker\'s screen, they will say the word in the phrase "Click on the \\_\\_\\_\\_ this time", and the listener will click on it. After the listener has made their selection, both participants will receive feedback on trial success.', 'armGroupLabels': ['People with amyotrophic lateral sclerosis, age-matched speakers']}, {'name': 'Clear Speech', 'type': 'BEHAVIORAL', 'description': 'PALS and age-matched speakers will read critical words consisting of target segments (e.g., "hid", "ship", "net") in random order four times. These words will be embedded in the carrier phrase "Click on the \\_\\_\\_ this time." 
Participants will be instructed to overenunciate the critical words.', 'armGroupLabels': ['People with amyotrophic lateral sclerosis, age-matched speakers']}, {'name': 'Unstructured communicative interaction', 'type': 'BEHAVIORAL', 'description': 'Two interlocutors, one with ALS and a typical, unfamiliar interlocutor or an age-matched speaker and a typical interlocutor, will work together. The pairs will be presented with two different versions of the same picture with eight differences chosen to elicit the same target segments (e.g., "hid", "ship", "net"). These pictures will be modified from the LUCID corpus. In total, pairs will complete four picture sets per session. Pairs will be given 5 minutes for each picture set.', 'armGroupLabels': ['People with amyotrophic lateral sclerosis, age-matched speakers']}, {'name': 'Clear Speech Structured Communicative Interaction', 'type': 'BEHAVIORAL', 'description': 'Two interlocutors, one with ALS and a typical, unfamiliar interlocutor or an age-matched speaker and a typical interlocutor, will work together. On each trial, one of the interlocutors will be randomly chosen to be the "speaker" and the other will be the "listener". Each participant in the pair will view the same set of words on their screens. After one second, one of the words will be highlighted on the speaker\'s screen, they will say the word in the phrase "Click on the \_\_\_\_ this time", and the listener will click on it. The speaker will be instructed to overenunciate the critical words. 
After the listener has made their selection, both participants will receive feedback on trial success.', 'armGroupLabels': ['People with amyotrophic lateral sclerosis, age-matched speakers']}]}, 'contactsLocationsModule': {'locations': [{'zip': '16802', 'city': 'University Park', 'state': 'Pennsylvania', 'status': 'RECRUITING', 'country': 'United States', 'contacts': [{'name': 'Jimin Lee, PhD', 'role': 'CONTACT', 'email': 'jxl91@psu.edu', 'phone': '814-867-3373'}], 'facility': 'Speech Core, Pennsylvania State University', 'geoPoint': {'lat': 40.80201, 'lon': -77.85639}}], 'centralContacts': [{'name': 'Anne Olmstead, Ph.D.', 'role': 'CONTACT', 'email': 'ajo150@psu.edu', 'phone': '814-867-3373'}], 'overallOfficials': [{'name': 'Jimin Lee, PhD', 'role': 'PRINCIPAL_INVESTIGATOR', 'affiliation': 'The Pennsylvania State University'}, {'name': 'Navin Viswanathan, PhD', 'role': 'PRINCIPAL_INVESTIGATOR', 'affiliation': 'The Pennsylvania State University'}, {'name': 'Anne Olmstead, PhD', 'role': 'PRINCIPAL_INVESTIGATOR', 'affiliation': 'The Pennsylvania State University'}]}, 'ipdSharingStatementModule': {'infoTypes': ['STUDY_PROTOCOL', 'SAP'], 'timeFrame': 'Data will be shared after publication of the study.', 'ipdSharing': 'YES', 'description': 'The project will involve the collection of audio samples, perceptual judgments, and questionnaire data. All deidentified data will be shared through the Open Science Framework (osf.io). All scripts, protocols, procedures, and analyses will be shared along with the deidentified data to ensure that other researchers can verify and build on the presented results. All shared data will be made available in a format that is accessible by open-access software (e.g., R, Open Office, pdf reader). 
In addition, platform-specific scripts (e.g., experimental software LabVanced) will be shared with explanations so that they may be implemented across different software environments.', 'accessCriteria': 'All deidentified data, metadata, and related tools will be freely available via Open Science Framework (OSF). The original audio files will be made available (for participants who consent) by request from researchers in the field to ensure responsible use.'}, 'sponsorCollaboratorsModule': {'leadSponsor': {'name': 'Penn State University', 'class': 'OTHER'}, 'responsibleParty': {'type': 'PRINCIPAL_INVESTIGATOR', 'investigatorTitle': 'Associate Professor', 'investigatorFullName': 'Ji Min Lee, Ph.D.', 'investigatorAffiliation': 'Penn State University'}}}}