Raw JSON (ClinicalTrials.gov study record NCT06175312)
{
  "hasResults": false,
  "derivedSection": {"miscInfoModule": {"versionHolder": "2025-12-24"}},
  "protocolSection": {
    "designModule": {"phases": ["NA"], "studyType": "INTERVENTIONAL", "designInfo": {"allocation": "NA", "maskingInfo": {"masking": "NONE", "maskingDescription": "Participants will typically be unaware of the conditions presented, though because these involve manipulations of stimuli or task demands, they may be aware of the manipulation. This is not expected to impact the primary outcome measures (e.g., BOLD signal activation patterns)."}, "primaryPurpose": "BASIC_SCIENCE", "interventionModel": "SINGLE_GROUP", "interventionModelDescription": "This is a basic science study in which all participants will participate in all task conditions within each experiment (repeated-measures design)."}, "enrollmentInfo": {"type": "ACTUAL", "count": 10}},
    "statusModule": {"overallStatus": "COMPLETED", "startDateStruct": {"date": "2024-04-25", "type": "ACTUAL"}, "expandedAccessInfo": {"hasExpandedAccess": false}, "statusVerifiedDate": "2025-05", "completionDateStruct": {"date": "2025-01-31", "type": "ACTUAL"}, "lastUpdateSubmitDate": "2025-05-21", "studyFirstSubmitDate": "2023-12-05", "studyFirstSubmitQcDate": "2023-12-15", "lastUpdatePostDateStruct": {"date": "2025-05-23", "type": "ACTUAL"}, "studyFirstPostDateStruct": {"date": "2023-12-18", "type": "ACTUAL"}, "primaryCompletionDateStruct": {"date": "2025-01-31", "type": "ACTUAL"}},
    "outcomesModule": {"primaryOutcomes": [
      {"measure": "Blood Oxygenation Level Dependent (BOLD) fMRI signal", "timeFrame": "Through study completion, an average of two weeks", "description": "The investigators will use BOLD activation patterns measured from each retinotopic ROI to fit quantitative models of spatial encoding. These models will be used to reconstruct stimulus representations on experimental trials to quantify how stimulus representations are encoded in each brain region studied, and how these representations change across experimental manipulations. These measurements will be used to test the impact of stimulus manipulations on stimulus representations in different brain regions."},
      {"measure": "Gaze position", "timeFrame": "Through study completion, an average of two weeks", "description": "The investigators will use the measured gaze position in (x,y) coordinates to verify stable fixation throughout the experiment. Trials with poor fixation performance may be excluded from further analyses."},
      {"measure": "Behavioral response (button press)", "timeFrame": "Through study completion, an average of two weeks", "description": "On all trials participants will be instructed to attend carefully to the fixation point and report the shape of a target '+' (wide or tall) by pressing one of two buttons held in their hand inside the scanner. The left button will indicate wide; the right button will indicate tall. The investigators will ensure participants are performing the task as instructed by assessing the accuracy of their behavioral responses."}
    ]},
    "oversightModule": {"oversightHasDmc": false, "isFdaRegulatedDrug": false, "isFdaRegulatedDevice": false},
    "conditionsModule": {"conditions": ["Basic Science: Visual Attention in Healthy Participants", "Basic Science: Neural Representations of Location", "Attention"]},
    "referencesModule": {"references": [
      {"pmid": "28628004", "type": "BACKGROUND", "citation": "Mackey WE, Winawer J, Curtis CE. Visual field map clusters in human frontoparietal cortex. Elife. 2017 Jun 19;6:e22974. doi: 10.7554/eLife.22974."},
      {"pmid": "34354071", "type": "BACKGROUND", "citation": "Hallenbeck GE, Sprague TC, Rahmati M, Sreenivasan KK, Curtis CE. Working memory representations in visual cortex mediate distraction effects. Nat Commun. 2021 Aug 5;12(1):4714. doi: 10.1038/s41467-021-24973-1."},
      {"pmid": "29488841", "type": "BACKGROUND", "citation": "Sprague TC, Itthipuripat S, Vo VA, Serences JT. Dissociable signatures of visual salience and behavioral relevance across attentional priority maps in human cortex. J Neurophysiol. 2018 Jun 1;119(6):2153-2165. doi: 10.1152/jn.00059.2018. Epub 2018 Feb 28."},
      {"pmid": "29876523", "type": "BACKGROUND", "citation": "Sprague TC, Adam KCS, Foster JJ, Rahmati M, Sutterer DW, Vo VA. Inverted Encoding Models Assay Population-Level Stimulus Representations, Not Single-Unit Neural Tuning. eNeuro. 2018 Jun 5;5(3):ENEURO.0098-18.2018. doi: 10.1523/ENEURO.0098-18.2018. eCollection 2018 May-Jun. No abstract available."},
      {"pmid": "31772033", "type": "BACKGROUND", "citation": "Sprague TC, Boynton GM, Serences JT. The Importance of Considering Model Choices When Interpreting Results in Computational Neuroimaging. eNeuro. 2019 Dec 20;6(6):ENEURO.0196-19.2019. doi: 10.1523/ENEURO.0196-19.2019. Print 2019 Nov/Dec."},
      {"pmid": "26212711", "type": "BACKGROUND", "citation": "Laumann TO, Gordon EM, Adeyemo B, Snyder AZ, Joo SJ, Chen MY, Gilmore AW, McDermott KB, Nelson SM, Dosenbach NU, Schlaggar BL, Mumford JA, Poldrack RA, Petersen SE. Functional System and Areal Organization of a Highly Sampled Individual Human Brain. Neuron. 2015 Aug 5;87(3):657-70. doi: 10.1016/j.neuron.2015.06.037. Epub 2015 Jul 23."},
      {"pmid": "34916659", "type": "BACKGROUND", "citation": "Allen EJ, St-Yves G, Wu Y, Breedlove JL, Prince JS, Dowdle LT, Nau M, Caron B, Pestilli F, Charest I, Hutchinson JB, Naselaris T, Kay K. A massive 7T fMRI dataset to bridge cognitive neuroscience and artificial intelligence. Nat Neurosci. 2022 Jan;25(1):116-126. doi: 10.1038/s41593-021-00962-x. Epub 2021 Dec 16."},
      {"type": "BACKGROUND", "citation": "Fedorenko E. The early origins and the growing popularity of the individual-subject analytic approach in human neuroscience. Current Opinion in Behavioral Sciences. 2021; 40:105-112."},
      {"type": "BACKGROUND", "citation": "Naselaris T, Allen E, Kay K. Extensive sampling for complete models of individual brains. Current Opinion in Behavioral Sciences. 2021; 40:45-51."},
      {"type": "BACKGROUND", "citation": "Poldrack RA. Diving into the deep end: a personal reflection on the MyConnectome study. Current Opinion in Behavioral Sciences. 2021; 40:1-4."},
      {"pmid": "35369044", "type": "BACKGROUND", "citation": "Pritschet L, Taylor CM, Santander T, Jacobs EG. Applying dense-sampling methods to reveal dynamic endocrine modulation of the nervous system. Curr Opin Behav Sci. 2021 Aug;40:72-78. doi: 10.1016/j.cobeha.2021.01.012. Epub 2021 Feb 25."},
      {"pmid": "35512638", "type": "BACKGROUND", "citation": "Gratton C, Nelson SM, Gordon EM. Brain-behavior correlations: Two paths toward reliability. Neuron. 2022 May 4;110(9):1446-1449. doi: 10.1016/j.neuron.2022.04.018."},
      {"pmid": "29557067", "type": "BACKGROUND", "citation": "Smith PL, Little DR. Small is beautiful: In defense of the small-N design. Psychon Bull Rev. 2018 Dec;25(6):2083-2101. doi: 10.3758/s13423-018-1451-8."},
      {"pmid": "24212672", "type": "BACKGROUND", "citation": "Sprague TC, Serences JT. Attention modulates spatial priority maps in the human occipital, parietal and frontal cortices. Nat Neurosci. 2013 Dec;16(12):1879-87. doi: 10.1038/nn.3574. Epub 2013 Nov 10."},
      {"pmid": "31398186", "type": "BACKGROUND", "citation": "Itthipuripat S, Vo VA, Sprague TC, Serences JT. Value-driven attentional capture enhances distractor representations in early visual cortex. PLoS Biol. 2019 Aug 9;17(8):e3000186. doi: 10.1371/journal.pbio.3000186. eCollection 2019 Aug."},
      {"pmid": "32139585", "type": "BACKGROUND", "citation": "Poltoratski S, Tong F. Resolving the Spatial Profile of Figure Enhancement in Human V1 through Population Receptive Field Modeling. J Neurosci. 2020 Apr 15;40(16):3292-3303. doi: 10.1523/JNEUROSCI.2377-19.2020. Epub 2020 Mar 5."},
      {"pmid": "28381491", "type": "BACKGROUND", "citation": "Poltoratski S, Ling S, McCormack D, Tong F. Characterizing the effects of feature salience and top-down attention in the early visual system. J Neurophysiol. 2017 Jul 1;118(1):564-573. doi: 10.1152/jn.00924.2016. Epub 2017 Apr 5."}
    ]},
    "descriptionModule": {
      "briefSummary": "How does one know what to look at in a scene? Imagine a \"Where's Waldo\" game - it's challenging to find Waldo because there are many 'salient' locations in the picture, each vying for one's attention. One can only attend to a small location on the picture at a given moment, so to find Waldo, one needs to direct their attention to different locations. One prominent theory about how one accomplishes this claims that important locations are identified based on distinct feature types (for example, motion or color), with locations most unique compared to the background most likely to be attended. An important component of this theory is that individual feature dimensions (again, color or motion) are computed within their own 'feature maps', which are thought to be implemented in specific brain regions. However, whether and how specific brain regions contribute to these feature maps remains unknown.\n\nThe goal of this study is to determine how brain regions that respond strongly to different feature types (color and motion) and which encode spatial locations of visual stimuli extract 'feature dimension maps' based on stimulus properties, including feature contrast. The investigators hypothesize that feature-selective brain regions act as neural feature dimension maps, and thus encode representations of salient location(s) based on their preferred feature dimension. The investigators will scan healthy human participants using functional MRI (fMRI) in a repeated-measures design while they view visual stimuli made salient based on different combinations of feature dimensions. The investigators will employ state-of-the-art multivariate analysis techniques that allow them to reconstruct an 'image' of the stimulus representation encoded by each brain region to dissect how neural tissue identifies salient locations. Each participant will perform a challenging task at the center of the screen to ensure they keep their eyes still and ignore the stimuli presented in the periphery, which are used to gauge how the visual system automatically extracts important locations without confounding factors like eye movements. Across trials and experiments the investigators will manipulate 1) the 'strength' of the salient locations based on how different the salient stimulus is compared to the background, 2) the number of salient locations, and 3) the feature value(s) used to make each location salient. Altogether, these manipulations will help the investigators fully understand these critical salience computations in the healthy human visual system.",
      "detailedDescription": "In this experiment, participants will engage in all task conditions in a repeated-measures design. Participants are not randomly assigned to groups, as all participants will experience the same set of experimental manipulations. In this experiment, participants will engage in a series of challenging visual attention tasks while their eye position is tracked during fMRI scanning.\n\nIn all tasks, participants will perform challenging discrimination judgments based on a stimulus presented at the fixation point (discriminate the aspect ratio of a + target - wide or tall?). Behavioral responses will be recorded with a button press, which participants will make using a fMRI-compatible button box held in their right hand.\n\nIn this Experiment, the investigators will manipulate aspects of the stimulus display while holding the behavioral task constant. These manipulations will allow the investigators to test the role of feature-selective retinotopic regions of interest (ROIs) in automatically extracting salient information from the environment to guide visual attention.\n\nIn this Experiment (Experiment 1.1), the investigators will test the effect of changing the salience-defining feature of a visual stimulus display on stimulus representations reconstructed from fMRI activation patterns in feature-selective retinotopic ROIs. Participants will continuously perform the challenging fixation discrimination task while occasional salient stimuli appear. Stimuli will consist of flickering checkerboards, static colored dot fields with localized salient patches of differently-colored dots, and moving grayscale dot fields with localized salient patches of dots moving in a different direction. Single neurons in macaque visual cortex have been shown to respond to salient stimuli, and so the investigators expect to see, based on these previous studies, a substantial representation of the salient stimulus location in response patterns measured from feature-selective brain regions.\n\nParticipants will also be scanned for an anatomical \\& retinotopic mapping session, which will allow the investigators to identify brain regions for further analysis using well-established and standardized procedures.\n\nSTATISTICAL DESIGN \\& POWER\n\nThe fMRI studies described in this study record employ an inverted encoding model (IEM) for spatial position to quantify stimulus representations in reconstructed spatial maps of the visual field based on activation patterns measured in retinotopic feature-selective ROIs. The investigators rigorously identify ROIs using independent retinotopic mapping and localizer techniques, and use a 'mapping' task to estimate a 'fixed' encoding model for use across all conditions in each Experiment reported. These design decisions ensure that the investigators can maximize their ability to detect effects of their manipulations of interest within individual participants and brain regions and maximize the statistical power. The investigators use a compromise between deep imaging of several experimental and stimulus conditions within individual participants and aggregation of data across a moderate sample of these deeply-imaged participants (n = 10; see below). This allows the investigators to attain high-quality, reproducible estimates of model-based stimulus representations across task and stimulus manipulations within individual participants and conduct statistical inference on these measurements across the study sample.\n\nfMRI analyses will be conducted within each participant's individual brain, and voxels are assigned 'region' labels according to independent criteria (functional retinotopic mapping). Accordingly, there are no comparisons that require precise alignment of brain tissue between participants, and no generation of group-averaged 'maps' of brain activation. As such, concerns about reproducibility of brain maps and associated statistical power concerns are irrelevant to this study design.\n\nThe statistical design of the study is a repeated-measures design, whereby each participant is exposed to all manipulations in the study. The order of manipulations each participant experiences is randomized across participants. The investigators will employ nonparametric randomization tests for all statistical comparisons whereby they will conduct hypothesis testing (e.g., repeated-measures analysis of variance) using 'shuffled' data (misaligned condition labels relative to measured map activation on each trial) to generate a null distribution of test statistics under the null hypothesis of no effect of their independent variable(s). Once this procedure is repeated extensively (1,000 times) per test, the p-value can be estimated by comparing the test statistic computed using intact labels to this null distribution, and corrected for multiple comparisons as appropriate (e.g., via false discovery rate). Using permutation procedures to generate a null distribution minimizes reliance on parametric assumptions.\n\nAdditionally, the experiments within the study are designed such that sufficient data will be acquired that data from each individual participant can be used to test the effects of interest. Accordingly, each participant can be considered independent 'replication' of each other participant. Previous studies adopting a similar methodology whereby IEM-based reconstructions of visual stimuli are compared between conditions have employed relatively small sample sizes (n = 8; n = 8; n = 7). Other studies using population receptive field models or location-specific functional localizer, which are in principle very similar to the approach employed here, have used smaller sample sizes (e.g., n = 6).\n\nSample size \\& statistical power:\n\nIn this study, the investigators will acquire an intermediate sample size with extensive data per task condition (n = 10; 2 experimental fMRI sessions, each 1.5-2 hrs, for each participant; along with a 2-hr anatomical imaging and retinotopic mapping fMRI session). Of particular interest, one study used n = 6 participants to establish with a large effect size dz = 3.52 that V1 voxels tuned to a stimulus location where a salient stimulus was defined by feature contrast respond more strongly than when feature contrast is absent. In another study, similar effect sizes were reported by this group in a color-selective ROI known as hV4 (n = 6; dz = 1.06 and 1.80 for orientation- and motion-based contrast, respectively).\n\nAccordingly, assuming a conservative effect size of 0.90 (based on those reported previously), the investigators expect a sample size of n = 10 will allow the study to be well-powered (80%, α = 0.05) to detect a similar change in this Experiment (Experiment 1.1), which is most analogous to this study (one-tailed paired T-test).\n\nAdditionally, the investigators used their pilot data (n = 3) to measure the effect size for the critical comparison between salience-related modulations between feature-selective regions to be dz = 3.10 for the salience-defining feature. These values are commensurate with those cited above, and further support the selection of sample size. If analyses of data acquired during further pilot testing \\& experiment refinement suggest smaller effect sizes, the investigators will refine the power analyses and adjust the projected enrollment accordingly to ensure robust and reproducible results. Note that this power analysis relies on parametric assumptions which will not be required for the proposed analyses, which invoke randomization methods to compute empirical null distributions."
    },
    "eligibilityModule": {"sex": "ALL", "stdAges": ["ADULT"], "maximumAge": "55 Years", "minimumAge": "18 Years", "healthyVolunteers": true, "eligibilityCriteria": "Inclusion Criteria:\n\n* between 18 and 55 years of age\n* normal or corrected-to-normal vision\n\nExclusion Criteria:\n\n* neurological disease based on self-report\n* implanted medical devices (e.g., cardiac pacemaker; metallic aneurism clip)\n* non-removable metallic piercings\n* metal fragments in the body (e.g., from welding)\n* pregnant and have a chance of being pregnant (if female)\n* history of claustrophobia\n* history of hearing loss/damage"},
    "identificationModule": {"nctId": "NCT06175312", "briefTitle": "Probing the Role of Feature Dimension Maps in Visual Cognition: Impact of Salience Level (Expt 1.1)", "organization": {"class": "OTHER", "fullName": "University of California, Santa Barbara"}, "officialTitle": "Probing the Role of Feature Dimension Maps in Visual Cognition: Expt 1.1", "orgStudyIdInfo": {"id": "5-23-0569: 1.1"}, "secondaryIdInfos": [{"id": "R01EY035300", "link": "https://reporter.nih.gov/quickSearch/R01EY035300", "type": "NIH"}]},
    "armsInterventionsModule": {
      "armGroups": [{"type": "EXPERIMENTAL", "label": "Manipulations of graded feature salience (Expt 1.1)", "description": "Participants will view stimuli made salient based on feature contrast in one feature dimensions (color or motion direction; or checkerboard luminance contrast). The degree to which a location is salient will be manipulated based on the feature contrast across multiple values", "interventionNames": ["Other: Stimulus properties: salience-defining feature", "Other: Stimulus properties: magnitude of salience"]}],
      "interventions": [
        {"name": "Stimulus properties: salience-defining feature", "type": "OTHER", "description": "The feature used to define a salient location will be varied across trials (checkerboard contrast; motion direction; color hue)", "armGroupLabels": ["Manipulations of graded feature salience (Expt 1.1)"]},
        {"name": "Stimulus properties: magnitude of salience", "type": "OTHER", "description": "The magnitude of the salient location will be varied across trials independently from salience-defining feature (based on feature contrast)", "armGroupLabels": ["Manipulations of graded feature salience (Expt 1.1)"]}
      ]
    },
    "contactsLocationsModule": {"locations": [{"zip": "93117", "city": "Santa Barbara", "state": "California", "country": "United States", "facility": "University of California, Santa Barbara", "geoPoint": {"lat": 34.42083, "lon": -119.69819}}], "overallOfficials": [{"name": "Tommy C Sprague", "role": "PRINCIPAL_INVESTIGATOR", "affiliation": "University of California, Santa Barbara"}]},
    "ipdSharingStatementModule": {"url": "https://osf.io/ufjzl/", "infoTypes": ["STUDY_PROTOCOL", "SAP", "ICF", "ANALYTIC_CODE"], "timeFrame": "Data will be available indefinitely beginning with publication of results", "ipdSharing": "YES", "description": "Processed fMRI and raw behavioral data will be shared with researchers immediately upon publication", "accessCriteria": "Processed fMRI data and raw behavioral/eyetracking data will be publicly available on the lab's Open Science Framework page (https://osf.io/ufjzl/), and analysis code will be available on GitHub (an online tool for storing and managing code; github.com/SpragueLab). Raw, unprocessed fMRI data will be made available upon justifiable request from qualified researchers"},
    "sponsorCollaboratorsModule": {"leadSponsor": {"name": "University of California, Santa Barbara", "class": "OTHER"}, "collaborators": [{"name": "National Eye Institute (NEI)", "class": "NIH"}], "responsibleParty": {"type": "SPONSOR"}}
  }
}
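Records with this structure can be retrieved programmatically. The sketch below assumes the public ClinicalTrials.gov v2 API endpoint https://clinicaltrials.gov/api/v2/studies/{nctId} returns the same top-level shape shown above; verify the endpoint against the current API documentation before relying on it.

# Sketch: pull the study record above and read a few fields.
# The endpoint path is an assumption based on the public ClinicalTrials.gov v2 API.
import requests

NCT_ID = "NCT06175312"
url = f"https://clinicaltrials.gov/api/v2/studies/{NCT_ID}"

resp = requests.get(url, timeout=30)
resp.raise_for_status()
study = resp.json()  # same top-level shape as the record above

protocol = study["protocolSection"]
print(protocol["identificationModule"]["briefTitle"])
print(protocol["statusModule"]["overallStatus"])            # "COMPLETED"
print(protocol["designModule"]["enrollmentInfo"]["count"])  # 10
for outcome in protocol["outcomesModule"]["primaryOutcomes"]:
    print("-", outcome["measure"])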
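The detailedDescription above says stimulus representations are quantified with an inverted encoding model (IEM) for spatial position: an encoding model is fit on an independent mapping task and then inverted to reconstruct channel responses on experimental trials. The following is a minimal sketch of that two-stage procedure on synthetic data; the channel basis, dimensions, and noise level are illustrative assumptions, not the investigators' pipeline.

# Minimal two-stage IEM sketch (synthetic data; all parameters are assumptions).
import numpy as np

rng = np.random.default_rng(0)
n_vox, n_chan, n_trn, n_tst = 100, 8, 200, 40
chan_centers = np.linspace(0, 2 * np.pi, n_chan, endpoint=False)

def channel_responses(stim_pos):
    """Idealized circular tuning of each channel to each stimulus position."""
    d = stim_pos[:, None] - chan_centers[None, :]
    return np.cos(d / 2) ** 6  # trials x channels

# Training ("mapping" task): known positions -> predicted channel responses
pos_trn = rng.uniform(0, 2 * np.pi, n_trn)
C_trn = channel_responses(pos_trn)                       # trials x channels
W_true = rng.normal(size=(n_chan, n_vox))
B_trn = C_trn @ W_true + rng.normal(scale=0.5, size=(n_trn, n_vox))  # trials x voxels

# Stage 1: estimate each voxel's channel weights by least squares
W_hat, *_ = np.linalg.lstsq(C_trn, B_trn, rcond=None)    # channels x voxels

# Stage 2: invert the fixed model to reconstruct channel responses on test trials
pos_tst = rng.uniform(0, 2 * np.pi, n_tst)
B_tst = channel_responses(pos_tst) @ W_true + rng.normal(scale=0.5, size=(n_tst, n_vox))
C_hat, *_ = np.linalg.lstsq(W_hat.T, B_tst.T, rcond=None)  # channels x trials

# Reconstructions should peak near each trial's true position
peak_pos = chan_centers[np.argmax(C_hat, axis=0)]
err = np.angle(np.exp(1j * (peak_pos - pos_tst)))          # wrapped angular error
print("median |error| (rad):", np.median(np.abs(err)))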
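The statistical plan in the record shuffles condition labels 1,000 times to build an empirical null distribution of the test statistic and then applies false discovery rate correction across comparisons. The sketch below illustrates that label-shuffling logic with a simple within-participant condition difference as the test statistic (the record mentions e.g. repeated-measures ANOVA; the statistic and the fake data here are illustrative assumptions).

# Label-shuffling randomization test with FDR correction (illustrative sketch).
import numpy as np
from statsmodels.stats.multitest import multipletests

rng = np.random.default_rng(1)

def perm_test(cond_a, cond_b, n_perm=1000):
    """cond_a, cond_b: participants x trials of some map-based measure."""
    observed = np.mean(cond_a.mean(axis=1) - cond_b.mean(axis=1))
    pooled = np.concatenate([cond_a, cond_b], axis=1)
    n_a = cond_a.shape[1]
    null = np.empty(n_perm)
    for i in range(n_perm):
        # misalign condition labels independently within each participant
        shuffled = rng.permuted(pooled, axis=1)
        null[i] = np.mean(shuffled[:, :n_a].mean(axis=1) - shuffled[:, n_a:].mean(axis=1))
    # two-sided p: fraction of null statistics at least as extreme as observed
    return (np.sum(np.abs(null) >= np.abs(observed)) + 1) / (n_perm + 1)

# Fake data: 10 participants, 40 trials per condition, three ROIs with assumed effects
p_vals = []
for roi_effect in (0.5, 0.3, 0.0):
    a = rng.normal(roi_effect, 1.0, size=(10, 40))
    b = rng.normal(0.0, 1.0, size=(10, 40))
    p_vals.append(perm_test(a, b))

reject, p_fdr, *_ = multipletests(p_vals, alpha=0.05, method="fdr_bh")
print(p_vals, p_fdr, reject)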
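The record's power analysis states that n = 10 gives roughly 80% power at α = 0.05 (one-tailed paired t-test) for an assumed effect size of dz = 0.90. A paired test on difference scores reduces to the one-sample case, so this can be checked with statsmodels; the exact value may differ slightly from whatever tool the investigators used.

# Rough check of the stated power analysis (dz = 0.90, one-tailed, alpha = 0.05, 80% power).
from math import ceil
from statsmodels.stats.power import TTestPower

n_required = TTestPower().solve_power(effect_size=0.90, alpha=0.05, power=0.80,
                                      alternative="larger")
print(n_required, "->", ceil(n_required))  # should land near 9-10, consistent with n = 10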