#!/usr/bin/python
# coding: utf-8
""" Write the work of NARPS team 08MQ using Nipype """
from os.path import join
from itertools import product
from nipype import Node, Workflow, MapNode
from nipype.interfaces.utility import IdentityInterface, Function, Merge, Split, Select
from nipype.interfaces.io import SelectFiles, DataSink
from nipype.interfaces.fsl import (
# General usage
FSLCommand, ImageStats,
# Preprocessing
FAST, BET, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer,
Threshold, Info, SUSAN, FLIRT, ApplyXFM, ConvertXFM,
# Analyses
Level1Design, FEATModel, L2Model, FILMGLS,
FLAMEO, Randomise, MultipleRegressDesign
)
from nipype.interfaces.fsl.utils import Merge as MergeImages
from nipype.algorithms.confounds import CompCor
from nipype.algorithms.modelgen import SpecifyModel
from nipype.interfaces.ants import Registration, WarpTimeSeriesImageMultiTransform, ApplyTransforms
from narps_open.pipelines import Pipeline
from narps_open.data.task import TaskInformation
from narps_open.data.participants import get_group
from narps_open.core.common import (
remove_file, list_intersection, elements_in_string, clean_list, list_to_file
)
# Setup FSL
FSLCommand.set_default_output_type('NIFTI_GZ')
class PipelineTeam08MQ(Pipeline):
""" A class that defines the pipeline of team 08MQ """
def __init__(self):
super().__init__()
self.fwhm = 6.0
self.team_id = '08MQ'
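        # Contrast ids correspond (in order) to the run level contrasts defined below:
        # 1 = positive_effect_gain, 2 = positive_effect_loss, 3 = negative_effect_loss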
self.contrast_list = ['1', '2', '3']
        self.run_level_contrasts = [
('positive_effect_gain', 'T', ['gain', 'loss'], [1, 0]),
('positive_effect_loss', 'T', ['gain', 'loss'], [0, 1]),
('negative_effect_loss', 'T', ['gain', 'loss'], [0, -1])
]
def get_preprocessing(self):
""" Return a Nipype workflow describing the prerpocessing part of the pipeline """
        # IdentityInterface node - allows iterating over subjects and runs
information_source = Node(IdentityInterface(
fields = ['subject_id', 'run_id']),
name = 'information_source')
information_source.iterables = [
('run_id', self.run_list),
('subject_id', self.subject_list),
]
# SelectFiles node - to select necessary files
file_templates = {
'anat': join('sub-{subject_id}', 'anat', 'sub-{subject_id}_T1w.nii.gz'),
'func': join(
'sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-{run_id}_bold.nii.gz'
),
'sbref': join(
'sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-{run_id}_sbref.nii.gz'
),
'magnitude': join('sub-{subject_id}', 'fmap', 'sub-{subject_id}_magnitude1.nii.gz'),
'phasediff': join('sub-{subject_id}', 'fmap', 'sub-{subject_id}_phasediff.nii.gz')
}
select_files = Node(SelectFiles(file_templates), name = 'select_files')
select_files.inputs.base_directory = self.directories.dataset_dir
        # DataSink Node - store the desired results in the output directory
data_sink = Node(DataSink(), name = 'data_sink')
data_sink.inputs.base_directory = self.directories.output_dir
# FAST Node - Bias field correction on anatomical images
bias_field_correction = Node(FAST(), name = 'bias_field_correction')
bias_field_correction.inputs.img_type = 1 # T1 image
bias_field_correction.inputs.output_biascorrected = True
# BET Node - Brain extraction for anatomical images
brain_extraction_anat = Node(BET(), name = 'brain_extraction_anat')
brain_extraction_anat.inputs.frac = 0.5
# FAST Node - Segmentation of anatomical images
segmentation_anat = Node(FAST(), name = 'segmentation_anat')
segmentation_anat.inputs.no_bias = True # Bias field was already removed
segmentation_anat.inputs.segments = False # Only output partial volume estimation
segmentation_anat.inputs.probability_maps = False # Only output partial volume estimation
        # Split Node - Split the probability maps output by the segmentation node
split_segmentation_maps = Node(Split(), name = 'split_segmentation_maps')
split_segmentation_maps.inputs.splits = [1, 1, 1]
        split_segmentation_maps.inputs.squeeze = True # Unfold one-element splits, removing the enclosing list
# ANTs Node - Normalization of anatomical images to T1 MNI152 space
# https://github.com/ANTsX/ANTs/wiki/Anatomy-of-an-antsRegistration-call
normalization_anat = Node(Registration(), name = 'normalization_anat')
normalization_anat.inputs.fixed_image = Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
normalization_anat.inputs.collapse_output_transforms = True
normalization_anat.inputs.convergence_threshold = [1e-06]
normalization_anat.inputs.convergence_window_size = [10]
normalization_anat.inputs.dimension = 3
normalization_anat.inputs.initial_moving_transform_com = True
normalization_anat.inputs.radius_or_number_of_bins = [32, 32, 4]
normalization_anat.inputs.sampling_percentage = [0.25, 0.25, 1]
normalization_anat.inputs.sampling_strategy = ['Regular', 'Regular', 'None']
normalization_anat.inputs.transforms = ['Rigid', 'Affine', 'SyN']
normalization_anat.inputs.metric = ['MI', 'MI', 'CC']
normalization_anat.inputs.transform_parameters = [(0.1,), (0.1,), (0.1, 3.0, 0.0)]
normalization_anat.inputs.metric_weight = [1.0]*3
normalization_anat.inputs.shrink_factors = [[8, 4, 2, 1]]*3
normalization_anat.inputs.smoothing_sigmas = [[3, 2, 1, 0]]*3
normalization_anat.inputs.sigma_units = ['vox']*3
normalization_anat.inputs.number_of_iterations = [
[1000, 500, 250, 100],
[1000, 500, 250, 100],
[100, 70, 50, 20]
]
normalization_anat.inputs.use_histogram_matching = True
normalization_anat.inputs.winsorize_lower_quantile = 0.005
normalization_anat.inputs.winsorize_upper_quantile = 0.995
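        # Note: the per-stage lists above have one entry per registration stage
        # (Rigid, Affine, SyN), each stage running over 4 resolution levels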
# Threshold Node - create white-matter mask
threshold_white_matter = Node(Threshold(), name = 'threshold_white_matter')
threshold_white_matter.inputs.thresh = 1
# Threshold Node - create CSF mask
threshold_csf = Node(Threshold(), name = 'threshold_csf')
threshold_csf.inputs.thresh = 1
# ErodeImage Node - Erode white-matter mask
erode_white_matter = Node(ErodeImage(), name = 'erode_white_matter')
erode_white_matter.inputs.kernel_shape = 'sphere'
erode_white_matter.inputs.kernel_size = 2.0 #mm
# ErodeImage Node - Erode CSF mask
erode_csf = Node(ErodeImage(), name = 'erode_csf')
erode_csf.inputs.kernel_shape = 'sphere'
erode_csf.inputs.kernel_size = 1.5 #mm
# BET Node - Brain extraction of magnitude images
brain_extraction_magnitude = Node(BET(), name = 'brain_extraction_magnitude')
brain_extraction_magnitude.inputs.frac = 0.5
# PrepareFieldmap Node - Convert phase and magnitude to fieldmap images
convert_to_fieldmap = Node(PrepareFieldmap(), name = 'convert_to_fieldmap')
# FLIRT Node - Align high contrast functional images to anatomical
        # (i.e. single-band reference images, a.k.a. sbref)
coregistration_sbref = Node(FLIRT(), name = 'coregistration_sbref')
coregistration_sbref.inputs.interp = 'trilinear'
coregistration_sbref.inputs.cost = 'bbr' # boundary-based registration
# ConvertXFM Node - Inverse coregistration transform, to get anat to func transform
inverse_func_to_anat = Node(ConvertXFM(), name = 'inverse_func_to_anat')
inverse_func_to_anat.inputs.invert_xfm = True
# BET Node - Brain extraction for functional images
brain_extraction_func = Node(BET(), name = 'brain_extraction_func')
brain_extraction_func.inputs.frac = 0.3
brain_extraction_func.inputs.mask = True
brain_extraction_func.inputs.functional = True
# MCFLIRT Node - Motion correction of functional images
motion_correction = Node(MCFLIRT(), name = 'motion_correction')
motion_correction.inputs.cost = 'normcorr'
motion_correction.inputs.interpolation = 'spline' # should be 'trilinear'
motion_correction.inputs.save_plots = True # Save transformation parameters
        # Function Node slice_timings - Create a file with the acquisition time of each slice
slice_timings = Node(Function(
function = list_to_file,
input_names = ['input_list', 'file_name'],
output_names = ['output_file']
), name = 'slice_timings')
slice_timings.inputs.input_list = TaskInformation()['SliceTiming']
slice_timings.inputs.file_name = 'slice_timings.tsv'
# SliceTimer Node - Slice time correction
slice_time_correction = Node(SliceTimer(), name = 'slice_time_correction')
slice_time_correction.inputs.time_repetition = TaskInformation()['RepetitionTime']
# ImageStats Node - Compute median of voxel values to derive SUSAN's brightness_threshold
        # we do not need to filter on non-zero values (option -P) because a mask is passed
compute_median = Node(ImageStats(), name = 'compute_median')
compute_median.inputs.op_string = '-p 50' # Median is calculated as the 50th percentile
# SUSAN Node - smoothing of functional images
        # we set brightness_threshold to 0.75 x the median of the input file, as done by fMRIPrep
smoothing = Node(SUSAN(), name = 'smoothing')
smoothing.inputs.fwhm = self.fwhm
compute_brightness_threshold = lambda x : .75 * x
# ApplyXFM Node - Alignment of white matter to functional space
alignment_white_matter = Node(ApplyXFM(), name = 'alignment_white_matter')
alignment_white_matter.inputs.apply_xfm = True
# ApplyXFM Node - Alignment of CSF to functional space
alignment_csf = Node(ApplyXFM(), name = 'alignment_csf')
alignment_csf.inputs.apply_xfm = True
# ApplyTransforms Node - Alignment of functional data to anatomical space
        # warning: ApplyTransforms only accepts a list as its transforms input
alignment_func_to_anat = Node(ApplyTransforms(), name = 'alignment_func_to_anat')
transform_as_list = lambda x : [x]
# Select Node - Change the order of transforms coming from ANTs Registration
reverse_transform_order = Node(Select(), name = 'reverse_transform_order')
reverse_transform_order.inputs.index = [1, 0]
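        # Note (assumption): Registration outputs forward_transforms as [affine, warp],
        # while WarpTimeSeriesImageMultiTransform expects the warp field to come first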
# ApplyWarp Node - Alignment of functional data to MNI space
alignment_func_to_mni = Node(WarpTimeSeriesImageMultiTransform(),
name = 'alignment_func_to_mni')
alignment_func_to_mni.inputs.reference_image = \
Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
# Merge Node - Merge the two masks (WM and CSF) in one input for the next node
merge_masks = Node(Merge(2), name = 'merge_masks')
# CompCor Node - Compute anatomical confounds (regressors of no interest in the model)
# from the WM and CSF masks
compute_confounds = Node(CompCor(), name = 'compute_confounds')
compute_confounds.inputs.num_components = 4
compute_confounds.inputs.merge_method = 'union'
compute_confounds.inputs.repetition_time = TaskInformation()['RepetitionTime']
        # Function Nodes remove_file - Remove sizeable files once they are no longer needed
remove_func_0 = MapNode(Function(
function = remove_file,
input_names = ['_', 'file_name'],
output_names = []
), name = 'remove_func_0', iterfield = 'file_name')
remove_func_1 = MapNode(Function(
function = remove_file,
input_names = ['_', 'file_name'],
output_names = []
), name = 'remove_func_1', iterfield = 'file_name')
remove_func_2 = MapNode(Function(
function = remove_file,
input_names = ['_', 'file_name'],
output_names = []
), name = 'remove_func_2', iterfield = 'file_name')
remove_func_3 = MapNode(Function(
function = remove_file,
input_names = ['_', 'file_name'],
output_names = []
), name = 'remove_func_3', iterfield = 'file_name')
remove_func_4 = MapNode(Function(
function = remove_file,
input_names = ['_', 'file_name'],
output_names = []
), name = 'remove_func_4', iterfield = 'file_name')
preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing')
preprocessing.config['execution']['stop_on_first_crash'] = 'true'
preprocessing.connect([
# Inputs
(information_source, select_files, [('subject_id', 'subject_id'), ('run_id', 'run_id')]),
# Anatomical images
(select_files, bias_field_correction, [('anat', 'in_files')]),
(bias_field_correction, brain_extraction_anat, [('restored_image', 'in_file')]),
(brain_extraction_anat, segmentation_anat, [('out_file', 'in_files')]),
(brain_extraction_anat, normalization_anat, [('out_file', 'moving_image')]),
(segmentation_anat, split_segmentation_maps, [('partial_volume_files', 'inlist')]),
(split_segmentation_maps, threshold_white_matter, [('out2', 'in_file')]),
(split_segmentation_maps, threshold_csf, [('out1', 'in_file')]),
(threshold_white_matter, erode_white_matter, [('out_file', 'in_file')]),
(threshold_csf, erode_csf, [('out_file', 'in_file')]),
(erode_white_matter, alignment_white_matter, [('out_file', 'in_file')]),
(inverse_func_to_anat, alignment_white_matter, [('out_file', 'in_matrix_file')]),
(select_files, alignment_white_matter, [('sbref', 'reference')]),
(erode_csf, alignment_csf, [('out_file', 'in_file')]),
(inverse_func_to_anat, alignment_csf, [('out_file', 'in_matrix_file')]),
(select_files, alignment_csf, [('sbref', 'reference')]),
(alignment_csf, merge_masks, [('out_file', 'in1')]),
(alignment_white_matter, merge_masks, [('out_file', 'in2')]),
# Field maps
(select_files, brain_extraction_magnitude, [('magnitude', 'in_file')]),
(brain_extraction_magnitude, convert_to_fieldmap, [('out_file', 'in_magnitude')]),
(select_files, convert_to_fieldmap, [('phasediff', 'in_phase')]),
# High contrast functional volume
(select_files, coregistration_sbref, [('sbref', 'in_file')]),
(select_files, coregistration_sbref, [('anat', 'reference')]),
(convert_to_fieldmap, coregistration_sbref, [('out_fieldmap', 'fieldmap')]),
(coregistration_sbref, inverse_func_to_anat, [('out_matrix_file', 'in_file')]),
# Functional images
(select_files, brain_extraction_func, [('func', 'in_file')]),
(brain_extraction_func, motion_correction, [('out_file', 'in_file')]),
(select_files, motion_correction, [('sbref', 'ref_file')]),
(slice_timings, slice_time_correction, [('output_file', 'custom_timings')]),
(motion_correction, slice_time_correction, [('out_file', 'in_file')]),
(slice_time_correction, smoothing, [('slice_time_corrected_file', 'in_file')]),
(slice_time_correction, compute_median, [('slice_time_corrected_file', 'in_file')]),
(brain_extraction_func, compute_median, [('mask_file', 'mask_file')]),
(compute_median, smoothing, [(
('out_stat', compute_brightness_threshold), 'brightness_threshold')
]),
(smoothing, alignment_func_to_anat, [('smoothed_file', 'input_image')]),
(coregistration_sbref, alignment_func_to_anat, [(
('out_matrix_file', transform_as_list), 'transforms')
]),
(brain_extraction_anat, alignment_func_to_anat, [('out_file', 'reference_image')]),
(alignment_func_to_anat, alignment_func_to_mni, [('output_image', 'input_image')]),
(normalization_anat, reverse_transform_order, [('forward_transforms', 'inlist')]),
(reverse_transform_order, alignment_func_to_mni, [('out', 'transformation_series')]),
(merge_masks, compute_confounds, [('out', 'mask_files')]), # Masks are in the func space
(slice_time_correction, compute_confounds, [
('slice_time_corrected_file', 'realigned_file')
]),
# Outputs of preprocessing
(motion_correction, data_sink, [('par_file', 'preprocessing.@par_file')]),
(compute_confounds, data_sink, [('components_file', 'preprocessing.@components_file')]),
(alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')]),
# File removals
(motion_correction, remove_func_0, [('out_file', 'file_name')]),
(data_sink, remove_func_0, [('out_file', '_')]),
(slice_time_correction, remove_func_1, [('slice_time_corrected_file', 'file_name')]),
(data_sink, remove_func_1, [('out_file', '_')]),
(smoothing, remove_func_2, [('smoothed_file', 'file_name')]),
(data_sink, remove_func_2, [('out_file', '_')]),
(alignment_func_to_anat, remove_func_3, [('output_image', 'file_name')]),
(data_sink, remove_func_3, [('out_file', '_')]),
(alignment_func_to_mni, remove_func_4, [('output_image', 'file_name')]),
(data_sink, remove_func_4, [('out_file', '_')])
])
return preprocessing
def get_preprocessing_outputs(self):
""" Return a list of the files generated by the preprocessing """
parameters = {
'subject_id': self.subject_list,
'run_id': self.run_list,
'file': [
'components_file.txt',
'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf.nii.gz.par',
'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf_st_smooth_flirt_wtsimt.nii.gz'
]
}
parameter_sets = product(*parameters.values())
template = join(
self.directories.output_dir,
'preprocessing',
'_run_id_{run_id}_subject_id_{subject_id}',
'{file}'
)
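        # e.g. (for a hypothetical subject '001' and run '01'):
        # <output_dir>/preprocessing/_run_id_01_subject_id_001/components_file.txt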
return [template.format(**dict(zip(parameters.keys(), parameter_values)))\
for parameter_values in parameter_sets]
def get_subject_information(event_file):
"""
        Extract information from an event file to set up the model. Four regressors are extracted:
        - event: a regressor with a 4-second ON duration
        - gain: a parametric modulation of events corresponding to gain magnitude. Mean centred.
        - loss: a parametric modulation of events corresponding to loss magnitude. Mean centred.
        - response: a regressor with 1 for accept and -1 for reject. Mean centred.
        Parameters:
        - event_file: str, event file corresponding to the run and the subject to analyze
        Returns:
- subject_info : list of Bunch containing event information
"""
from nipype.interfaces.base import Bunch
condition_names = ['event', 'gain', 'loss', 'response']
onsets = {}
durations = {}
amplitudes = {}
# Create dictionary items with empty lists
for condition in condition_names:
onsets.update({condition : []})
durations.update({condition : []})
amplitudes.update({condition : []})
# Parse information in the event_file
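        # Assumed events.tsv columns (tab-separated):
        # onset, duration, gain, loss, RT, participant_response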
with open(event_file, 'rt') as file:
next(file) # skip the header
for line in file:
info = line.strip().split()
onsets['event'].append(float(info[0]))
durations['event'].append(float(info[1]))
amplitudes['event'].append(1.0)
onsets['gain'].append(float(info[0]))
durations['gain'].append(float(info[4])) # TODO : change to info[1] (= 4) ?
amplitudes['gain'].append(float(info[2]))
onsets['loss'].append(float(info[0]))
durations['loss'].append(float(info[4])) # TODO : change to info[1] (= 4) ?
amplitudes['loss'].append(float(info[3]))
onsets['response'].append(float(info[0]))
durations['response'].append(float(info[1])) # TODO : change to info[4] (= RT) ?
if 'accept' in info[5]:
amplitudes['response'].append(1.0)
elif 'reject' in info[5]:
amplitudes['response'].append(-1.0)
else:
amplitudes['response'].append(0.0)
return [
Bunch(
conditions = condition_names,
onsets = [onsets[k] for k in condition_names],
durations = [durations[k] for k in condition_names],
amplitudes = [amplitudes[k] for k in condition_names],
regressor_names = None,
regressors = None)
]
def get_run_level_analysis(self):
""" Return a Nipype workflow describing the run level analysis part of the pipeline
Returns:
        - run_level_analysis: nipype.Workflow
"""
        # IdentityInterface node - allows iterating over subjects and runs
information_source = Node(IdentityInterface(
fields = ['subject_id', 'run_id']),
name = 'information_source')
information_source.iterables = [
('run_id', self.run_list),
('subject_id', self.subject_list),
]
# SelectFiles node - to select necessary files
templates = {
# Functional MRI - computed by preprocessing
'func' : join(self.directories.output_dir, 'preprocessing',
'_run_id_{run_id}_subject_id_{subject_id}',
'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf_st_smooth_flirt_wtsimt.nii.gz'
),
# Event file - from the original dataset
'event' : join('sub-{subject_id}', 'func',
'sub-{subject_id}_task-MGT_run-{run_id}_events.tsv'
),
# Motion parameters - computed by preprocessing's motion_correction Node
'motion' : join(self.directories.output_dir, 'preprocessing',
'_run_id_{run_id}_subject_id_{subject_id}',
'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf.nii.gz.par',
)
}
select_files = Node(SelectFiles(templates), name = 'select_files')
select_files.inputs.base_directory = self.directories.dataset_dir
        # DataSink Node - store the desired results in the output directory
data_sink = Node(DataSink(), name = 'data_sink')
data_sink.inputs.base_directory = self.directories.output_dir
# Function Node get_subject_information - Get subject information from event files
subject_information = Node(Function(
function = self.get_subject_information,
input_names = ['event_file'],
output_names = ['subject_info']
), name = 'subject_information')
# SpecifyModel Node - Generates a model
specify_model = Node(SpecifyModel(), name = 'specify_model')
specify_model.inputs.high_pass_filter_cutoff = 90
specify_model.inputs.input_units = 'secs'
specify_model.inputs.time_repetition = TaskInformation()['RepetitionTime']
specify_model.inputs.parameter_source = 'FSL' # Source of motion parameters.
# Level1Design Node - Generate files for first level computation
model_design = Node(Level1Design(), 'model_design')
model_design.inputs.bases = {
'dgamma':{'derivs' : True} # Canonical double gamma HRF plus temporal derivative
}
model_design.inputs.interscan_interval = TaskInformation()['RepetitionTime']
model_design.inputs.model_serial_correlations = True
        model_design.inputs.contrasts = self.run_level_contrasts
# FEATModel Node - Generate first level model
model_generation = Node(FEATModel(), name = 'model_generation')
# FILMGLS Node - Estimate first level model
model_estimate = Node(FILMGLS(), name = 'model_estimate')
        # Create the run level analysis workflow and connect its nodes
run_level_analysis = Workflow(
base_dir = self.directories.working_dir,
name = 'run_level_analysis'
)
run_level_analysis.connect([
(information_source, select_files, [('subject_id', 'subject_id'), ('run_id', 'run_id')]),
(select_files, subject_information, [('event', 'event_file')]),
(subject_information, specify_model, [('subject_info', 'subject_info')]),
(select_files, specify_model, [('motion', 'realignment_parameters')]),
(select_files, specify_model, [('func', 'functional_runs')]),
(specify_model, model_design, [('session_info', 'session_info')]),
(model_design, model_generation, [
('ev_files', 'ev_files'),
('fsf_files', 'fsf_file')]),
(select_files, model_estimate, [('func', 'in_file')]),
(model_generation, model_estimate, [
('con_file', 'tcon_file'),
('design_file', 'design_file')]),
(model_estimate, data_sink, [('results_dir', 'run_level_analysis.@results')]),
(model_generation, data_sink, [
('design_file', 'run_level_analysis.@design_file'),
('design_image', 'run_level_analysis.@design_img')]),
])
return run_level_analysis
def get_run_level_outputs(self):
""" Return a list of the files generated by the run level analysis """
parameters = {
'run_id' : self.run_list,
'subject_id' : self.subject_list,
'file' : [
'run0.mat',
'run0.png'
]
}
parameter_sets = product(*parameters.values())
template = join(
self.directories.output_dir,
'run_level_analysis', '_run_id_{run_id}_subject_id_{subject_id}','{file}'
)
return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\
for parameter_values in parameter_sets]
parameters = {
'run_id' : self.run_list,
'subject_id' : self.subject_list,
'contrast_id' : self.contrast_list,
'file' : [
join('results', 'cope{contrast_id}.nii.gz'),
join('results', 'tstat{contrast_id}.nii.gz'),
join('results', 'varcope{contrast_id}.nii.gz'),
join('results', 'zstat{contrast_id}.nii.gz'),
]
}
parameter_sets = product(*parameters.values())
template = join(
self.directories.output_dir,
'run_level_analysis', '_run_id_{run_id}_subject_id_{subject_id}','{file}'
)
return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\
for parameter_values in parameter_sets]
return return_list
def get_subject_level_analysis(self):
""" Return a Nipype workflow describing the subject level analysis part of the pipeline """
        # IdentityInterface node - allows iterating over subjects and contrasts
information_source = Node(IdentityInterface(
fields = ['subject_id', 'contrast_id']),
name = 'information_source')
information_source.iterables = [
('subject_id', self.subject_list),
('contrast_id', self.contrast_list)
]
# SelectFiles Node - select necessary files
templates = {
'cope' : join(self.directories.output_dir, 'run_level_analysis',
'_run_id_*_subject_id_{subject_id}', 'results', 'cope{contrast_id}.nii.gz'),
'varcope' : join(self.directories.output_dir, 'run_level_analysis',
'_run_id_*_subject_id_{subject_id}', 'results', 'varcope{contrast_id}.nii.gz')
}
select_files = Node(SelectFiles(templates), name = 'select_files')
select_files.inputs.base_directory = self.directories.dataset_dir
        # DataSink Node - store the desired results in the output directory
data_sink = Node(DataSink(), name = 'data_sink')
data_sink.inputs.base_directory = self.directories.output_dir
# L2Model Node - Generate subject specific second level model
generate_model = Node(L2Model(), name = 'generate_model')
generate_model.inputs.num_copes = len(self.run_list)
# Merge Node - Merge copes files for each subject
merge_copes = Node(MergeImages(), name = 'merge_copes')
merge_copes.inputs.dimension = 't'
# Merge Node - Merge varcopes files for each subject
merge_varcopes = Node(MergeImages(), name = 'merge_varcopes')
merge_varcopes.inputs.dimension = 't'
# FLAMEO Node - Estimate model
estimate_model = Node(FLAMEO(), name = 'estimate_model')
estimate_model.inputs.run_mode = 'fe' # Fixed effect
estimate_model.inputs.mask_file = Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
# Second level (single-subject, mean of all four scans) analyses: Fixed effects analysis.
subject_level_analysis = Workflow(
base_dir = self.directories.working_dir,
name = 'subject_level_analysis')
subject_level_analysis.connect([
(information_source, select_files, [
('subject_id', 'subject_id'),
('contrast_id', 'contrast_id')]),
(select_files, merge_copes, [('cope', 'in_files')]),
(select_files, merge_varcopes, [('varcope', 'in_files')]),
(merge_copes, estimate_model, [('merged_file', 'cope_file')]),
(merge_varcopes, estimate_model, [('merged_file', 'var_cope_file')]),
(generate_model, estimate_model, [
('design_mat', 'design_file'),
('design_con', 't_con_file'),
('design_grp', 'cov_split_file')]),
(estimate_model, data_sink, [
('zstats', 'subject_level_analysis.@stats'),
('tstats', 'subject_level_analysis.@tstats'),
('copes', 'subject_level_analysis.@copes'),
('var_copes', 'subject_level_analysis.@varcopes')])])
return subject_level_analysis
def get_subject_level_outputs(self):
""" Return a list of the files generated by the subject level analysis """
parameters = {
'contrast_id' : self.contrast_list,
'subject_id' : self.subject_list,
'file' : ['cope1.nii.gz', 'tstat1.nii.gz', 'varcope1.nii.gz', 'zstat1.nii.gz']
}
parameter_sets = product(*parameters.values())
template = join(
self.directories.output_dir,
'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_{subject_id}','{file}'
)
return [template.format(**dict(zip(parameters.keys(), parameter_values)))\
for parameter_values in parameter_sets]
def get_one_sample_t_test_regressors(subject_list: list) -> dict:
"""
Create dictionary of regressors for one sample t-test group analysis.
Parameters:
        - subject_list: ids of subjects in the group for which to do the analysis
Returns:
- dict containing named lists of regressors.
"""
return dict(group_mean = [1 for _ in subject_list])
def get_two_sample_t_test_regressors(
equal_range_ids: list,
equal_indifference_ids: list,
subject_list: list,
    ) -> tuple:
"""
Create dictionary of regressors for two sample t-test group analysis.
Parameters:
- equal_range_ids: ids of subjects in equal range group
- equal_indifference_ids: ids of subjects in equal indifference group
        - subject_list: ids of subjects for which to do the analysis
Returns:
- regressors, dict: containing named lists of regressors.
- groups, list: group identifiers to distinguish groups in FSL analysis.
"""
        # Create 2 lists containing n_sub values, which are
        # * 1 if the participant is in the group
        # * 0 otherwise
equal_range_regressors = [1 if i in equal_range_ids else 0 for i in subject_list]
equal_indifference_regressors = [
1 if i in equal_indifference_ids else 0 for i in subject_list
]
        # Create the regressors output: a dict with the two lists
regressors = dict(
equalRange = equal_range_regressors,
equalIndifference = equal_indifference_regressors
)
        # Create the groups output: a list with 1 for equalRange subjects and 2 for equalIndifference subjects
groups = [1 if i == 1 else 2 for i in equal_range_regressors]
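        # Example (hypothetical ids): equal_range_ids = ['001', '003'],
        # equal_indifference_ids = ['002'], subject_list = ['001', '002', '003']
        # gives regressors = {'equalRange': [1, 0, 1], 'equalIndifference': [0, 1, 0]}
        # and groups = [1, 2, 1]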
return regressors, groups
def get_group_level_analysis(self):
""" Return all workflows for the group level analysis. """
methods = ['equalRange', 'equalIndifference', 'groupComp']
return [self.get_group_level_analysis_sub_workflow(method) for method in methods]
def get_group_level_analysis_sub_workflow(self, method):
"""
Return a workflow for the group level analysis.
Parameters:
- method: one of 'equalRange', 'equalIndifference' or 'groupComp'
Returns:
        - group_level_analysis: nipype.Workflow
"""
# Infosource Node - iterate over the contrasts generated by the subject level analysis
information_source = Node(
IdentityInterface(
fields = ['contrast_id']
),
name = 'information_source',
)
information_source.iterables = [('contrast_id', self.contrast_list)]
# SelectFiles Node - select necessary files
templates = {
'cope' : join(self.directories.output_dir, 'subject_level_analysis',
'_contrast_id_{contrast_id}_subject_id_*', 'cope1.nii.gz'),
'varcope' : join(self.directories.output_dir, 'subject_level_analysis',
'_contrast_id_{contrast_id}_subject_id_*', 'varcope1.nii.gz')
}
select_files = Node(SelectFiles(templates), name = 'select_files')
select_files.inputs.base_directory = self.directories.dataset_dir
        select_files.inputs.force_lists = True
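        # force_lists makes SelectFiles return lists even for single matches,
        # since the wildcard (*) templates match several subjects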
# Datasink Node - save important files
data_sink = Node(DataSink(), name = 'data_sink')
data_sink.inputs.base_directory = self.directories.output_dir
# Function Node elements_in_string
# Get contrast of parameter estimates (cope) for these subjects
# Note : using a MapNode with elements_in_string requires using clean_list to remove
# None values from the out_list
get_copes = MapNode(Function(
function = elements_in_string,
input_names = ['input_str', 'elements'],
output_names = ['out_list']
),
name = 'get_copes', iterfield = 'input_str'
)
# Function Node elements_in_string
# Get variance of the estimated copes (varcope) for these subjects
# Note : using a MapNode with elements_in_string requires using clean_list to remove
# None values from the out_list
get_varcopes = MapNode(Function(
function = elements_in_string,
input_names = ['input_str', 'elements'],
output_names = ['out_list']
),
name = 'get_varcopes', iterfield = 'input_str'
)
# Merge Node - Merge cope files
merge_copes = Node(MergeImages(), name = 'merge_copes')
merge_copes.inputs.dimension = 't'
        # Merge Node - Merge varcope files
merge_varcopes = Node(MergeImages(), name = 'merge_varcopes')
merge_varcopes.inputs.dimension = 't'
# MultipleRegressDesign Node - Specify model
specify_model = Node(MultipleRegressDesign(), name = 'specify_model')
# FLAMEO Node - Estimate model
estimate_model = Node(FLAMEO(), name = 'estimate_model')
estimate_model.inputs.run_mode = 'ols' # Ordinary least squares
estimate_model.inputs.mask_file = Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
        # Randomise Node - Perform non-parametric permutation tests (TFCE)
randomise = Node(Randomise(), name = 'randomise')
randomise.inputs.num_perm = 10000
randomise.inputs.tfce = True
randomise.inputs.vox_p_values = True
randomise.inputs.c_thresh = 0.05
randomise.inputs.tfce_E = 0.01
randomise.inputs.mask = Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
# Compute the number of participants used to do the analysis
nb_subjects = len(self.subject_list)
# Declare the workflow
group_level_analysis = Workflow(
base_dir = self.directories.working_dir,
name = f'group_level_analysis_{method}_nsub_{nb_subjects}'
)
group_level_analysis.connect([
(information_source, select_files, [('contrast_id', 'contrast_id')]),
(select_files, get_copes, [('cope', 'input_str')]),
(select_files, get_varcopes, [('varcope', 'input_str')]),
(get_copes, merge_copes, [(('out_list', clean_list), 'in_files')]),
(get_varcopes, merge_varcopes,[(('out_list', clean_list), 'in_files')]),
(merge_copes, estimate_model, [('merged_file', 'cope_file')]),
(merge_varcopes, estimate_model, [('merged_file', 'var_cope_file')]),
(specify_model, estimate_model, [
('design_mat', 'design_file'),
('design_con', 't_con_file'),
('design_grp', 'cov_split_file')
]),
(merge_copes, randomise, [('merged_file', 'in_file')]),
(specify_model, randomise, [
('design_mat', 'design_mat'),
('design_con', 'tcon')
]),
(randomise, data_sink, [
('t_corrected_p_files',
f'group_level_analysis_{method}_nsub_{nb_subjects}.@tcorpfile'),
('tstat_files', f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstat')
]),
(estimate_model, data_sink, [
('zstats', f'group_level_analysis_{method}_nsub_{nb_subjects}.@zstats'),
('tstats', f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstats')
])
])
if method in ('equalRange', 'equalIndifference'):
# Setup a one sample t-test
specify_model.inputs.contrasts = [
('Group', 'T', ['group_mean'], [1]),
('Group', 'T', ['group_mean'], [-1])
]
# Function Node get_group_subjects - Get subjects in the group and in the subject_list
get_group_subjects = Node(Function(
function = list_intersection,
input_names = ['list_1', 'list_2'],
output_names = ['out_list']
),
name = 'get_group_subjects'
)
get_group_subjects.inputs.list_1 = get_group(method)
get_group_subjects.inputs.list_2 = self.subject_list
# Function Node get_one_sample_t_test_regressors
# Get regressors in the equalRange and equalIndifference method case
regressors_one_sample = Node(
Function(
function = self.get_one_sample_t_test_regressors,
input_names = ['subject_list'],
output_names = ['regressors']
),
name = 'regressors_one_sample',
)
# Add missing connections
group_level_analysis.connect([
(get_group_subjects, get_copes, [('out_list', 'elements')]),
(get_group_subjects, get_varcopes, [('out_list', 'elements')]),
(get_group_subjects, regressors_one_sample, [('out_list', 'subject_list')]),
(regressors_one_sample, specify_model, [('regressors', 'regressors')])
])
elif method == 'groupComp':
            # Select copes and varcopes corresponding to the selected subjects,
            # because the SelectFiles node asks for all (*) available subjects
get_copes.inputs.elements = self.subject_list
get_varcopes.inputs.elements = self.subject_list
# Setup a two sample t-test
specify_model.inputs.contrasts = [(
'Eq range vs Eq indiff in loss',
'T',
['equalRange', 'equalIndifference'],
[1, -1]
)]
# Function Node get_equal_range_subjects
# Get subjects in the equalRange group and in the subject_list
get_equal_range_subjects = Node(Function(
function = list_intersection,
input_names = ['list_1', 'list_2'],
output_names = ['out_list']
),
name = 'get_equal_range_subjects'
)
get_equal_range_subjects.inputs.list_1 = get_group('equalRange')
get_equal_range_subjects.inputs.list_2 = self.subject_list
# Function Node get_equal_indifference_subjects
# Get subjects in the equalIndifference group and in the subject_list
get_equal_indifference_subjects = Node(Function(
function = list_intersection,
input_names = ['list_1', 'list_2'],
output_names = ['out_list']
),
name = 'get_equal_indifference_subjects'
)
get_equal_indifference_subjects.inputs.list_1 = get_group('equalIndifference')
get_equal_indifference_subjects.inputs.list_2 = self.subject_list
# Function Node get_two_sample_t_test_regressors
# Get regressors in the groupComp method case
regressors_two_sample = Node(
Function(
function = self.get_two_sample_t_test_regressors,
input_names = [
'equal_range_ids',
'equal_indifference_ids',
'subject_list',
],
output_names = ['regressors', 'groups']
),
name = 'regressors_two_sample',
)
regressors_two_sample.inputs.subject_list = self.subject_list
# Add missing connections
group_level_analysis.connect([
(get_equal_range_subjects, regressors_two_sample, [
('out_list', 'equal_range_ids')
]),
(get_equal_indifference_subjects, regressors_two_sample, [
('out_list', 'equal_indifference_ids')
]),
(regressors_two_sample, specify_model, [
('regressors', 'regressors'),
('groups', 'groups')])
])
return group_level_analysis
def get_hypotheses_outputs(self):
""" Return the names of the files used by the team to answer the hypotheses of NARPS. """
nb_sub = len(self.subject_list)
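        # Two files per hypothesis (the TFCE-corrected t-map, then the z-map), listed in
        # hypothesis order H1 to H9; several hypotheses share the same maps, hence the duplicates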
files = [
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
'_contrast_id_1', 'randomise_tfce_corrp_tstat1.nii.gz'),
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
'_contrast_id_1', 'zstat1.nii.gz'),
join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
'_contrast_id_1', 'randomise_tfce_corrp_tstat1.nii.gz'),
join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
'_contrast_id_1', 'zstat1.nii.gz'),
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
'_contrast_id_1', 'randomise_tfce_corrp_tstat1.nii.gz'),
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
'_contrast_id_1', 'zstat1.nii.gz'),
join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
'_contrast_id_1', 'randomise_tfce_corrp_tstat1.nii.gz'),
join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
'_contrast_id_1', 'zstat1.nii.gz'),
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
'_contrast_id_2', 'randomise_tfce_corrp_tstat2.nii.gz'),
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
'_contrast_id_2', 'zstat2.nii.gz'),
join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
'_contrast_id_2', 'randomise_tfce_corrp_tstat2.nii.gz'),
join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
'_contrast_id_2', 'zstat2.nii.gz'),
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
'_contrast_id_2', 'randomise_tfce_corrp_tstat1.nii.gz'),
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
'_contrast_id_2', 'zstat1.nii.gz'),
join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
'_contrast_id_2', 'randomise_tfce_corrp_tstat1.nii.gz'),
join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
'_contrast_id_2', 'zstat1.nii.gz'),
join(f'group_level_analysis_groupComp_nsub_{nb_sub}',
'_contrast_id_2', 'randomise_tfce_corrp_tstat1.nii.gz'),
join(f'group_level_analysis_groupComp_nsub_{nb_sub}',
'_contrast_id_2', 'zstat1.nii.gz')
]
return [join(self.directories.output_dir, f) for f in files]
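# A minimal usage sketch (assumption: the Pipeline base class exposes writable
# subject_list / run_list attributes and a directories object, as used above;
# paths and ids are hypothetical):
#
#     pipeline = PipelineTeam08MQ()
#     pipeline.subject_list = ['001', '002']
#     pipeline.run_list = ['01', '02', '03', '04']
#     pipeline.directories.dataset_dir = '/data/ds001734'
#     pipeline.directories.output_dir = '/output'
#     pipeline.get_preprocessing().run()  # each get_*() returns a nipype Workflow to run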