Face features (FaceNet) models
Contents
Face features (FaceNet) models¶
Set of models investigating face perception:
single-predictor model with binary regressor coding for presence of any face (any_faces)
any_faces + speech
any_faces + speech + log of cumulative time the detected face has been on screen (mean across faces, log_mean_face_time_cum)
# Project-local helper that builds one Neuroscout analysis per dataset/task.
from create import create_models
# pyNS: Python client library for the Neuroscout API.
from pyns import Neuroscout
from matplotlib import pyplot as plt
from pathlib import Path
import sys
# Make the parent directory importable so the shared `utils` module resolves.
sys.path.append("..")
# Helpers to persist/restore the created model collection as JSON.
from utils import dump_collection, load_collection
%matplotlib inline
/opt/miniconda-latest/envs/neuroscout_paper/lib/python3.8/site-packages/nilearn/datasets/__init__.py:86: FutureWarning: Fetchers from the nilearn.datasets module will be updated in version 0.9 to return python strings instead of bytes and Pandas dataframes instead of Numpy arrays.
warn("Fetchers from the nilearn.datasets module will be "
# Client handle for the Neuroscout API.
api = Neuroscout()

# Nuisance regressors: the first six anatomical CompCor components plus the
# six rigid-body motion parameters (translations and rotations).
confounds = ['a_comp_cor_%02d' % i for i in range(6)]
confounds += ['trans_' + axis for axis in ('x', 'y', 'z')]
confounds += ['rot_' + axis for axis in ('x', 'y', 'z')]

# Fetch all datasets known to Neuroscout.
datasets = api.datasets.get()

# File where the model collection is saved/loaded.
filename = Path('models') / 'facenet.json'
any_faces (only)¶
We are using `create_models`, a simple helper function that uses Neuroscout's pyNS
library to create one analysis per dataset, given a list of predictors and confounds.
# Single-predictor model: a binary regressor coding the presence of any face.
predictors = ['any_faces']
name = '+'.join(predictors)

# Uncomment to re-create the models from scratch:
# models = {}
# models[name] = create_models(name=name, predictors=predictors, datasets=datasets, confounds=confounds)

# Otherwise, restore the previously created collection from disk.
models = load_collection(filename)

# Inspect the first analysis in this model family.
first_entry = models[name][0]
analysis = first_entry['analysis']
analysis
<Analysis hash_id=M8vE1 name=any_faces dataset_id=27>
Generate a report for a single run (the first one) to save time. Note that the order of runs is not guaranteed, so the selected run may vary between executions.
# Generate a report for a single run (the first) to keep runtime short.
analysis.generate_report(run_id=analysis.runs[0])
# Visualize the resulting design matrix.
analysis.plot_report(plot_type='design_matrix_plot')
# Compile analyses if still in DRAFT
# for analysis_dict in models[name]:
#     analysis = analysis_dict['analysis']
#     # Use `== 'DRAFT'` rather than `in 'DRAFT'`: `in` on a string is a
#     # substring test, which would also match e.g. 'RAF' or ''.
#     if analysis.get_status()['status'] == 'DRAFT':
#         analysis.compile()
any_faces + speech¶
# Two-predictor model: face presence plus a speech regressor.
predictors = ['any_faces', 'speech']
name = '+'.join(predictors)

# Uncomment to re-create the models from scratch:
# models[name] = create_models(name=name, predictors=predictors, datasets=datasets, confounds=confounds)

# Inspect the first analysis in this model family.
first_entry = models[name][0]
analysis = first_entry['analysis']
analysis
<Analysis hash_id=spcpc name=any_faces+speech dataset_id=10>
# Only generate a report for a single example run, to save time.
analysis.generate_report(run_id=analysis.runs[0])
# Visualize the resulting design matrix.
analysis.plot_report(plot_type='design_matrix_plot')
# Compile analyses if still in DRAFT
# for analysis_dict in models[name]:
#     analysis = analysis_dict['analysis']
#     # Use `== 'DRAFT'` rather than `in 'DRAFT'`: `in` on a string is a
#     # substring test, which would also match e.g. 'RAF' or ''.
#     if analysis.get_status()['status'] == 'DRAFT':
#         analysis.compile()
any_faces + speech + face_time_cum¶
# Three-predictor model: face presence, speech, and the log of the mean
# cumulative time detected faces have been on screen.
predictors = ['any_faces', 'speech', 'log_mean_face_time_cum']
name = '+'.join(predictors)
# Uncomment to re-create
# models[name] = create_models(name=name, predictors=predictors, datasets=datasets, confounds=confounds)
analysis = models[name][0]['analysis']
analysis.generate_report(run_id=analysis.runs[0])  # Only generate for a single example run, to save time
analysis.plot_report(plot_type='design_matrix_plot')
# Compile analyses
# for analysis_dict in models[name]:
#     analysis = analysis_dict['analysis']
#     # Use `== 'DRAFT'` rather than `in 'DRAFT'`: `in` on a string is a
#     # substring test, which would also match e.g. 'RAF' or ''.
#     if analysis.get_status()['status'] == 'DRAFT':
#         analysis.compile()
# Save models out to file
# dump_collection(models, filename)