diff --git a/eegnb/analysis/analysis_report.html b/eegnb/analysis/analysis_report.html new file mode 100644 index 00000000..55dda918 --- /dev/null +++ b/eegnb/analysis/analysis_report.html @@ -0,0 +1,39 @@ + + + + + Analysis Report + + +
+ Description + Raw Epoch + Stimulus Response +
+
+

Analysis Report

+

+ Experiment Name: {}
+ Subject Id: {}
+ Session Id: {}
+ EEG Device: {}
+ Drop Percentage: {}

+ This is an analysis report for the experiment.
For more information about the experiment, please visit the documentation. +

+
+
+

Raw Epoch

+

+ The raw epoch is shown below. The raw epoch is the data that is recorded from the EEG headset. The raw epoch is then processed to remove noise and artifacts. +

+ Raw Epoch +
+
+

Stimulus Response

+

+ The stimulus response is shown below. The stimulus response is the data that is recorded from the EEG headset after removing noise and artifacts. +

+ Stimulus Response +
+ + \ No newline at end of file diff --git a/eegnb/analysis/analysis_report.py b/eegnb/analysis/analysis_report.py new file mode 100644 index 00000000..9df3b88d --- /dev/null +++ b/eegnb/analysis/analysis_report.py @@ -0,0 +1,95 @@ + +# Generating html using Python + +from airium import Airium +from typing import Dict +import os +import eegnb +import base64 + +a = Airium() + +def get_experiment_information(experiment:str): + analysis_save_path = os.path.join(os.path.dirname(eegnb.__file__), "analysis") + file_path = os.path.join(analysis_save_path, "experiment_descriptions") + + with open(os.path.join(file_path, experiment + ".txt"), 'r') as f: + experiment_text = f.readlines() + + return experiment_text + +def get_img_string(image_save_path): + """ Returns image as string to embed into the html report """ + return base64.b64encode(open(image_save_path, "rb").read()).decode() + +def get_html(experimental_parameters: Dict): + + # add variable to store the link + analysis_save_path = os.path.join(os.path.dirname(eegnb.__file__), "analysis") + css_path = os.path.join(analysis_save_path, "styling.css") + eeg_device, experiment, subject, session, example, drop_percentage, epochs_chosen = experimental_parameters.values() + + erp_image_path = os.path.join(os.getcwd(), "erp_plot.png") + pos_image_path = os.path.join(os.getcwd(), "power_spectrum.png") + + experiment_text = get_experiment_information(experiment) + + + """ Possibility of unique experiment text - decision to be made """ + #experiment_text = "" + #with open('experiment_descriptions/{}.txt'.format(experiment), 'r') as f: + # experiment_text = f.readlines() + + a('') + with a.html(): + with a.head(): + a.link(href=css_path, rel='stylesheet', type="text/css") + a.title(_t="Analysis Report") + + with a.body(): + + # Navigation bar + with a.div(klass="topnav"): + a.a(_t="Description", href="#Description") + a.a(_t="Raw Epoch", href="#Raw Epoch") + a.a(_t="Stimulus Response", href="#Stimulus Response") + + # 
Description + with a.div(id="Description"): + a.h1(_t="Analysis Report") + with a.p(): + a("Experiment Name: {}
".format(experiment)) + + if example: + a("Example File
") + else: + a("Subject Id: {}
".format(subject)) + a("Session Id: {}
".format(session)) + + a("EEG Device: {}
".format(eeg_device)) + a('This is an analysis report for the experiment.
For more information about the experiment, please visit the documentation

') + a("{}
".format(experiment_text[0])) + a("{}
".format(experiment_text[1])) + + # Raw Epoch + with a.div(id="Raw Epoch"): + a.h2(_t="Raw Epoch") + with a.p(): + a("The power spectrum of the raw epoch is displayed below. The raw epoch is then processed to remove noise and artifacts.") + a.img(src="data:image/png;base64, {}".format(get_img_string(pos_image_path)), alt="Raw Epoch") + + # Stimulus Response + with a.div(id="Stimulus Response"): + a.h2(_t="Stimulus Response") + with a.p(): + a("The stimulus response is shown below. The stimulus response is the amplitude response at the specific timescales where the response to the stimulus can be detected.
") + a("Epochs chosen: {}
".format(epochs_chosen)) + a("Drop Percentage: {} %

".format(round(drop_percentage,2))) + a.img(src="data:image/png;base64, {}".format(get_img_string(erp_image_path)), alt="Stimulus Response") + + # Delete the images + os.remove(erp_image_path) + os.remove(pos_image_path) + + # Return the html + return str(a) diff --git a/eegnb/analysis/experiment_descriptions/visual-N170.txt b/eegnb/analysis/experiment_descriptions/visual-N170.txt new file mode 100644 index 00000000..ec9c24a7 --- /dev/null +++ b/eegnb/analysis/experiment_descriptions/visual-N170.txt @@ -0,0 +1,2 @@ +The N170 is a large negative event-related potential (ERP) component that occurs after the detection of faces, but not objects, scrambled faces, or other body parts such as hands. +In the experiment we aim to detect the N170 using faces and houses as our stimuli. \ No newline at end of file diff --git a/eegnb/analysis/experiment_descriptions/visual-P300.txt b/eegnb/analysis/experiment_descriptions/visual-P300.txt new file mode 100644 index 00000000..d756becd --- /dev/null +++ b/eegnb/analysis/experiment_descriptions/visual-P300.txt @@ -0,0 +1,2 @@ +The P300 is a positive event-related potential (ERP) that occurs around 300ms after perceiving a novel or unexpected stimulus. It is most commonly elicited through ‘oddball’ experimental paradigms, where a certain subtype of stimulus is presented rarely amidst a background of another more common type of stimulus. +In the experiment, we aimed to elicit P300 response using a visual oddball stimulation. \ No newline at end of file diff --git a/eegnb/analysis/pipelines.py b/eegnb/analysis/pipelines.py new file mode 100644 index 00000000..a4604531 --- /dev/null +++ b/eegnb/analysis/pipelines.py @@ -0,0 +1,243 @@ +""" + +CLI Pipeline for Analysis of EEGNB Recorded Data + +To do: +1. Beautify analysis pdf +2. 
Handle cli automated errors for report creation + +Usage: + +For Recorded Data: + +from eegnb.analysis.pipelines import create_analysis_report() +create_analysis_report(experiment, eegdevice, subject, session, filepath)s + +For Example Datasets: + +from eegnb.analysis.pipelines import example_analysis_report() +example_analysis_report() + +""" + +# Some standard pythonic imports +import os +from collections import OrderedDict +import warnings +import matplotlib.pyplot as plt +from datetime import datetime +import numpy as np +from typing import Dict + +warnings.filterwarnings('ignore') + +# MNE functions +from mne import Epochs,find_events, create_info +from mne.io import RawArray + +# EEG-Notebooks functions +from eegnb import generate_save_fn +from eegnb.analysis.utils import load_data,plot_conditions, load_csv_as_raw, fix_musemissinglines +from eegnb.analysis.analysis_report import get_html +from eegnb.datasets import fetch_dataset +from eegnb.devices.utils import EEG_INDICES, SAMPLE_FREQS +from pathlib import Path + +DATA_DIR = os.path.join(os.path.expanduser("~/"), ".eegnb", "data") +eegdevice, experiment_name, subject_id, session_nb, example_flag = None, None, None, None, False + +def load_eeg_data(experiment, subject=1, session=1, device_name='muse2016_bfn', tmin=-0.1, tmax=0.6, baseline=None, + reject={'eeg': 5e-5}, preload=True, verbose=1, + picks=[0,1,2,3], event_id = OrderedDict(House=1,Face=2), fnames=None, example=False): + """ + Loads EEG data from the specified experiment, subject, session, and device. + Returns the raw and epochs objects. + + Procedure + 1. Loads the data using file names and retrives if not already present + 2. Epochs the data + 3. Computes the ERP + 4. 
Returns the raw and ERP objects + + Parameters + ---------- + experiment : Experiment Name + subject : Subject ID of performed experiment + session : Session ID of performed experiment + device_name : Device used for performed experiment + tmin : Start time of the epochs in seconds, relative to the time-locked event. + tmax : End time of the epochs in seconds, relative to the time-locked event. + baseline : Not very sure..? + reject : Rejection parameters for the epochs. + preload : If True, preload the epochs into memory. + verbose : If True, print out messages. + picks : Channels to include in the analysis. + event_id : Dictionary of event_id's for the epochs + fnames : File names of the experiment data, if not passed, example files are used + """ + + # If not using the example dataset, load the data from the specified experiment using load_csv_as_raw + if not example: + + # Obataining the specific parameters to load the data into MNE object + sfreq = SAMPLE_FREQS[device_name] + ch_ind = EEG_INDICES[device_name] + + # Generate file names if not passed + if fnames is None: + raw = load_data(subject_id=subject, session_nb=session, experiment=experiment, device_name=device_name, site="local", data_dir=os.path.join(os.path.expanduser('~/'),'.eegnb', 'data')) + + else: + # Replace Ch names has arbitarily been set to None + if device_name in ["muse2016", "muse2", "museS"]: + raw = load_csv_as_raw([fnames], sfreq=sfreq, ch_ind=ch_ind, aux_ind=[5], replace_ch_names=None, verbose=verbose) + else: + raw = load_csv_as_raw([fnames], sfreq=sfreq, ch_ind=ch_ind, replace_ch_names=None, verbose=verbose) + + # Getting the subject and session + subject, session = fnames.split('_')[1], fnames.split('_')[2] + + # If using the example dataset, load the data from the example dataset + else: + subject, session = 1, 1 + + # Loading Data + eegnb_data_path = os.path.join(os.path.expanduser('~/'),'.eegnb', 'data') + experiment_data_path = os.path.join(eegnb_data_path, experiment, 
'eegnb_examples') + + # If dataset hasn't been downloaded yet, download it + if not os.path.isdir(experiment_data_path): + fetch_dataset(data_dir=eegnb_data_path, experiment=experiment, site='eegnb_examples') + + raw = load_data(1,1, + experiment=experiment, site='eegnb_examples', device_name=device_name, + data_dir = eegnb_data_path) + + # Filtering the data under a certain frequency range + raw.filter(1,30, method='iir') + + # Visualising the power spectrum + fig = raw.plot_psd(fmin=1, fmax=30, show=False) + + # Saving the figure so it can be accessed by the pdf creation. Automatically deleted when added to the pdf. + plt.tight_layout() + plt.savefig("power_spectrum.png") + plt.show(block=False) + plt.pause(10) + plt.close() + + # Epoching + # Create an array containing the timestamps and type of each stimulus (i.e. face or house) + events = find_events(raw) + + # Create an MNE Epochs object representing all the epochs around stimulus presentation + epochs = Epochs(raw, events=events, event_id=event_id, + tmin=tmin, tmax=tmax, baseline=baseline, + reject=reject, preload=preload, + verbose=verbose, picks=picks) + + print('sample drop %: ', (1 - len(epochs.events)/len(events)) * 100) + print(len(epochs.events), 'events found') + print(epochs) + + experimental_parameters = {"eeg_device": device_name, "experiment_name": experiment, "subject_id": subject, "session_nb": session, "example_flag": example, "drop_percent": (1 - len(epochs.events)/len(events)) * 100, "epochs_chosen": len(epochs.events)} + + return epochs, experimental_parameters + + +def make_erp_plot(epochs, experimental_parameters:Dict, conditions=OrderedDict(House=[1],Face=[2]), ci=97.5, n_boot=1000, title='', + diff_waveform=None, #(1, 2)) + channel_order=[1,0,2,3]): + """ + Plots the ERP for the specified conditions. 
+ + Parameters + ---------- + epochs : MNE Epochs object + conditions : OrderedDict holding the conditions to plot + ci: confidence interval + n_boot: number of bootstrap samples + title: title of the plot + diff_waveform: tuple of two integers indicating the channels to compare + channel_order: list of integers indicating the order of the channels to plot + """ + + fig, ax = plot_conditions(epochs, conditions=conditions, + ci=97.5, n_boot=1000, title='', + diff_waveform=None, #(1, 2)) + channel_order=[1,0,2,3]) # reordering of epochs.ch_names according to [[0,2],[1,3]] of subplot axes + + # Autoscaling the y axis to a tight fit to the ERP + for i in [0,1,2,3]: ax[i].autoscale(tight=True) + + # Saving the figure so it can be accessed by the pdf creation. Automatically deleted when added to the pdf. + # Makes sure that the axis labels are not cut out + plt.tight_layout() + plt.savefig("erp_plot.png") + plt.show(block=False) + plt.pause(10) + plt.close() + + # Creating the pdf, needs to be discussed whether we want to call it here or seperately. 
+ create_pdf(experimental_parameters) + +def create_pdf(experimental_parameters:Dict): + """Creates analysis report using the power spectrum and ERP plots that are saved in the directory""" + + # Unpack the experimental parameters + eegdevice, experiment, subject, session, example, drop_percentage, epochs_chosen = experimental_parameters.values() + + # Getting the directory where the report should be saved + save_dir = get_save_directory(experiment=experiment, eegdevice=eegdevice, subject=subject, session=session, example=example, label="analysis") + + #get whole filepath + filepath = os.path.join(save_dir, 'analysis_report_{}.html'.format(datetime.now().strftime("%d-%m-%Y_%H-%M-%S"))) + + # Get the report + report_html = get_html(experimental_parameters) + + # Save html file + with open(filepath, 'w') as f: + f.write(report_html) + + # Informing the user that the report has been saved + print('Analysis report saved to {}\n'.format(filepath)) + print("Open the report by clicking the following link: {}{}".format("file:///", filepath)) + +def get_save_directory(experiment, eegdevice, subject, session, example, label): + """ Returns save directory as a String for the analysis report """ + + if not example: + site='local' + else: + site='eegnb_examples' + + # Getting the directory where the analysis report should be saved + save_path = os.path.join(os.path.expanduser("~/"),'.eegnb', label) + save_path = os.path.join(save_path, experiment, site, eegdevice, "subject{}".format(subject), "session{}".format(session)) + + # Creating the directory if it doesn't exist + if not os.path.isdir(save_path): + os.makedirs(save_path) + + return save_path + +def create_analysis_report_(experiment, eegdevice, subject=None, session=None, data_path=None, bluemuse_file_fix=False): + """ Interface with the erp plot function, basically cli type instructions """ + + # Prompt user to enter options and then take inputs and do the necessary + epochs, experimental_parameters = 
load_eeg_data(experiment=experiment, subject=subject, session=session, device_name=eegdevice, example=False, fnames=data_path) + make_erp_plot(epochs, experimental_parameters) + +def example_analysis_report(): + """ Example of how to use the analysis report function """ + + experiment = ["visual-N170", "visual-P300"] + experiment_choice = experiment[int(input("Choose an experiment: {} 0 or 1\n".format(experiment)))] + + if experiment_choice == "visual-N170": + epochs, experimental_parameters = load_eeg_data(experiment_choice, example=True) + make_erp_plot(epochs, experimental_parameters) + else: + epochs, experimental_parameters = load_eeg_data('visual-P300', device_name='muse2016', event_id={'Non-Target': 1, 'Target': 2}, example=True) + make_erp_plot(epochs, experimental_parameters, conditions=OrderedDict(NonTarget=[1],Target=[2])) \ No newline at end of file diff --git a/eegnb/analysis/report.html b/eegnb/analysis/report.html new file mode 100644 index 00000000..b0f91f77 --- /dev/null +++ b/eegnb/analysis/report.html @@ -0,0 +1,60 @@ + + + + + + + Analysis Report + + + + +
+ Description + Raw Epoch + Stimulus Response + About +
+ +
+

+ Analysis Report +

+

+ Experiment Name:
+ Subject Id:
+ Session Number:
+ EEG Headset:
+ Drop Percentage:
+

+ +

+ This is an analysis report for the experiment.
+ For more information about the experiment, please visit the documentation. +

+
+ + +
+

+ Raw Epoch +

+

+ The raw epoch is shown below. The raw epoch is the data that is recorded from the EEG headset. The raw epoch is then processed to remove noise and artifacts. +

+ Raw Epoch +
+ + +
+

+ Stimulus Response +

+

+ The stimulus response is shown below. The stimulus response is the data that is recorded from the EEG headset after the raw epoch has been processed. The stimulus response is then used to calculate the power spectrum. +

+ Stimulus Response +
+ + + diff --git a/eegnb/analysis/styling.css b/eegnb/analysis/styling.css new file mode 100644 index 00000000..4b000843 --- /dev/null +++ b/eegnb/analysis/styling.css @@ -0,0 +1,34 @@ +/* Add a black background color to the top navigation */ +.topnav { + background-color: #333; + overflow: hidden; + } + + /* Style the links inside the navigation bar */ + .topnav a { + float: left; + color: #f2f2f2; + text-align: center; + padding: 14px 16px; + text-decoration: none; + font-size: 17px; + } + + /* Change the color of links on hover */ + .topnav a:hover { + background-color: #ddd; + color: black; + } + + /* Add a color to the active/current link */ + .topnav a.active { + background-color: #04AA6D; + color: white; + } + + /* Centre the images */ + img { + display: block; + margin-left: auto; + margin-right: auto; + } \ No newline at end of file diff --git a/eegnb/analysis/utils.py b/eegnb/analysis/utils.py index bd67baa9..d0ca7c58 100644 --- a/eegnb/analysis/utils.py +++ b/eegnb/analysis/utils.py @@ -5,11 +5,15 @@ from collections import OrderedDict from glob import glob from typing import Union, List, Dict +from collections import Iterable from time import sleep, time from numpy.core.fromnumeric import std +import keyboard +import os import pandas as pd import numpy as np +import matplotlib.pyplot as plt import seaborn as sns from mne import create_info, concatenate_raws from mne.io import RawArray @@ -22,8 +26,6 @@ from eegnb.devices.eeg import EEG from eegnb.devices.utils import EEG_INDICES, SAMPLE_FREQS - - # this should probably not be done here sns.set_context("talk") sns.set_style("white") @@ -32,6 +34,22 @@ logger = logging.getLogger(__name__) +# Empirically determined lower and upper bounds of +# acceptable temporal standard deviations +# for different EEG devices tested by us +openbci_devices = ['ganglion', 'ganglion_wifi', 'cyton', 'cyton_wifi', 'cyton_daisy_wifi'] +muse_devices = ['muse' + model + sfx for model in ['2016', '2', 'S'] for sfx in ['', '_bfn', 
'_bfb']] +neurosity_devices = ['notion1', 'notion2', 'crown'] +gtec_devices = ['unicorn'] +alltesteddevices = openbci_devices + muse_devices + neurosity_devices + gtec_devices +thres_stds = {} +for device in alltesteddevices: + if device in openbci_devices: thres_stds[device] = [1,9] + elif device in muse_devices: thres_stds[device] = [1,18] + elif device in neurosity_devices: thres_stds[device] = [1,15] + elif device in gtec_devices: thres_stds[device] = [1,15] + + def load_csv_as_raw( fnames: List[str], sfreq: float, @@ -68,6 +86,7 @@ def load_csv_as_raw( n_aux = 0 raw = [] + for fn in fnames: # Read the file data = pd.read_csv(fn) @@ -91,7 +110,7 @@ def load_csv_as_raw( # create MNE object info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=sfreq, verbose=1) raw.append(RawArray(data=data, info=info, verbose=verbose)) - + raws = concatenate_raws(raw, verbose=verbose) montage = make_standard_montage("standard_1005") raws.set_montage(montage) @@ -445,25 +464,14 @@ def check_report(eeg: EEG, n_times: int=60, pause_time=5, thres_std_low=None, th # If no upper and lower std thresholds set in function call, # set thresholds based on the following per-device name defaults - if thres_std_high is None: - if eeg.device_name in ["ganglion", "ganglion_wifi", "cyton", - "cyton_wifi", "cyton_daisy", "cyton_daisy_wifi"]: - thres_std_high = 9 - elif eeg.device_name in ["notion1", "notion2", "crown"]: - thres_std_high = 15 - elif 'muse' in eeg.device_name: - thres_std_high = 18 - + edn = eeg.device_name + flag = False if thres_std_low is None: - - if 'muse' in eeg.device_name: - thres_std_low = 1 - - elif eeg.device_name in ["ganglion", "ganglion_wifi", "cyton", - "cyton_wifi", "cyton_daisy", "cyton_daisy_wifi", - "notion1", "notion2", "crown"]: - thres_std_low = 1 - + if edn in thres_stds.keys(): + thres_std_low = thres_stds[edn][0] + if thres_std_high is None: + if edn in thres_stds.keys(): + thres_std_high = thres_stds[edn][1] print("\n\nRunning signal quality 
check...") print(f"Accepting threshold stdev between: {thres_std_low} - {thres_std_high}") @@ -493,7 +501,6 @@ def check_report(eeg: EEG, n_times: int=60, pause_time=5, thres_std_low=None, th print("\nSignal quality:") print(indicators) - bad_channels = [k for k, v in std_series.iteritems() if v < thres_std_low or v > thres_std_high ] if bad_channels: print(f"Bad channels: {', '.join(bad_channels)}") @@ -510,26 +517,26 @@ def check_report(eeg: EEG, n_times: int=60, pause_time=5, thres_std_low=None, th if (loop_index+1) % n_inarow == 0: print(f"\n\nLooks like you still have {len(bad_channels)} bad channels after {loop_index+1} tries\n") - prompt_start = time() - continue_sigqual = input("\nChecks will resume in %s seconds...Press 'c' (and ENTER key) if you want to stop adjusting for better quality.\n" %pause_time) - while time() < prompt_start + 5: - if continue_sigqual == 'c': - break - if continue_sigqual == 'c': - print("\nStopping signal quality checks!") - break - - sleep(pause_time) - - - + prompt_time = time() + print(f"Starting next cycle in 5 seconds, press C and enter to cancel") + while time() < prompt_time + 5: + if keyboard.is_pressed('c'): + print("\nStopping signal quality checks!") + flag = True + break + if flag: + break + def fix_musemissinglines(orig_f,new_f=''): - if new_f == '': new_f = orig_f.replace('.csv', '_fml.csv') + #if new_f == '': new_f = orig_f.replace('.csv', '_fml.csv') + + # Overwriting + new_f = orig_f print('writing fixed file to %s' %new_f) - # Read oriignal file + # Read original file F = open(orig_f, 'r') Ls = F.readlines() diff --git a/eegnb/cli/__main__.py b/eegnb/cli/__main__.py index 6e57fbce..b924fc50 100644 --- a/eegnb/cli/__main__.py +++ b/eegnb/cli/__main__.py @@ -6,10 +6,12 @@ import shutil from eegnb.datasets.datasets import zip_data_folders -from .introprompt import intro_prompt +from .introprompt import intro_prompt, analysis_intro_prompt from .utils import run_experiment +from eegnb import generate_save_fn from 
eegnb.devices.eeg import EEG from eegnb.analysis.utils import check_report +from eegnb.analysis.pipelines import load_eeg_data, make_erp_plot, create_analysis_report_, example_analysis_report @click.group(name="eegnb") @@ -35,6 +37,7 @@ def runexp( outfname: str = None, prompt: bool = False, dosigqualcheck = True, + generatereport = True ): """ Run experiment. @@ -59,6 +62,8 @@ def runexp( if prompt: eeg, experiment, recdur, outfname = intro_prompt() else: + # Random values for outfile for now + outfname = generate_save_fn(eegdevice, experiment,7, 7) if eegdevice == "ganglion": # if the ganglion is chosen a MAC address should also be provided eeg = EEG(device=eegdevice, mac_addr=macaddr) @@ -73,16 +78,55 @@ def askforsigqualcheck(): "Sorry, didn't recognize answer. " askforsigqualcheck() + def askforreportcheck(): + generatereport = input("\n\nGenerate Report? (Y/n): \n").lower() != "n" + if dosigqualcheck: askforsigqualcheck() - + + if generatereport: + askforreportcheck() run_experiment(experiment, eeg, recdur, outfname) print(f"\n\n\nExperiment complete! 
Recorded data is saved @ {outfname}") + if generatereport: + # Error of filenames being multiple etc, needs to be handled + create_analysis_report(experiment=experiment, device_name=eegdevice, fnames=outfname) +@main.command() +@click.option("-ex", "--experiment", help="Experiment to run") +@click.option("-ed", "--eegdevice", help="EEG device to use") +@click.option("-sub", "--subject", help="Subject ID") +@click.option("-sess", "--session", help="Session number") +@click.option("-fp", "--filepath", help="Filepath to save data") +@click.option( + "-ip", "--prompt", help="Use interactive prompt to ask for parameters", is_flag=True +) +def create_analysis_report( + experiment: str, + eegdevice: str = None, + subject: str = None, + session: str = None, + filepath:str = None, + prompt: bool = False, +): + """ + Create analysis report of recorded data + """ + + if prompt: + example = input("Do you want to load an example experiment? (y/n)\n") + print() + if example == 'y': + example_analysis_report() + return + else: + experiment, eegdevice, subject, session, filepath = analysis_intro_prompt() + create_analysis_report_(experiment, eegdevice, subject, session, filepath) + @main.command() @click.option("-ed", "--eegdevice", help="EEG device to use", required=True) @@ -107,6 +151,8 @@ def checksigqual(eegdevice: str): # valuess in the function definition ] + + @main.command() @click.option("-ex", "--experiment", help="Experiment to zip", required=False) @click.option( diff --git a/eegnb/cli/introprompt.py b/eegnb/cli/introprompt.py index 6eaf90c9..647651bb 100644 --- a/eegnb/cli/introprompt.py +++ b/eegnb/cli/introprompt.py @@ -25,7 +25,7 @@ def device_prompt() -> EEG: "ganglion": "OpenBCI Ganglion", "cyton": "OpenBCI Cyton", "cyton_daisy": "OpenBCI Cyton + Daisy", - "unicord": "G.Tec Unicorn", + "unicorn": "G.Tec Unicorn", "brainbit": "BrainBit", "notion1": "Notion 1", "notion2": "Notion 2", @@ -96,7 +96,7 @@ def exp_prompt(runorzip:str='run') -> str: ) ) - exp_idx = 
int(input("\nEnter Experiment Selection: ")) + exp_idx = int(input("\nEnter Experiment Selection: \n")) exp_selection = list(experiments.keys())[exp_idx] print(f"Selected experiment: {exp_selection} \n") @@ -157,6 +157,63 @@ def intro_prompt() -> Tuple[EEG, str, int, str]: return eeg_device, exp_selection, duration, str(save_fn) +def analysis_device_prompt(): + + boards = { + "none": "None", + "muse2016": "Muse (2016)", + "muse2": "Muse 2", + "museS": "Muse S", + "muse2016_bfn": "Muse 2016 - brainflow, native bluetooth", + "muse2016_bfb": "Muse 2016 - brainflow, BLED bluetooth dongle", + "muse2_bfn": "Muse 2 - brainflow, native bluetooth", + "muse2_bfb": "Muse 2 - brainflow, BLED bluetooth dongle", + "museS_bfn": "Muse S - brainflow, native bluetooth", + "museS_bfb": "Muse S - brainflow, BLED bluetooth dongle", + "ganglion": "OpenBCI Ganglion", + "cyton": "OpenBCI Cyton", + "cyton_daisy": "OpenBCI Cyton + Daisy", + "unicorn": "G.Tec Unicorn", + "brainbit": "BrainBit", + "notion1": "Notion 1", + "notion2": "Notion 2", + "crown": "Crown", + "synthetic": "Synthetic", + "freeeeg32": "FreeEEG32", + } + + print("Please enter the integer value corresponding to your EEG device: \n") + print("\n".join(f"[{i:2}] {board}" for i, board in enumerate(boards.values()))) + + board_idx = int(input("\nEnter Board Selection: \n")) + + # Board_codes are the actual names to be passed to the EEG class + board_code = list(boards.keys())[board_idx] + return board_code + +def analysis_intro_prompt(): + + # check if user has filepath + print("Welcome to NeurotechX EEG Notebooks\n") + print("Do you have a filepath to a .csv file you would like to analyze? \n") + print("[1] Yes \n") + print("[0] No \n") + file_idx = int(input("Enter selection: \n")) + if file_idx == 1: + print("Please enter the filepath to the .csv file you would like to analyze. 
\n") + filepath = input("Enter filepath: \n") + subject, session = None, None + else: + subject = int(input("Enter subject ID#: \n")) + session = int(input("Enter session #: \n")) + filepath = None + + eegdevice = analysis_device_prompt() + experiment = exp_prompt() + + return experiment, eegdevice, subject, session, filepath + + def intro_prompt_zip() -> Tuple[str,str]: """This function handles the user prompts for inputting information for zipping their function.""" diff --git a/requirements.txt b/requirements.txt index 2ad031b2..c85486eb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,6 +18,9 @@ pyserial>=3.5 h5py>=3.1.0 pytest-shutil pyo>=1.0.3; platform_system == "Linux" +keyboard==0.13.5 +airium>=0.1.0 +attrdict>=2.0.1 # This might try to build from source on linux (since there are no wheels for Linux on PyPI) . # You can pass `--find-links=https://extras.wxpython.org/wxPython4/extras/linux/gtk3/ubuntu-20.04/` your `pip install` to use the prebuilt wheels at the link. diff --git a/testing.py b/testing.py new file mode 100644 index 00000000..fff46054 --- /dev/null +++ b/testing.py @@ -0,0 +1,12 @@ + + +import matplotlib.pyplot as plt +import numpy as np +from eegnb.analysis.pipelines import load_eeg_data, make_erp_plot +from eegnb.analysis.utils import fix_musemissinglines + +file_path = r"C:\Users\Parv\.eegnb\data\visual-N170\local\muse2\subject0001\session004\recording_2022-08-15-19.09.37.csv" + +raw, epochs = load_eeg_data(experiment='visual-N170', subject=1, session=3, device_name='muse2', example=False) +make_erp_plot(epochs) +