diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
new file mode 100644
index 000000000..e21712e4b
--- /dev/null
+++ b/.github/workflows/codespell.yml
@@ -0,0 +1,25 @@
+# Codespell configuration is within pyproject.toml
+---
+name: Codespell
+
+on:
+  push:
+    branches: [master]
+  pull_request:
+    branches: [master]
+
+permissions:
+  contents: read
+
+jobs:
+  codespell:
+    name: Check for spelling errors
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Annotate locations with typos
+        uses: codespell-project/codespell-problem-matcher@v1
+      - name: Codespell
+        uses: codespell-project/actions-codespell@v2
diff --git a/brainscore_vision/benchmarks/ferguson2024/benchmark.py b/brainscore_vision/benchmarks/ferguson2024/benchmark.py
index e14713edc..368b9bc1b 100644
--- a/brainscore_vision/benchmarks/ferguson2024/benchmark.py
+++ b/brainscore_vision/benchmarks/ferguson2024/benchmark.py
@@ -123,7 +123,7 @@ def get_integral_data(assembly: BehavioralAssembly, experiment: str, precompute_
     :param assembly: the human behavioral data to look at
     :param experiment: str, the prefix of the experiment subtype, ex: "tilted_line" or "lle"
     :param precompute_boostrap: True if using precomputed integral errors, else manually compute (Slow!)
-    :return: tuple of calculated human integral and its boostrapped (precomputed) error
+    :return: tuple of calculated human integral and its bootstrapped (precomputed) error
     """
     lapse_rate = LAPSE_RATES[experiment]
     blue_data = generate_summary_df(assembly, lapse_rate, "first")
diff --git a/brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py b/brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py
index c28f390c0..076401fff 100644
--- a/brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py
+++ b/brainscore_vision/benchmarks/ferguson2024/helpers/helpers.py
@@ -130,7 +130,7 @@ def calculate_accuracy(df: BehavioralAssembly, lapse_rate: float) -> float:
     """
     - Calculates a per-subject lapse rate-corrected accuracy for an assembly.
    - Subject accuracy is averaged over all images with a certain distractor size and repetition coords (i.e. these
-      coords are mixed togather and the accuracy is calculated over this merged assembly).
+      coords are mixed together and the accuracy is calculated over this merged assembly).
     :param df: DataFrame Object that contains experimental data
     :param lapse_rate: a precomputed float defined above that represents avg. subject lapse rate in experiment
@@ -214,7 +214,7 @@ def boostrap_integral(df_blue: DataFrame, df_orange: DataFrame, num_loops: int =
     :param df_blue: DataFrame, the first (blue) block of data (target on a field of distractors)
     :param df_orange: DataFrame, the second (orange) block of data (distractor on a field of targets)
-    :param num_loops: int, number of times the boostrap will run (and thus take the average)
+    :param num_loops: int, number of times the bootstrap will run (and thus take the average)
     :return: Dict of values {bootstrapped_integral, bootstrapped_integral_error)
     """
     num_subjects = len(set(df_blue["participant_id"]))
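The `boostrap_integral` docstring above describes resampling the data `num_loops` times and reporting the spread as the error. A rough sketch of that resampling idea (illustrative numpy only — the benchmark's actual function resamples whole subjects and recomputes the integral on each loop, which this toy `bootstrap_error` does not):

```python
import numpy as np

def bootstrap_error(values: np.ndarray, num_loops: int = 500, seed: int = 0) -> float:
    """Standard deviation of the mean over `num_loops` resamples with replacement."""
    rng = np.random.default_rng(seed)
    estimates = [rng.choice(values, size=len(values), replace=True).mean()
                 for _ in range(num_loops)]
    return float(np.std(estimates))
```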
diff --git a/brainscore_vision/benchmarks/hebart2023/benchmark.py b/brainscore_vision/benchmarks/hebart2023/benchmark.py
index e72f38653..975fb25a1 100644
--- a/brainscore_vision/benchmarks/hebart2023/benchmark.py
+++ b/brainscore_vision/benchmarks/hebart2023/benchmark.py
@@ -62,7 +62,7 @@ def __call__(self, candidate: BrainModel):
 
         # Score the model
         # We chose not to compute error estimates but you could compute them
-        # by spliting the data into five folds and computing the standard deviation.
+        # by splitting the data into five folds and computing the standard deviation.
         correct_choices = choices.values == self._assembly.coords["image_3"].values  # third image is always correct
         raw_score = np.sum(correct_choices) / len(choices['presentation'])
         score = (raw_score - 1 / 3) / (self.ceiling - 1 / 3)
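The comment above leaves error estimation to the reader; a minimal sketch of the five-fold idea it mentions (a hypothetical helper — `correct` stands in for a boolean array like `correct_choices`, and the contiguous split is a simplification):

```python
import numpy as np

def fold_error_estimate(correct: np.ndarray, num_folds: int = 5) -> float:
    """Standard deviation of per-fold accuracies, as a rough error bar on the raw score."""
    folds = np.array_split(correct.astype(float), num_folds)
    return float(np.std([fold.mean() for fold in folds]))
```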
diff --git a/brainscore_vision/benchmarks/scialom2024/test.py b/brainscore_vision/benchmarks/scialom2024/test.py
index c660b795c..005d96ebc 100644
--- a/brainscore_vision/benchmarks/scialom2024/test.py
+++ b/brainscore_vision/benchmarks/scialom2024/test.py
@@ -99,7 +99,7 @@ def test_dataset_ceiling(self, dataset, expected_ceiling):
         ('segments-59', approx(0.12072, abs=0.001)),
         ('segments-77', approx(0.12996, abs=0.001)),
         ('segments-100', approx(0.11540, abs=0.001)),  # all of the above are AccuracyDistance
-        ('phosphenes-all', approx(0.18057, abs=0.01)),  # alls are ErrorConsistency
+        ('phosphenes-all', approx(0.18057, abs=0.01)),  # all are ErrorConsistency
         ('segments-all', approx(0.15181, abs=0.01)),
     ])
     def test_model_8_degrees(self, dataset, expected_raw_score):
diff --git a/brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/DataHash.m b/brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/DataHash.m
index 6809618df..bd2b2a05c 100644
--- a/brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/DataHash.m
+++ b/brainscore_vision/data/david2004/data_packaging/lib/DataHash_20160618/DataHash.m
@@ -27,7 +27,7 @@
 %   'array': The contents, type and size of the input [Data] are
 %            considered for the creation of the hash. Nested CELLs
 %            and STRUCT arrays are parsed recursively. Empty arrays of
-%            different type reply different hashs.
+%            different type reply different hashes.
 %   'file':  [Data] is treated as file name and the hash is calculated
 %            for the files contents.
 %   'bin':   [Data] is a numerical, LOGICAL or CHAR array. Only the
@@ -110,7 +110,7 @@
 %      Jan Achterhold (author 267816) suggested to consider Java objects.
 % 016: 01-Feb-2015 20:53, Java heap space exhausted for large files.
 %      Now files are process in chunks to save memory.
-% 017: 15-Feb-2015 19:40, Collsions: Same hash for different data.
+% 017: 15-Feb-2015 19:40, Collisions: Same hash for different data.
 %      Examples: zeros(1,1) and zeros(1,1,0)
 %                complex(0) and zeros(1,1,0,0)
 %      Now the number of dimensions is included, to avoid this.
@@ -132,7 +132,7 @@
 % OPEN BUGS:
 % Nath wrote:
-%   function handle refering to struct containing the function will create
+%   function handle referring to struct containing the function will create
 %   infinite loop. Is there any workaround ?
 % Example:
 %   d= dynamicprops();
@@ -400,7 +400,7 @@
       DataBin = uint8(DataObj);
       % Matt Raum had this excellent idea - unfortunately this function is
-      % undocumented and might not be supported in te future:
+      % undocumented and might not be supported in the future:
       % DataBin = getByteStreamFromArray(DataObj);
    catch
       % Or perhaps this is better:
diff --git a/brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.c b/brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.c
index 29f5a1cd3..83c27d05b 100644
--- a/brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.c
+++ b/brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5.c
@@ -142,7 +142,7 @@
 #  define MWSIZE_MAX MAX_int32_T
 #endif
 
-// Directive for endianess:
+// Directive for endianness:
 #if !defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)
 #  define _LITTLE_ENDIAN
 #endif
@@ -645,7 +645,7 @@ void ArrayCore(MD5_CTX *context, const mxArray *V)
 // Core function to process structs: ===========================================
 void StructCore(MD5_CTX *context, const mxArray *V, mwSize nElem)
 {
-  // Sort field names alphabetically to avoid effects of teh order of fields.
+  // Sort field names alphabetically to avoid effects of the order of fields.
   const char *FieldName;
   int nField, iField, FieldIndex;
   mwSize iElem;
diff --git a/brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5_helper.m b/brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5_helper.m
index 1b7c6c7fc..ca1912757 100644
--- a/brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5_helper.m
+++ b/brainscore_vision/data/david2004/data_packaging/lib/GetMD5/GetMD5_helper.m
@@ -9,7 +9,7 @@
 % V: Array of any type, which is not handled in the C-Mex.
 % OUTPUT:
 %   S: Array or struct containing elementary types only.
-%      The implementation migth be changed by the user!
+%      The implementation might be changed by the user!
 %      Default:
 %      - Sparse arrays: Struct containing the indices and values.
 %      - Function handle: The reply of FUNCTIONS and the size and date of the
@@ -76,7 +76,7 @@
    S = uint8(V);
    % Matt Raum had this excellent idea - unfortunately this function is
-   % undocumented and might not be supported in te future:
+   % undocumented and might not be supported in the future:
    % S = getByteStreamFromArray(DataObj);
 catch ME
    % Or perhaps this is better:
diff --git a/brainscore_vision/data/david2004/data_packaging/lib/GetMD5/InstallMex.m b/brainscore_vision/data/david2004/data_packaging/lib/GetMD5/InstallMex.m
index 5a4e57db0..f00a2887e 100644
--- a/brainscore_vision/data/david2004/data_packaging/lib/GetMD5/InstallMex.m
+++ b/brainscore_vision/data/david2004/data_packaging/lib/GetMD5/InstallMex.m
@@ -193,7 +193,7 @@
    Flags = cat(2, Flags, {'-compatibleArrayDims'});
 end
 
-% Define endianess directive:
+% Define endianness directive:
 if isLittleEndian
    Flags = cat(2, Flags, {'-D_LITTLE_ENDIAN'});
 else  % Does Matlab run on a big endian machine currently?!
@@ -270,7 +270,7 @@
 % Run the unit-test: -----------------------------------------------------------
 if ~isempty(UnitTestFcn) && compiled
    fprintf('\n\n== Post processing:\n');
-   [dum, UnitTestName] = fileparts(UnitTestFcn);  %#ok  % Remove extension
+   [dummy, UnitTestName] = fileparts(UnitTestFcn);  %#ok  % Remove extension
    if ~isempty(which(UnitTestName))
       fprintf('  Call: %s\n\n', UnitTestName);
       feval(UnitTestName);
diff --git a/brainscore_vision/data/david2004/data_packaging/lib/glob/glob.m b/brainscore_vision/data/david2004/data_packaging/lib/glob/glob.m
index 0398d3456..793c909a3 100644
--- a/brainscore_vision/data/david2004/data_packaging/lib/glob/glob.m
+++ b/brainscore_vision/data/david2004/data_packaging/lib/glob/glob.m
@@ -42,7 +42,7 @@
 %           * GLOB returns only directory names when a trailing file
 %             separator is specified.
 %           * On Windows GLOB is not case sensitive, but it returns
-%             matching names exactely in the case as they are defined on
+%             matching names exactly in the case as they are defined on
 %             the filesystem. Case of host and sharename of a UNC path and
 %             case of drive letters will be returned as specified in
 %             FILESPEC.
@@ -319,7 +319,7 @@
 % ------------------------------------------------------------------------
 function L = ls_regexp(regexp_fhandle, path, varargin)
 % List files that match PATH/r1/r2/r3/... where PATH is a string without
-% any wildcards and r1..rn are regular expresions that contain the parts of
+% any wildcards and r1..rn are regular expressions that contain the parts of
 % a filespec between the file separators.
 % L is a cell array with matching file or directory names.
 % REGEXP_FHANDLE contain a file handle to REGEXP or REGEXPI depending
@@ -427,11 +427,11 @@
 
 % return matching names
 if ~isempty(varargin{end})
-    % determing matching names ignoring trailing '/'
+    % determine matching names ignoring trailing '/'
     L_no_trailing_fsep = regexprep(L, '/$', '');
     I = regexp_fhandle(L_no_trailing_fsep, ['^' expression '$']);
 else
-    % determing matching names including trailing '/'
+    % determine matching names including trailing '/'
     I = regexp_fhandle(L, ['^' expression '$']);
 end
 I = cellfun('isempty', I);
diff --git a/brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py b/brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py
index c00e224a2..2d1b6f120 100644
--- a/brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py
+++ b/brainscore_vision/data/ferguson2024/data_packaging/data_packaging.py
@@ -68,7 +68,7 @@ def create_assembly_and_upload(name: str, experiment: str, upload_to_s3=True) ->
     :param name: the name of the experiment, usually Ferguson2024
     :param experiment: the dataset, i.e. color
     :param upload_to_s3: True if you want to upload this to BrainIO on S3
-    :return: the assmebly
+    :return: the assembly
     """
     all_subjects = pd.read_csv(f'csvs/{experiment}_sanity_processed.csv')
diff --git a/brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py b/brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py
index c86db70e3..1efd8ef57 100644
--- a/brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py
+++ b/brainscore_vision/data/geirhos2021/data_packaging/high-pass/high-pass_data_assembly.py
@@ -15,7 +15,7 @@
     - 16 image categories
    - for the this benchmark (high-pass) subjects saw the EXACT image indicated with the variable/column name
       image_lookup_id, and not a variation of it (no distortions, editing, etc). Condition is unclear based on
-      documentation fromm source repo, needs follow up.
+      documentation from source repo, needs follow up.
 '''
 
 # initial csv to dataframe processing:
Condition is unclear based on - documentation fromm source repo, needs follow up. + documentation from source repo, needs follow up. ''' # initial csv to dataframe processing: diff --git a/brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py b/brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py index 1242b800a..d1afd4225 100644 --- a/brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py +++ b/brainscore_vision/data/geirhos2021/data_packaging/low-pass/low-pass_data_assembly.py @@ -15,7 +15,7 @@ - 16 image categories - for the this benchmark (low-pass) subjects saw the EXACT image indicated with the variable/column name image_lookup_id, and not a variation of it (no distortions, editing, etc). Condition is unclear based on - documentation fromm source repo, needs follow up. + documentation from source repo, needs follow up. ''' # initial csv to dataframe processing: diff --git a/brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py b/brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py index b9c3915cb..2fb511012 100644 --- a/brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py +++ b/brainscore_vision/data/geirhos2021/data_packaging/phase-scrambling/phase-scrambling_data_assembly.py @@ -15,7 +15,7 @@ - 16 image categories - for the this benchmark (phase-scrambling) subjects saw the EXACT image indicated with the variable/column name image_lookup_id, and not a variation of it (no distortions, editing, etc). Condition is again unclear based on - documentation fromm source repo, needs follow up. + documentation from source repo, needs follow up. 
diff --git a/brainscore_vision/data/igustibagus2024/investigation_consistency.ipynb b/brainscore_vision/data/igustibagus2024/investigation_consistency.ipynb
index cb080b3b4..49eec0521 100644
--- a/brainscore_vision/data/igustibagus2024/investigation_consistency.ipynb
+++ b/brainscore_vision/data/igustibagus2024/investigation_consistency.ipynb
@@ -95,7 +95,7 @@
     "    assembly = assembly.where(assembly.object_style != 'skeleton', drop=True)\n",
     "    assembly = assembly.where(assembly.object_style != 'nan', drop=True)\n",
     "\n",
-    "    ## this is temporary because i havent pushed the new version of the assembly online:\n",
+    "    ## this is temporary because I haven't pushed the new version of the assembly online:\n",
     "    csv_path = './merged_assembly/merged_stimulus_set.csv'\n",
     "    dir_path = './images'\n",
     "    stimulus_set = brainio.stimuli.StimulusSet.from_files(csv_path, dir_path)\n",
diff --git a/brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly.ipynb b/brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly.ipynb
index a930e481c..91b673efc 100644
--- a/brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly.ipynb
+++ b/brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly.ipynb
@@ -173,7 +173,7 @@
    }
   ],
   "source": [
-   "# now check if whithin oleo assembly images have always the same number of repetitions\n",
+   "# now check if within oleo assembly images have always the same number of repetitions\n",
    "from collections import Counter\n",
    "\n",
    "repetition_values = assembly_oleo.repetition.values  # Replace with your actual array\n",
diff --git a/brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly_and_stim.py b/brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly_and_stim.py
index 3a1878013..5d2e2e00f 100644
--- a/brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly_and_stim.py
+++ b/brainscore_vision/data/igustibagus2024/merged_assembly/create_merged_assembly_and_stim.py
@@ -146,7 +146,7 @@
 
 import pdb; pdb.set_trace()
 
-# upload assmebly to S3
+# upload assembly to S3
 package_data_assembly('brainio_brainscore', merged_assembly, assembly_identifier=merged_assembly.name, stimulus_set_identifier=stimuli.name,assembly_class_name="NeuronRecordingAssembly", bucket_name="brainio-brainscore")
\ No newline at end of file
diff --git a/brainscore_vision/data/igustibagus2024/merged_assembly/helpers_background_id.py b/brainscore_vision/data/igustibagus2024/merged_assembly/helpers_background_id.py
index cf80a886f..067a867fa 100644
--- a/brainscore_vision/data/igustibagus2024/merged_assembly/helpers_background_id.py
+++ b/brainscore_vision/data/igustibagus2024/merged_assembly/helpers_background_id.py
@@ -222,14 +222,14 @@ def create_background_ids(hvm_data, rest_data, non_silhouette_data):
 def find_matching_background(oods_category, hvm_category):
     '''
     hvm and hvm-like images share the same background. To identify similar backgrounds images are compared pixel-wise to each over.
-    Images that share the most overlapp are then labeled with the same background id as the respective hvm-image.
+    Images that share the most overlap are then labeled with the same background id as the respective hvm-image.
 
     Arguments:
         oods_category (NeuronRecordingAssembly): all images from one single hvm-like domain without background id,
         hvm_category (NeuronRecordingAssembly): hvm images with background id
 
     Returns:
-        background_ids: list of matching background ids for the single hvm-like domian
+        background_ids: list of matching background ids for the single hvm-like domain
     '''
     background_ids = []
     # Find the respective background id from hvm images for each OOD image
@@ -262,7 +262,7 @@ def load_silhouette_data(data):
     Separating domain-transfer data into hvm, hvm-like (silhouette) and rest (non-silhouette) data. This separation
     is needed to give each hvm-like image the same background number as its respective hvm version (images are
     sharing the same background).
 
-    Arguements:
+    Arguments:
         data: full data that is going to be split into hvm, hvm-like (silhouette) and rest (non-silhouette) data
 
     Returns:
@@ -409,7 +409,7 @@ def reduce_data_num_images(data_complete, number_images):
 def get_final_traning_data(complete_training_data, num_images_training, num_neurons):
     '''
-    Draws final traning images and neurons for one split.
+    Draws final training images and neurons for one split.
 
     Arguments:
         complete_training_data (dict with NeuronRecordingAssembly): keys: domain names, values: complete training
             data pool for one split,
@@ -503,7 +503,7 @@ def add_accuracies_to_split_df(final_test_data_dictionary, decoder, split_datafr
         num_neurons: number of training neurons,
         num_training_images: number of training images
 
-    Retruns:
+    Returns:
         split_dataframe (dict): keys: domain names, values: dataframe with columns: #Neurons, #Images training, Accuracy test data
     '''
     # Get and store the test accuracy for each crossdomain
@@ -568,7 +568,7 @@ def get_classifier_score_2AFC(classifier, data):
 #################################################
 #################################################
 #################################################
-# Brain model speficic functions
+# Brain model specific functions
 #################################################
@@ -678,7 +678,7 @@ def save_split_dataframes(split_crossdomain_dataframes, crossdomain_dataframes,
 def save_split_averaged_dataframes(crossdomain_dataframes, neurons_array, images_array, brain_model_name):
     '''
-    Saves dataframe with perfromance averaged over multiple splits for each domain.
+    Saves dataframe with performance averaged over multiple splits for each domain.
 
     Arguments:
         crossdomain_dataframes (dict): keys: domain name, values: dataframes with performance for each
             #Neurons x #Images combination stored over multiple splits
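The `find_matching_background` docstring above matches each hvm-like image to the hvm image whose pixels overlap it most. A minimal sketch of that matching step (hypothetical helper; the sum-of-absolute-differences criterion and all names here are illustrative, not the file's actual implementation):

```python
import numpy as np

def match_background(candidate: np.ndarray, hvm_images: list, hvm_background_ids: list):
    """Return the background id of the hvm image most similar to `candidate`."""
    # smallest pixel-wise difference ~= largest overlap
    diffs = [np.abs(candidate - hvm).sum() for hvm in hvm_images]
    return hvm_background_ids[int(np.argmin(diffs))]
```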
diff --git a/brainscore_vision/data/marques2020/data_packaging/marques_stim_common.py b/brainscore_vision/data/marques2020/data_packaging/marques_stim_common.py
index 2775a131a..c3203d57a 100644
--- a/brainscore_vision/data/marques2020/data_packaging/marques_stim_common.py
+++ b/brainscore_vision/data/marques2020/data_packaging/marques_stim_common.py
@@ -205,11 +205,11 @@ def gen_grating_stim(degrees, size_px, stim_name, grat_params, save_dir):
     for i in np.arange(nStim):
         stim_id = np.uint64(grat_params[i, 0] * 10e9 + grat_params[i, 1] * 10e7 + grat_params[i, 3] * 10e5 +
                             grat_params[i, 4] * 10e3 + grat_params[i, 5] * 10e1 + grat_params[i, 6])
-        grat = Grating(width=width, pos=[grat_params[i, 0], grat_params[i, 1]], contrast=grat_params[i, 2],
+        grating = Grating(width=width, pos=[grat_params[i, 0], grat_params[i, 1]], contrast=grat_params[i, 2],
                        rad=grat_params[i, 3], sf=grat_params[i, 4], orientation=grat_params[i, 5],
                        phase=grat_params[i, 6], stim_id= stim_id, format_id='{0:012d}', save_dir=save_dir,
                        size_px=[size_px, size_px], type_name=stim_name)
-        image_names[i] = (grat.save_stimulus())
+        image_names[i] = (grating.save_stimulus())
         image_local_file_path[i] = save_dir + os.sep + image_names[i]
         all_y[i] = grat_params[i, 0]
         all_x[i] = grat_params[i, 1]
@@ -264,7 +264,7 @@ def gen_grating_stim_old(degrees, size_px, stim_name, grat_contrast, grat_pos, g
     for s in np.arange(len(grat_sf)):
         for o in np.arange(len(grat_orientation)):
             for p in np.arange(len(grat_phase)):
-                grat = Grating(width=width, pos=[grat_pos[y], grat_pos[x]],
+                grating = Grating(width=width, pos=[grat_pos[y], grat_pos[x]],
                                contrast=grat_contrast[c], rad=grat_rad[r], sf=grat_sf[s],
                                orientation=grat_orientation[o], phase=grat_phase[p],
                                stim_id=np.uint64(y * 10e9 + x * 10e7 + r * 10e5 + s * 10e3 + o * 10e1 + p),
                                format_id='{0:012d}', save_dir=save_dir, size_px=[size_px, size_px],
                                type_name=stim_name)
@@ -272,7 +272,7 @@
-                image_names[i] = (grat.save_stimulus())
+                image_names[i] = (grating.save_stimulus())
                 image_local_file_path[i] = save_dir + os.sep + image_names[i]
                 all_y[i] = grat_pos[y]
                 all_x[i] = grat_pos[x]
diff --git a/brainscore_vision/metrics/accuracy_distance/metric.py b/brainscore_vision/metrics/accuracy_distance/metric.py
index eb47e3bba..70894f14b 100644
--- a/brainscore_vision/metrics/accuracy_distance/metric.py
+++ b/brainscore_vision/metrics/accuracy_distance/metric.py
@@ -49,7 +49,7 @@ def __call__(self, source: BehavioralAssembly, target:
             indexers = {v: cond[i] for i, v in enumerate(variables)}
             subject_cond_assembly = subject_assembly.sel(**indexers)
             source_cond_assembly = source.sel(**indexers)
-            # to accomodate unbalanced designs, skip combinations of
+            # to accommodate unbalanced designs, skip combinations of
             # variables that don't exist in both assemblies
             if len(subject_cond_assembly) and len(source_cond_assembly):
                 cond_scores.append(self.compare_single_subject(
diff --git a/brainscore_vision/metrics/cka/metric.py b/brainscore_vision/metrics/cka/metric.py
index 67ccd2931..59152dcd7 100644
--- a/brainscore_vision/metrics/cka/metric.py
+++ b/brainscore_vision/metrics/cka/metric.py
@@ -64,7 +64,7 @@ def centering(K):
         return np.dot(np.dot(H, K), H)
         # HKH are the same with KH, KH is the first centering, H(KH) do the second time,
-        # results are the sme with one time centering
+        # results are the same with one time centering
         # return np.dot(H, K)  # KH
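The fixed comment in `centering` claims that centering both kernels (HKH with HLH) and centering just one give the same result. A quick standalone check of that claim with random data (made-up shapes, fixed seed):

```python
import numpy as np

n = 6
rng = np.random.default_rng(0)
X, Y = rng.normal(size=(n, 4)), rng.normal(size=(n, 3))
K, L = X @ X.T, Y @ Y.T                     # linear kernels on random features
H = np.eye(n) - np.ones((n, n)) / n         # centering matrix; H @ H == H

double = np.sum((H @ K @ H) * (H @ L @ H))  # center both kernels
single = np.sum(K * (H @ L @ H))            # center only one kernel
assert np.isclose(double, single)           # same HSIC numerator either way
```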
diff --git a/brainscore_vision/model_helpers/activations/temporal/model/pytorch.py b/brainscore_vision/model_helpers/activations/temporal/model/pytorch.py
index 00197874e..17bc8e844 100644
--- a/brainscore_vision/model_helpers/activations/temporal/model/pytorch.py
+++ b/brainscore_vision/model_helpers/activations/temporal/model/pytorch.py
@@ -63,7 +63,7 @@ def get_activations(self, inputs : List[Any], layer_names : List[str]) -> typing
 
     def get_layer(self, layer_name : str):
         # the layer_name is a string that represents the module hierarchy up to the target layer,
-        # seperated by ".", e.g., "module1.submodule2.relu".
+        # separated by ".", e.g., "module1.submodule2.relu".
         if layer_name == 'logits':
             return self._output_layer()
         module = self._model
diff --git a/brainscore_vision/model_helpers/activations/temporal/utils.py b/brainscore_vision/model_helpers/activations/temporal/utils.py
index d1f7a264c..e4644b993 100644
--- a/brainscore_vision/model_helpers/activations/temporal/utils.py
+++ b/brainscore_vision/model_helpers/activations/temporal/utils.py
@@ -118,7 +118,7 @@ def proportional_average_pooling_(arr, size):
     return ret.astype(arr.dtype)
 
 
-# cv2 has the wierd bug of cannot handling too large channel size
+# cv2 has a weird bug where it cannot handle too large a channel size
 def cv2_resize(arr, size, mode, batch_size=4):
     # arr [H, W, C]
     import cv2
@@ -185,7 +185,7 @@ def get_specified_layers(any_model):
     return list(inferencer.layer_activation_format.keys())
 
 
 # switch the inferencer at any level
-# specify key='same' to retrive the same parameter from the original inferencer
+# specify key='same' to retrieve the same parameter from the original inferencer
 def switch_inferencer(any_model, new_inferencer_cls, **kwargs):
     inferencer = get_inferencer(any_model)
     base_model = get_base_model(any_model)
diff --git a/brainscore_vision/model_interface.py b/brainscore_vision/model_interface.py
index 48622282d..cf3bb4e3e 100644
--- a/brainscore_vision/model_interface.py
+++ b/brainscore_vision/model_interface.py
@@ -157,7 +157,7 @@ def start_task(self, task: Task, fitting_stimuli) -> None:
         """
         Instructs the model to begin one of the tasks specified in
         :data:`~brainscore_vision.model_interface.BrainModel.Task`.
-        For all followings call of :meth:`~brainscore_vision.model_interface.BrainModel.look_at`,
+        For all following calls of :meth:`~brainscore_vision.model_interface.BrainModel.look_at`,
         the model returns the expected outputs for the specified task.
 
         :param task: The task the model should perform, and thus which outputs it should return
@@ -176,7 +176,7 @@ def start_recording(self, recording_target: RecordingTarget, time_bins: List[Tup
         """
         Instructs the model to begin recording in a specified
         :data:`~brainscore_vision.model_interface.BrainModel.RecordingTarget` and return the specified `time_bins`.
-        For all followings call of :meth:`~brainscore_vision.model_interface.BrainModel.look_at`, the model returns the
+        For all following calls of :meth:`~brainscore_vision.model_interface.BrainModel.look_at`, the model returns the
         corresponding recordings. These recordings are a :class:`~brainio.assemblies.NeuroidAssembly`
         with exactly 3 dimensions:
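The `start_recording` docstring above defines the recording contract; a minimal usage sketch against that interface (assuming `model` is a `BrainModel` implementation and `stimuli` an already-loaded stimulus set — both placeholders here):

```python
# `model` and `stimuli` are assumed to exist; names are placeholders
model.start_recording('IT', time_bins=[(70, 170)])
recordings = model.look_at(stimuli)
# `recordings` is a NeuroidAssembly with exactly 3 dimensions:
# presentation x neuroid x time_bin
```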
diff --git a/brainscore_vision/models/Res2Net50_26w_4s/model.py b/brainscore_vision/models/Res2Net50_26w_4s/model.py
index 87be1143e..01ce6cefb 100644
--- a/brainscore_vision/models/Res2Net50_26w_4s/model.py
+++ b/brainscore_vision/models/Res2Net50_26w_4s/model.py
@@ -26,7 +26,7 @@ def get_model(name):
 
 
 # get_layers method to tell the code what layers to consider. If you are submitting a custom
-# model, then you will most likley need to change this method's return values.
+# model, then you will most likely need to change this method's return values.
 def get_layers(name):
     """
     This method returns a list of string layer names to consider per model. The benchmarks maps brain regions to
diff --git a/brainscore_vision/models/abl_contrast_evresnet_50_457_0/evnet/modules.py b/brainscore_vision/models/abl_contrast_evresnet_50_457_0/evnet/modules.py
index 8dfa85fc3..4c382934b 100644
--- a/brainscore_vision/models/abl_contrast_evresnet_50_457_0/evnet/modules.py
+++ b/brainscore_vision/models/abl_contrast_evresnet_50_457_0/evnet/modules.py
@@ -38,7 +38,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/abl_light_evresnet_50_457_0/evnet/modules.py b/brainscore_vision/models/abl_light_evresnet_50_457_0/evnet/modules.py
index f49643e16..674bb95df 100644
--- a/brainscore_vision/models/abl_light_evresnet_50_457_0/evnet/modules.py
+++ b/brainscore_vision/models/abl_light_evresnet_50_457_0/evnet/modules.py
@@ -38,7 +38,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/abl_mcells_evresnet_50_457_0/evnet/modules.py b/brainscore_vision/models/abl_mcells_evresnet_50_457_0/evnet/modules.py
index 8dfa85fc3..4c382934b 100644
--- a/brainscore_vision/models/abl_mcells_evresnet_50_457_0/evnet/modules.py
+++ b/brainscore_vision/models/abl_mcells_evresnet_50_457_0/evnet/modules.py
@@ -38,7 +38,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/abl_pcells_evresnet_50_457_0/evnet/modules.py b/brainscore_vision/models/abl_pcells_evresnet_50_457_0/evnet/modules.py
index 33569ee14..bd47edcef 100644
--- a/brainscore_vision/models/abl_pcells_evresnet_50_457_0/evnet/modules.py
+++ b/brainscore_vision/models/abl_pcells_evresnet_50_457_0/evnet/modules.py
@@ -38,7 +38,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/alexnet_simclr_run1/model.py b/brainscore_vision/models/alexnet_simclr_run1/model.py
index 958dcffb2..8e1f88160 100644
--- a/brainscore_vision/models/alexnet_simclr_run1/model.py
+++ b/brainscore_vision/models/alexnet_simclr_run1/model.py
@@ -101,7 +101,7 @@ def _construct_mlp_layers(self, mlp_spec, proj_relu, mlp_coeff, output_bias, nor
         f = list(map(int, mlp_spec.split("-")))
         f[-2] = int(f[-2] * mlp_coeff)
 
-        # constuct each linear block
+        # construct each linear block
         for i in range(len(f) - 2):
             fc_layers = []
diff --git a/brainscore_vision/models/evresnet_50_1/evnet/modules.py b/brainscore_vision/models/evresnet_50_1/evnet/modules.py
index 47ed43d09..e8f6a8cb6 100644
--- a/brainscore_vision/models/evresnet_50_1/evnet/modules.py
+++ b/brainscore_vision/models/evresnet_50_1/evnet/modules.py
@@ -22,7 +22,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/evresnet_50_4/evnet/modules.py b/brainscore_vision/models/evresnet_50_4/evnet/modules.py
index 20533ef44..f472126db 100644
--- a/brainscore_vision/models/evresnet_50_4/evnet/modules.py
+++ b/brainscore_vision/models/evresnet_50_4/evnet/modules.py
@@ -22,7 +22,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/evresnet_50_4_no_mapping/evnet/modules.py b/brainscore_vision/models/evresnet_50_4_no_mapping/evnet/modules.py
index 20533ef44..f472126db 100644
--- a/brainscore_vision/models/evresnet_50_4_no_mapping/evnet/modules.py
+++ b/brainscore_vision/models/evresnet_50_4_no_mapping/evnet/modules.py
@@ -22,7 +22,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/evresnet_50_5/evnet/modules.py b/brainscore_vision/models/evresnet_50_5/evnet/modules.py
index 8dfa85fc3..4c382934b 100644
--- a/brainscore_vision/models/evresnet_50_5/evnet/modules.py
+++ b/brainscore_vision/models/evresnet_50_5/evnet/modules.py
@@ -38,7 +38,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/evresnet_50_6/evnet/modules.py b/brainscore_vision/models/evresnet_50_6/evnet/modules.py
index 8dfa85fc3..4c382934b 100644
--- a/brainscore_vision/models/evresnet_50_6/evnet/modules.py
+++ b/brainscore_vision/models/evresnet_50_6/evnet/modules.py
@@ -38,7 +38,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/evresnet_50_7/evnet/modules.py b/brainscore_vision/models/evresnet_50_7/evnet/modules.py
index 8dfa85fc3..4c382934b 100644
--- a/brainscore_vision/models/evresnet_50_7/evnet/modules.py
+++ b/brainscore_vision/models/evresnet_50_7/evnet/modules.py
@@ -38,7 +38,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/evresnet_50_8/evnet/modules.py b/brainscore_vision/models/evresnet_50_8/evnet/modules.py
index 8dfa85fc3..4c382934b 100644
--- a/brainscore_vision/models/evresnet_50_8/evnet/modules.py
+++ b/brainscore_vision/models/evresnet_50_8/evnet/modules.py
@@ -38,7 +38,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/kap/helpers.py b/brainscore_vision/models/kap/helpers.py
index f128abdb4..9eaf34dec 100644
--- a/brainscore_vision/models/kap/helpers.py
+++ b/brainscore_vision/models/kap/helpers.py
@@ -61,7 +61,7 @@ def _get_mexican_hat_kernel2d(
     pdf = torch.zeros([kernel_size[0], kernel_size[1]])
     for i_y, y in enumerate(ys):
         for i_x, x in enumerate(xs):
-            w = evaluate(xs[i_x], ys[i_y], sigma)  # sigma too big too many negative correlation noooo too small??? 1.5 for 7 negtive up no// 1.2 for 7 great
+            w = evaluate(xs[i_x], ys[i_y], sigma)  # sigma too big too many negative correlation noooo too small??? 1.5 for 7 negative up no// 1.2 for 7 great
             pdf[i_x, i_y] = w  #+ np.random.normal(0,1)
 
     kernel2d = pdf / abs(pdf).sum()
diff --git a/brainscore_vision/models/kap1/helpers.py b/brainscore_vision/models/kap1/helpers.py
index f128abdb4..9eaf34dec 100644
--- a/brainscore_vision/models/kap1/helpers.py
+++ b/brainscore_vision/models/kap1/helpers.py
@@ -61,7 +61,7 @@ def _get_mexican_hat_kernel2d(
     pdf = torch.zeros([kernel_size[0], kernel_size[1]])
     for i_y, y in enumerate(ys):
         for i_x, x in enumerate(xs):
-            w = evaluate(xs[i_x], ys[i_y], sigma)  # sigma too big too many negative correlation noooo too small??? 1.5 for 7 negtive up no// 1.2 for 7 great
+            w = evaluate(xs[i_x], ys[i_y], sigma)  # sigma too big too many negative correlation noooo too small??? 1.5 for 7 negative up no// 1.2 for 7 great
             pdf[i_x, i_y] = w  #+ np.random.normal(0,1)
 
     kernel2d = pdf / abs(pdf).sum()
diff --git a/brainscore_vision/models/kap2/helpers.py b/brainscore_vision/models/kap2/helpers.py
index f128abdb4..9eaf34dec 100644
--- a/brainscore_vision/models/kap2/helpers.py
+++ b/brainscore_vision/models/kap2/helpers.py
@@ -61,7 +61,7 @@ def _get_mexican_hat_kernel2d(
     pdf = torch.zeros([kernel_size[0], kernel_size[1]])
     for i_y, y in enumerate(ys):
         for i_x, x in enumerate(xs):
-            w = evaluate(xs[i_x], ys[i_y], sigma)  # sigma too big too many negative correlation noooo too small??? 1.5 for 7 negtive up no// 1.2 for 7 great
+            w = evaluate(xs[i_x], ys[i_y], sigma)  # sigma too big too many negative correlation noooo too small??? 1.5 for 7 negative up no// 1.2 for 7 great
             pdf[i_x, i_y] = w  #+ np.random.normal(0,1)
 
     kernel2d = pdf / abs(pdf).sum()
diff --git a/brainscore_vision/models/kapIT/helpers.py b/brainscore_vision/models/kapIT/helpers.py
index f128abdb4..9eaf34dec 100644
--- a/brainscore_vision/models/kapIT/helpers.py
+++ b/brainscore_vision/models/kapIT/helpers.py
@@ -61,7 +61,7 @@ def _get_mexican_hat_kernel2d(
     pdf = torch.zeros([kernel_size[0], kernel_size[1]])
     for i_y, y in enumerate(ys):
         for i_x, x in enumerate(xs):
-            w = evaluate(xs[i_x], ys[i_y], sigma)  # sigma too big too many negative correlation noooo too small??? 1.5 for 7 negtive up no// 1.2 for 7 great
+            w = evaluate(xs[i_x], ys[i_y], sigma)  # sigma too big too many negative correlation noooo too small??? 1.5 for 7 negative up no// 1.2 for 7 great
             pdf[i_x, i_y] = w  #+ np.random.normal(0,1)
 
     kernel2d = pdf / abs(pdf).sum()
diff --git a/brainscore_vision/models/kapv1/helpers.py b/brainscore_vision/models/kapv1/helpers.py
index f128abdb4..9eaf34dec 100644
--- a/brainscore_vision/models/kapv1/helpers.py
+++ b/brainscore_vision/models/kapv1/helpers.py
@@ -61,7 +61,7 @@ def _get_mexican_hat_kernel2d(
     pdf = torch.zeros([kernel_size[0], kernel_size[1]])
     for i_y, y in enumerate(ys):
         for i_x, x in enumerate(xs):
-            w = evaluate(xs[i_x], ys[i_y], sigma)  # sigma too big too many negative correlation noooo too small??? 1.5 for 7 negtive up no// 1.2 for 7 great
+            w = evaluate(xs[i_x], ys[i_y], sigma)  # sigma too big too many negative correlation noooo too small??? 1.5 for 7 negative up no// 1.2 for 7 great
             pdf[i_x, i_y] = w  #+ np.random.normal(0,1)
 
     kernel2d = pdf / abs(pdf).sum()
diff --git a/brainscore_vision/models/p1_evresnet_50_457_0/evnet/modules.py b/brainscore_vision/models/p1_evresnet_50_457_0/evnet/modules.py
index d5e841eb6..ee0a9d641 100644
--- a/brainscore_vision/models/p1_evresnet_50_457_0/evnet/modules.py
+++ b/brainscore_vision/models/p1_evresnet_50_457_0/evnet/modules.py
@@ -40,7 +40,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/p1_evresnet_50_457_1/evnet/modules.py b/brainscore_vision/models/p1_evresnet_50_457_1/evnet/modules.py
index d5e841eb6..ee0a9d641 100644
--- a/brainscore_vision/models/p1_evresnet_50_457_1/evnet/modules.py
+++ b/brainscore_vision/models/p1_evresnet_50_457_1/evnet/modules.py
@@ -40,7 +40,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/p2_evresnet_50_457_0/evnet/modules.py b/brainscore_vision/models/p2_evresnet_50_457_0/evnet/modules.py
index d5e841eb6..ee0a9d641 100644
--- a/brainscore_vision/models/p2_evresnet_50_457_0/evnet/modules.py
+++ b/brainscore_vision/models/p2_evresnet_50_457_0/evnet/modules.py
@@ -40,7 +40,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/p2_evresnet_50_457_1/evnet/modules.py b/brainscore_vision/models/p2_evresnet_50_457_1/evnet/modules.py
index d5e841eb6..ee0a9d641 100644
--- a/brainscore_vision/models/p2_evresnet_50_457_1/evnet/modules.py
+++ b/brainscore_vision/models/p2_evresnet_50_457_1/evnet/modules.py
@@ -40,7 +40,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/pm1_evresnet_50_457_0/evnet/modules.py b/brainscore_vision/models/pm1_evresnet_50_457_0/evnet/modules.py
index d5e841eb6..ee0a9d641 100644
--- a/brainscore_vision/models/pm1_evresnet_50_457_0/evnet/modules.py
+++ b/brainscore_vision/models/pm1_evresnet_50_457_0/evnet/modules.py
@@ -40,7 +40,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/pm2_evresnet_50_457_0/evnet/modules.py b/brainscore_vision/models/pm2_evresnet_50_457_0/evnet/modules.py
index d5e841eb6..ee0a9d641 100644
--- a/brainscore_vision/models/pm2_evresnet_50_457_0/evnet/modules.py
+++ b/brainscore_vision/models/pm2_evresnet_50_457_0/evnet/modules.py
@@ -40,7 +40,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/pm2_evresnet_50_457_1/evnet/modules.py b/brainscore_vision/models/pm2_evresnet_50_457_1/evnet/modules.py
index d5e841eb6..ee0a9d641 100644
--- a/brainscore_vision/models/pm2_evresnet_50_457_1/evnet/modules.py
+++ b/brainscore_vision/models/pm2_evresnet_50_457_1/evnet/modules.py
@@ -40,7 +40,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/resnet.py b/brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/resnet.py
index 3b1d0c0f4..a4ca820b2 100644
--- a/brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/resnet.py
+++ b/brainscore_vision/models/resnet50_VITO_8deg_cc/helpers/resnet.py
@@ -89,7 +89,7 @@ def __init__(self, kernel_size=2, stride=2, padding=0, blur_kernel_size=3, blur_
         elif blur_position == 'before':
             self.layer = [self.blurpool, self.maxpool]
         else:
-            raise ValueError('invalid blur postion: {}'.format(blur_position))
+            raise ValueError('invalid blur position: {}'.format(blur_position))
 
         self.main = nn.Sequential(self.maxpool, self.blurpool)
diff --git a/brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py b/brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py
index 3aac0ffce..7d27a6d9a 100644
--- a/brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py
+++ b/brainscore_vision/models/resnet50_moclr8deg/helpers/helpers.py
@@ -124,7 +124,7 @@ def __init__(self, kernel_size=2, stride=2, padding=0, blur_kernel_size=3, blur_
         elif blur_position == 'before':
             self.layer = [self.blurpool, self.maxpool]
         else:
-            raise ValueError('invalid blur postion: {}'.format(blur_position))
+            raise ValueError('invalid blur position: {}'.format(blur_position))
 
         self.main = nn.Sequential(self.maxpool, self.blurpool)
diff --git a/brainscore_vision/models/resnet50_primary_visual_cortex/model.py b/brainscore_vision/models/resnet50_primary_visual_cortex/model.py
index 74cbf0b79..ad64c9a57 100644
--- a/brainscore_vision/models/resnet50_primary_visual_cortex/model.py
+++ b/brainscore_vision/models/resnet50_primary_visual_cortex/model.py
@@ -642,7 +642,7 @@ def resnet_pvc(**kwargs):
     return ResNetPVC(Bottleneck, [3, 4, 6, 3], **kwargs)
 
 
-# The model names to consider. If you are making a custom model, then you most likley want to change
+# The model names to consider. If you are making a custom model, then you most likely want to change
 # the return value of this function.
 def get_model_list():
     """
@@ -706,7 +706,7 @@ def get_model(name):
 
 
 # get_layers method to tell the code what layers to consider. If you are submitting a custom
-# model, then you will most likley need to change this method's return values.
+# model, then you will most likely need to change this method's return values.
 def get_layers(name):
     """
     This method returns a list of string layer names to consider per model. The benchmarks maps brain regions to
diff --git a/brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py b/brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py
index 0f3482b0b..4044fc25a 100644
--- a/brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py
+++ b/brainscore_vision/models/resnet50_vitoimagevidnet8/helpers/helpers.py
@@ -124,7 +124,7 @@ def __init__(self, kernel_size=2, stride=2, padding=0, blur_kernel_size=3, blur_
         elif blur_position == 'before':
             self.layer = [self.blurpool, self.maxpool]
         else:
-            raise ValueError('invalid blur postion: {}'.format(blur_position))
+            raise ValueError('invalid blur position: {}'.format(blur_position))
 
         self.main = nn.Sequential(self.maxpool, self.blurpool)
diff --git a/brainscore_vision/models/resnet_50_2/evnet/modules.py b/brainscore_vision/models/resnet_50_2/evnet/modules.py
index 20533ef44..f472126db 100644
--- a/brainscore_vision/models/resnet_50_2/evnet/modules.py
+++ b/brainscore_vision/models/resnet_50_2/evnet/modules.py
@@ -22,7 +22,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/resnet_50_mapping0_1/evnet/modules.py b/brainscore_vision/models/resnet_50_mapping0_1/evnet/modules.py
index 8dfa85fc3..4c382934b 100644
--- a/brainscore_vision/models/resnet_50_mapping0_1/evnet/modules.py
+++ b/brainscore_vision/models/resnet_50_mapping0_1/evnet/modules.py
@@ -38,7 +38,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/resnet_50_mapping1_1/evnet/modules.py b/brainscore_vision/models/resnet_50_mapping1_1/evnet/modules.py
index 8dfa85fc3..4c382934b 100644
--- a/brainscore_vision/models/resnet_50_mapping1_1/evnet/modules.py
+++ b/brainscore_vision/models/resnet_50_mapping1_1/evnet/modules.py
@@ -38,7 +38,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/resnet_50_mapping2_1/evnet/modules.py b/brainscore_vision/models/resnet_50_mapping2_1/evnet/modules.py
index 8dfa85fc3..4c382934b 100644
--- a/brainscore_vision/models/resnet_50_mapping2_1/evnet/modules.py
+++ b/brainscore_vision/models/resnet_50_mapping2_1/evnet/modules.py
@@ -38,7 +38,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/resnet_SIN_IN_FT_IN/model.py b/brainscore_vision/models/resnet_SIN_IN_FT_IN/model.py
index 469dc5ffd..ca7b89117 100644
--- a/brainscore_vision/models/resnet_SIN_IN_FT_IN/model.py
+++ b/brainscore_vision/models/resnet_SIN_IN_FT_IN/model.py
@@ -43,7 +43,7 @@ def get_model(name):
 
 
 # get_layers method to tell the code what layers to consider. If you are submitting a custom
-# model, then you will most likley need to change this method's return values.
+# model, then you will most likely need to change this method's return values.
 def get_layers(name):
     """
     This method returns a list of string layer names to consider per model. The benchmarks maps brain regions to
diff --git a/brainscore_vision/models/temporal_model_torchvision/model.py b/brainscore_vision/models/temporal_model_torchvision/model.py
index 30d96aba8..6d93d6444 100644
--- a/brainscore_vision/models/temporal_model_torchvision/model.py
+++ b/brainscore_vision/models/temporal_model_torchvision/model.py
@@ -73,8 +73,8 @@ def get_model(identifier):
 
     def process_output(layer, layer_name, input, output):
         if layer_name.startswith("blocks"):
-            output, thw = output
-            t, h, w = thw
+            output, thw = output  # codespell:ignore
+            t, h, w = thw  # codespell:ignore
             output = output[:, 1:]  # remove cls
             b, n, c = output.shape
             assert n == t*h*w
@@ -89,4 +89,4 @@
         process_output=process_output,
         **inferencer_kwargs)
 
-    return wrapper
\ No newline at end of file
+    return wrapper
diff --git a/brainscore_vision/models/vonegrcnn_47e/model.py b/brainscore_vision/models/vonegrcnn_47e/model.py
index 0f04f3a65..c5080943e 100644
--- a/brainscore_vision/models/vonegrcnn_47e/model.py
+++ b/brainscore_vision/models/vonegrcnn_47e/model.py
@@ -155,7 +155,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x):
@@ -585,7 +585,7 @@ def get_model(name):
 
 
 # get_layers method to tell the code what layers to consider. If you are submitting a custom
-# model, then you will most likley need to change this method's return values.
+# model, then you will most likely need to change this method's return values.
 def get_layers(name):
     """
     This method returns a list of string layer names to consider per model. The benchmarks maps brain regions to
diff --git a/brainscore_vision/models/vonegrcnn_52e_full/model.py b/brainscore_vision/models/vonegrcnn_52e_full/model.py
index e16e85bf3..2d7d46631 100644
--- a/brainscore_vision/models/vonegrcnn_52e_full/model.py
+++ b/brainscore_vision/models/vonegrcnn_52e_full/model.py
@@ -160,7 +160,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x):
@@ -584,7 +584,7 @@ def get_model(name):
 
 
 # get_layers method to tell the code what layers to consider. If you are submitting a custom
-# model, then you will most likley need to change this method's return values.
+# model, then you will most likely need to change this method's return values.
 def get_layers(name):
     """
     This method returns a list of string layer names to consider per model. The benchmarks maps brain regions to
diff --git a/brainscore_vision/models/vonegrcnn_62e_nobn/helpers/vongrcnn_helpers.py b/brainscore_vision/models/vonegrcnn_62e_nobn/helpers/vongrcnn_helpers.py
index 0f839786d..d8a7687b0 100644
--- a/brainscore_vision/models/vonegrcnn_62e_nobn/helpers/vongrcnn_helpers.py
+++ b/brainscore_vision/models/vonegrcnn_62e_nobn/helpers/vongrcnn_helpers.py
@@ -146,7 +146,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x):
diff --git a/brainscore_vision/models/voneresnet_50_1/evnet/modules.py b/brainscore_vision/models/voneresnet_50_1/evnet/modules.py
index 47ed43d09..e8f6a8cb6 100644
--- a/brainscore_vision/models/voneresnet_50_1/evnet/modules.py
+++ b/brainscore_vision/models/voneresnet_50_1/evnet/modules.py
@@ -22,7 +22,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/voneresnet_50_1079_0/evnet/modules.py b/brainscore_vision/models/voneresnet_50_1079_0/evnet/modules.py
index 8dfa85fc3..4c382934b 100644
--- a/brainscore_vision/models/voneresnet_50_1079_0/evnet/modules.py
+++ b/brainscore_vision/models/voneresnet_50_1079_0/evnet/modules.py
@@ -38,7 +38,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/voneresnet_50_1083_1/evnet/modules.py b/brainscore_vision/models/voneresnet_50_1083_1/evnet/modules.py
index 8dfa85fc3..4c382934b 100644
--- a/brainscore_vision/models/voneresnet_50_1083_1/evnet/modules.py
+++ b/brainscore_vision/models/voneresnet_50_1083_1/evnet/modules.py
@@ -38,7 +38,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/voneresnet_50_3/evnet/modules.py b/brainscore_vision/models/voneresnet_50_3/evnet/modules.py
index 47ed43d09..e8f6a8cb6 100644
--- a/brainscore_vision/models/voneresnet_50_3/evnet/modules.py
+++ b/brainscore_vision/models/voneresnet_50_3/evnet/modules.py
@@ -22,7 +22,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
diff --git a/brainscore_vision/models/voneresnet_50_457_0/evnet/modules.py b/brainscore_vision/models/voneresnet_50_457_0/evnet/modules.py
index 8dfa85fc3..4c382934b 100644
--- a/brainscore_vision/models/voneresnet_50_457_0/evnet/modules.py
+++ b/brainscore_vision/models/voneresnet_50_457_0/evnet/modules.py
@@ -38,7 +38,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4):
         self.stride = (stride, stride)
         self.padding = (kernel_size // 2, kernel_size // 2)
 
-        # Param instatiations
+        # Param instantiations
         self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size))
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
a/brainscore_vision/models/voneresnet_50_457_0/evnet/modules.py +++ b/brainscore_vision/models/voneresnet_50_457_0/evnet/modules.py @@ -38,7 +38,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4): self.stride = (stride, stride) self.padding = (kernel_size // 2, kernel_size // 2) - # Param instatiations + # Param instantiations self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size)) def forward(self, x: torch.Tensor) -> torch.Tensor: diff --git a/brainscore_vision/models/voneresnet_50_457_1/evnet/modules.py b/brainscore_vision/models/voneresnet_50_457_1/evnet/modules.py index 8dfa85fc3..4c382934b 100644 --- a/brainscore_vision/models/voneresnet_50_457_1/evnet/modules.py +++ b/brainscore_vision/models/voneresnet_50_457_1/evnet/modules.py @@ -38,7 +38,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4): self.stride = (stride, stride) self.padding = (kernel_size // 2, kernel_size // 2) - # Param instatiations + # Param instantiations self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size)) def forward(self, x: torch.Tensor) -> torch.Tensor: diff --git a/brainscore_vision/models/voneresnet_50_no_weight/evnet/modules.py b/brainscore_vision/models/voneresnet_50_no_weight/evnet/modules.py index 47ed43d09..e8f6a8cb6 100644 --- a/brainscore_vision/models/voneresnet_50_no_weight/evnet/modules.py +++ b/brainscore_vision/models/voneresnet_50_no_weight/evnet/modules.py @@ -22,7 +22,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4): self.stride = (stride, stride) self.padding = (kernel_size // 2, kernel_size // 2) - # Param instatiations + # Param instantiations self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size)) def forward(self, x: torch.Tensor) -> torch.Tensor: diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md index 756f30b1a..54db5f67e 100644 --- a/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/README.md @@ -63,13 +63,13 @@ Soon... $ git clone https://github.com/dicarlolab/vonenet.git 2. And when you setup its codes, you must need 'val' directory. so here is link. 
- this link is from Korean's blog I refered as below https://seongkyun.github.io/others/2019/03/06/imagenet_dn/ + this link is from a Korean blog, referred to below: https://seongkyun.github.io/others/2019/03/06/imagenet_dn/ ** Download link** https://academictorrents.com/collection/imagenet-2012 Once you download that large tar files, you must unzip that files - -- all instructions below are refered above link, I only translate it + -- all instructions below are from the link referred to above; I only translated them # Unzip training dataset - $ mkdir train && mb ILSVRC2012_img_train.tar train/ && cd train + $ mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train diff --git a/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py index fe5e53491..3cd13fe2c 100644 --- a/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py +++ b/brainscore_vision/models/voneresnet_50_non_stochastic/vonenet/vonenet/modules.py @@ -22,7 +22,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=4): self.stride = (stride, stride) self.padding = (kernel_size // 2, kernel_size // 2) - # Param instatiations + # Param instantiations self.weight = torch.zeros((out_channels, in_channels, kernel_size, kernel_size)) def forward(self, x): diff --git a/brainscore_vision/submission/developers_guide.md b/brainscore_vision/submission/developers_guide.md index 1e163a3b5..27ad55559 100644 --- a/brainscore_vision/submission/developers_guide.md +++ b/brainscore_vision/submission/developers_guide.md @@ -1,7 +1,7 @@ ## Submission system ### Components -To provide an automatical scoring mechanism for artificial models of the ventral stream, Brain-Score has implemented a whole system, which is explained in the follows. The system consists of following components: +To provide an automatic scoring mechanism for artificial models of the ventral stream, Brain-Score has implemented a whole system, which is explained in the following. The system consists of the following components: ![](brainscore_submission.png) - **Brain-Score Website:** @@ -12,12 +12,12 @@ To provide an automatical scoring mechanism for artificial models of the ventral - **[Jenkins](http://braintree.mit.edu:8080/):** [Jenkins](http://braintree.mit.edu:8080/) is a continuous integration tool, which we use to automatically run project unittests and the scoring process for brain models. - Jenkins is running on Braintree, the lab's internal server. Jenkins defines different jobs, executing different taks. The task for a new submission is triggered by the website, the unittest tasks are triggerd by GitHub web hooks. + Jenkins is running on Braintree, the lab's internal server. Jenkins defines different jobs, executing different tasks. The task for a new submission is triggered by the website, the unittest tasks are triggered by GitHub web hooks. Once the jobs are triggered, jenkins runs a procedure to execute the tests or scoring and communicate the results back to the user or back to GitHub. - **Openmind** Scoring submissions is a computation and memory expensive process, we cannot execute model scoring on small machines. Because we do not want to execute the jobs on Braintree, we submit jobs to Openmind, the department cluster system. - The big advantage of Openmind is its queuing system, which allows to define detailed ressource requirements and jobs are executed, once their requested ressources are available. The jenkins related contents are stored on ``/om5/group/dicarlo/jenkins``.
+ The big advantage of Openmind is its queuing system, which allows defining detailed resource requirements; jobs are executed once their requested resources are available. The Jenkins-related contents are stored on ``/om5/group/dicarlo/jenkins``. This directory contains a script for model submission (`score_model.sh`) and for unittests (`unittests_brainscore.sh`). The scripts are executed in an openmind job and are responsible for fully installing a conda environment, executing the process, shutting everything down again. Results are stored in the database or copied to amazon S3 cloud file system. From there jenkins reports the results back to its caller. diff --git a/docs/source/modules/developer_clarifications.rst b/docs/source/modules/developer_clarifications.rst index 6e63f2008..c830f5469 100644 --- a/docs/source/modules/developer_clarifications.rst +++ b/docs/source/modules/developer_clarifications.rst @@ -20,7 +20,7 @@ anyone interested in contributing to Brain-Score's codebase or scientific workin Result Caching is a Brain-Score `repo `_ that allows model activations (and other functions) to be cached to disk, in order to speed up the process of rescoring models. It contains a decorator that can be attached to a function - right before it is defined. On the first run of that function, `result_caching` will save to disk the result of tha function + right before it is defined. On the first run of that function, `result_caching` will save to disk the result of the function and will load that result from disk in future calls with the same parameters. All files are saved in the user's `~/result_caching` folder, and they are persistent, as there is no garbage collection built in. You can deactivate `result_caching` by simply setting the environment flag `RESULTCACHING_DISABLE` to `1`. Please see the link above diff --git a/docs/source/modules/submission.rst b/docs/source/modules/submission.rst index f6d2a3239..322bd31e9 100644 --- a/docs/source/modules/submission.rst +++ b/docs/source/modules/submission.rst @@ -41,7 +41,7 @@ Submission System Components Jenkins is a continuous integration tool, which we use to automatically run project unittests and the scoring process for models of the brain. `Jenkins is running on Braintree `_ - DiCarlo lab's internal server. Jenkins defines different jobs and executes different tasks. The task for a new submission is - triggered via the website and the unittest tasks are triggerd by GitHub web hooks. Once the jobs are triggered, + triggered via the website and the unittest tasks are triggered by GitHub web hooks. Once the jobs are triggered, Jenkins runs a procedure to execute the tests or scoring and communicate the results back to the user (via email) or back to GitHub. @@ -69,7 +69,7 @@ Submission System Components - Test (brainscore-ohio-test): The database used for executing tests. Jenkins also executes unittests of all Brain-Score projects and should use this database for testing. - The names in parantheses are used in brain-score to load database credentials for the different databases. + The names in parentheses are used in brain-score to load database credentials for the different databases. Just change the name and another database is used. Databases are automatically snapshotted every 7 days, and devs can restore snapshots at any time.
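The `result_caching` behavior described in the `developer_clarifications.rst` hunk above is easiest to see in code. A minimal sketch follows, assuming the decorator is named `store` and importable from the `result_caching` package (as in the brain-score/result_caching repository); the exact signature may differ, so consult that repo before relying on it:

```python
from result_caching import store  # assumed import path, per the result_caching repo


@store()  # attach the decorator right before the function definition
def double(x):
    # This body runs only on a cache miss; the result is then written to disk
    # (persistently, under the user's ~/result_caching folder, with no garbage collection).
    print("computing...")
    return x * 2


print(double(21))  # first call: computes and caches the result
print(double(21))  # same parameters: loaded from disk, "computing..." is not printed
```

Setting the environment variable `RESULTCACHING_DISABLE=1` before running would bypass the cache entirely, as the docs above note.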
diff --git a/pyproject.toml b/pyproject.toml index d77bab486..32b7e9098 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -119,3 +119,10 @@ packages = { find = { where = ["."] } } "brainscore_vision.data" = ["**"] "brainscore_vision.metrics" = ["**"] "brainscore_vision.models" = ["**"] + +[tool.codespell] +# Ref: https://github.com/codespell-project/codespell#using-a-config-file +skip = '.git*,imagenet_categories.txt' +check-hidden = true +ignore-regex = '^\s*("image/\S+": ".*|author = {.*)' +ignore-words-list = 'bu,nin,indx,evnet' diff --git a/tests/test_model_helpers/README.md b/tests/test_model_helpers/README.md index e0520dc04..a1e1f2f4d 100644 --- a/tests/test_model_helpers/README.md +++ b/tests/test_model_helpers/README.md @@ -2,7 +2,7 @@ ## Markers Unit tests have various markers that denote possible issues in the travis build: -* **private_access**: tests that require access to a private ressource, such as assemblies on S3 (travis pull request builds can not have private access) +* **private_access**: tests that require access to a private resource, such as assemblies on S3 (travis pull request builds cannot have private access) * **memory_intense**: tests requiring more memory than is available in the travis sandbox (currently 3 GB, https://docs.travis-ci.com/user/common-build-problems/#my-build-script-is-killed-without-any-error) Use the following syntax to mark a test:
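With the `[tool.codespell]` table added to `pyproject.toml` above, the CI spell check can also be reproduced locally. A minimal sketch, assuming a recent codespell release (2.2+ reads its configuration from `pyproject.toml`; the `toml` extra is assumed to pull in the TOML parser needed on older Pythons):

```sh
pip install 'codespell[toml]'  # 'toml' extra assumed; plain 'codespell' suffices on Python >= 3.11
codespell                      # run from the repo root; picks up skip/ignore settings from [tool.codespell]
```

Individual false positives can be exempted inline with a trailing `# codespell:ignore` comment, as done for the `thw` variables in `temporal_model_torchvision/model.py` above (supported by recent codespell versions), or added to `ignore-words-list`, as done for the `evnet` module name.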