93 changes: 93 additions & 0 deletions benchmarks/templates/benchmarks/benchmark_table.html
@@ -0,0 +1,93 @@
{% load static %}

{% if models %}
{% if has_user %}
<form>
{% endif %}
<div class="table-container">
<table id="leaderboard" class="table is-hoverable benchmarks fixed_header">
<!-- Each cell in the table header carries data attributes identifying which benchmark it represents and who its
     parent is. This allows querySelector-based lookups and reuse of some of the filters from index.py. -->
<thead>
<tr>
<th id="model-th" class="model want_to_click"><p>
<span class="identifier want_to_click">
Model
</span>
</p></th>
{% for benchmark in benchmarks %}
<th
{% if benchmark.ceiling and benchmark.ceiling != 'X' %}
title="ceiling: {{ benchmark.ceiling }}"
{% endif %}
data-benchmark="{{ benchmark.short_name }}"
{# data-parent="{{ benchmark_parents|get_parent_item:benchmark.identifier }}"#}
class="rotate depth_{{ benchmark.depth }}"
>
<p>
{% if benchmark.benchmark_type.parent %}
{# indent, depending on depth #}
<span class="want_to_click benchmark_child_indent">
{{ benchmark_parents|get_initial_characters:benchmark.identifier }}
</span>
{% endif %}

{# reference link #}
{% if benchmark.benchmark_type.reference and benchmark.benchmark_type.reference.url %}
<a href="{{ benchmark.benchmark_type.reference.url }}">
{% endif %}

{# identifier #}
<span data-benchmark="{{ benchmark.short_name }}"
class="want_to_click clicker depth_{{ benchmark.depth }}
{% if benchmark.version %} instance {% endif %}
                                   {% if benchmark.root_parent == 'average_'|add:domain %} brain_benchmark {% endif %}"
value="{{ benchmark.identifier }}">{{ benchmark.short_name|simplify_domain }}
</span>
{% if benchmark.benchmark_type.reference and benchmark.benchmark_type.reference.url %}
</a>
{% endif %}
</p></th>
{% endfor %}
</tr>
</thead>
<!-- model rows -->
<tbody>
{% for model in models %}
<tr>
<td class="model">
<div class="identifier" title="{{ model.reference_identifier }}">
<a target="_self" rel="noopener noreferrer"
{# href="{% url 'model-view' domain model.id %}"#}
>
{{ model|display_model:user }}
</a>
</div>
</td>
{% for score_row in model.scores %}
<td title="{{ score_row.benchmark.short_name }} unceiled: {{ score_row.score_raw|format_score }}"
data-benchmark="{{ score_row.versioned_benchmark_identifier }}"
{# data-parent="{{ benchmark_parents|get_parent_item:score_row.versioned_benchmark_identifier }}"#}
class="score_cell displaySquare depth_{{ score_row.benchmark.depth }} clicker"
style="{{ score_row.color }}">
{{ score_row.score_ceiled }}
</td>
{% endfor %}
</tr>
{% endfor %}
</tbody>
</table>
</div>

<div class="column is-10">
Model scores on brain benchmarks.
Click on a model to see more details.
        The greener and brighter a cell, the better the model's score.
        Scores are ceiled; hover over a benchmark header to see its ceiling.
</div>
{% if has_user %}
</form>
{% endif %}
{% else %}
<p>No data.</p>
{% endif %}
25 changes: 22 additions & 3 deletions benchmarks/templates/benchmarks/competition2024.html
@@ -130,6 +130,22 @@ <h3 class="title is-5 pt-5 pb-5">Common critiques of the Brain-Score platform</h
</div>
</section>

{# leaderboard #}
<section id="leaderboard" class="section container tablecenter is-centered">
<h3 class="title is-3">Competition Leaderboard</h3>
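        {# the benchmarks_*/models_* variables used below are built per track in benchmarks/views/competition2024.py #}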

<h4 class="title is-4 track">Behavioral Track</h4>
<div class="content">
{% include "benchmarks/benchmark_table.html" with benchmarks=benchmarks_behavior_vision models=models_behavior_vision %}
</div>

<h4 class="title is-4 track">Neural Track</h4>
<div class="content">
{% include "benchmarks/benchmark_table.html" with benchmarks=benchmarks_neural_vision models=models_neural_vision %}
</div>
</section>


{# overview #}
<section id="overview" class="section container center">
<h3 class="title is-3">Overview</h3>
@@ -254,7 +270,8 @@ <h3 class="title is-3">Models</h3>
<code>effnetb1_cutmixpatch_SAM_</code></a></li>
<li><a href="../model/vision/1033">
<code>effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288</code></a>
(Winner of the 2022 competition)</li>
(Winner of the 2022 competition)
</li>
<li><a href="../model/vision/648">
<code>resnext101_32x32d_wsl</code></a></li>
<li><a href="../model/vision/1045">
@@ -287,9 +304,11 @@ <h3 class="title is-3">Models</h3>
<li><a href="../model/vision/680">
<code>resnet18-local_aggregation</code></a></li>
<li><a href="../model/vision/1044">
<code>grcnn_robust_v1</code></a> (Top-3 competition 2022)</li>
<code>grcnn_robust_v1</code></a> (Top-3 competition 2022)
</li>
<li><a href="../model/vision/991">
<code>custom_model_cv_18_dagger_408</code></a> (Top-3 competition 2022)</li>
<code>custom_model_cv_18_dagger_408</code></a> (Top-3 competition 2022)
</li>
<li><a href="../model/vision/734">
<code>ViT_L_32_imagenet1k</code></a></li>
<li><a href="../model/vision/563">
100 changes: 99 additions & 1 deletion benchmarks/views/competition2024.py
@@ -1,6 +1,104 @@
from django.shortcuts import render

from .index import get_context
from ..models import User


def view(request):
context = {}
# model filter
included_models = [
"cvt_cvt-w24-384-in22k_finetuned-in1k_4",
"resnext101_32x8d_wsl",
"effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288",
"effnetb1_cutmixpatch_augmix_robust32_avge4e7_manylayers_324x288",
"resnext101_32x32d_wsl",
"effnetb1_272x240",
"resnext101_32x48d_wsl",
"pnasnet_large_pytorch",
"resnet-152_v2_pytorch",
"focalnet_tiny_lrf_in1k",
"hmax",
"alexnet",
"CORnet-S",
"resnet-50-robust",
"voneresnet-50-non_stochastic",
# "resnet18-local_aggregation", # TF no longer supported
# "grcnn_robust_v1", # weights deleted on user server
"custom_model_cv_18_dagger_408",
"ViT_L_32_imagenet1k",
"mobilenet_v2_1-4_224_pytorch",
"pixels",
]
assert len(included_models) == 19
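    # keyword arguments for a Django queryset .filter() call -- presumably unpacked inside get_context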
model_filter = dict(model__name__in=included_models)

# benchmark filter
track_benchmarks = {
"behavior_vision": [
"average_vision",
"behavior_vision",

"Hebart2023-match",

"Baker2022",
"Baker2022inverted-accuracy_delta", "Baker2022fragmented-accuracy_delta",
"Baker2022frankenstein-accuracy_delta",

"tong.Coggan2024_behavior-ConditionWiseAccuracySimilarity",

"BMD2024",
"BMD2024.texture_1Behavioral-accuracy_distance",
"BMD2024.texture_2Behavioral-accuracy_distance",
"BMD2024.dotted_1Behavioral-accuracy_distance",
"BMD2024.dotted_2Behavioral-accuracy_distance",

"Maniquet2024",
"Maniquet2024-confusion_similarity'", "Maniquet2024-tasks_consistency",

# "Malania2007",
# "Malania2007.short2", "Malania2007.short4", "Malania2007.short6", "Malania2007.short8",
# "Malania2007.short16", "Malania2007.equal2", "Malania2007.long2", "Malania2007.equal16",
# "Malania2007.long16", "Malania2007.vernieracuity-threshold",
#
# "Scialom2024",
# "Scialom2024_rgb-behavioral_accuracy",
# "Scialom2024_phosphenes-all-behavioral_accuracy",
# "Scialom2024_segments-all-behavioral_accuracy",
# "Scialom2024_phosphenes-100-behavioral_accuracy",
# "Scialom2024_segments-100-behavioral_accuracy",
#
# "Ferguson2024",
# "Ferguson2024circle_line-value_delta", "Ferguson2024color-value_delta",
# "Ferguson2024convergence-value_delta", "Ferguson2024eighth-value_delta",
# "Ferguson2024gray_easy-value_delta", "Ferguson2024gray_hard-value_delta", "Ferguson2024half-value_delta",
# "Ferguson2024juncture-value_delta", "Ferguson2024lle-value_delta", "Ferguson2024llh-value_delta",
# "Ferguson2024quarter-value_delta", "Ferguson2024round_f-value_delta", "Ferguson2024round_v-value_delta",
# "Ferguson2024tilted_line-value_delta"
],
"neural_vision": [
"average_vision",
"neural_vision",
"V1", "V2", "V4", "IT",
"Bracci2019.anteriorVTC-rdm",
"Coggan2024",
"tong.Coggan2024_fMRI.V1-rdm",
"tong.Coggan2024_fMRI.V2-rdm",
"tong.Coggan2024_fMRI.V4-rdm",
"tong.Coggan2024_fMRI.IT-rdm",
]
}
admin_user = User.objects.get(id=2)
context = {'leaderboard_keys': ['behavior_vision', 'neural_vision']}
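    # build one leaderboard context per track and store its benchmarks/models under suffixed keys,
    # so the template can render a separate table for each track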
for key, key_benchmarks in track_benchmarks.items():
benchmark_filter = lambda benchmarks: benchmarks.filter(identifier__in=key_benchmarks)
key_context = get_context(benchmark_filter=benchmark_filter,
model_filter=model_filter,
user=admin_user,
domain="vision", show_public=True,
compute_benchmark_average=True)
key_context[f"benchmarks_{key}"] = key_context['benchmarks']
key_context[f"models_{key}"] = key_context['models']
del key_context['benchmarks'], key_context['models']
context = {**context, **key_context}

return render(request, 'benchmarks/competition2024.html', context)
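For orientation, a minimal sketch of how this view might be wired into the project's URL configuration; the path and route name below are assumptions for illustration, not taken from this pull request.

# urls.py (illustrative sketch; path and name are assumptions)
from django.urls import path
from benchmarks.views import competition2024

urlpatterns = [
    # renders benchmarks/competition2024.html with the per-track leaderboard context built above
    path('competition/2024/', competition2024.view, name='competition2024'),
]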