From bc3dce47d2eb1efa86fd7a23add98b21e4674108 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Tue, 3 Feb 2026 14:54:43 -0800 Subject: [PATCH 01/45] add a new study field called max_responses to model, study edit/create forms, and database --- studies/forms.py | 4 ++ .../0105_add_max_responses_to_study.py | 38 +++++++++++++++++++ studies/models.py | 6 +++ studies/templates/studies/_study_fields.html | 1 + 4 files changed, 49 insertions(+) create mode 100644 studies/migrations/0105_add_max_responses_to_study.py diff --git a/studies/forms.py b/studies/forms.py index 8d68ee41c..db612bf49 100644 --- a/studies/forms.py +++ b/studies/forms.py @@ -211,6 +211,7 @@ class Meta: "name", "lab", "priority", + "max_responses", "image", "preview_summary", "short_description", @@ -247,6 +248,7 @@ class Meta: "study_type": "Experiment Type", "compensation_description": "Compensation", "priority": "Lab Page Priority", + "max_responses": "Maximum Responses", } widgets = { "preview_summary": Textarea(attrs={"rows": 2}), @@ -277,6 +279,7 @@ class Meta: "priority": forms.TextInput( attrs={"type": "range", "min": "1", "max": "99"} ), + "max_responses": forms.NumberInput(attrs={"min": "1"}), } help_texts = { @@ -300,6 +303,7 @@ class Meta: "shared_preview": "Allow other Lookit researchers to preview your study and give feedback.", "study_type": "Choose the type of experiment you are creating - this will change the fields that appear on the Study Details page.", "priority": "This affects how studies are ordered at your lab's custom URL, not the main study page. If you leave all studies at the highest priority (99), then all of your lab's active/discoverable studies will be shown in a randomized order on your lab page. If you lower the priority of this study to 1, then it will appear last in the list on your lab page. You can find your lab's custom URL from the labs page. 
For more info, see the documentation on study prioritization.", + "max_responses": "Optional limit on the number of valid responses to collect for this study. When this response limit is reached, the study will be automatically paused. This value can be changed at any time. Leave blank for no limit.", } diff --git a/studies/migrations/0105_add_max_responses_to_study.py b/studies/migrations/0105_add_max_responses_to_study.py new file mode 100644 index 000000000..284c928da --- /dev/null +++ b/studies/migrations/0105_add_max_responses_to_study.py @@ -0,0 +1,38 @@ +# Generated by Django 5.2.9 on 2026-02-03 22:08 + +import django.core.validators +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("studies", "0104_add_bucket_kwarg_to_video_cleanup"), + ] + + operations = [ + migrations.RenameIndex( + model_name="consentruling", + new_name="studies_con_respons_23a801_idx", + old_fields=("response", "action"), + ), + migrations.RenameIndex( + model_name="consentruling", + new_name="studies_con_respons_df509e_idx", + old_fields=("response", "arbiter"), + ), + migrations.RenameIndex( + model_name="studylog", + new_name="studies_stu_study_i_b16384_idx", + old_fields=("study", "action"), + ), + migrations.AddField( + model_name="study", + name="max_responses", + field=models.IntegerField( + blank=True, + default=None, + null=True, + validators=[django.core.validators.MinValueValidator(1)], + ), + ), + ] diff --git a/studies/models.py b/studies/models.py index ea187be8f..316f4db9e 100644 --- a/studies/models.py +++ b/studies/models.py @@ -364,6 +364,12 @@ class Study(models.Model): is_building = models.BooleanField(default=False) compensation_description = models.TextField(blank=True) criteria_expression = models.TextField(blank=True, default="") + max_responses = models.IntegerField( + null=True, + blank=True, + default=None, + validators=[MinValueValidator(1)], + ) must_have_participated = models.ManyToManyField( "self", 
blank=True, symmetrical=False, related_name="expected_participation" ) diff --git a/studies/templates/studies/_study_fields.html b/studies/templates/studies/_study_fields.html index c8c79e354..d204594d2 100644 --- a/studies/templates/studies/_study_fields.html +++ b/studies/templates/studies/_study_fields.html @@ -5,6 +5,7 @@ {% bootstrap_field form.name label_class="form-label fw-bold" wrapper_class="mb-4" %} {% bootstrap_field form.lab label_class="form-label fw-bold" wrapper_class="mb-4" %} {% bootstrap_field form.priority label_class="form-label fw-bold" wrapper_class="mb-4" addon_before="" %} +{% bootstrap_field form.max_responses label_class="form-label fw-bold" wrapper_class="mb-4" %}
From f4ecc627e1466075a041014954b1666df494c1c8 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Tue, 3 Feb 2026 14:56:29 -0800 Subject: [PATCH 02/45] add client side validation to strip invalid characters/values --- web/static/js/study-fields.js | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/web/static/js/study-fields.js b/web/static/js/study-fields.js index de1361e93..e663adae1 100644 --- a/web/static/js/study-fields.js +++ b/web/static/js/study-fields.js @@ -104,4 +104,20 @@ $(document).ready(function () { // Trigger mousedown to populate ui. mustHave.dispatchEvent(new Event('mousedown')); mustNotHave.dispatchEvent(new Event('mousedown')); + + /* + Max Responses validation + */ + const maxResponses = document.querySelector('#id_max_responses'); + if (maxResponses) { + maxResponses.addEventListener('input', () => { + // Remove non-numeric characters and leading zeros + let value = maxResponses.value.replace(/[^0-9]/g, '').replace(/^0+/, ''); + // Ensure minimum value of 1 if not empty + if (value !== '' && parseInt(value) < 1) { + value = '1'; + } + maxResponses.value = value; + }); + } }); From 28ffc43d6f124bfbf27a72857382e97f0709bd2f Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Tue, 3 Feb 2026 15:36:59 -0800 Subject: [PATCH 03/45] add study methods for getting valid response count, comparing with max_responses, and pausing if needed (stub/WIP); check/pause study whenever a complete response is received --- studies/models.py | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/studies/models.py b/studies/models.py index 316f4db9e..6bcbb3661 100644 --- a/studies/models.py +++ b/studies/models.py @@ -554,6 +554,49 @@ def videos_for_consented_responses(self): """Gets videos but only for consented responses.""" return Video.objects.filter(response_id__in=self.consented_responses) + @property + def valid_response_count(self) -> int: + """Return the count of valid responses for max_responses 
limit. + + A response is counted as valid if: + - is_preview is False + - eligibility is "Eligible" or blank/empty + - completed is True + + Returns: + int: Count of valid responses + """ + return ( + self.responses.filter( + is_preview=False, + completed=True, + ) + .filter( + models.Q(eligibility=[]) + | models.Q(eligibility__contains=[ResponseEligibility.ELIGIBLE]) + ) + .count() + ) + + @property + def has_reached_max_responses(self) -> bool: + """Check if the study has reached its maximum number of valid responses. + + Returns: + bool: True if max_responses is set and the limit has been reached + """ + if self.max_responses is None: + return False + return self.valid_response_count >= self.max_responses + + def check_and_pause_if_at_max_responses(self): + """Check if max responses reached and pause the study if so. + + Only pauses if the study is currently active. + """ + # TODO: Implement logic to pause study when max responses reached + pass + @property def consent_videos(self): return self.videos.filter(is_consent_footage=True) @@ -1300,6 +1343,10 @@ def take_action_on_exp_data(sender, instance, created, **kwargs): else: dispatch_frame_action(response) + # If this response is complete, then check if this study has reached max responses and pause if needed + if response.completed: + response.study.check_and_pause_if_at_max_responses() + class FeedbackApiManager(models.Manager): """Prefetch all the things.""" From c146a21179a9ec159051b55d786ad2ea71e88eeb Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Thu, 5 Feb 2026 14:47:06 -0800 Subject: [PATCH 04/45] update valid_response_count logic so that completion status is ignored when the study is external --- studies/models.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/studies/models.py b/studies/models.py index 6bcbb3661..26e0bd3c0 100644 --- a/studies/models.py +++ b/studies/models.py @@ -561,22 +561,23 @@ def valid_response_count(self) -> int: A response is 
counted as valid if: - is_preview is False - eligibility is "Eligible" or blank/empty - - completed is True + - completed is True (internal study types only) + + For external studies, the completed field is ignored. Returns: int: Count of valid responses """ - return ( - self.responses.filter( - is_preview=False, - completed=True, - ) - .filter( - models.Q(eligibility=[]) - | models.Q(eligibility__contains=[ResponseEligibility.ELIGIBLE]) - ) - .count() - ) + responses = self.responses.filter(is_preview=False) + + # For internal study types, also require completed=True + if not self.study_type.is_external: + responses = responses.filter(completed=True) + + return responses.filter( + models.Q(eligibility=[]) + | models.Q(eligibility__contains=[ResponseEligibility.ELIGIBLE]) + ).count() @property def has_reached_max_responses(self) -> bool: From 248fb9075c79493a782b2426b02ea3fa909ff925 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Thu, 5 Feb 2026 14:59:38 -0800 Subject: [PATCH 05/45] add tests for study properties: valid_response_count, has_reached_max_responses --- studies/tests.py | 228 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 228 insertions(+) diff --git a/studies/tests.py b/studies/tests.py index 86c0b9705..b1bd7665a 100644 --- a/studies/tests.py +++ b/studies/tests.py @@ -861,6 +861,234 @@ def test_responses_for_researcher_external_studies(self): self.assertIn(response, study.responses_for_researcher(user)) + def test_valid_response_count_internal_study(self): + """Test that valid_response_count correctly counts eligible, completed, non-preview responses.""" + study = Study.objects.create(study_type=StudyType.get_ember_frame_player()) + user = User.objects.create(is_active=True) + child = Child.objects.create(user=user, birthday=date.today()) + + # Note: Response.save() auto-sets eligibility, so we update this value after creation + + # Valid response: completed, not preview, eligible + r1 = Response.objects.create( + study=study, + 
child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r1.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + # Valid response: completed, not preview, empty eligibility + r2 = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r2.pk).update(eligibility=[]) + + # Invalid: preview response + r3 = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + is_preview=True, + ) + Response.objects.filter(pk=r3.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + # Invalid: not completed + r4 = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=False, + is_preview=False, + ) + Response.objects.filter(pk=r4.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + # Invalid: ineligible + r5 = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r5.pk).update( + eligibility=[ResponseEligibility.INELIGIBLE_OLD] + ) + + self.assertEqual(study.valid_response_count, 2) + + def test_valid_response_count_external_study(self): + """Test that valid_response_count for external studies ignores completed field.""" + study = Study.objects.create(study_type=StudyType.get_external()) + user = User.objects.create(is_active=True) + child = Child.objects.create(user=user, birthday=date.today()) + + # Valid: not preview, eligible, completed + r1 = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + 
demographic_snapshot=user.latest_demographics, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r1.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + # Valid: not preview, eligible, NOT completed (should still count for external) + r2 = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=False, + is_preview=False, + ) + Response.objects.filter(pk=r2.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + # Valid: not preview, empty eligibility, NOT completed + r3 = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=False, + is_preview=False, + ) + Response.objects.filter(pk=r3.pk).update(eligibility=[]) + + # Invalid: preview response (should not count) + r4 = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + is_preview=True, + ) + Response.objects.filter(pk=r4.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + # Invalid: ineligible (should not count) + r5 = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r5.pk).update( + eligibility=[ResponseEligibility.INELIGIBLE_OLD] + ) + + # 3 valid responses: r1, r2, r3 (completed field ignored for external) + self.assertEqual(study.valid_response_count, 3) + + def test_has_reached_max_responses_no_limit(self): + """Test that has_reached_max_responses returns False when max_responses is None.""" + study = Study.objects.create( + study_type=StudyType.get_ember_frame_player(), + max_responses=None, + ) + self.assertFalse(study.has_reached_max_responses) + + def test_has_reached_max_responses_not_reached(self): + """Test that 
has_reached_max_responses returns False when limit not reached.""" + study = Study.objects.create( + study_type=StudyType.get_ember_frame_player(), + max_responses=5, + ) + user = User.objects.create(is_active=True) + child = Child.objects.create(user=user, birthday=date.today()) + + # Add 2 valid responses + for _ in range(2): + r = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + self.assertFalse(study.has_reached_max_responses) + + def test_has_reached_max_responses_reached(self): + """Test that has_reached_max_responses returns True when limit is reached.""" + study = Study.objects.create( + study_type=StudyType.get_ember_frame_player(), + max_responses=3, + ) + user = User.objects.create(is_active=True) + child = Child.objects.create(user=user, birthday=date.today()) + + # Add 3 valid responses + for _ in range(3): + r = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + self.assertTrue(study.has_reached_max_responses) + + def test_has_reached_max_responses_exceeded(self): + """Test that has_reached_max_responses returns True when limit is exceeded.""" + study = Study.objects.create( + study_type=StudyType.get_ember_frame_player(), + max_responses=2, + ) + user = User.objects.create(is_active=True) + child = Child.objects.create(user=user, birthday=date.today()) + + # Add 4 valid responses (exceeds limit of 2) + for _ in range(4): + r = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + is_preview=False, + ) + 
Response.objects.filter(pk=r.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + self.assertTrue(study.has_reached_max_responses) + class VideoModelTestCase(TestCase): def setUp(self): From 82ffd46dfb4b876172a81b4475ab4d45575c65cc Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Thu, 5 Feb 2026 16:28:58 -0800 Subject: [PATCH 06/45] try to reduce code duplication to pass sonar gate --- studies/tests.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/studies/tests.py b/studies/tests.py index b1bd7665a..ab15a1385 100644 --- a/studies/tests.py +++ b/studies/tests.py @@ -870,7 +870,7 @@ def test_valid_response_count_internal_study(self): # Note: Response.save() auto-sets eligibility, so we update this value after creation # Valid response: completed, not preview, eligible - r1 = Response.objects.create( + valid1 = Response.objects.create( study=study, child=child, study_type=study.study_type, @@ -878,12 +878,10 @@ def test_valid_response_count_internal_study(self): completed=True, is_preview=False, ) - Response.objects.filter(pk=r1.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + Response.objects.filter(pk=valid1.pk).update(eligibility=[]) # Valid response: completed, not preview, empty eligibility - r2 = Response.objects.create( + valid2 = Response.objects.create( study=study, child=child, study_type=study.study_type, @@ -891,10 +889,12 @@ def test_valid_response_count_internal_study(self): completed=True, is_preview=False, ) - Response.objects.filter(pk=r2.pk).update(eligibility=[]) + Response.objects.filter(pk=valid2.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) # Invalid: preview response - r3 = Response.objects.create( + invalid3 = Response.objects.create( study=study, child=child, study_type=study.study_type, @@ -902,12 +902,12 @@ def test_valid_response_count_internal_study(self): completed=True, is_preview=True, ) - Response.objects.filter(pk=r3.pk).update( + 
Response.objects.filter(pk=invalid3.pk).update( eligibility=[ResponseEligibility.ELIGIBLE] ) # Invalid: not completed - r4 = Response.objects.create( + invalid4 = Response.objects.create( study=study, child=child, study_type=study.study_type, @@ -915,12 +915,12 @@ def test_valid_response_count_internal_study(self): completed=False, is_preview=False, ) - Response.objects.filter(pk=r4.pk).update( + Response.objects.filter(pk=invalid4.pk).update( eligibility=[ResponseEligibility.ELIGIBLE] ) # Invalid: ineligible - r5 = Response.objects.create( + invalid5 = Response.objects.create( study=study, child=child, study_type=study.study_type, @@ -928,7 +928,7 @@ def test_valid_response_count_internal_study(self): completed=True, is_preview=False, ) - Response.objects.filter(pk=r5.pk).update( + Response.objects.filter(pk=invalid5.pk).update( eligibility=[ResponseEligibility.INELIGIBLE_OLD] ) @@ -1000,7 +1000,7 @@ def test_valid_response_count_external_study(self): is_preview=False, ) Response.objects.filter(pk=r5.pk).update( - eligibility=[ResponseEligibility.INELIGIBLE_OLD] + eligibility=[ResponseEligibility.INELIGIBLE_CRITERIA] ) # 3 valid responses: r1, r2, r3 (completed field ignored for external) From 100e6ed44d461259913c9bbfa800acf5e47492e0 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Thu, 5 Feb 2026 16:40:49 -0800 Subject: [PATCH 07/45] add a checkbox for set response limit, which enables/disables the max_responses input box, and clear max_responses value if checkbox is unchecked --- studies/forms.py | 13 ++++++++++++- web/static/js/study-fields.js | 31 +++++++++++++++++++++++++++++-- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/studies/forms.py b/studies/forms.py index db612bf49..996270197 100644 --- a/studies/forms.py +++ b/studies/forms.py @@ -168,9 +168,20 @@ def participated_choices(): must_not_have_participated = forms.MultipleChoiceField( choices=participated_choices, required=False ) + set_response_limit = forms.BooleanField( + 
required=False, + label="Set a Response Limit", + help_text="Check this box to set a target number of valid responses for this study. The study will automatically pause when the number of valid responses reaches this limit.", + ) def clean(self): cleaned_data = super().clean() + + # Clear max_responses value if set_response_limit is not checked + # (when set_response_limit is not checked, the max_responses field is disabled, which means the None value will not be saved) + if not cleaned_data.get("set_response_limit"): + cleaned_data["max_responses"] = None + min_age_days = self.cleaned_data.get("min_age_days") min_age_months = self.cleaned_data.get("min_age_months") min_age_years = self.cleaned_data.get("min_age_years") @@ -303,7 +314,7 @@ class Meta: "shared_preview": "Allow other Lookit researchers to preview your study and give feedback.", "study_type": "Choose the type of experiment you are creating - this will change the fields that appear on the Study Details page.", "priority": "This affects how studies are ordered at your lab's custom URL, not the main study page. If you leave all studies at the highest priority (99), then all of your lab's active/discoverable studies will be shown in a randomized order on your lab page. If you lower the priority of this study to 1, then it will appear last in the list on your lab page. You can find your lab's custom URL from the labs page. For more info, see the documentation on study prioritization.", - "max_responses": "Optional limit on the number of valid responses to collect for this study. When this response limit is reached, the study will be automatically paused. This value can be changed at any time. Leave blank for no limit.", + "max_responses": "This is an optional limit on the number of valid responses that should be collected before the study is automatically paused. This limit can be changed at any time, and you can edit the valid/invalid status for each response. 
For no response limit, leave this field blank or uncheck 'Set a Response Limit'.", } diff --git a/web/static/js/study-fields.js b/web/static/js/study-fields.js index e663adae1..709d880b8 100644 --- a/web/static/js/study-fields.js +++ b/web/static/js/study-fields.js @@ -106,10 +106,37 @@ $(document).ready(function () { mustNotHave.dispatchEvent(new Event('mousedown')); /* - Max Responses validation + Max Responses validation and checkbox toggle */ const maxResponses = document.querySelector('#id_max_responses'); - if (maxResponses) { + const setResponseLimit = document.querySelector('#id_set_response_limit'); + + if (maxResponses && setResponseLimit) { + // Function to enable/disable max_responses based on checkbox + function toggleMaxResponses() { + if (setResponseLimit.checked) { + maxResponses.disabled = false; + maxResponses.parentElement.classList.remove('text-muted'); + } else { + // The max responses value is cleared when the setResponseLimit box is unchecked, but this value won't actually be saved with the form data if the form is submitted in this state, because the max_responses field is disabled. This case is handled on the backend in the form's clean method, which checks if setResponseLimit is unchecked, and if so, sets max_responses to None. 
+ maxResponses.disabled = true; + maxResponses.value = ''; + maxResponses.parentElement.classList.add('text-muted'); + } + } + + // On page load, check the checkbox if max_responses has a value + if (maxResponses.value) { + setResponseLimit.checked = true; + } + + // Set initial state + toggleMaxResponses(); + + // Listen for checkbox changes + setResponseLimit.addEventListener('change', toggleMaxResponses); + + // Input validation for max_responses maxResponses.addEventListener('input', () => { // Remove non-numeric characters and leading zeros let value = maxResponses.value.replace(/[^0-9]/g, '').replace(/^0+/, ''); From c78edeb1695890e9591089c7982be1bf14af4da0 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Thu, 5 Feb 2026 17:04:27 -0800 Subject: [PATCH 08/45] move check for set_response_limit=False and setting max_responses to None: does not work in clean because django does not update a field that was not there in original POST, so it needs to be added in save method --- studies/forms.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/studies/forms.py b/studies/forms.py index 996270197..b592baab5 100644 --- a/studies/forms.py +++ b/studies/forms.py @@ -177,11 +177,6 @@ def participated_choices(): def clean(self): cleaned_data = super().clean() - # Clear max_responses value if set_response_limit is not checked - # (when set_response_limit is not checked, the max_responses field is diabled, which means the None value will not be saved) - if not cleaned_data.get("set_response_limit"): - cleaned_data["max_responses"] = None - min_age_days = self.cleaned_data.get("min_age_days") min_age_months = self.cleaned_data.get("min_age_months") min_age_years = self.cleaned_data.get("min_age_years") @@ -216,6 +211,16 @@ def clean_image(self): return cleaned_image + def save(self, commit=True): + instance = super().save(commit=False) + # Explicitly set max_responses to None if set_response_limit is unchecked + if not 
self.cleaned_data.get("set_response_limit"): + instance.max_responses = None + if commit: + instance.save() + self.save_m2m() + return instance + class Meta: model = Study fields = [ From 4aa3c1422ff7437dd04cb3cf9d3795f9c667f8db Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Fri, 6 Feb 2026 13:51:17 -0800 Subject: [PATCH 09/45] if study has a max responses value, show the current/max counts and a progress bar with other study details --- studies/templates/studies/study_detail.html | 23 +++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/studies/templates/studies/study_detail.html b/studies/templates/studies/study_detail.html index 1720f65ad..d5a6ef663 100644 --- a/studies/templates/studies/study_detail.html +++ b/studies/templates/studies/study_detail.html @@ -127,6 +127,29 @@ Your study link will show up here when you have built the experiment runner. {% endif %}

+ {% if study.max_responses %} +
+
+ Valid Responses + {{ study.valid_response_count }} / {{ study.max_responses }} +
+ {% widthratio study.valid_response_count study.max_responses 100 as percent_str %} + {% with percent=percent_str|add:"0" %} +
+
+
+
+ {% if percent >= 100 %} + Response limit reached + {% endif %} + {% endwith %} +
+ {% endif %}
Date: Fri, 6 Feb 2026 13:52:44 -0800 Subject: [PATCH 10/45] put the study max responses field into a checkbox (to make it clear that its optional), move to bottom of form after ppt criteria --- studies/templates/studies/_study_fields.html | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/studies/templates/studies/_study_fields.html b/studies/templates/studies/_study_fields.html index d204594d2..f9bc34eaf 100644 --- a/studies/templates/studies/_study_fields.html +++ b/studies/templates/studies/_study_fields.html @@ -5,7 +5,6 @@ {% bootstrap_field form.name label_class="form-label fw-bold" wrapper_class="mb-4" %} {% bootstrap_field form.lab label_class="form-label fw-bold" wrapper_class="mb-4" %} {% bootstrap_field form.priority label_class="form-label fw-bold" wrapper_class="mb-4" addon_before="" %} -{% bootstrap_field form.max_responses label_class="form-label fw-bold" wrapper_class="mb-4" %}
@@ -158,4 +157,12 @@ {% bootstrap_field form.must_not_have_participated label_class="form-label fw-bold" wrapper_class="mb-4" %} {% bootstrap_field form.criteria_expression label_class="form-label fw-bold" wrapper_class="mb-4" %}
+
+ {% bootstrap_field form.set_response_limit label_class="form-label fw-bold" wrapper_class="mb-2" %} +
+ {% bootstrap_label "Maximum Responses" label_class="form-label fw-bold" label_for="id_max_responses" %} + {% bootstrap_field form.max_responses show_label=False show_help=False field_class="w-25" placeholder="Enter a number" %} +
+
+
{% bootstrap_field form.study_type label_class="form-label fw-bold" wrapper_class="mb-4" %} From 440eeeef09ba19cdb246193813419a42f5e8ef0e Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Wed, 11 Feb 2026 13:51:11 -0800 Subject: [PATCH 11/45] edit help text for set response limit and max responses questions --- studies/forms.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/studies/forms.py b/studies/forms.py index b592baab5..6baf068a3 100644 --- a/studies/forms.py +++ b/studies/forms.py @@ -171,7 +171,7 @@ def participated_choices(): set_response_limit = forms.BooleanField( required=False, label="Set a Response Limit", - help_text="Check this box to set a target number of valid responses for this study. The study will automatically pause when the number of valid responses reaches this limit.", + help_text="Check this box to set a target number of valid responses for this study.", ) def clean(self): @@ -319,7 +319,7 @@ class Meta: "shared_preview": "Allow other Lookit researchers to preview your study and give feedback.", "study_type": "Choose the type of experiment you are creating - this will change the fields that appear on the Study Details page.", "priority": "This affects how studies are ordered at your lab's custom URL, not the main study page. If you leave all studies at the highest priority (99), then all of your lab's active/discoverable studies will be shown in a randomized order on your lab page. If you lower the priority of this study to 1, then it will appear last in the list on your lab page. You can find your lab's custom URL from the labs page. For more info, see the documentation on study prioritization.", - "max_responses": "This is an optional limit on the number of valid responses that should be collected before the study is automatically paused. This limit can be changed at any time, and you can edit the valid/invalid status for each response. 
For no response limit, leave this field blank or uncheck 'Set a Response Limit'.", + "max_responses": "The study will automatically pause when the number of valid responses reaches this limit. You can change this value at any time. Note that participant sessions running when the limit is reached are permitted to continue. See [the documentation] for more information on response limits, valid responses, and changing a response's valid/invalid status. For no response limit, leave this field blank.", } From b3c11d4b634d39f054c027973199ef6f9e2e0815 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Wed, 11 Feb 2026 13:52:14 -0800 Subject: [PATCH 12/45] show help text under max responses question --- studies/templates/studies/_study_fields.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/studies/templates/studies/_study_fields.html b/studies/templates/studies/_study_fields.html index f9bc34eaf..aaf319bf4 100644 --- a/studies/templates/studies/_study_fields.html +++ b/studies/templates/studies/_study_fields.html @@ -161,7 +161,7 @@ {% bootstrap_field form.set_response_limit label_class="form-label fw-bold" wrapper_class="mb-2" %}
{% bootstrap_label "Maximum Responses" label_class="form-label fw-bold" label_for="id_max_responses" %} - {% bootstrap_field form.max_responses show_label=False show_help=False field_class="w-25" placeholder="Enter a number" %} + {% bootstrap_field form.max_responses show_label=False show_help=True field_class="w-25" placeholder="Enter a number" %}

From 7b9481db0323239561d2d118312f69bc44cb7647 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Wed, 11 Feb 2026 13:54:36 -0800 Subject: [PATCH 13/45] move max responses placeholder text into study form model --- studies/forms.py | 4 +++- studies/templates/studies/_study_fields.html | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/studies/forms.py b/studies/forms.py index 6baf068a3..1b4419224 100644 --- a/studies/forms.py +++ b/studies/forms.py @@ -295,7 +295,9 @@ class Meta: "priority": forms.TextInput( attrs={"type": "range", "min": "1", "max": "99"} ), - "max_responses": forms.NumberInput(attrs={"min": "1"}), + "max_responses": forms.NumberInput( + attrs={"min": "1", "placeholder": "Enter a number"} + ), } help_texts = { diff --git a/studies/templates/studies/_study_fields.html b/studies/templates/studies/_study_fields.html index aaf319bf4..d7c5406ce 100644 --- a/studies/templates/studies/_study_fields.html +++ b/studies/templates/studies/_study_fields.html @@ -161,7 +161,7 @@ {% bootstrap_field form.set_response_limit label_class="form-label fw-bold" wrapper_class="mb-2" %}
{% bootstrap_label "Maximum Responses" label_class="form-label fw-bold" label_for="id_max_responses" %} - {% bootstrap_field form.max_responses show_label=False show_help=True field_class="w-25" placeholder="Enter a number" %} + {% bootstrap_field form.max_responses show_label=False show_help=True field_class="w-25" %}

From 7224cde1db9f64a75fe79edaa89ac026c201b600 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Wed, 11 Feb 2026 14:29:32 -0800 Subject: [PATCH 14/45] set response progress bar to red/striped if exceeded, add alerts for response limit reached/exceeded text, add msg that study has been paused --- studies/templates/studies/study_detail.html | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/studies/templates/studies/study_detail.html b/studies/templates/studies/study_detail.html index d5a6ef663..962da56f0 100644 --- a/studies/templates/studies/study_detail.html +++ b/studies/templates/studies/study_detail.html @@ -136,7 +136,7 @@ {% widthratio study.valid_response_count study.max_responses 100 as percent_str %} {% with percent=percent_str|add:"0" %}
-
- {% if percent >= 100 %} - Response limit reached + {% if percent == 100 %} +
+ Response limit reached. Your study has been paused. +
+ {% elif percent > 100 %} +
+ Response limit exceeded. Your study has been paused. +
{% endif %} {% endwith %}
From f51943cce106d952f377d84cf7c94cb63b2ea40e Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Wed, 11 Feb 2026 14:46:53 -0800 Subject: [PATCH 15/45] add check_and_pause_if_at_max_responses logic: pause if study has a max responses value, is active, and has reached limit; update pause trigger to make caller/user arg optional --- studies/models.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/studies/models.py b/studies/models.py index 26e0bd3c0..cc43e555f 100644 --- a/studies/models.py +++ b/studies/models.py @@ -593,10 +593,22 @@ def has_reached_max_responses(self) -> bool: def check_and_pause_if_at_max_responses(self): """Check if max responses reached and pause the study if so. - Only pauses if the study is currently active. + Only pauses if the study is currently active. Uses the state machine's + pause trigger to properly transition and run callbacks. """ - # TODO: Implement logic to pause study when max responses reached - pass + if self.max_responses is None: + return + + if self.state != "active": + return + + if not self.has_reached_max_responses: + return + + # Use the state machine's pause trigger to properly transition + # and run callbacks (like notify_administrators_of_pause) + self.pause() # No user since this is system-triggered + self.save() @property def consent_videos(self): @@ -825,12 +837,16 @@ def notify_administrators_of_activation(self, ev): ) def notify_administrators_of_pause(self, ev): + user = ev.kwargs.get("user") + caller_name = ( + user.get_short_name() if user else "System (max responses reached)" + ) context = { "lab_name": self.lab.name, "study_name": self.name, "study_id": self.pk, "study_uuid": str(self.uuid), - "researcher_name": ev.kwargs.get("user").get_short_name(), + "researcher_name": caller_name, "action": ev.transition.dest, } send_mail.delay( From fc078640b9d98259cba6ab827cbc90e8420ffbee Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Wed, 11 Feb 2026 14:49:17 -0800 
Subject: [PATCH 16/45] add tests for Study model check_and_pause_if_at_max_responses method --- studies/tests.py | 148 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) diff --git a/studies/tests.py b/studies/tests.py index ab15a1385..2f62cb7f7 100644 --- a/studies/tests.py +++ b/studies/tests.py @@ -1089,6 +1089,154 @@ def test_has_reached_max_responses_exceeded(self): self.assertTrue(study.has_reached_max_responses) + def test_check_and_pause_if_at_max_responses_no_limit_set(self): + """Study without max_responses set should not pause.""" + study = Study.objects.create( + name="No Limit Study", + lab=Lab.objects.create( + name="Test Lab No Limit", + institution="Test", + contact_email="test@test.com", + ), + study_type=StudyType.get_ember_frame_player(), + max_responses=None, + ) + study.state = "active" + study.save() + + study.check_and_pause_if_at_max_responses() + study.refresh_from_db() + + self.assertEqual(study.state, "active") + + def test_check_and_pause_if_at_max_responses_not_active(self): + """Study not in active state should not pause.""" + study = Study.objects.create( + name="Not Active Study", + lab=Lab.objects.create( + name="Test Lab Not Active", + institution="Test", + contact_email="test@test.com", + ), + study_type=StudyType.get_ember_frame_player(), + max_responses=1, + ) + # Study is in "created" state by default + self.assertEqual(study.state, "created") + + study.check_and_pause_if_at_max_responses() + study.refresh_from_db() + + self.assertEqual(study.state, "created") + + def test_check_and_pause_if_at_max_responses_not_reached(self): + """Active study that hasn't reached max_responses should not pause.""" + study = Study.objects.create( + name="Under Limit Study", + lab=Lab.objects.create( + name="Test Lab Under Limit", + institution="Test", + contact_email="test@test.com", + ), + study_type=StudyType.get_ember_frame_player(), + max_responses=5, + ) + study.state = "active" + study.save() + user = 
User.objects.create(is_active=True) + child = Child.objects.create(user=user, birthday=date.today()) + + # Add only 2 responses (under limit of 5) + for _ in range(2): + r = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + study.check_and_pause_if_at_max_responses() + study.refresh_from_db() + + self.assertEqual(study.state, "active") + + def test_check_and_pause_if_at_max_responses_limit_reached(self): + """Active study that has reached max_responses should pause.""" + study = Study.objects.create( + name="At Limit Study", + lab=Lab.objects.create( + name="Test Lab At Limit", + institution="Test", + contact_email="test@test.com", + ), + study_type=StudyType.get_ember_frame_player(), + max_responses=2, + ) + study.state = "active" + study.save() + user = User.objects.create(is_active=True) + child = Child.objects.create(user=user, birthday=date.today()) + + # Add exactly 2 responses (at limit of 2) + for _ in range(2): + r = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + study.check_and_pause_if_at_max_responses() + study.refresh_from_db() + + self.assertEqual(study.state, "paused") + + def test_check_and_pause_if_at_max_responses_limit_exceeded(self): + """Active study that has exceeded max_responses should pause.""" + study = Study.objects.create( + name="Over Limit Study", + lab=Lab.objects.create( + name="Test Lab Over Limit", + institution="Test", + contact_email="test@test.com", + ), + study_type=StudyType.get_ember_frame_player(), + max_responses=2, + ) + study.state = "active" + study.save() + user = 
User.objects.create(is_active=True) + child = Child.objects.create(user=user, birthday=date.today()) + + # Add 4 responses (exceeds limit of 2) + for _ in range(4): + r = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + study.check_and_pause_if_at_max_responses() + study.refresh_from_db() + + self.assertEqual(study.state, "paused") + class VideoModelTestCase(TestCase): def setUp(self): From d82a4a6e9e14dafd108c10f6b215939fba099b3a Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Wed, 11 Feb 2026 16:45:36 -0800 Subject: [PATCH 17/45] refactor tests to pass sonar code duplication checks --- studies/tests.py | 351 +++++++++++++---------------------------------- 1 file changed, 93 insertions(+), 258 deletions(-) diff --git a/studies/tests.py b/studies/tests.py index 2f62cb7f7..0d8410065 100644 --- a/studies/tests.py +++ b/studies/tests.py @@ -842,6 +842,53 @@ def test_get_jspsych(self): class StudyModelTestCase(TestCase): + def _create_response( + self, study, child, user, completed=True, is_preview=False, eligibility=None + ): + """Create a single response, optionally overriding eligibility after creation. + + Note: Response.save() auto-sets eligibility, so we use .update() to override it. + Pass eligibility=None to skip the override (keep auto-set value). 
+ """ + r = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=completed, + is_preview=is_preview, + ) + if eligibility is not None: + Response.objects.filter(pk=r.pk).update(eligibility=eligibility) + return r + + def _create_eligible_responses(self, study, count): + """Create a user, child, and the given number of eligible responses for a study.""" + user = User.objects.create(is_active=True) + child = Child.objects.create(user=user, birthday=date.today()) + for _ in range(count): + self._create_response( + study, child, user, eligibility=[ResponseEligibility.ELIGIBLE] + ) + return user, child + + def _create_study_with_lab(self, name, max_responses, state=None): + """Create a study with a lab, optionally setting its state.""" + study = Study.objects.create( + name=name, + lab=Lab.objects.create( + name=f"Test Lab {name}", + institution="Test", + contact_email="test@test.com", + ), + study_type=StudyType.get_ember_frame_player(), + max_responses=max_responses, + ) + if state: + study.state = state + study.save() + return study + def test_responses_for_researcher_external_studies(self): study = Study.objects.create( study_type=StudyType.get_external(), @@ -867,69 +914,31 @@ def test_valid_response_count_internal_study(self): user = User.objects.create(is_active=True) child = Child.objects.create(user=user, birthday=date.today()) - # Note: Response.save() auto-sets eligibility, so we update this value after creation - - # Valid response: completed, not preview, eligible - valid1 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - is_preview=False, - ) - Response.objects.filter(pk=valid1.pk).update(eligibility=[]) - # Valid response: completed, not preview, empty eligibility - valid2 = Response.objects.create( - study=study, - child=child, - 
study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - is_preview=False, - ) - Response.objects.filter(pk=valid2.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] + self._create_response(study, child, user, eligibility=[]) + # Valid response: completed, not preview, eligible + self._create_response( + study, child, user, eligibility=[ResponseEligibility.ELIGIBLE] ) - # Invalid: preview response - invalid3 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, + self._create_response( + study, + child, + user, is_preview=True, + eligibility=[ResponseEligibility.ELIGIBLE], ) - Response.objects.filter(pk=invalid3.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) - # Invalid: not completed - invalid4 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, + self._create_response( + study, + child, + user, completed=False, - is_preview=False, + eligibility=[ResponseEligibility.ELIGIBLE], ) - Response.objects.filter(pk=invalid4.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) - # Invalid: ineligible - invalid5 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - is_preview=False, - ) - Response.objects.filter(pk=invalid5.pk).update( - eligibility=[ResponseEligibility.INELIGIBLE_OLD] + self._create_response( + study, child, user, eligibility=[ResponseEligibility.INELIGIBLE_OLD] ) self.assertEqual(study.valid_response_count, 2) @@ -941,66 +950,30 @@ def test_valid_response_count_external_study(self): child = Child.objects.create(user=user, birthday=date.today()) # Valid: not preview, eligible, completed - r1 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - 
demographic_snapshot=user.latest_demographics, - completed=True, - is_preview=False, - ) - Response.objects.filter(pk=r1.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] + self._create_response( + study, child, user, eligibility=[ResponseEligibility.ELIGIBLE] ) - # Valid: not preview, eligible, NOT completed (should still count for external) - r2 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, + self._create_response( + study, + child, + user, completed=False, - is_preview=False, - ) - Response.objects.filter(pk=r2.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] + eligibility=[ResponseEligibility.ELIGIBLE], ) - # Valid: not preview, empty eligibility, NOT completed - r3 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=False, - is_preview=False, - ) - Response.objects.filter(pk=r3.pk).update(eligibility=[]) - + self._create_response(study, child, user, completed=False, eligibility=[]) # Invalid: preview response (should not count) - r4 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, + self._create_response( + study, + child, + user, is_preview=True, + eligibility=[ResponseEligibility.ELIGIBLE], ) - Response.objects.filter(pk=r4.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) - # Invalid: ineligible (should not count) - r5 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - is_preview=False, - ) - Response.objects.filter(pk=r5.pk).update( - eligibility=[ResponseEligibility.INELIGIBLE_CRITERIA] + self._create_response( + study, child, user, eligibility=[ResponseEligibility.INELIGIBLE_CRITERIA] ) # 3 valid responses: r1, r2, r3 (completed field 
ignored for external) @@ -1020,23 +993,7 @@ def test_has_reached_max_responses_not_reached(self): study_type=StudyType.get_ember_frame_player(), max_responses=5, ) - user = User.objects.create(is_active=True) - child = Child.objects.create(user=user, birthday=date.today()) - - # Add 2 valid responses - for _ in range(2): - r = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - is_preview=False, - ) - Response.objects.filter(pk=r.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) - + self._create_eligible_responses(study, count=2) self.assertFalse(study.has_reached_max_responses) def test_has_reached_max_responses_reached(self): @@ -1045,23 +1002,7 @@ def test_has_reached_max_responses_reached(self): study_type=StudyType.get_ember_frame_player(), max_responses=3, ) - user = User.objects.create(is_active=True) - child = Child.objects.create(user=user, birthday=date.today()) - - # Add 3 valid responses - for _ in range(3): - r = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - is_preview=False, - ) - Response.objects.filter(pk=r.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) - + self._create_eligible_responses(study, count=3) self.assertTrue(study.has_reached_max_responses) def test_has_reached_max_responses_exceeded(self): @@ -1070,39 +1011,14 @@ def test_has_reached_max_responses_exceeded(self): study_type=StudyType.get_ember_frame_player(), max_responses=2, ) - user = User.objects.create(is_active=True) - child = Child.objects.create(user=user, birthday=date.today()) - - # Add 4 valid responses (exceeds limit of 2) - for _ in range(4): - r = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - is_preview=False, - ) - 
Response.objects.filter(pk=r.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) - + self._create_eligible_responses(study, count=4) self.assertTrue(study.has_reached_max_responses) def test_check_and_pause_if_at_max_responses_no_limit_set(self): """Study without max_responses set should not pause.""" - study = Study.objects.create( - name="No Limit Study", - lab=Lab.objects.create( - name="Test Lab No Limit", - institution="Test", - contact_email="test@test.com", - ), - study_type=StudyType.get_ember_frame_player(), - max_responses=None, + study = self._create_study_with_lab( + "No Limit Study", max_responses=None, state="active" ) - study.state = "active" - study.save() study.check_and_pause_if_at_max_responses() study.refresh_from_db() @@ -1111,16 +1027,7 @@ def test_check_and_pause_if_at_max_responses_no_limit_set(self): def test_check_and_pause_if_at_max_responses_not_active(self): """Study not in active state should not pause.""" - study = Study.objects.create( - name="Not Active Study", - lab=Lab.objects.create( - name="Test Lab Not Active", - institution="Test", - contact_email="test@test.com", - ), - study_type=StudyType.get_ember_frame_player(), - max_responses=1, - ) + study = self._create_study_with_lab("Not Active Study", max_responses=1) # Study is in "created" state by default self.assertEqual(study.state, "created") @@ -1131,34 +1038,10 @@ def test_check_and_pause_if_at_max_responses_not_active(self): def test_check_and_pause_if_at_max_responses_not_reached(self): """Active study that hasn't reached max_responses should not pause.""" - study = Study.objects.create( - name="Under Limit Study", - lab=Lab.objects.create( - name="Test Lab Under Limit", - institution="Test", - contact_email="test@test.com", - ), - study_type=StudyType.get_ember_frame_player(), - max_responses=5, + study = self._create_study_with_lab( + "Under Limit Study", max_responses=5, state="active" ) - study.state = "active" - study.save() - user = 
User.objects.create(is_active=True) - child = Child.objects.create(user=user, birthday=date.today()) - - # Add only 2 responses (under limit of 5) - for _ in range(2): - r = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - is_preview=False, - ) - Response.objects.filter(pk=r.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + self._create_eligible_responses(study, count=2) study.check_and_pause_if_at_max_responses() study.refresh_from_db() @@ -1167,34 +1050,10 @@ def test_check_and_pause_if_at_max_responses_not_reached(self): def test_check_and_pause_if_at_max_responses_limit_reached(self): """Active study that has reached max_responses should pause.""" - study = Study.objects.create( - name="At Limit Study", - lab=Lab.objects.create( - name="Test Lab At Limit", - institution="Test", - contact_email="test@test.com", - ), - study_type=StudyType.get_ember_frame_player(), - max_responses=2, + study = self._create_study_with_lab( + "At Limit Study", max_responses=2, state="active" ) - study.state = "active" - study.save() - user = User.objects.create(is_active=True) - child = Child.objects.create(user=user, birthday=date.today()) - - # Add exactly 2 responses (at limit of 2) - for _ in range(2): - r = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - is_preview=False, - ) - Response.objects.filter(pk=r.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + self._create_eligible_responses(study, count=2) study.check_and_pause_if_at_max_responses() study.refresh_from_db() @@ -1203,34 +1062,10 @@ def test_check_and_pause_if_at_max_responses_limit_reached(self): def test_check_and_pause_if_at_max_responses_limit_exceeded(self): """Active study that has exceeded max_responses should pause.""" - study = Study.objects.create( - name="Over Limit 
Study", - lab=Lab.objects.create( - name="Test Lab Over Limit", - institution="Test", - contact_email="test@test.com", - ), - study_type=StudyType.get_ember_frame_player(), - max_responses=2, + study = self._create_study_with_lab( + "Over Limit Study", max_responses=2, state="active" ) - study.state = "active" - study.save() - user = User.objects.create(is_active=True) - child = Child.objects.create(user=user, birthday=date.today()) - - # Add 4 responses (exceeds limit of 2) - for _ in range(4): - r = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - is_preview=False, - ) - Response.objects.filter(pk=r.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + self._create_eligible_responses(study, count=4) study.check_and_pause_if_at_max_responses() study.refresh_from_db() From 47d8df9c3dc0644df16fed2ce2485e54af291730 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Wed, 11 Feb 2026 16:46:41 -0800 Subject: [PATCH 18/45] override celery since these tests involve state changes that send emails --- studies/tests.py | 1 + 1 file changed, 1 insertion(+) diff --git a/studies/tests.py b/studies/tests.py index 0d8410065..5d3d2d6df 100644 --- a/studies/tests.py +++ b/studies/tests.py @@ -841,6 +841,7 @@ def test_get_jspsych(self): self.assertFalse(StudyType.get_jspsych().is_external) +@override_settings(CELERY_TASK_ALWAYS_EAGER=True) class StudyModelTestCase(TestCase): def _create_response( self, study, child, user, completed=True, is_preview=False, eligibility=None From 8c5ab618b2964662bfcfccf7cda23389df65f21a Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Wed, 11 Feb 2026 16:48:58 -0800 Subject: [PATCH 19/45] fix issue with tests failing because study image field was not up-to-date or treated as changed because of None vs empty string comparison --- studies/models.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/studies/models.py 
b/studies/models.py index cc43e555f..d0663015d 100644 --- a/studies/models.py +++ b/studies/models.py @@ -605,6 +605,10 @@ def check_and_pause_if_at_max_responses(self): if not self.has_reached_max_responses: return + # Refresh from DB to ensure the in-memory study is current before + # the pause transition triggers a save (via _finalize_state_change). + self.refresh_from_db() + # Use the state machine's pause trigger to properly transition # and run callbacks (like notify_administrators_of_pause) self.pause() # No user since this is system-triggered @@ -1014,6 +1018,12 @@ def check_modification_of_approved_study( ): continue # Skip, since the actual JSON content is the same - only exact_text changing if new != current: + # For file fields (e.g. image), None and "" are equivalent empty + # values that can differ between in-memory defaults and DB-loaded + # values. Treat them as unchanged. + if hasattr(current, "name") and hasattr(new, "name"): + if (current.name or "") == (new.name or ""): + continue important_fields_changed = True break From 08243dadcd21aa1d65816fea644003ded204b5cd Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Wed, 11 Feb 2026 16:50:22 -0800 Subject: [PATCH 20/45] remove unnecessary study.save after study.pause --- studies/models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/studies/models.py b/studies/models.py index d0663015d..18e23cb54 100644 --- a/studies/models.py +++ b/studies/models.py @@ -611,8 +611,9 @@ def check_and_pause_if_at_max_responses(self): # Use the state machine's pause trigger to properly transition # and run callbacks (like notify_administrators_of_pause) + # Note: no explicit save() needed here because the state machine's + # _finalize_state_change callback already saves the model. 
self.pause() # No user since this is system-triggered - self.save() @property def consent_videos(self): From c054ba14c275b4a4b49f32d0926b87a57ae23ef6 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Tue, 17 Feb 2026 14:06:20 -0800 Subject: [PATCH 21/45] add args to check_and_pause_if_at_max_responses for optionally sending email to researchers and displaying message to user --- studies/models.py | 44 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/studies/models.py b/studies/models.py index 18e23cb54..36b3e22a3 100644 --- a/studies/models.py +++ b/studies/models.py @@ -8,6 +8,7 @@ import fleep from botocore.exceptions import ClientError from django.conf import settings +from django.contrib import messages from django.contrib.auth.models import Group, Permission from django.contrib.postgres.fields import ArrayField from django.core.validators import MaxValueValidator, MinValueValidator @@ -590,11 +591,18 @@ def has_reached_max_responses(self) -> bool: return False return self.valid_response_count >= self.max_responses - def check_and_pause_if_at_max_responses(self): + def check_and_pause_if_at_max_responses( + self, send_researcher_email=False, request=None + ): """Check if max responses reached and pause the study if so. Only pauses if the study is currently active. Uses the state machine's pause trigger to properly transition and run callbacks. + + Args: + send_researcher_email: If True, send notification email to researchers + with CHANGE_STUDY_STATUS permission. + request: If provided, display a Django messages banner to the user. """ if self.max_responses is None: return @@ -610,11 +618,21 @@ def check_and_pause_if_at_max_responses(self): self.refresh_from_db() # Use the state machine's pause trigger to properly transition - # and run callbacks (like notify_administrators_of_pause) + # and run callbacks (like notify_administrators_of_pause). 
# Note: no explicit save() needed here because the state machine's # _finalize_state_change callback already saves the model. self.pause() # No user since this is system-triggered + if send_researcher_email: + self._notify_researchers_of_max_responses_pause() + + if request: + messages.warning( + request, + f'Study "{self.name}" has been automatically paused because it ' + f"reached the response limit ({self.valid_response_count}/{self.max_responses}).", + ) + @property def consent_videos(self): return self.videos.filter(is_consent_footage=True) @@ -866,6 +884,28 @@ def notify_administrators_of_pause(self, ev): **context, ) + def _notify_researchers_of_max_responses_pause(self): + """Send email to researchers notifying them the study was auto-paused + because it reached the maximum number of responses.""" + context = { + "study_name": self.name, + "study_id": self.pk, + "study_uuid": str(self.uuid), + "max_responses": self.max_responses, + "valid_response_count": self.valid_response_count, + } + send_mail.delay( + "notify_researchers_of_max_responses_pause", + f"{self.name}: Study paused - response limit reached", + settings.EMAIL_FROM_ADDRESS, + bcc=list( + self.users_with_study_perms( + StudyPermission.CHANGE_STUDY_STATUS + ).values_list("username", flat=True) + ), + **context, + ) + def notify_administrators_of_deactivation(self, ev): context = { "lab_name": self.lab.name, From 3eb1d464a1e42bdbd60f25c5eb0fb419de6993c4 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Tue, 17 Feb 2026 14:07:16 -0800 Subject: [PATCH 22/45] add new email template for notifying researchers of automatic study pause due to reaching max responses --- ...tify_researchers_of_max_responses_pause.html | 17 +++++++++++++++++ ...otify_researchers_of_max_responses_pause.txt | 11 +++++++++++ 2 files changed, 28 insertions(+) create mode 100644 studies/templates/emails/notify_researchers_of_max_responses_pause.html create mode 100644 
studies/templates/emails/notify_researchers_of_max_responses_pause.txt diff --git a/studies/templates/emails/notify_researchers_of_max_responses_pause.html b/studies/templates/emails/notify_researchers_of_max_responses_pause.html new file mode 100644 index 000000000..3df89229f --- /dev/null +++ b/studies/templates/emails/notify_researchers_of_max_responses_pause.html @@ -0,0 +1,17 @@ +{% load web_extras %} +

Dear Study Researchers,

+

+ Your study {{ study_name }} has been automatically paused + because it reached the maximum number of valid responses + ({{ valid_response_count }}/{{ max_responses }}). +

+

+ If you would like to collect more data, you can increase the study's response limit and/or edit the valid/invalid status of individual responses, and then re-start it (your study will NOT be re-started automatically). +
+ Your study can be found here. +

+

+ Best, +
+ Lookit Bot +

diff --git a/studies/templates/emails/notify_researchers_of_max_responses_pause.txt b/studies/templates/emails/notify_researchers_of_max_responses_pause.txt new file mode 100644 index 000000000..3ff720281 --- /dev/null +++ b/studies/templates/emails/notify_researchers_of_max_responses_pause.txt @@ -0,0 +1,11 @@ +{% load web_extras %} +Dear Study Researchers, + +Your study {{ study_name }} has been automatically paused because it reached the maximum number of valid responses ({{ valid_response_count }}/{{ max_responses }}). + +If you would like to collect more data, you can increase the study's response limit and/or edit the valid/invalid status of individual responses, and then re-start it (your study will NOT be re-started automatically). + +Your study can be found here: {% absolute_url 'exp:study' study_id %} + +Best, +Lookit Bot From 8cbbcbece26ac551b2dfd962b065396ce7194c27 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Tue, 17 Feb 2026 15:52:05 -0800 Subject: [PATCH 23/45] update post resp save check_and_pause method to also send researchers email if study has been automatically paused --- studies/models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/studies/models.py b/studies/models.py index 36b3e22a3..5f9456ea6 100644 --- a/studies/models.py +++ b/studies/models.py @@ -1411,9 +1411,9 @@ def take_action_on_exp_data(sender, instance, created, **kwargs): else: dispatch_frame_action(response) - # If this response is complete, then check if this study has reached max responses and pause if needed + # If this response is complete, then check if this study has reached max responses and, if so, pause the study and email researchers. 
if response.completed: - response.study.check_and_pause_if_at_max_responses() + response.study.check_and_pause_if_at_max_responses(send_researcher_email=True) class FeedbackApiManager(models.Manager): From ba686a070a9cdbb65f76a5ddf590237b127fb859 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Tue, 17 Feb 2026 15:58:48 -0800 Subject: [PATCH 24/45] add tests for updates to check_and_pause_if_at_max_responses: args for sending researcher email and displaying banner message --- studies/tests.py | 77 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 76 insertions(+), 1 deletion(-) diff --git a/studies/tests.py b/studies/tests.py index 5d3d2d6df..5c4089a87 100644 --- a/studies/tests.py +++ b/studies/tests.py @@ -1,7 +1,7 @@ import json import re from datetime import date, datetime, timedelta, timezone -from unittest.mock import patch +from unittest.mock import MagicMock, patch from botocore.exceptions import ClientError, ParamValidationError from django.conf import settings @@ -1073,6 +1073,81 @@ def test_check_and_pause_if_at_max_responses_limit_exceeded(self): self.assertEqual(study.state, "paused") + @patch("studies.models.send_mail") + def test_check_and_pause_sends_researcher_email_when_requested( + self, mock_send_mail + ): + """Researcher notification email is sent when send_researcher_email=True.""" + study = self._create_study_with_lab( + "Email Test", max_responses=2, state="active" + ) + self._create_eligible_responses(study, count=2) + + study.check_and_pause_if_at_max_responses(send_researcher_email=True) + + researcher_calls = [ + c + for c in mock_send_mail.delay.call_args_list + if c[0][0] == "notify_researchers_of_max_responses_pause" + ] + self.assertEqual(len(researcher_calls), 1) + + @patch("studies.models.send_mail") + def test_check_and_pause_no_researcher_email_by_default(self, mock_send_mail): + """Researcher notification email is not sent by default.""" + study = self._create_study_with_lab( + "No Email Test", max_responses=2, 
state="active" + ) + self._create_eligible_responses(study, count=2) + + study.check_and_pause_if_at_max_responses() + + researcher_calls = [ + c + for c in mock_send_mail.delay.call_args_list + if c[0][0] == "notify_researchers_of_max_responses_pause" + ] + self.assertEqual(len(researcher_calls), 0) + + @patch("studies.models.send_mail") + @patch("studies.models.messages") + def test_check_and_pause_shows_banner_when_request_provided( + self, mock_messages, mock_send_mail + ): + """A Django messages warning is added when request is provided.""" + study = self._create_study_with_lab( + "Banner Test", max_responses=2, state="active" + ) + self._create_eligible_responses(study, count=2) + mock_request = MagicMock() + + study.check_and_pause_if_at_max_responses(request=mock_request) + + mock_messages.warning.assert_called_once() + call_args = mock_messages.warning.call_args + self.assertEqual(call_args[0][0], mock_request) + self.assertIn("automatically paused", call_args[0][1]) + + researcher_calls = [ + c + for c in mock_send_mail.delay.call_args_list + if c[0][0] == "notify_researchers_of_max_responses_pause" + ] + self.assertEqual(len(researcher_calls), 0) + + @patch("studies.models.send_mail") + @patch("studies.models.messages") + def test_check_and_pause_no_banner_by_default(self, mock_messages, mock_send_mail): + """No Django message is added when request is not provided.""" + study = self._create_study_with_lab( + "No Banner Test", max_responses=2, state="active" + ) + self._create_eligible_responses(study, count=2) + + study.check_and_pause_if_at_max_responses() + + mock_messages.warning.assert_not_called() + class VideoModelTestCase(TestCase): def setUp(self): From a24a3af99bd09e259683bca61c303ca5c0c3d055 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Wed, 18 Feb 2026 13:39:46 -0800 Subject: [PATCH 25/45] move pause/email/banner message into if study state is active block, otherwise show a different banner message about reaching limit (no email) --- 
studies/models.py | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/studies/models.py b/studies/models.py
index 5f9456ea6..8189628f8 100644
--- a/studies/models.py
+++ b/studies/models.py
@@ -598,6 +598,7 @@ def check_and_pause_if_at_max_responses(
         Only pauses if the study is currently active.
         Uses the state machine's pause trigger to properly transition and run
         callbacks.
+        If the study is not active, this method is used to optionally display a banner message, with no state transition.
 
         Args:
             send_researcher_email: If True, send notification email to researchers
@@ -607,9 +608,6 @@
         if self.max_responses is None:
             return
 
-        if self.state != "active":
-            return
-
         if not self.has_reached_max_responses:
             return
 
@@ -621,17 +619,25 @@
         # and run callbacks (like notify_administrators_of_pause).
         # Note: no explicit save() needed here because the state machine's
         # _finalize_state_change callback already saves the model.
-        self.pause()  # No user since this is system-triggered
+        if self.state == "active":
+            self.pause()  # No user since this is system-triggered
 
-        if send_researcher_email:
-            self._notify_researchers_of_max_responses_pause()
+            if send_researcher_email:
+                self._notify_researchers_of_max_responses_pause()
 
-        if request:
-            messages.warning(
-                request,
-                f'Study "{self.name}" has been automatically paused because it '
-                f"reached the response limit ({self.valid_response_count}/{self.max_responses}).",
-            )
+            if request:
+                messages.warning(
+                    request,
+                    f'Study "{self.name}" has been automatically paused because it '
+                    f"reached the response limit ({self.valid_response_count}/{self.max_responses}).",
+                )
+        else:
+            # Study is not active, so no state transition is needed. Just notify the researcher that they cannot start the study. 
+ if request: + messages.warning( + request, + f'Study "{self.name}" has reached the response limit ({self.valid_response_count}/{self.max_responses}).', + ) @property def consent_videos(self): From 6135bdcacb5709970efca2c505da88dd29fa35e7 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Wed, 18 Feb 2026 13:52:59 -0800 Subject: [PATCH 26/45] when a study edit form is submitted, check to see if the max_responses value has changed, then check if this has been exceeded and handle (pause, banner message) --- exp/views/study.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/exp/views/study.py b/exp/views/study.py index 16cb39e54..4137a486d 100644 --- a/exp/views/study.py +++ b/exp/views/study.py @@ -226,9 +226,18 @@ def form_valid(self, form: StudyEditForm): ) study.must_have_participated.set(form.cleaned_data["must_have_participated"]) + changed_fields = form.changed_data + messages.success(self.request, f"{study.name} study details saved.") - return super().form_valid(form) + # Save form first so the new max_responses value is persisted before + # check_and_pause_if_at_max_responses (which calls refresh_from_db). 
+ response = super().form_valid(form) + # Now check to see if the study has reached max responses with the new value + if "max_responses" in changed_fields: + study.check_and_pause_if_at_max_responses(request=self.request) + + return response def form_invalid(self, form: StudyEditForm): messages.error(self.request, form.errors) From b156081d359373573db24589136ac002a4be6a4f Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Wed, 18 Feb 2026 13:56:59 -0800 Subject: [PATCH 27/45] add tests for check/pause if at max responses in Study Update view --- exp/tests/test_study_views.py | 195 +++++++++++++++++++++++++++++++++- 1 file changed, 194 insertions(+), 1 deletion(-) diff --git a/exp/tests/test_study_views.py b/exp/tests/test_study_views.py index 4175b5dd3..ee2887b1b 100644 --- a/exp/tests/test_study_views.py +++ b/exp/tests/test_study_views.py @@ -31,7 +31,8 @@ StudyDetailView, StudyPreviewDetailView, ) -from studies.models import Lab, Study, StudyType +from studies.helpers import ResponseEligibility +from studies.models import Lab, Response, Study, StudyType from studies.permissions import LabPermission, StudyPermission @@ -1334,3 +1335,195 @@ def test_must_not_have_participated(self): # TODO: StudyPreviewProxyView # - add checks analogous to preview detail view # - check for correct redirect + + +@override_settings(CELERY_TASK_ALWAYS_EAGER=True) +@override_settings(CELERY_TASK_EAGER_PROPAGATES=True) +@patch("studies.helpers.send_mail") +class StudyUpdateMaxResponsesTestCase(TestCase): + """Tests for banner messages when max_responses is edited via StudyUpdateView.""" + + def setUp(self): + self.client = Force2FAClient() + self.user = G(User, is_active=True, is_researcher=True) + self.lab = G(Lab, name="Max Resp Lab", approved_to_test=True) + self.lab.researchers.add(self.user) + + self.study = G( + Study, + image=SimpleUploadedFile( + name="small.gif", + content=( + b"\x47\x49\x46\x38\x39\x61\x01\x00\x01\x00\x00\x00\x00\x21\xf9\x04" + 
b"\x01\x0a\x00\x01\x00\x2c\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02" + b"\x02\x4c\x01\x00\x3b" + ), + content_type="image/gif", + ), + study_type=StudyType.get_ember_frame_player(), + creator=self.user, + lab=self.lab, + name="Max Resp Study", + short_description="test", + preview_summary="test", + purpose="test", + criteria="test", + duration="test", + contact_info="test", + exit_url="https://mit.edu", + ) + self.study.admin_group.user_set.add(self.user) + assign_perm( + StudyPermission.WRITE_STUDY_DETAILS.prefixed_codename, + self.user, + self.study, + ) + self.client.force_login(self.user) + + def _form_data(self, include_image=True, **overrides): + """Build minimal valid form data for the StudyEditForm. + + Set include_image=False to avoid triggering the pre_save signal that + rejects approved/active studies when monitored fields change. + """ + data = { + "name": self.study.name, + "lab": self.study.lab_id, + "study_type": self.study.study_type_id, + "min_age_years": 0, + "min_age_months": 0, + "min_age_days": 0, + "max_age_years": 1, + "max_age_months": 0, + "max_age_days": 0, + "priority": 1, + "preview_summary": self.study.preview_summary, + "short_description": self.study.short_description, + "purpose": self.study.purpose, + "compensation_description": self.study.compensation_description, + "exit_url": self.study.exit_url, + "criteria": self.study.criteria, + "duration": self.study.duration, + "contact_info": self.study.contact_info, + } + if include_image: + data["image"] = SimpleUploadedFile( + name="test_image.jpg", + content=open("exp/tests/static/study_image.png", "rb").read(), + content_type="image/jpeg", + ) + data.update(overrides) + return data + + def _create_eligible_responses(self, count): + """Create eligible, completed, non-preview responses for the study.""" + participant = G(User, is_active=True) + child = G( + Child, + user=participant, + given_name="Test child", + birthday=datetime.date.today() - datetime.timedelta(days=30), + ) + for _ in 
range(count): + r = Response.objects.create( + study=self.study, + child=child, + study_type=self.study.study_type, + demographic_snapshot=participant.latest_demographics, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + def _get_warning_messages(self, response): + """Extract warning-level messages from a followed response.""" + from django.contrib.messages import constants + + return [m for m in response.context["messages"] if m.level == constants.WARNING] + + def test_banner_when_max_responses_reached_non_active_study(self, mock_send_mail): + """Warning banner shown when max_responses is set at/below response count on non-active study.""" + self.assertEqual(self.study.state, "created") + self._create_eligible_responses(3) + data = self._form_data(set_response_limit=True, max_responses=3) + response = self.client.post( + reverse("exp:study-edit", kwargs={"pk": self.study.id}), + data, + follow=True, + ) + warnings = self._get_warning_messages(response) + self.assertEqual(len(warnings), 1) + self.assertIn("reached the response limit", str(warnings[0])) + + # Study should NOT be paused (was not active) + self.study.refresh_from_db() + self.assertNotEqual(self.study.state, "paused") + + def test_study_paused_when_max_responses_reached_active_study(self, mock_send_mail): + """Active study is paused and warning shown when max_responses is set at response count.""" + self.assertEqual(self.study.state, "created") + self.study.state = "active" + self.study.save() + self._create_eligible_responses(3) + # include_image=False to avoid triggering the pre_save signal that + # rejects active studies when monitored fields (like image) change. 
+ data = self._form_data( + include_image=False, set_response_limit=True, max_responses=3 + ) + response = self.client.post( + reverse("exp:study-edit", kwargs={"pk": self.study.id}), + data, + follow=True, + ) + warnings = self._get_warning_messages(response) + self.assertEqual(len(warnings), 1) + self.assertIn("automatically paused", str(warnings[0])) + + self.study.refresh_from_db() + self.assertEqual(self.study.state, "paused") + + def test_no_banner_when_max_responses_not_reached(self, mock_send_mail): + """No warning when max_responses is set above the current response count.""" + self._create_eligible_responses(2) + data = self._form_data(set_response_limit=True, max_responses=10) + response = self.client.post( + reverse("exp:study-edit", kwargs={"pk": self.study.id}), + data, + follow=True, + ) + warnings = self._get_warning_messages(response) + self.assertEqual(len(warnings), 0) + self.assertNotEqual(self.study.state, "paused") + + def test_no_banner_when_max_responses_unchanged(self, mock_send_mail): + """No warning when max_responses is submitted but hasn't changed.""" + self.study.max_responses = 5 + self.study.save() + self._create_eligible_responses(5) + data = self._form_data(set_response_limit=True, max_responses=5) + response = self.client.post( + reverse("exp:study-edit", kwargs={"pk": self.study.id}), + data, + follow=True, + ) + warnings = self._get_warning_messages(response) + self.assertEqual(len(warnings), 0) + + def test_banner_when_max_responses_lowered_below_count(self, mock_send_mail): + """Warning shown when max_responses is lowered below existing response count.""" + self.assertEqual(self.study.state, "created") + self.study.max_responses = 10 + self.study.save() + self._create_eligible_responses(5) + data = self._form_data(set_response_limit=True, max_responses=3) + response = self.client.post( + reverse("exp:study-edit", kwargs={"pk": self.study.id}), + data, + follow=True, + ) + warnings = self._get_warning_messages(response) + 
self.assertEqual(len(warnings), 1) + self.assertIn("reached the response limit", str(warnings[0])) + self.assertEqual(self.study.state, "created") From e0908350f722423b0ef5bd904d0de6e8d7ce171b Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Wed, 18 Feb 2026 14:23:47 -0800 Subject: [PATCH 28/45] fix typo: jspysch -> jspsych --- studies/models.py | 2 +- studies/test_recording_method.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/studies/models.py b/studies/models.py index 8189628f8..b325b60ec 100644 --- a/studies/models.py +++ b/studies/models.py @@ -1325,7 +1325,7 @@ def birthdate_difference(self): @property def normalized_exp_data(self): - # Where study type is jspysch, convert experiment data to resemble EFP exp data. + # Where study type is jspsych, convert experiment data to resemble EFP exp data. if self.study_type.is_jspsych: return {key: value for key, value in zip(self.sequence, self.exp_data)} else: diff --git a/studies/test_recording_method.py b/studies/test_recording_method.py index 46e3cd2db..78e0af66c 100644 --- a/studies/test_recording_method.py +++ b/studies/test_recording_method.py @@ -88,7 +88,7 @@ def session(self): class RecordingMethodJsPsychTestCase(TestCase): @patch("boto3.client") - def test_jspysch(self, mock_client): + def test_jspsych(self, mock_client): make_boto_client(mock_client) _, study, child = get_user(StudyType.get_jspsych()) context = {"study": study, "view": TestView(child.uuid)} From 6c28deb6269d8ec52f79d6d9c8b4221e63822a18 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Wed, 18 Feb 2026 14:51:24 -0800 Subject: [PATCH 29/45] response post save: if response is for external study, check if study has reached max responses, and if so, pause and email researchers --- studies/models.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/studies/models.py b/studies/models.py index b325b60ec..e1eb0081c 100644 --- a/studies/models.py +++ b/studies/models.py @@ -1412,12 +1412,15 @@ def 
take_action_on_exp_data(sender, instance, created, **kwargs): """ response = instance # Aliasing because instance is hooked as a kwarg. - if created or not response.sequence: + if response.study.study_type.is_external: + # External studies: check if study has reached max responses and, if so, pause the study and email researchers. + response.study.check_and_pause_if_at_max_responses(send_researcher_email=True) + elif created or not response.sequence: return else: dispatch_frame_action(response) - # If this response is complete, then check if this study has reached max responses and, if so, pause the study and email researchers. + # Internal studies: if response is complete, check if this study has reached max responses and, if so, pause the study and email researchers. if response.completed: response.study.check_and_pause_if_at_max_responses(send_researcher_email=True) From fa7d1993252ccdf184df6e482e42227ed039624f Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Thu, 19 Feb 2026 12:55:00 -0800 Subject: [PATCH 30/45] add new before check for transitioning study to active state: check if at max responses --- studies/models.py | 14 +++++++++++++- studies/workflow.py | 2 +- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/studies/models.py b/studies/models.py index e1eb0081c..9415a3808 100644 --- a/studies/models.py +++ b/studies/models.py @@ -841,7 +841,19 @@ def check_if_built(self, ev): """ if self.needs_to_be_built: raise RuntimeError( - f'Cannot activate study - experiment runner for "{self.name}" ({self.id}) has not been built!' + f'Cannot activate the study "{self.name}" ({self.id}) because the experiment runner has not been built!' + ) + + def check_if_at_max_responses(self, ev): + """Check if study has reached its max responses value before activating/starting. 
+ + :param ev: The event object + :type ev: transitions.core.EventData + :raise: RuntimeError + """ + if self.has_reached_max_responses: + raise RuntimeError( + f'Cannot activate the study "{self.name}" ({self.id}) because it has reached its maximum number of responses.' ) def notify_administrators_of_activation(self, ev): diff --git a/studies/workflow.py b/studies/workflow.py index 339db59fb..475632d97 100644 --- a/studies/workflow.py +++ b/studies/workflow.py @@ -100,7 +100,7 @@ "trigger": "activate", "source": ["approved", "paused"], "dest": "active", - "before": ["check_if_built"], + "before": ["check_if_built", "check_if_at_max_responses"], "after": ["notify_administrators_of_activation"], }, { From fde052581ff046f2f0c4012922904583780ed846 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Thu, 19 Feb 2026 13:01:55 -0800 Subject: [PATCH 31/45] add tests for max responses check prior to activating study --- exp/tests/test_study_views.py | 165 ++++++++++++++++++++++++++++++++++ 1 file changed, 165 insertions(+) diff --git a/exp/tests/test_study_views.py b/exp/tests/test_study_views.py index ee2887b1b..e7db46348 100644 --- a/exp/tests/test_study_views.py +++ b/exp/tests/test_study_views.py @@ -673,6 +673,171 @@ def test_update_trigger_object_no_attr( mock_request.POST.keys.assert_not_called() +@override_settings(CELERY_TASK_ALWAYS_EAGER=True) +@override_settings(CELERY_TASK_EAGER_PROPAGATES=True) +@patch("studies.helpers.send_mail") +class ActivateStudyMaxResponsesTestCase(TestCase): + """Integration tests for the check_if_at_max_responses workflow guard. + + When a researcher tries to activate a study (from approved or paused state), + the transition should be blocked if the study has already reached its + max_responses limit. 
+ """ + + def setUp(self): + self.client = Force2FAClient() + self.user = G(User, is_active=True, is_researcher=True) + self.lab = G(Lab, name="Activation Test Lab", approved_to_test=True) + self.lab.researchers.add(self.user) + + self.study = G( + Study, + image=SimpleUploadedFile( + name="small.gif", + content=( + b"\x47\x49\x46\x38\x39\x61\x01\x00\x01\x00\x00\x00\x00\x21\xf9\x04" + b"\x01\x0a\x00\x01\x00\x2c\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02" + b"\x02\x4c\x01\x00\x3b" + ), + content_type="image/gif", + ), + study_type=StudyType.get_external(), + creator=self.user, + lab=self.lab, + name="Activation Test Study", + built=True, + ) + self.study.admin_group.user_set.add(self.user) + assign_perm( + StudyPermission.CHANGE_STUDY_STATUS.prefixed_codename, + self.user, + self.study, + ) + self.client.force_login(self.user) + + self.change_status_url = reverse( + "exp:change-study-status", kwargs={"pk": self.study.pk} + ) + + def _create_eligible_responses(self, count): + """Create eligible, non-preview responses for the study.""" + participant = G(User, is_active=True) + child = G( + Child, + user=participant, + given_name="Test child", + birthday=datetime.date.today() - datetime.timedelta(days=30), + ) + for _ in range(count): + r = Response.objects.create( + study=self.study, + child=child, + study_type=self.study.study_type, + demographic_snapshot=participant.latest_demographics, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + def _get_error_messages(self, response): + """Extract error-level messages from a followed response.""" + from django.contrib.messages import constants + + return [m for m in response.context["messages"] if m.level == constants.ERROR] + + def _get_success_messages(self, response): + """Extract success-level messages from a followed response.""" + from django.contrib.messages import constants + + return [m for m in response.context["messages"] if 
m.level == constants.SUCCESS] + + def test_activate_blocked_from_approved_when_at_max_responses(self, mock_send_mail): + """Activating an approved study fails when max_responses has been reached.""" + self.study.state = "approved" + self.study.max_responses = 3 + self.study.save() + self._create_eligible_responses(3) + + response = self.client.post( + self.change_status_url, {"trigger": "activate"}, follow=True + ) + + self.study.refresh_from_db() + self.assertNotEqual(self.study.state, "active") + errors = self._get_error_messages(response) + self.assertEqual(len(errors), 1) + self.assertIn("TRANSITION ERROR", str(errors[0])) + self.assertIn("maximum number of responses", str(errors[0])) + + def test_activate_blocked_from_paused_when_at_max_responses(self, mock_send_mail): + """Reactivating a paused study fails when max_responses has been reached.""" + self.study.state = "paused" + self.study.max_responses = 2 + self.study.save() + self._create_eligible_responses(3) + + response = self.client.post( + self.change_status_url, {"trigger": "activate"}, follow=True + ) + + self.study.refresh_from_db() + self.assertNotEqual(self.study.state, "active") + errors = self._get_error_messages(response) + self.assertEqual(len(errors), 1) + self.assertIn("TRANSITION ERROR", str(errors[0])) + self.assertIn("maximum number of responses", str(errors[0])) + + def test_activate_succeeds_when_below_max_responses(self, mock_send_mail): + """Activating an approved study succeeds when below the max_responses limit.""" + self.study.state = "approved" + self.study.max_responses = 10 + self.study.save() + self._create_eligible_responses(3) + + response = self.client.post( + self.change_status_url, {"trigger": "activate"}, follow=True + ) + + self.study.refresh_from_db() + self.assertEqual(self.study.state, "active") + errors = self._get_error_messages(response) + self.assertEqual(len(errors), 0) + + def test_activate_succeeds_when_no_max_responses_set(self, mock_send_mail): + """Activating 
an approved study succeeds when max_responses is not set.""" + self.study.state = "approved" + self.study.max_responses = None + self.study.save() + self._create_eligible_responses(5) + + response = self.client.post( + self.change_status_url, {"trigger": "activate"}, follow=True + ) + + self.study.refresh_from_db() + self.assertEqual(self.study.state, "active") + errors = self._get_error_messages(response) + self.assertEqual(len(errors), 0) + + def test_activate_succeeds_when_exactly_at_limit_minus_one(self, mock_send_mail): + """Activating succeeds when response count is one below max_responses.""" + self.study.state = "approved" + self.study.max_responses = 4 + self.study.save() + self._create_eligible_responses(3) + + response = self.client.post( + self.change_status_url, {"trigger": "activate"}, follow=True + ) + + self.study.refresh_from_db() + self.assertEqual(self.study.state, "active") + errors = self._get_error_messages(response) + self.assertEqual(len(errors), 0) + + class ManageResearcherPermissionsViewTestCase(TestCase): def test_model(self) -> None: manage_researcher_permissions_view = ManageResearcherPermissionsView() From ae90bd6de959ec635f6d91f5c02cf8131415f8df Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Thu, 19 Feb 2026 13:08:59 -0800 Subject: [PATCH 32/45] update max responses email for clarity --- .../emails/notify_researchers_of_max_responses_pause.html | 4 ++-- .../emails/notify_researchers_of_max_responses_pause.txt | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/studies/templates/emails/notify_researchers_of_max_responses_pause.html b/studies/templates/emails/notify_researchers_of_max_responses_pause.html index 3df89229f..dab7f9c56 100644 --- a/studies/templates/emails/notify_researchers_of_max_responses_pause.html +++ b/studies/templates/emails/notify_researchers_of_max_responses_pause.html @@ -3,10 +3,10 @@

Your study {{ study_name }} has been automatically paused because it reached the maximum number of valid responses - ({{ valid_response_count }}/{{ max_responses }}). + ({{ valid_response_count }} valid / {{ max_responses }} limit).

- If you would like to collect more data, you can increase the study's response limit and/or edit the valid/invalid status of individual responses, and then re-start it (your study will NOT be re-started automatically). + Please make sure to handle any pending consents and review your responses, as doing so may open up more slots. Then, if you would like to collect more data, you can increase the study's response limit if necessary, and re-start it. Your study will NOT be restarted automatically if more slots become available and/or you increase the response limit.
Your study can be found here.

diff --git a/studies/templates/emails/notify_researchers_of_max_responses_pause.txt b/studies/templates/emails/notify_researchers_of_max_responses_pause.txt index 3ff720281..e06a6ad58 100644 --- a/studies/templates/emails/notify_researchers_of_max_responses_pause.txt +++ b/studies/templates/emails/notify_researchers_of_max_responses_pause.txt @@ -1,9 +1,9 @@ {% load web_extras %} Dear Study Researchers, -Your study {{ study_name }} has been automatically paused because it reached the maximum number of valid responses ({{ valid_response_count }}/{{ max_responses }}). +Your study {{ study_name }} has been automatically paused because it reached the maximum number of valid responses ({{ valid_response_count }} valid / {{ max_responses }} limit). -If you would like to collect more data, you can increase the study's response limit and/or edit the valid/invalid status of individual responses, and then re-start it (your study will NOT be re-started automatically). +Please make sure to handle any pending consents and review your responses, as doing so may open up more slots. Then, if you would like to collect more data, you can increase the study's response limit if necessary, and re-start it. Your study will NOT be restarted automatically if more slots become available and/or you increase the response limit. 
Your study can be found here: {% absolute_url 'exp:study' study_id %} From fd0fa1627e37a981f0f0a4c720fe5654c4024eec Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Thu, 19 Feb 2026 13:23:14 -0800 Subject: [PATCH 33/45] study activation error message due to max responses: add info about what to do next --- studies/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/studies/models.py b/studies/models.py index 9415a3808..8fc670635 100644 --- a/studies/models.py +++ b/studies/models.py @@ -853,7 +853,7 @@ def check_if_at_max_responses(self, ev): """ if self.has_reached_max_responses: raise RuntimeError( - f'Cannot activate the study "{self.name}" ({self.id}) because it has reached its maximum number of responses.' + f'Cannot activate the study "{self.name}" ({self.id}) because it has reached its maximum number of responses. Be sure to handle all pending consents and review existing responses, as this may open up slots. Then increase the response limit in the Study Ad if necessary, and try starting the study again.' ) def notify_administrators_of_activation(self, ev): From aeb614f60226ed105657426b76ef4648347d8d21 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Thu, 19 Feb 2026 13:51:52 -0800 Subject: [PATCH 34/45] edit study log template to indicate automatic pausing --- .../templates/studies/study_detail/_study_logs.html | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/studies/templates/studies/study_detail/_study_logs.html b/studies/templates/studies/study_detail/_study_logs.html index 5c239510b..bc63ab5de 100644 --- a/studies/templates/studies/study_detail/_study_logs.html +++ b/studies/templates/studies/study_detail/_study_logs.html @@ -5,8 +5,8 @@
{% for log in logs %}
-
{% localtime on %}{{ log.created_at }}{% endlocaltime %}
-
+
{% localtime on %}{{ log.created_at }}{% endlocaltime %}
+
Study {% if log.action == "active" %} started @@ -17,8 +17,12 @@ {% endif %} {% if log.user %} by {{ log.user.get_short_name }} - {% elif log.action == "rejected" %} - due to changes + {% else %} + {% if log.action == "rejected" %} + due to changes + {% elif log.action == "paused" %} + automatically: response limit reached + {% endif %} {% endif %}
From ed963ce095bb82c996212c0a0154905ff8d26c80 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Thu, 19 Feb 2026 14:50:26 -0800 Subject: [PATCH 35/45] valid response count: internal responses must have a consent frame, and consent status must not be rejected --- studies/models.py | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/studies/models.py b/studies/models.py index 26e0bd3c0..89703a95f 100644 --- a/studies/models.py +++ b/studies/models.py @@ -560,20 +560,38 @@ def valid_response_count(self) -> int: A response is counted as valid if: - is_preview is False - - eligibility is "Eligible" or blank/empty - - completed is True (internal study types only) + - eligibility is "Eligible" or blank/empty (backwards compatibility) - For external studies, the completed field is ignored. + And for internal studies, responses must also meet the following conditions: + - completed is True + - completed_consent_frame is True + - the consent has not been rejected (must be either pending or accepted) + + For external studies, the completed, completed_consent_frame, and consent requirements are ignored. Returns: int: Count of valid responses """ + # Filter out preview responses responses = self.responses.filter(is_preview=False) - # For internal study types, also require completed=True + # For internal study types, also require completed_consent_frame=True, completed=True, and consent not rejected if not self.study_type.is_external: - responses = responses.filter(completed=True) + responses = responses.filter(completed=True, completed_consent_frame=True) + newest_ruling_subquery = models.Subquery( + ConsentRuling.objects.filter(response=models.OuterRef("pk")) + .order_by("-created_at") + .values("action")[:1] + ) + # Filter out responses with rejected consent, and explicitly allow NULL consent rulings (pending, i.e. no judgment has been submitted). 
+ responses = responses.annotate( + current_ruling=newest_ruling_subquery + ).filter( + models.Q(current_ruling__isnull=True) + | ~models.Q(current_ruling=REJECTED) + ) + # Filter out ineligible responses return responses.filter( models.Q(eligibility=[]) | models.Q(eligibility__contains=[ResponseEligibility.ELIGIBLE]) From 28fdbda3f6108ad7dfa40f2cd3b3a6dc4aa12227 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Thu, 19 Feb 2026 14:56:43 -0800 Subject: [PATCH 36/45] fix valid response cases to include completed_consent_frame=True; add test cases for no consent frame and rejected consent --- studies/tests.py | 154 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 151 insertions(+), 3 deletions(-) diff --git a/studies/tests.py b/studies/tests.py index ab15a1385..607eed1a6 100644 --- a/studies/tests.py +++ b/studies/tests.py @@ -22,7 +22,16 @@ get_experiment_absolute_url, send_mail, ) -from studies.models import Lab, Response, Study, StudyType, StudyTypeEnum, Video +from studies.models import ( + REJECTED, + ConsentRuling, + Lab, + Response, + Study, + StudyType, + StudyTypeEnum, + Video, +) from studies.permissions import StudyPermission from studies.tasks import ( MessageTarget, @@ -869,24 +878,26 @@ def test_valid_response_count_internal_study(self): # Note: Response.save() auto-sets eligibility, so we update this value after creation - # Valid response: completed, not preview, eligible + # Valid response: completed, consent frame completed, not preview, eligible valid1 = Response.objects.create( study=study, child=child, study_type=study.study_type, demographic_snapshot=user.latest_demographics, completed=True, + completed_consent_frame=True, is_preview=False, ) Response.objects.filter(pk=valid1.pk).update(eligibility=[]) - # Valid response: completed, not preview, empty eligibility + # Valid response: completed, consent frame completed, not preview, empty eligibility valid2 = Response.objects.create( study=study, child=child, 
study_type=study.study_type, demographic_snapshot=user.latest_demographics, completed=True, + completed_consent_frame=True, is_preview=False, ) Response.objects.filter(pk=valid2.pk).update( @@ -900,6 +911,7 @@ def test_valid_response_count_internal_study(self): study_type=study.study_type, demographic_snapshot=user.latest_demographics, completed=True, + completed_consent_frame=True, is_preview=True, ) Response.objects.filter(pk=invalid3.pk).update( @@ -913,6 +925,7 @@ def test_valid_response_count_internal_study(self): study_type=study.study_type, demographic_snapshot=user.latest_demographics, completed=False, + completed_consent_frame=True, is_preview=False, ) Response.objects.filter(pk=invalid4.pk).update( @@ -926,12 +939,27 @@ def test_valid_response_count_internal_study(self): study_type=study.study_type, demographic_snapshot=user.latest_demographics, completed=True, + completed_consent_frame=True, is_preview=False, ) Response.objects.filter(pk=invalid5.pk).update( eligibility=[ResponseEligibility.INELIGIBLE_OLD] ) + # Invalid: consent frame not completed + invalid6 = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + completed_consent_frame=False, + is_preview=False, + ) + Response.objects.filter(pk=invalid6.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + self.assertEqual(study.valid_response_count, 2) def test_valid_response_count_external_study(self): @@ -1006,6 +1034,123 @@ def test_valid_response_count_external_study(self): # 3 valid responses: r1, r2, r3 (completed field ignored for external) self.assertEqual(study.valid_response_count, 3) + def test_valid_response_count_excludes_rejected_consent_internal(self): + """Test that valid_response_count excludes responses with rejected consent for internal studies.""" + study = Study.objects.create(study_type=StudyType.get_ember_frame_player()) + user = User.objects.create(is_active=True) + 
child = Child.objects.create(user=user, birthday=date.today()) + + # Valid: completed, consent frame completed, eligible, no consent ruling (pending) + r1 = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + completed_consent_frame=True, + is_preview=False, + ) + Response.objects.filter(pk=r1.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + # Valid: completed, consent frame completed, eligible, accepted consent + r2 = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + completed_consent_frame=True, + is_preview=False, + ) + Response.objects.filter(pk=r2.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + ConsentRuling.objects.create(response=r2, action="accepted", arbiter=user) + + # Invalid: completed, consent frame completed, eligible, but consent rejected + r3 = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + completed_consent_frame=True, + is_preview=False, + ) + Response.objects.filter(pk=r3.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + ConsentRuling.objects.create(response=r3, action=REJECTED, arbiter=user) + + # 2 valid: r1 (no ruling = pending) and r2 (accepted). r3 excluded (rejected). 
+ self.assertEqual(study.valid_response_count, 2) + + def test_valid_response_count_uses_most_recent_consent_ruling(self): + """Test that only the most recent consent ruling is considered.""" + study = Study.objects.create(study_type=StudyType.get_ember_frame_player()) + user = User.objects.create(is_active=True) + child = Child.objects.create(user=user, birthday=date.today()) + + # Response with rejected then accepted consent (most recent = accepted, so valid) + r1 = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + completed_consent_frame=True, + is_preview=False, + ) + Response.objects.filter(pk=r1.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + ConsentRuling.objects.create(response=r1, action=REJECTED, arbiter=user) + ConsentRuling.objects.create(response=r1, action="accepted", arbiter=user) + + # Response with accepted then rejected consent (most recent = rejected, so invalid) + r2 = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + completed_consent_frame=True, + is_preview=False, + ) + Response.objects.filter(pk=r2.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + ConsentRuling.objects.create(response=r2, action="accepted", arbiter=user) + ConsentRuling.objects.create(response=r2, action=REJECTED, arbiter=user) + + # Only r1 is valid (most recent ruling is accepted) + self.assertEqual(study.valid_response_count, 1) + + def test_valid_response_count_consent_ignored_for_external(self): + """Test that consent rulings are not checked for external studies.""" + study = Study.objects.create(study_type=StudyType.get_external()) + user = User.objects.create(is_active=True) + child = Child.objects.create(user=user, birthday=date.today()) + + # Response with rejected consent - should still count for external + r1 = 
Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r1.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + ConsentRuling.objects.create(response=r1, action=REJECTED, arbiter=user) + + # Should count because external studies don't check consent + self.assertEqual(study.valid_response_count, 1) + def test_has_reached_max_responses_no_limit(self): """Test that has_reached_max_responses returns False when max_responses is None.""" study = Study.objects.create( @@ -1031,6 +1176,7 @@ def test_has_reached_max_responses_not_reached(self): study_type=study.study_type, demographic_snapshot=user.latest_demographics, completed=True, + completed_consent_frame=True, is_preview=False, ) Response.objects.filter(pk=r.pk).update( @@ -1056,6 +1202,7 @@ def test_has_reached_max_responses_reached(self): study_type=study.study_type, demographic_snapshot=user.latest_demographics, completed=True, + completed_consent_frame=True, is_preview=False, ) Response.objects.filter(pk=r.pk).update( @@ -1081,6 +1228,7 @@ def test_has_reached_max_responses_exceeded(self): study_type=study.study_type, demographic_snapshot=user.latest_demographics, completed=True, + completed_consent_frame=True, is_preview=False, ) Response.objects.filter(pk=r.pk).update( From 04b1ad9d27957e158ba1cbd993256c9c153bf9e4 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Thu, 19 Feb 2026 15:19:13 -0800 Subject: [PATCH 37/45] refactor tests to pass code duplication checks --- studies/tests.py | 360 +++++++++++------------------------------------ 1 file changed, 84 insertions(+), 276 deletions(-) diff --git a/studies/tests.py b/studies/tests.py index 607eed1a6..311b4bb49 100644 --- a/studies/tests.py +++ b/studies/tests.py @@ -851,12 +851,52 @@ def test_get_jspsych(self): class StudyModelTestCase(TestCase): + def _create_study_with_participant(self, 
study_type=None, **study_kwargs): + """Create a study with a user and child for testing. + + Returns (study, user, child) tuple. + """ + if study_type is None: + study_type = StudyType.get_ember_frame_player() + study = Study.objects.create(study_type=study_type, **study_kwargs) + user = User.objects.create(is_active=True) + child = Child.objects.create(user=user, birthday=date.today()) + return study, user, child + + def _create_response( + self, + study, + child, + eligibility=None, + completed=True, + completed_consent_frame=True, + is_preview=False, + ): + """Create a response and update its eligibility. + + Note: Response.save() auto-sets eligibility, so we use .update() after creation. + """ + if eligibility is None: + eligibility = [ResponseEligibility.ELIGIBLE] + user = child.user + r = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=completed, + completed_consent_frame=completed_consent_frame, + is_preview=is_preview, + ) + Response.objects.filter(pk=r.pk).update(eligibility=eligibility) + return r + def test_responses_for_researcher_external_studies(self): - study = Study.objects.create( - study_type=StudyType.get_external(), + study, user, child = self._create_study_with_participant( + study_type=StudyType.get_external() ) - user = User.objects.create(is_active=True, is_researcher=True) - child = Child.objects.create(user=user, birthday=date.today()) + user.is_researcher = True + user.save() response = Response.objects.create( study=study, child=child, @@ -872,216 +912,69 @@ def test_responses_for_researcher_external_studies(self): def test_valid_response_count_internal_study(self): """Test that valid_response_count correctly counts eligible, completed, non-preview responses.""" - study = Study.objects.create(study_type=StudyType.get_ember_frame_player()) - user = User.objects.create(is_active=True) - child = Child.objects.create(user=user, 
birthday=date.today()) + study, _, child = self._create_study_with_participant() - # Note: Response.save() auto-sets eligibility, so we update this value after creation - - # Valid response: completed, consent frame completed, not preview, eligible - valid1 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - completed_consent_frame=True, - is_preview=False, - ) - Response.objects.filter(pk=valid1.pk).update(eligibility=[]) + # Valid: completed, consent frame completed, not preview, empty eligibility + self._create_response(study, child, eligibility=[]) - # Valid response: completed, consent frame completed, not preview, empty eligibility - valid2 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - completed_consent_frame=True, - is_preview=False, - ) - Response.objects.filter(pk=valid2.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + # Valid: completed, consent frame completed, not preview, eligible + self._create_response(study, child) # Invalid: preview response - invalid3 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - completed_consent_frame=True, - is_preview=True, - ) - Response.objects.filter(pk=invalid3.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + self._create_response(study, child, is_preview=True) # Invalid: not completed - invalid4 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=False, - completed_consent_frame=True, - is_preview=False, - ) - Response.objects.filter(pk=invalid4.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + self._create_response(study, child, completed=False) # Invalid: 
ineligible - invalid5 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - completed_consent_frame=True, - is_preview=False, - ) - Response.objects.filter(pk=invalid5.pk).update( - eligibility=[ResponseEligibility.INELIGIBLE_OLD] + self._create_response( + study, child, eligibility=[ResponseEligibility.INELIGIBLE_OLD] ) # Invalid: consent frame not completed - invalid6 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - completed_consent_frame=False, - is_preview=False, - ) - Response.objects.filter(pk=invalid6.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + self._create_response(study, child, completed_consent_frame=False) self.assertEqual(study.valid_response_count, 2) def test_valid_response_count_external_study(self): """Test that valid_response_count for external studies ignores completed field.""" - study = Study.objects.create(study_type=StudyType.get_external()) - user = User.objects.create(is_active=True) - child = Child.objects.create(user=user, birthday=date.today()) + study, _, child = self._create_study_with_participant( + study_type=StudyType.get_external() + ) # Valid: not preview, eligible, completed - r1 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - is_preview=False, - ) - Response.objects.filter(pk=r1.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + self._create_response(study, child) # Valid: not preview, eligible, NOT completed (should still count for external) - r2 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=False, - is_preview=False, - ) - Response.objects.filter(pk=r2.pk).update( - 
eligibility=[ResponseEligibility.ELIGIBLE] - ) + self._create_response(study, child, completed=False) # Valid: not preview, empty eligibility, NOT completed - r3 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=False, - is_preview=False, - ) - Response.objects.filter(pk=r3.pk).update(eligibility=[]) + self._create_response(study, child, completed=False, eligibility=[]) # Invalid: preview response (should not count) - r4 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - is_preview=True, - ) - Response.objects.filter(pk=r4.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + self._create_response(study, child, is_preview=True) # Invalid: ineligible (should not count) - r5 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - is_preview=False, - ) - Response.objects.filter(pk=r5.pk).update( - eligibility=[ResponseEligibility.INELIGIBLE_CRITERIA] + self._create_response( + study, child, eligibility=[ResponseEligibility.INELIGIBLE_CRITERIA] ) - # 3 valid responses: r1, r2, r3 (completed field ignored for external) + # 3 valid responses (completed field ignored for external) self.assertEqual(study.valid_response_count, 3) def test_valid_response_count_excludes_rejected_consent_internal(self): """Test that valid_response_count excludes responses with rejected consent for internal studies.""" - study = Study.objects.create(study_type=StudyType.get_ember_frame_player()) - user = User.objects.create(is_active=True) - child = Child.objects.create(user=user, birthday=date.today()) + study, user, child = self._create_study_with_participant() - # Valid: completed, consent frame completed, eligible, no consent ruling (pending) - r1 = Response.objects.create( - 
study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - completed_consent_frame=True, - is_preview=False, - ) - Response.objects.filter(pk=r1.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + # Valid: no consent ruling (pending) + self._create_response(study, child) - # Valid: completed, consent frame completed, eligible, accepted consent - r2 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - completed_consent_frame=True, - is_preview=False, - ) - Response.objects.filter(pk=r2.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + # Valid: accepted consent + r2 = self._create_response(study, child) ConsentRuling.objects.create(response=r2, action="accepted", arbiter=user) - # Invalid: completed, consent frame completed, eligible, but consent rejected - r3 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - completed_consent_frame=True, - is_preview=False, - ) - Response.objects.filter(pk=r3.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + # Invalid: consent rejected + r3 = self._create_response(study, child) ConsentRuling.objects.create(response=r3, action=REJECTED, arbiter=user) # 2 valid: r1 (no ruling = pending) and r2 (accepted). r3 excluded (rejected). 
@@ -1089,39 +982,15 @@ def test_valid_response_count_excludes_rejected_consent_internal(self): def test_valid_response_count_uses_most_recent_consent_ruling(self): """Test that only the most recent consent ruling is considered.""" - study = Study.objects.create(study_type=StudyType.get_ember_frame_player()) - user = User.objects.create(is_active=True) - child = Child.objects.create(user=user, birthday=date.today()) + study, user, child = self._create_study_with_participant() # Response with rejected then accepted consent (most recent = accepted, so valid) - r1 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - completed_consent_frame=True, - is_preview=False, - ) - Response.objects.filter(pk=r1.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + r1 = self._create_response(study, child) ConsentRuling.objects.create(response=r1, action=REJECTED, arbiter=user) ConsentRuling.objects.create(response=r1, action="accepted", arbiter=user) # Response with accepted then rejected consent (most recent = rejected, so invalid) - r2 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - completed_consent_frame=True, - is_preview=False, - ) - Response.objects.filter(pk=r2.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + r2 = self._create_response(study, child) ConsentRuling.objects.create(response=r2, action="accepted", arbiter=user) ConsentRuling.objects.create(response=r2, action=REJECTED, arbiter=user) @@ -1130,22 +999,12 @@ def test_valid_response_count_uses_most_recent_consent_ruling(self): def test_valid_response_count_consent_ignored_for_external(self): """Test that consent rulings are not checked for external studies.""" - study = Study.objects.create(study_type=StudyType.get_external()) - user = User.objects.create(is_active=True) - child = 
Child.objects.create(user=user, birthday=date.today()) + study, user, child = self._create_study_with_participant( + study_type=StudyType.get_external() + ) # Response with rejected consent - should still count for external - r1 = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - is_preview=False, - ) - Response.objects.filter(pk=r1.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + r1 = self._create_response(study, child) ConsentRuling.objects.create(response=r1, action=REJECTED, arbiter=user) # Should count because external studies don't check consent @@ -1161,79 +1020,28 @@ def test_has_reached_max_responses_no_limit(self): def test_has_reached_max_responses_not_reached(self): """Test that has_reached_max_responses returns False when limit not reached.""" - study = Study.objects.create( - study_type=StudyType.get_ember_frame_player(), - max_responses=5, - ) - user = User.objects.create(is_active=True) - child = Child.objects.create(user=user, birthday=date.today()) + study, _, child = self._create_study_with_participant(max_responses=5) - # Add 2 valid responses for _ in range(2): - r = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - completed_consent_frame=True, - is_preview=False, - ) - Response.objects.filter(pk=r.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + self._create_response(study, child) self.assertFalse(study.has_reached_max_responses) def test_has_reached_max_responses_reached(self): """Test that has_reached_max_responses returns True when limit is reached.""" - study = Study.objects.create( - study_type=StudyType.get_ember_frame_player(), - max_responses=3, - ) - user = User.objects.create(is_active=True) - child = Child.objects.create(user=user, birthday=date.today()) + study, _, child = 
self._create_study_with_participant(max_responses=3) - # Add 3 valid responses for _ in range(3): - r = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - completed_consent_frame=True, - is_preview=False, - ) - Response.objects.filter(pk=r.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + self._create_response(study, child) self.assertTrue(study.has_reached_max_responses) def test_has_reached_max_responses_exceeded(self): """Test that has_reached_max_responses returns True when limit is exceeded.""" - study = Study.objects.create( - study_type=StudyType.get_ember_frame_player(), - max_responses=2, - ) - user = User.objects.create(is_active=True) - child = Child.objects.create(user=user, birthday=date.today()) + study, _, child = self._create_study_with_participant(max_responses=2) - # Add 4 valid responses (exceeds limit of 2) for _ in range(4): - r = Response.objects.create( - study=study, - child=child, - study_type=study.study_type, - demographic_snapshot=user.latest_demographics, - completed=True, - completed_consent_frame=True, - is_preview=False, - ) - Response.objects.filter(pk=r.pk).update( - eligibility=[ResponseEligibility.ELIGIBLE] - ) + self._create_response(study, child) self.assertTrue(study.has_reached_max_responses) From 4692acb691f58d13814f73bb5a1ea09e9d043d0f Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Thu, 19 Feb 2026 17:11:30 -0800 Subject: [PATCH 38/45] fix sonarqube issue: Prefer Number.parseInt over parseInt --- web/static/js/study-fields.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/static/js/study-fields.js b/web/static/js/study-fields.js index 709d880b8..c7addb497 100644 --- a/web/static/js/study-fields.js +++ b/web/static/js/study-fields.js @@ -141,7 +141,7 @@ $(document).ready(function () { // Remove non-numeric characters and leading zeros let value = maxResponses.value.replace(/[^0-9]/g, 
'').replace(/^0+/, ''); // Ensure minimum value of 1 if not empty - if (value !== '' && parseInt(value) < 1) { + if (value !== '' && Number.parseInt(value) < 1) { value = '1'; } maxResponses.value = value; From a2971fe5c8049e29532e2eb3f5e992a0abc9c06d Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Fri, 20 Feb 2026 15:09:07 -0800 Subject: [PATCH 39/45] replace div with progress element for improved accessibility (resolves sonarqube issue) --- studies/templates/studies/study_detail.html | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/studies/templates/studies/study_detail.html b/studies/templates/studies/study_detail.html index 962da56f0..ca5657158 100644 --- a/studies/templates/studies/study_detail.html +++ b/studies/templates/studies/study_detail.html @@ -135,14 +135,10 @@
{% widthratio study.valid_response_count study.max_responses 100 as percent_str %} {% with percent=percent_str|add:"0" %} -
-
-
+
+ {{ study.valid_response_count }} / {{ study.max_responses }}
{% if percent == 100 %}
From 775db4f3abdcc80218a7006b5c97eb75945bc31d Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Fri, 20 Feb 2026 15:11:21 -0800 Subject: [PATCH 40/45] update CSS for change from div to progress element; move CSS out of compiled file and into scss --- scss/base.scss | 1 + scss/study-detail-progress-bar.scss | 54 +++++++++++++++++++++++++++++ web/static/custom_bootstrap5.css | 43 +++++++++++++++++++++++ 3 files changed, 98 insertions(+) create mode 100644 scss/study-detail-progress-bar.scss diff --git a/scss/base.scss b/scss/base.scss index 97b51005a..34fe4e3a8 100644 --- a/scss/base.scss +++ b/scss/base.scss @@ -12,6 +12,7 @@ // import study-responses after custom variables because it uses a color variable @import "study-responses"; +@import "study-detail-progress-bar"; // Add all bootstrap features @import "bootstrap-5.2.0/scss/bootstrap"; diff --git a/scss/study-detail-progress-bar.scss b/scss/study-detail-progress-bar.scss new file mode 100644 index 000000000..ef73887a1 --- /dev/null +++ b/scss/study-detail-progress-bar.scss @@ -0,0 +1,54 @@ +progress { + &.study-progress { + --study-progress-color: var(--bs-info); + appearance: none; + -webkit-appearance: none; + border: none; + border-radius: .375rem; + height: 20px; + width: 100%; + overflow: hidden; + background-color: #e9ecef; + } + + &.study-progress::-webkit-progress-bar { + background-color: #e9ecef; + border-radius: .375rem; + } + + &.study-progress::-webkit-progress-value { + background-color: var(--study-progress-color); + border-radius: .375rem; + } + + &.study-progress::-moz-progress-bar { + background-color: var(--study-progress-color); + border-radius: .375rem; + } + + &.study-progress-info { + --study-progress-color: var(--bs-info); + } + + &.study-progress-warning { + --study-progress-color: var(--bs-warning); + } + + &.study-progress-success { + --study-progress-color: var(--bs-success); + } + + &.study-progress-danger { + --study-progress-color: var(--bs-danger); + } + + 
&.study-progress-danger::-webkit-progress-value { + background-image: linear-gradient(45deg, rgba(255,255,255,.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,.15) 50%, rgba(255,255,255,.15) 75%, transparent 75%, transparent); + background-size: 1rem 1rem; + } + + &.study-progress-danger::-moz-progress-bar { + background-image: linear-gradient(45deg, rgba(255,255,255,.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,.15) 50%, rgba(255,255,255,.15) 75%, transparent 75%, transparent); + background-size: 1rem 1rem; + } +} diff --git a/web/static/custom_bootstrap5.css b/web/static/custom_bootstrap5.css index a6492fa07..c423229fd 100644 --- a/web/static/custom_bootstrap5.css +++ b/web/static/custom_bootstrap5.css @@ -56,6 +56,49 @@ input[type="checkbox"].researcher-editable:disabled + label .icon-star { -webkit-line-clamp: 2; -webkit-box-orient: vertical; } +progress.study-progress { + --study-progress-color: var(--bs-info); + appearance: none; + -webkit-appearance: none; + border: none; + border-radius: .375rem; + height: 20px; + width: 100%; + overflow: hidden; + background-color: #e9ecef; } + +progress.study-progress::-webkit-progress-bar { + background-color: #e9ecef; + border-radius: .375rem; } + +progress.study-progress::-webkit-progress-value { + background-color: var(--study-progress-color); + border-radius: .375rem; } + +progress.study-progress::-moz-progress-bar { + background-color: var(--study-progress-color); + border-radius: .375rem; } + +progress.study-progress-info { + --study-progress-color: var(--bs-info); } + +progress.study-progress-warning { + --study-progress-color: var(--bs-warning); } + +progress.study-progress-success { + --study-progress-color: var(--bs-success); } + +progress.study-progress-danger { + --study-progress-color: var(--bs-danger); } + +progress.study-progress-danger::-webkit-progress-value { + background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 
255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-size: 1rem 1rem; } + +progress.study-progress-danger::-moz-progress-bar { + background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-size: 1rem 1rem; } + /*! * Bootstrap v5.2.0 (https://getbootstrap.com/) * Copyright 2011-2022 The Bootstrap Authors From 3c668b5120342ed1c89b83bc7252ce246082fb63 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Fri, 20 Feb 2026 15:16:55 -0800 Subject: [PATCH 41/45] remove TODO comment (resolves sonarqube issue) --- studies/models.py | 1 - 1 file changed, 1 deletion(-) diff --git a/studies/models.py b/studies/models.py index 89703a95f..1ece9af78 100644 --- a/studies/models.py +++ b/studies/models.py @@ -613,7 +613,6 @@ def check_and_pause_if_at_max_responses(self): Only pauses if the study is currently active. 
""" - # TODO: Implement logic to pause study when max responses reached pass @property From b036ac21308d7498e4308b81352ef8515b3b8d60 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Fri, 20 Feb 2026 15:21:08 -0800 Subject: [PATCH 42/45] use more concise regex (resolves sonarqube issue) --- web/static/js/study-fields.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/static/js/study-fields.js b/web/static/js/study-fields.js index c7addb497..2bc60bd1a 100644 --- a/web/static/js/study-fields.js +++ b/web/static/js/study-fields.js @@ -139,7 +139,7 @@ $(document).ready(function () { // Input validation for max_responses maxResponses.addEventListener('input', () => { // Remove non-numeric characters and leading zeros - let value = maxResponses.value.replace(/[^0-9]/g, '').replace(/^0+/, ''); + let value = maxResponses.value.replace(/\D/g, '').replace(/^0+/, ''); // Ensure minimum value of 1 if not empty if (value !== '' && Number.parseInt(value) < 1) { value = '1'; From 49c12ac152c0ea92e96ff671deeca39dbe0cabcd Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Fri, 20 Feb 2026 15:36:04 -0800 Subject: [PATCH 43/45] use type=text for max responses input - fixes problems with preventing non-numeric characters --- studies/forms.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/studies/forms.py b/studies/forms.py index 1b4419224..104529022 100644 --- a/studies/forms.py +++ b/studies/forms.py @@ -295,7 +295,7 @@ class Meta: "priority": forms.TextInput( attrs={"type": "range", "min": "1", "max": "99"} ), - "max_responses": forms.NumberInput( + "max_responses": forms.TextInput( attrs={"min": "1", "placeholder": "Enter a number"} ), } From 8ffd76c14f21a50ccb0435230794f644fc2993e4 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Fri, 20 Feb 2026 16:03:26 -0800 Subject: [PATCH 44/45] add sanitize_log_input helper to use for logging user input (addresses sonarqube issue) --- studies/helpers.py | 7 +++++++ studies/models.py | 23 
++++++++++++++++++----- studies/tests.py | 16 ++++++++++++++++ 3 files changed, 41 insertions(+), 5 deletions(-) diff --git a/studies/helpers.py b/studies/helpers.py index 8db51f995..603868b0f 100644 --- a/studies/helpers.py +++ b/studies/helpers.py @@ -28,6 +28,13 @@ def get_experiment_absolute_url(path): return urljoin(settings.EXPERIMENT_BASE_URL, path) +def sanitize_log_input(user_input): + """Removes newline characters to prevent log injection.""" + if isinstance(user_input, str): + return user_input.replace("\n", "").replace("\r", "") + return user_input + + @app.task def send_mail( template_name, diff --git a/studies/models.py b/studies/models.py index 1ece9af78..83cb2768c 100644 --- a/studies/models.py +++ b/studies/models.py @@ -31,6 +31,7 @@ ResponseEligibility, get_absolute_url, get_eligibility_for_response, + sanitize_log_input, send_mail, ) from studies.permissions import ( @@ -1313,7 +1314,9 @@ def generate_videos_from_events(self): response = file_obj.get() except ClientError: logger.warning( - f"could not find {video_id} or {pipe_name} in S3!" + sanitize_log_input( + f"could not find {video_id} or {pipe_name} in S3!" + ) ) continue # Read first 32 bytes from streaming body (file header) to get actual filetype. @@ -1465,20 +1468,28 @@ def check_and_parse_pipe_payload(cls, pipe_payload: str): ) except ValueError: logger.error( - f"Could not parse video filename {pipe_payload} to extract study and response" + sanitize_log_input( + f"Could not parse video filename {pipe_payload} to extract study and response" + ) ) raise try: study = Study.objects.get(uuid=study_uuid) except Study.DoesNotExist as ex: - logger.error(f"Study with uuid {study_uuid} does not exist. {ex}") + logger.error( + sanitize_log_input(f"Study with uuid {study_uuid} does not exist. {ex}") + ) raise try: response = Response.objects.get(uuid=response_uuid) except Response.DoesNotExist as ex: - logger.error(f"Response with uuid {response_uuid} does not exist. 
{ex}") + logger.error( + sanitize_log_input( + f"Response with uuid {response_uuid} does not exist. {ex}" + ) + ) raise return marked_as_consent, pipe_payload, study, frame_id, response, timestamp @@ -1515,7 +1526,9 @@ def from_pipe_payload(cls, pipe_response_dict: dict): ) except ClientError: # old_name_full not found! logger.error( - f"Amazon S3 couldn't find the video for Pipe ID {old_pipe_name} in bucket {settings.BUCKET_NAME}" + sanitize_log_input( + f"Amazon S3 couldn't find the video for Pipe ID {old_pipe_name} in bucket {settings.BUCKET_NAME}" + ) ) raise else: # Go on to remove the originals diff --git a/studies/tests.py b/studies/tests.py index 311b4bb49..f1e768cfd 100644 --- a/studies/tests.py +++ b/studies/tests.py @@ -20,6 +20,7 @@ ResponseEligibility, get_absolute_url, get_experiment_absolute_url, + sanitize_log_input, send_mail, ) from studies.models import ( @@ -2402,3 +2403,18 @@ def test_get_experiment_absolute_url(self): self.assertTrue( get_experiment_absolute_url("somepath"), f"{exp_fake_website}somepath" ) + + +class TestSanitizeLogInputCase(TestCase): + def test_sanitize_input_with_log_injection(self): + sanitized_input = "some dataFAKE LOG: User Admin logged in" + user_input_n = "some data\nFAKE LOG: User Admin logged in" + self.assertEqual(sanitize_log_input(user_input_n), sanitized_input) + user_input_r = "some data\rFAKE LOG: User Admin logged in" + self.assertEqual(sanitize_log_input(user_input_r), sanitized_input) + user_input_rn = "some data\r\nFAKE LOG: User Admin logged in" + self.assertEqual(sanitize_log_input(user_input_rn), sanitized_input) + + def test_sanitize_input_with_safe_characters(self): + user_input_safe = "some safe user input" + self.assertEqual(sanitize_log_input(user_input_safe), user_input_safe) From 54168e85d9d906359f5869acf52939b9c225e741 Mon Sep 17 00:00:00 2001 From: Becky Gilbert Date: Mon, 23 Feb 2026 15:08:00 -0800 Subject: [PATCH 45/45] Revert "add sanitize_log_input helper to use for logging user input 
(addresses sonarqube issue)" This reverts commit 8ffd76c14f21a50ccb0435230794f644fc2993e4. --- studies/helpers.py | 7 ------- studies/models.py | 23 +++++------------------ studies/tests.py | 16 ---------------- 3 files changed, 5 insertions(+), 41 deletions(-) diff --git a/studies/helpers.py b/studies/helpers.py index 603868b0f..8db51f995 100644 --- a/studies/helpers.py +++ b/studies/helpers.py @@ -28,13 +28,6 @@ def get_experiment_absolute_url(path): return urljoin(settings.EXPERIMENT_BASE_URL, path) -def sanitize_log_input(user_input): - """Removes newline characters to prevent log injection.""" - if isinstance(user_input, str): - return user_input.replace("\n", "").replace("\r", "") - return user_input - - @app.task def send_mail( template_name, diff --git a/studies/models.py b/studies/models.py index 83cb2768c..1ece9af78 100644 --- a/studies/models.py +++ b/studies/models.py @@ -31,7 +31,6 @@ ResponseEligibility, get_absolute_url, get_eligibility_for_response, - sanitize_log_input, send_mail, ) from studies.permissions import ( @@ -1314,9 +1313,7 @@ def generate_videos_from_events(self): response = file_obj.get() except ClientError: logger.warning( - sanitize_log_input( - f"could not find {video_id} or {pipe_name} in S3!" - ) + f"could not find {video_id} or {pipe_name} in S3!" ) continue # Read first 32 bytes from streaming body (file header) to get actual filetype. @@ -1468,28 +1465,20 @@ def check_and_parse_pipe_payload(cls, pipe_payload: str): ) except ValueError: logger.error( - sanitize_log_input( - f"Could not parse video filename {pipe_payload} to extract study and response" - ) + f"Could not parse video filename {pipe_payload} to extract study and response" ) raise try: study = Study.objects.get(uuid=study_uuid) except Study.DoesNotExist as ex: - logger.error( - sanitize_log_input(f"Study with uuid {study_uuid} does not exist. {ex}") - ) + logger.error(f"Study with uuid {study_uuid} does not exist. 
{ex}") raise try: response = Response.objects.get(uuid=response_uuid) except Response.DoesNotExist as ex: - logger.error( - sanitize_log_input( - f"Response with uuid {response_uuid} does not exist. {ex}" - ) - ) + logger.error(f"Response with uuid {response_uuid} does not exist. {ex}") raise return marked_as_consent, pipe_payload, study, frame_id, response, timestamp @@ -1526,9 +1515,7 @@ def from_pipe_payload(cls, pipe_response_dict: dict): ) except ClientError: # old_name_full not found! logger.error( - sanitize_log_input( - f"Amazon S3 couldn't find the video for Pipe ID {old_pipe_name} in bucket {settings.BUCKET_NAME}" - ) + f"Amazon S3 couldn't find the video for Pipe ID {old_pipe_name} in bucket {settings.BUCKET_NAME}" ) raise else: # Go on to remove the originals diff --git a/studies/tests.py b/studies/tests.py index f1e768cfd..311b4bb49 100644 --- a/studies/tests.py +++ b/studies/tests.py @@ -20,7 +20,6 @@ ResponseEligibility, get_absolute_url, get_experiment_absolute_url, - sanitize_log_input, send_mail, ) from studies.models import ( @@ -2403,18 +2402,3 @@ def test_get_experiment_absolute_url(self): self.assertTrue( get_experiment_absolute_url("somepath"), f"{exp_fake_website}somepath" ) - - -class TestSanitizeLogInputCase(TestCase): - def test_sanitize_input_with_log_injection(self): - sanitized_input = "some dataFAKE LOG: User Admin logged in" - user_input_n = "some data\nFAKE LOG: User Admin logged in" - self.assertEqual(sanitize_log_input(user_input_n), sanitized_input) - user_input_r = "some data\rFAKE LOG: User Admin logged in" - self.assertEqual(sanitize_log_input(user_input_r), sanitized_input) - user_input_rn = "some data\r\nFAKE LOG: User Admin logged in" - self.assertEqual(sanitize_log_input(user_input_rn), sanitized_input) - - def test_sanitize_input_with_safe_characters(self): - user_input_safe = "some safe user input" - self.assertEqual(sanitize_log_input(user_input_safe), user_input_safe)