diff --git a/exp/tests/test_study_views.py b/exp/tests/test_study_views.py index 4175b5dd3..4bfb911ef 100644 --- a/exp/tests/test_study_views.py +++ b/exp/tests/test_study_views.py @@ -31,7 +31,8 @@ StudyDetailView, StudyPreviewDetailView, ) -from studies.models import Lab, Study, StudyType +from studies.helpers import ResponseEligibility +from studies.models import Lab, Response, Study, StudyType from studies.permissions import LabPermission, StudyPermission @@ -672,6 +673,171 @@ def test_update_trigger_object_no_attr( mock_request.POST.keys.assert_not_called() +@override_settings(CELERY_TASK_ALWAYS_EAGER=True) +@override_settings(CELERY_TASK_EAGER_PROPAGATES=True) +@patch("studies.helpers.send_mail") +class ActivateStudyMaxResponsesTestCase(TestCase): + """Integration tests for the check_if_at_max_responses workflow guard. + + When a researcher tries to activate a study (from approved or paused state), + the transition should be blocked if the study has already reached its + max_responses limit. 
+ """ + + def setUp(self): + self.client = Force2FAClient() + self.user = G(User, is_active=True, is_researcher=True) + self.lab = G(Lab, name="Activation Test Lab", approved_to_test=True) + self.lab.researchers.add(self.user) + + self.study = G( + Study, + image=SimpleUploadedFile( + name="small.gif", + content=( + b"\x47\x49\x46\x38\x39\x61\x01\x00\x01\x00\x00\x00\x00\x21\xf9\x04" + b"\x01\x0a\x00\x01\x00\x2c\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02" + b"\x02\x4c\x01\x00\x3b" + ), + content_type="image/gif", + ), + study_type=StudyType.get_external(), + creator=self.user, + lab=self.lab, + name="Activation Test Study", + built=True, + ) + self.study.admin_group.user_set.add(self.user) + assign_perm( + StudyPermission.CHANGE_STUDY_STATUS.prefixed_codename, + self.user, + self.study, + ) + self.client.force_login(self.user) + + self.change_status_url = reverse( + "exp:change-study-status", kwargs={"pk": self.study.pk} + ) + + def _create_eligible_responses(self, count): + """Create eligible, non-preview responses for the study.""" + participant = G(User, is_active=True) + child = G( + Child, + user=participant, + given_name="Test child", + birthday=datetime.date.today() - datetime.timedelta(days=30), + ) + for _ in range(count): + r = Response.objects.create( + study=self.study, + child=child, + study_type=self.study.study_type, + demographic_snapshot=participant.latest_demographics, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + def _get_error_messages(self, response): + """Extract error-level messages from a followed response.""" + from django.contrib.messages import constants + + return [m for m in response.context["messages"] if m.level == constants.ERROR] + + def _get_success_messages(self, response): + """Extract success-level messages from a followed response.""" + from django.contrib.messages import constants + + return [m for m in response.context["messages"] if 
m.level == constants.SUCCESS] + + def test_activate_blocked_from_approved_when_at_max_responses(self, mock_send_mail): + """Activating an approved study fails when max_responses has been reached.""" + self.study.state = "approved" + self.study.max_responses = 3 + self.study.save() + self._create_eligible_responses(3) + + response = self.client.post( + self.change_status_url, {"trigger": "activate"}, follow=True + ) + + self.study.refresh_from_db() + self.assertNotEqual(self.study.state, "active") + errors = self._get_error_messages(response) + self.assertEqual(len(errors), 1) + self.assertIn("TRANSITION ERROR", str(errors[0])) + self.assertIn("maximum number of responses", str(errors[0])) + + def test_activate_blocked_from_paused_when_at_max_responses(self, mock_send_mail): + """Reactivating a paused study fails when max_responses has been reached.""" + self.study.state = "paused" + self.study.max_responses = 2 + self.study.save() + self._create_eligible_responses(3) + + response = self.client.post( + self.change_status_url, {"trigger": "activate"}, follow=True + ) + + self.study.refresh_from_db() + self.assertNotEqual(self.study.state, "active") + errors = self._get_error_messages(response) + self.assertEqual(len(errors), 1) + self.assertIn("TRANSITION ERROR", str(errors[0])) + self.assertIn("maximum number of responses", str(errors[0])) + + def test_activate_succeeds_when_below_max_responses(self, mock_send_mail): + """Activating an approved study succeeds when below the max_responses limit.""" + self.study.state = "approved" + self.study.max_responses = 10 + self.study.save() + self._create_eligible_responses(3) + + response = self.client.post( + self.change_status_url, {"trigger": "activate"}, follow=True + ) + + self.study.refresh_from_db() + self.assertEqual(self.study.state, "active") + errors = self._get_error_messages(response) + self.assertEqual(len(errors), 0) + + def test_activate_succeeds_when_no_max_responses_set(self, mock_send_mail): + """Activating 
an approved study succeeds when max_responses is not set.""" + self.study.state = "approved" + self.study.max_responses = None + self.study.save() + self._create_eligible_responses(5) + + response = self.client.post( + self.change_status_url, {"trigger": "activate"}, follow=True + ) + + self.study.refresh_from_db() + self.assertEqual(self.study.state, "active") + errors = self._get_error_messages(response) + self.assertEqual(len(errors), 0) + + def test_activate_succeeds_when_exactly_at_limit_minus_one(self, mock_send_mail): + """Activating succeeds when response count is one below max_responses.""" + self.study.state = "approved" + self.study.max_responses = 4 + self.study.save() + self._create_eligible_responses(3) + + response = self.client.post( + self.change_status_url, {"trigger": "activate"}, follow=True + ) + + self.study.refresh_from_db() + self.assertEqual(self.study.state, "active") + errors = self._get_error_messages(response) + self.assertEqual(len(errors), 0) + + class ManageResearcherPermissionsViewTestCase(TestCase): def test_model(self) -> None: manage_researcher_permissions_view = ManageResearcherPermissionsView() @@ -1334,3 +1500,196 @@ def test_must_not_have_participated(self): # TODO: StudyPreviewProxyView # - add checks analogous to preview detail view # - check for correct redirect + + +@override_settings(CELERY_TASK_ALWAYS_EAGER=True) +@override_settings(CELERY_TASK_EAGER_PROPAGATES=True) +@patch("studies.helpers.send_mail") +class StudyUpdateMaxResponsesTestCase(TestCase): + """Tests for banner messages when max_responses is edited via StudyUpdateView.""" + + def setUp(self): + self.client = Force2FAClient() + self.user = G(User, is_active=True, is_researcher=True) + self.lab = G(Lab, name="Max Resp Lab", approved_to_test=True) + self.lab.researchers.add(self.user) + + self.study = G( + Study, + image=SimpleUploadedFile( + name="small.gif", + content=( + b"\x47\x49\x46\x38\x39\x61\x01\x00\x01\x00\x00\x00\x00\x21\xf9\x04" + 
b"\x01\x0a\x00\x01\x00\x2c\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02" + b"\x02\x4c\x01\x00\x3b" + ), + content_type="image/gif", + ), + study_type=StudyType.get_ember_frame_player(), + creator=self.user, + lab=self.lab, + name="Max Resp Study", + short_description="test", + preview_summary="test", + purpose="test", + criteria="test", + duration="test", + contact_info="test", + exit_url="https://mit.edu", + ) + self.study.admin_group.user_set.add(self.user) + assign_perm( + StudyPermission.WRITE_STUDY_DETAILS.prefixed_codename, + self.user, + self.study, + ) + self.client.force_login(self.user) + + def _form_data(self, include_image=True, **overrides): + """Build minimal valid form data for the StudyEditForm. + + Set include_image=False to avoid triggering the pre_save signal that + rejects approved/active studies when monitored fields change. + """ + data = { + "name": self.study.name, + "lab": self.study.lab_id, + "study_type": self.study.study_type_id, + "min_age_years": 0, + "min_age_months": 0, + "min_age_days": 0, + "max_age_years": 1, + "max_age_months": 0, + "max_age_days": 0, + "priority": 1, + "preview_summary": self.study.preview_summary, + "short_description": self.study.short_description, + "purpose": self.study.purpose, + "compensation_description": self.study.compensation_description, + "exit_url": self.study.exit_url, + "criteria": self.study.criteria, + "duration": self.study.duration, + "contact_info": self.study.contact_info, + } + if include_image: + data["image"] = SimpleUploadedFile( + name="test_image.jpg", + content=open("exp/tests/static/study_image.png", "rb").read(), + content_type="image/jpeg", + ) + data.update(overrides) + return data + + def _create_eligible_responses(self, count): + """Create eligible, completed, non-preview responses for the study.""" + participant = G(User, is_active=True) + child = G( + Child, + user=participant, + given_name="Test child", + birthday=datetime.date.today() - datetime.timedelta(days=30), + ) + for _ in 
range(count): + r = Response.objects.create( + study=self.study, + child=child, + study_type=self.study.study_type, + demographic_snapshot=participant.latest_demographics, + completed_consent_frame=True, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + def _get_warning_messages(self, response): + """Extract warning-level messages from a followed response.""" + from django.contrib.messages import constants + + return [m for m in response.context["messages"] if m.level == constants.WARNING] + + def test_banner_when_max_responses_reached_non_active_study(self, mock_send_mail): + """Warning banner shown when max_responses is set at/below response count on non-active study.""" + self.assertEqual(self.study.state, "created") + self._create_eligible_responses(3) + data = self._form_data(set_response_limit=True, max_responses=3) + response = self.client.post( + reverse("exp:study-edit", kwargs={"pk": self.study.id}), + data, + follow=True, + ) + warnings = self._get_warning_messages(response) + self.assertEqual(len(warnings), 1) + self.assertIn("reached the response limit", str(warnings[0])) + + # Study should NOT be paused (was not active) + self.study.refresh_from_db() + self.assertNotEqual(self.study.state, "paused") + + def test_study_paused_when_max_responses_reached_active_study(self, mock_send_mail): + """Active study is paused and warning shown when max_responses is set at response count.""" + self.assertEqual(self.study.state, "created") + self.study.state = "active" + self.study.save() + self._create_eligible_responses(3) + # include_image=False to avoid triggering the pre_save signal that + # rejects active studies when monitored fields (like image) change. 
+ data = self._form_data( + include_image=False, set_response_limit=True, max_responses=3 + ) + response = self.client.post( + reverse("exp:study-edit", kwargs={"pk": self.study.id}), + data, + follow=True, + ) + warnings = self._get_warning_messages(response) + self.assertEqual(len(warnings), 1) + self.assertIn("automatically paused", str(warnings[0])) + + self.study.refresh_from_db() + self.assertEqual(self.study.state, "paused") + + def test_no_banner_when_max_responses_not_reached(self, mock_send_mail): + """No warning when max_responses is set above the current response count.""" + self._create_eligible_responses(2) + data = self._form_data(set_response_limit=True, max_responses=10) + response = self.client.post( + reverse("exp:study-edit", kwargs={"pk": self.study.id}), + data, + follow=True, + ) + warnings = self._get_warning_messages(response) + self.assertEqual(len(warnings), 0) + self.assertNotEqual(self.study.state, "paused") + + def test_no_banner_when_max_responses_unchanged(self, mock_send_mail): + """No warning when max_responses is submitted but hasn't changed.""" + self.study.max_responses = 5 + self.study.save() + self._create_eligible_responses(5) + data = self._form_data(set_response_limit=True, max_responses=5) + response = self.client.post( + reverse("exp:study-edit", kwargs={"pk": self.study.id}), + data, + follow=True, + ) + warnings = self._get_warning_messages(response) + self.assertEqual(len(warnings), 0) + + def test_banner_when_max_responses_lowered_below_count(self, mock_send_mail): + """Warning shown when max_responses is lowered below existing response count.""" + self.assertEqual(self.study.state, "created") + self.study.max_responses = 10 + self.study.save() + self._create_eligible_responses(5) + data = self._form_data(set_response_limit=True, max_responses=3) + response = self.client.post( + reverse("exp:study-edit", kwargs={"pk": self.study.id}), + data, + follow=True, + ) + warnings = self._get_warning_messages(response) + 
self.assertEqual(len(warnings), 1) + self.assertIn("reached the response limit", str(warnings[0])) + self.assertEqual(self.study.state, "created") diff --git a/exp/views/study.py b/exp/views/study.py index 16cb39e54..4137a486d 100644 --- a/exp/views/study.py +++ b/exp/views/study.py @@ -226,9 +226,18 @@ def form_valid(self, form: StudyEditForm): ) study.must_have_participated.set(form.cleaned_data["must_have_participated"]) + changed_fields = form.changed_data + messages.success(self.request, f"{study.name} study details saved.") - return super().form_valid(form) + # Save form first so the new max_responses value is persisted before + # check_and_pause_if_at_max_responses (which calls refresh_from_db). + response = super().form_valid(form) + # Now check to see if the study has reached max responses with the new value + if "max_responses" in changed_fields: + study.check_and_pause_if_at_max_responses(request=self.request) + + return response def form_invalid(self, form: StudyEditForm): messages.error(self.request, form.errors) diff --git a/scss/base.scss b/scss/base.scss index 97b51005a..34fe4e3a8 100644 --- a/scss/base.scss +++ b/scss/base.scss @@ -12,6 +12,7 @@ // import study-responses after custom variables because it uses a color variable @import "study-responses"; +@import "study-detail-progress-bar"; // Add all bootstrap features @import "bootstrap-5.2.0/scss/bootstrap"; diff --git a/scss/study-detail-progress-bar.scss b/scss/study-detail-progress-bar.scss new file mode 100644 index 000000000..ef73887a1 --- /dev/null +++ b/scss/study-detail-progress-bar.scss @@ -0,0 +1,54 @@ +progress { + &.study-progress { + --study-progress-color: var(--bs-info); + appearance: none; + -webkit-appearance: none; + border: none; + border-radius: .375rem; + height: 20px; + width: 100%; + overflow: hidden; + background-color: #e9ecef; + } + + &.study-progress::-webkit-progress-bar { + background-color: #e9ecef; + border-radius: .375rem; + } + + 
&.study-progress::-webkit-progress-value { + background-color: var(--study-progress-color); + border-radius: .375rem; + } + + &.study-progress::-moz-progress-bar { + background-color: var(--study-progress-color); + border-radius: .375rem; + } + + &.study-progress-info { + --study-progress-color: var(--bs-info); + } + + &.study-progress-warning { + --study-progress-color: var(--bs-warning); + } + + &.study-progress-success { + --study-progress-color: var(--bs-success); + } + + &.study-progress-danger { + --study-progress-color: var(--bs-danger); + } + + &.study-progress-danger::-webkit-progress-value { + background-image: linear-gradient(45deg, rgba(255,255,255,.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,.15) 50%, rgba(255,255,255,.15) 75%, transparent 75%, transparent); + background-size: 1rem 1rem; + } + + &.study-progress-danger::-moz-progress-bar { + background-image: linear-gradient(45deg, rgba(255,255,255,.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,.15) 50%, rgba(255,255,255,.15) 75%, transparent 75%, transparent); + background-size: 1rem 1rem; + } +} diff --git a/studies/forms.py b/studies/forms.py index 8d68ee41c..104529022 100644 --- a/studies/forms.py +++ b/studies/forms.py @@ -168,9 +168,15 @@ def participated_choices(): must_not_have_participated = forms.MultipleChoiceField( choices=participated_choices, required=False ) + set_response_limit = forms.BooleanField( + required=False, + label="Set a Response Limit", + help_text="Check this box to set a target number of valid responses for this study.", + ) def clean(self): cleaned_data = super().clean() + min_age_days = self.cleaned_data.get("min_age_days") min_age_months = self.cleaned_data.get("min_age_months") min_age_years = self.cleaned_data.get("min_age_years") @@ -205,12 +211,23 @@ def clean_image(self): return cleaned_image + def save(self, commit=True): + instance = super().save(commit=False) + # Explicitly set max_responses to None if set_response_limit is 
unchecked + if not self.cleaned_data.get("set_response_limit"): + instance.max_responses = None + if commit: + instance.save() + self.save_m2m() + return instance + class Meta: model = Study fields = [ "name", "lab", "priority", + "max_responses", "image", "preview_summary", "short_description", @@ -247,6 +264,7 @@ class Meta: "study_type": "Experiment Type", "compensation_description": "Compensation", "priority": "Lab Page Priority", + "max_responses": "Maximum Responses", } widgets = { "preview_summary": Textarea(attrs={"rows": 2}), @@ -277,6 +295,9 @@ class Meta: "priority": forms.TextInput( attrs={"type": "range", "min": "1", "max": "99"} ), + "max_responses": forms.TextInput( + attrs={"min": "1", "placeholder": "Enter a number"} + ), } help_texts = { @@ -300,6 +321,7 @@ class Meta: "shared_preview": "Allow other Lookit researchers to preview your study and give feedback.", "study_type": "Choose the type of experiment you are creating - this will change the fields that appear on the Study Details page.", "priority": "This affects how studies are ordered at your lab's custom URL, not the main study page. If you leave all studies at the highest priority (99), then all of your lab's active/discoverable studies will be shown in a randomized order on your lab page. If you lower the priority of this study to 1, then it will appear last in the list on your lab page. You can find your lab's custom URL from the labs page. For more info, see the documentation on study prioritization.", + "max_responses": "The study will automatically pause when the number of valid responses reaches this limit. You can change this value at any time. Note that participant sessions running when the limit is reached are permitted to continue. See [the documentation] for more information on response limits, valid responses, and changing a response's valid/invalid status. 
For no response limit, leave this field blank.", } diff --git a/studies/migrations/0105_add_max_responses_to_study.py b/studies/migrations/0105_add_max_responses_to_study.py new file mode 100644 index 000000000..284c928da --- /dev/null +++ b/studies/migrations/0105_add_max_responses_to_study.py @@ -0,0 +1,38 @@ +# Generated by Django 5.2.9 on 2026-02-03 22:08 + +import django.core.validators +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("studies", "0104_add_bucket_kwarg_to_video_cleanup"), + ] + + operations = [ + migrations.RenameIndex( + model_name="consentruling", + new_name="studies_con_respons_23a801_idx", + old_fields=("response", "action"), + ), + migrations.RenameIndex( + model_name="consentruling", + new_name="studies_con_respons_df509e_idx", + old_fields=("response", "arbiter"), + ), + migrations.RenameIndex( + model_name="studylog", + new_name="studies_stu_study_i_b16384_idx", + old_fields=("study", "action"), + ), + migrations.AddField( + model_name="study", + name="max_responses", + field=models.IntegerField( + blank=True, + default=None, + null=True, + validators=[django.core.validators.MinValueValidator(1)], + ), + ), + ] diff --git a/studies/models.py b/studies/models.py index ea187be8f..f1afad753 100644 --- a/studies/models.py +++ b/studies/models.py @@ -8,6 +8,7 @@ import fleep from botocore.exceptions import ClientError from django.conf import settings +from django.contrib import messages from django.contrib.auth.models import Group, Permission from django.contrib.postgres.fields import ArrayField from django.core.validators import MaxValueValidator, MinValueValidator @@ -364,6 +365,12 @@ class Study(models.Model): is_building = models.BooleanField(default=False) compensation_description = models.TextField(blank=True) criteria_expression = models.TextField(blank=True, default="") + max_responses = models.IntegerField( + null=True, + blank=True, + default=None, + 
validators=[MinValueValidator(1)], + ) must_have_participated = models.ManyToManyField( "self", blank=True, symmetrical=False, related_name="expected_participation" ) @@ -548,6 +555,108 @@ def videos_for_consented_responses(self): """Gets videos but only for consented responses.""" return Video.objects.filter(response_id__in=self.consented_responses) + @property + def valid_response_count(self) -> int: + """Return the count of valid responses for max_responses limit. + + A response is counted as valid if: + - is_preview is False + - eligibility is "Eligible" or blank/empty (backwards compatibility) + + And for internal studies, responses must also meet the following conditions: + - completed is True + - completed_consent_frame is True + - the consent has not been rejected (must be either pending or accepted) + + For external studies, the completed, completed_consent_frame, and consent requirements are ignored. + + Returns: + int: Count of valid responses + """ + # Filter out preview responses + responses = self.responses.filter(is_preview=False) + + # For internal study types, also require completed_consent_frame=True, completed=True, and consent not rejected + if not self.study_type.is_external: + responses = responses.filter(completed=True, completed_consent_frame=True) + newest_ruling_subquery = models.Subquery( + ConsentRuling.objects.filter(response=models.OuterRef("pk")) + .order_by("-created_at") + .values("action")[:1] + ) + # Filter out responses with rejected consent, and explicitly allow NULL consent rulings (pending, i.e. no judgment has been submitted). 
+ responses = responses.annotate( + current_ruling=newest_ruling_subquery + ).filter( + models.Q(current_ruling__isnull=True) + | ~models.Q(current_ruling=REJECTED) + ) + + # Filter out ineligible responses + return responses.filter( + models.Q(eligibility=[]) + | models.Q(eligibility__contains=[ResponseEligibility.ELIGIBLE]) + ).count() + + @property + def has_reached_max_responses(self) -> bool: + """Check if the study has reached its maximum number of valid responses. + + Returns: + bool: True if max_responses is set and the limit has been reached + """ + if self.max_responses is None: + return False + return self.valid_response_count >= self.max_responses + + def check_and_pause_if_at_max_responses( + self, send_researcher_email=False, request=None + ): + """Check if max responses reached and pause the study if so. + + Only pauses if the study is currently active. Uses the state machine's + pause trigger to properly transition and run callbacks. + If the study is not active, this method is used to optionally display a banner message, with no state transition. + + Args: + send_researcher_email: If True, send notification email to researchers + with CHANGE_STUDY_STATUS permission. + request: If provided, display a Django messages banner to the user. + """ + if self.max_responses is None: + return + + if not self.has_reached_max_responses: + return + + # Refresh from DB to ensure the in-memory study is current before + # the pause transition triggers a save (via _finalize_state_change). + self.refresh_from_db() + + # Use the state machine's pause trigger to properly transition + # and run callbacks (like notify_administrators_of_pause). + # Note: no explicit save() needed here because the state machine's + # _finalize_state_change callback already saves the model. 
+ if self.state == "active": + self.pause() # No user since this is system-triggered + + if send_researcher_email: + self._notify_researchers_of_max_responses_pause() + + if request: + messages.warning( + request, + f'Study "{self.name}" has been automatically paused because it ' + f"reached the response limit ({self.valid_response_count}/{self.max_responses}).", + ) + else: + # Study is not active, so no state transition is needed. Just notify the researcher that they cannot start the study. + if request: + messages.warning( + request, + f'Study "{self.name}" has reached the response limit ({self.valid_response_count}/{self.max_responses}).', + ) + @property def consent_videos(self): return self.videos.filter(is_consent_footage=True) @@ -750,7 +859,19 @@ def check_if_built(self, ev): """ if self.needs_to_be_built: raise RuntimeError( - f'Cannot activate study - experiment runner for "{self.name}" ({self.id}) has not been built!' + f'Cannot activate the study "{self.name}" ({self.id}) because the experiment runner has not been built!' + ) + + def check_if_at_max_responses(self, ev): + """Check if study has reached its max responses value before activating/starting. + + :param ev: The event object + :type ev: transitions.core.EventData + :raise: RuntimeError + """ + if self.has_reached_max_responses: + raise RuntimeError( + f'Cannot activate the study "{self.name}" ({self.id}) because it has reached its maximum number of responses. Be sure to handle all pending consents and review existing responses, as this may open up slots. Then increase the response limit in the Study Ad if necessary, and try starting the study again.' 
) def notify_administrators_of_activation(self, ev): @@ -775,12 +896,16 @@ def notify_administrators_of_activation(self, ev): ) def notify_administrators_of_pause(self, ev): + user = ev.kwargs.get("user") + caller_name = ( + user.get_short_name() if user else "System (max responses reached)" + ) context = { "lab_name": self.lab.name, "study_name": self.name, "study_id": self.pk, "study_uuid": str(self.uuid), - "researcher_name": ev.kwargs.get("user").get_short_name(), + "researcher_name": caller_name, "action": ev.transition.dest, } send_mail.delay( @@ -795,6 +920,28 @@ def notify_administrators_of_pause(self, ev): **context, ) + def _notify_researchers_of_max_responses_pause(self): + """Send email to researchers notifying them the study was auto-paused + because it reached the maximum number of responses.""" + context = { + "study_name": self.name, + "study_id": self.pk, + "study_uuid": str(self.uuid), + "max_responses": self.max_responses, + "valid_response_count": self.valid_response_count, + } + send_mail.delay( + "notify_researchers_of_max_responses_pause", + f"{self.name}: Study paused - response limit reached", + settings.EMAIL_FROM_ADDRESS, + bcc=list( + self.users_with_study_perms( + StudyPermission.CHANGE_STUDY_STATUS + ).values_list("username", flat=True) + ), + **context, + ) + def notify_administrators_of_deactivation(self, ev): context = { "lab_name": self.lab.name, @@ -948,6 +1095,12 @@ def check_modification_of_approved_study( ): continue # Skip, since the actual JSON content is the same - only exact_text changing if new != current: + # For file fields (e.g. image), None and "" are equivalent empty + # values that can differ between in-memory defaults and DB-loaded + # values. Treat them as unchanged. 
+ if hasattr(current, "name") and hasattr(new, "name"): + if (current.name or "") == (new.name or ""): + continue important_fields_changed = True break @@ -1202,7 +1355,7 @@ def birthdate_difference(self): @property def normalized_exp_data(self): - # Where study type is jspysch, convert experiment data to resemble EFP exp data. + # Where study type is jspsych, convert experiment data to resemble EFP exp data. if self.study_type.is_jspsych: return {key: value for key, value in zip(self.sequence, self.exp_data)} else: @@ -1289,11 +1442,18 @@ def take_action_on_exp_data(sender, instance, created, **kwargs): """ response = instance # Aliasing because instance is hooked as a kwarg. - if created or not response.sequence: + if response.study.study_type.is_external: + # External studies: check if study has reached max responses and, if so, pause the study and email researchers. + response.study.check_and_pause_if_at_max_responses(send_researcher_email=True) + elif created or not response.sequence: return else: dispatch_frame_action(response) + # Internal studies: if response is complete, check if this study has reached max responses and, if so, pause the study and email researchers. + if response.completed: + response.study.check_and_pause_if_at_max_responses(send_researcher_email=True) + class FeedbackApiManager(models.Manager): """Prefetch all the things.""" diff --git a/studies/templates/emails/notify_researchers_of_max_responses_pause.html b/studies/templates/emails/notify_researchers_of_max_responses_pause.html new file mode 100644 index 000000000..dab7f9c56 --- /dev/null +++ b/studies/templates/emails/notify_researchers_of_max_responses_pause.html @@ -0,0 +1,17 @@ +{% load web_extras %} +

Dear Study Researchers,

+

+ Your study {{ study_name }} has been automatically paused + because it reached the maximum number of valid responses + ({{ valid_response_count }} valid / {{ max_responses }} limit). +

+

+ Please make sure to handle any pending consents and review your responses, as doing so may open up more slots. Then, if you would like to collect more data, you can increase the study's response limit if necessary, and re-start it. Your study will NOT be restarted automatically if more slots become available and/or you increase the response limit. +
+ Your study can be found here. +

+

+ Best, +
+ Lookit Bot +

diff --git a/studies/templates/emails/notify_researchers_of_max_responses_pause.txt b/studies/templates/emails/notify_researchers_of_max_responses_pause.txt new file mode 100644 index 000000000..e06a6ad58 --- /dev/null +++ b/studies/templates/emails/notify_researchers_of_max_responses_pause.txt @@ -0,0 +1,11 @@ +{% load web_extras %} +Dear Study Researchers, + +Your study {{ study_name }} has been automatically paused because it reached the maximum number of valid responses ({{ valid_response_count }} valid / {{ max_responses }} limit). + +Please make sure to handle any pending consents and review your responses, as doing so may open up more slots. Then, if you would like to collect more data, you can increase the study's response limit if necessary, and re-start it. Your study will NOT be restarted automatically if more slots become available and/or you increase the response limit. + +Your study can be found here: {% absolute_url 'exp:study' study_id %} + +Best, +Lookit Bot diff --git a/studies/templates/studies/_study_fields.html b/studies/templates/studies/_study_fields.html index c8c79e354..d7c5406ce 100644 --- a/studies/templates/studies/_study_fields.html +++ b/studies/templates/studies/_study_fields.html @@ -157,4 +157,12 @@ {% bootstrap_field form.must_not_have_participated label_class="form-label fw-bold" wrapper_class="mb-4" %} {% bootstrap_field form.criteria_expression label_class="form-label fw-bold" wrapper_class="mb-4" %}
+
+ {% bootstrap_field form.set_response_limit label_class="form-label fw-bold" wrapper_class="mb-2" %} +
+ {% bootstrap_label "Maximum Responses" label_class="form-label fw-bold" label_for="id_max_responses" %} + {% bootstrap_field form.max_responses show_label=False show_help=True field_class="w-25" %} +
+
+
{% bootstrap_field form.study_type label_class="form-label fw-bold" wrapper_class="mb-4" %} diff --git a/studies/templates/studies/study_detail.html b/studies/templates/studies/study_detail.html index 1720f65ad..ca5657158 100644 --- a/studies/templates/studies/study_detail.html +++ b/studies/templates/studies/study_detail.html @@ -127,6 +127,31 @@ Your study link will show up here when you have built the experiment runner. {% endif %}

+ {% if study.max_responses %} +
+
+ Valid Responses + {{ study.valid_response_count }} / {{ study.max_responses }} +
+ {% widthratio study.valid_response_count study.max_responses 100 as percent_str %} + {% with percent=percent_str|add:"0" %} +
+ {{ study.valid_response_count }} / {{ study.max_responses }} +
+ {% if study.valid_response_count == study.max_responses %} +
+ Response limit reached. Your study has been paused. +
+ {% elif study.valid_response_count > study.max_responses %} +
+ Response limit exceeded. Your study has been paused. +
+ {% endif %} + {% endwith %} +
+ {% endif %}
{% for log in logs %}
-
{% localtime on %}{{ log.created_at }}{% endlocaltime %}
-
+
{% localtime on %}{{ log.created_at }}{% endlocaltime %}
+
Study {% if log.action == "active" %} started @@ -17,8 +17,12 @@ {% endif %} {% if log.user %} by {{ log.user.get_short_name }} - {% elif log.action == "rejected" %} - due to changes + {% else %} + {% if log.action == "rejected" %} + due to changes + {% elif log.action == "paused" %} + automatically: response limit reached + {% endif %} {% endif %}
diff --git a/studies/test_recording_method.py b/studies/test_recording_method.py index 46e3cd2db..78e0af66c 100644 --- a/studies/test_recording_method.py +++ b/studies/test_recording_method.py @@ -88,7 +88,7 @@ def session(self): class RecordingMethodJsPsychTestCase(TestCase): @patch("boto3.client") - def test_jspysch(self, mock_client): + def test_jspsych(self, mock_client): make_boto_client(mock_client) _, study, child = get_user(StudyType.get_jspsych()) context = {"study": study, "view": TestView(child.uuid)} diff --git a/studies/tests.py b/studies/tests.py index 86c0b9705..9dd8265ef 100644 --- a/studies/tests.py +++ b/studies/tests.py @@ -1,7 +1,7 @@ import json import re from datetime import date, datetime, timedelta, timezone -from unittest.mock import patch +from unittest.mock import MagicMock, patch from botocore.exceptions import ClientError, ParamValidationError from django.conf import settings @@ -22,7 +22,16 @@ get_experiment_absolute_url, send_mail, ) -from studies.models import Lab, Response, Study, StudyType, StudyTypeEnum, Video +from studies.models import ( + REJECTED, + ConsentRuling, + Lab, + Response, + Study, + StudyType, + StudyTypeEnum, + Video, +) from studies.permissions import StudyPermission from studies.tasks import ( MessageTarget, @@ -841,13 +850,81 @@ def test_get_jspsych(self): self.assertFalse(StudyType.get_jspsych().is_external) +@override_settings(CELERY_TASK_ALWAYS_EAGER=True) class StudyModelTestCase(TestCase): - def test_responses_for_researcher_external_studies(self): - study = Study.objects.create( - study_type=StudyType.get_external(), + def _create_study_with_participant(self, study_type=None, **study_kwargs): + """Create a study with a user and child for testing. + + Returns (study, user, child) tuple. 
+ """ + if study_type is None: + study_type = StudyType.get_ember_frame_player() + study = Study.objects.create(study_type=study_type, **study_kwargs) + user = User.objects.create(is_active=True) + child = Child.objects.create(user=user, birthday=date.today()) + return study, user, child + + def _create_response( + self, + study, + child, + eligibility=None, + completed=True, + completed_consent_frame=True, + is_preview=False, + ): + """Create a response and update its eligibility. + + Note: Response.save() auto-sets eligibility, so we use .update() after creation. + """ + if eligibility is None: + eligibility = [ResponseEligibility.ELIGIBLE] + user = child.user + r = Response.objects.create( + study=study, + child=child, + study_type=study.study_type, + demographic_snapshot=user.latest_demographics, + completed=completed, + completed_consent_frame=completed_consent_frame, + is_preview=is_preview, ) - user = User.objects.create(is_active=True, is_researcher=True) + Response.objects.filter(pk=r.pk).update(eligibility=eligibility) + return r + + def _create_eligible_responses(self, study, count): + """Create a user, child, and the given number of eligible responses for a study.""" + user = User.objects.create(is_active=True) child = Child.objects.create(user=user, birthday=date.today()) + for _ in range(count): + self._create_response( + study, child, eligibility=[ResponseEligibility.ELIGIBLE] + ) + return user, child + + def _create_study_with_lab(self, name, max_responses, state=None): + """Create a study with a lab, optionally setting its state.""" + study = Study.objects.create( + name=name, + lab=Lab.objects.create( + name=f"Test Lab {name}", + institution="Test", + contact_email="test@test.com", + ), + study_type=StudyType.get_ember_frame_player(), + max_responses=max_responses, + ) + if state: + study.state = state + study.save() + return study + + def test_responses_for_researcher_external_studies(self): + study, user, child = 
self._create_study_with_participant( + study_type=StudyType.get_external() + ) + user.is_researcher = True + user.save() response = Response.objects.create( study=study, child=child, @@ -861,6 +938,274 @@ def test_responses_for_researcher_external_studies(self): self.assertIn(response, study.responses_for_researcher(user)) + def test_valid_response_count_internal_study(self): + """Test that valid_response_count correctly counts eligible, completed, non-preview responses.""" + study, _, child = self._create_study_with_participant() + + # Valid: completed, consent frame completed, not preview, empty eligibility + self._create_response(study, child, eligibility=[]) + + # Valid: completed, consent frame completed, not preview, eligible + self._create_response(study, child) + + # Invalid: preview response + self._create_response(study, child, is_preview=True) + + # Invalid: not completed + self._create_response(study, child, completed=False) + + # Invalid: ineligible + self._create_response( + study, child, eligibility=[ResponseEligibility.INELIGIBLE_OLD] + ) + + # Invalid: consent frame not completed + self._create_response(study, child, completed_consent_frame=False) + + self.assertEqual(study.valid_response_count, 2) + + def test_valid_response_count_external_study(self): + """Test that valid_response_count for external studies ignores completed field.""" + study, _, child = self._create_study_with_participant( + study_type=StudyType.get_external() + ) + + # Valid: not preview, eligible, completed + self._create_response(study, child) + + # Valid: not preview, eligible, NOT completed (should still count for external) + self._create_response(study, child, completed=False) + + # Valid: not preview, empty eligibility, NOT completed + self._create_response(study, child, completed=False, eligibility=[]) + + # Invalid: preview response (should not count) + self._create_response(study, child, is_preview=True) + + # Invalid: ineligible (should not count) + 
self._create_response( + study, child, eligibility=[ResponseEligibility.INELIGIBLE_CRITERIA] + ) + + # 3 valid responses (completed field ignored for external) + self.assertEqual(study.valid_response_count, 3) + + def test_valid_response_count_excludes_rejected_consent_internal(self): + """Test that valid_response_count excludes responses with rejected consent for internal studies.""" + study, user, child = self._create_study_with_participant() + + # Valid: no consent ruling (pending) + self._create_response(study, child) + + # Valid: accepted consent + r2 = self._create_response(study, child) + ConsentRuling.objects.create(response=r2, action="accepted", arbiter=user) + + # Invalid: consent rejected + r3 = self._create_response(study, child) + ConsentRuling.objects.create(response=r3, action=REJECTED, arbiter=user) + + # 2 valid: r1 (no ruling = pending) and r2 (accepted). r3 excluded (rejected). + self.assertEqual(study.valid_response_count, 2) + + def test_valid_response_count_uses_most_recent_consent_ruling(self): + """Test that only the most recent consent ruling is considered.""" + study, user, child = self._create_study_with_participant() + + # Response with rejected then accepted consent (most recent = accepted, so valid) + r1 = self._create_response(study, child) + ConsentRuling.objects.create(response=r1, action=REJECTED, arbiter=user) + ConsentRuling.objects.create(response=r1, action="accepted", arbiter=user) + + # Response with accepted then rejected consent (most recent = rejected, so invalid) + r2 = self._create_response(study, child) + ConsentRuling.objects.create(response=r2, action="accepted", arbiter=user) + ConsentRuling.objects.create(response=r2, action=REJECTED, arbiter=user) + + # Only r1 is valid (most recent ruling is accepted) + self.assertEqual(study.valid_response_count, 1) + + def test_valid_response_count_consent_ignored_for_external(self): + """Test that consent rulings are not checked for external studies.""" + study, user, child = 
self._create_study_with_participant( + study_type=StudyType.get_external() + ) + + # Response with rejected consent - should still count for external + r1 = self._create_response(study, child) + ConsentRuling.objects.create(response=r1, action=REJECTED, arbiter=user) + + # Should count because external studies don't check consent + self.assertEqual(study.valid_response_count, 1) + + def test_has_reached_max_responses_no_limit(self): + """Test that has_reached_max_responses returns False when max_responses is None.""" + study = Study.objects.create( + study_type=StudyType.get_ember_frame_player(), + max_responses=None, + ) + self.assertFalse(study.has_reached_max_responses) + + def test_has_reached_max_responses_not_reached(self): + """Test that has_reached_max_responses returns False when limit not reached.""" + study, _, child = self._create_study_with_participant(max_responses=5) + + for _ in range(2): + self._create_response(study, child) + + self.assertFalse(study.has_reached_max_responses) + + def test_has_reached_max_responses_reached(self): + """Test that has_reached_max_responses returns True when limit is reached.""" + study, _, child = self._create_study_with_participant(max_responses=3) + + for _ in range(3): + self._create_response(study, child) + + self.assertTrue(study.has_reached_max_responses) + + def test_has_reached_max_responses_exceeded(self): + """Test that has_reached_max_responses returns True when limit is exceeded.""" + study, _, child = self._create_study_with_participant(max_responses=2) + + for _ in range(4): + self._create_response(study, child) + + self.assertTrue(study.has_reached_max_responses) + + def test_check_and_pause_if_at_max_responses_no_limit_set(self): + """Study without max_responses set should not pause.""" + study = self._create_study_with_lab( + "No Limit Study", max_responses=None, state="active" + ) + + study.check_and_pause_if_at_max_responses() + study.refresh_from_db() + + self.assertEqual(study.state, "active") + 
+ def test_check_and_pause_if_at_max_responses_not_active(self): + """Study not in active state should not pause.""" + study = self._create_study_with_lab("Not Active Study", max_responses=1) + # Study is in "created" state by default + self.assertEqual(study.state, "created") + + study.check_and_pause_if_at_max_responses() + study.refresh_from_db() + + self.assertEqual(study.state, "created") + + def test_check_and_pause_if_at_max_responses_not_reached(self): + """Active study that hasn't reached max_responses should not pause.""" + study = self._create_study_with_lab( + "Under Limit Study", max_responses=5, state="active" + ) + self._create_eligible_responses(study, count=2) + + study.check_and_pause_if_at_max_responses() + study.refresh_from_db() + + self.assertEqual(study.state, "active") + + def test_check_and_pause_if_at_max_responses_limit_reached(self): + """Active study that has reached max_responses should pause.""" + study = self._create_study_with_lab( + "At Limit Study", max_responses=2, state="active" + ) + self._create_eligible_responses(study, count=2) + + study.check_and_pause_if_at_max_responses() + study.refresh_from_db() + + self.assertEqual(study.state, "paused") + + def test_check_and_pause_if_at_max_responses_limit_exceeded(self): + """Active study that has exceeded max_responses should pause.""" + study = self._create_study_with_lab( + "Over Limit Study", max_responses=2, state="active" + ) + self._create_eligible_responses(study, count=4) + + study.check_and_pause_if_at_max_responses() + study.refresh_from_db() + + self.assertEqual(study.state, "paused") + + @patch("studies.models.send_mail") + def test_check_and_pause_sends_researcher_email_when_requested( + self, mock_send_mail + ): + """Researcher notification email is sent when send_researcher_email=True.""" + study = self._create_study_with_lab( + "Email Test", max_responses=2, state="active" + ) + self._create_eligible_responses(study, count=2) + + 
study.check_and_pause_if_at_max_responses(send_researcher_email=True) + + researcher_calls = [ + c + for c in mock_send_mail.delay.call_args_list + if c[0][0] == "notify_researchers_of_max_responses_pause" + ] + self.assertEqual(len(researcher_calls), 1) + + @patch("studies.models.send_mail") + def test_check_and_pause_no_researcher_email_by_default(self, mock_send_mail): + """Researcher notification email is not sent by default.""" + study = self._create_study_with_lab( + "No Email Test", max_responses=2, state="active" + ) + self._create_eligible_responses(study, count=2) + + study.check_and_pause_if_at_max_responses() + + researcher_calls = [ + c + for c in mock_send_mail.delay.call_args_list + if c[0][0] == "notify_researchers_of_max_responses_pause" + ] + self.assertEqual(len(researcher_calls), 0) + + @patch("studies.models.send_mail") + @patch("studies.models.messages") + def test_check_and_pause_shows_banner_when_request_provided( + self, mock_messages, mock_send_mail + ): + """A Django messages warning is added when request is provided.""" + study = self._create_study_with_lab( + "Banner Test", max_responses=2, state="active" + ) + self._create_eligible_responses(study, count=2) + mock_request = MagicMock() + + study.check_and_pause_if_at_max_responses(request=mock_request) + + mock_messages.warning.assert_called_once() + call_args = mock_messages.warning.call_args + self.assertEqual(call_args[0][0], mock_request) + self.assertIn("automatically paused", call_args[0][1]) + + researcher_calls = [ + c + for c in mock_send_mail.delay.call_args_list + if c[0][0] == "notify_researchers_of_max_responses_pause" + ] + self.assertEqual(len(researcher_calls), 0) + + @patch("studies.models.send_mail") + @patch("studies.models.messages") + def test_check_and_pause_no_banner_by_default(self, mock_messages, mock_send_mail): + """No Django message is added when request is not provided.""" + study = self._create_study_with_lab( + "No Banner Test", max_responses=2, 
state="active" + ) + self._create_eligible_responses(study, count=2) + + study.check_and_pause_if_at_max_responses() + + mock_messages.warning.assert_not_called() + class VideoModelTestCase(TestCase): def setUp(self): diff --git a/studies/workflow.py b/studies/workflow.py index 339db59fb..475632d97 100644 --- a/studies/workflow.py +++ b/studies/workflow.py @@ -100,7 +100,7 @@ "trigger": "activate", "source": ["approved", "paused"], "dest": "active", - "before": ["check_if_built"], + "before": ["check_if_built", "check_if_at_max_responses"], "after": ["notify_administrators_of_activation"], }, { diff --git a/web/static/custom_bootstrap5.css b/web/static/custom_bootstrap5.css index a6492fa07..c423229fd 100644 --- a/web/static/custom_bootstrap5.css +++ b/web/static/custom_bootstrap5.css @@ -56,6 +56,49 @@ input[type="checkbox"].researcher-editable:disabled + label .icon-star { -webkit-line-clamp: 2; -webkit-box-orient: vertical; } +progress.study-progress { + --study-progress-color: var(--bs-info); + appearance: none; + -webkit-appearance: none; + border: none; + border-radius: .375rem; + height: 20px; + width: 100%; + overflow: hidden; + background-color: #e9ecef; } + +progress.study-progress::-webkit-progress-bar { + background-color: #e9ecef; + border-radius: .375rem; } + +progress.study-progress::-webkit-progress-value { + background-color: var(--study-progress-color); + border-radius: .375rem; } + +progress.study-progress::-moz-progress-bar { + background-color: var(--study-progress-color); + border-radius: .375rem; } + +progress.study-progress-info { + --study-progress-color: var(--bs-info); } + +progress.study-progress-warning { + --study-progress-color: var(--bs-warning); } + +progress.study-progress-success { + --study-progress-color: var(--bs-success); } + +progress.study-progress-danger { + --study-progress-color: var(--bs-danger); } + +progress.study-progress-danger::-webkit-progress-value { + background-image: linear-gradient(45deg, rgba(255, 255, 255, 
0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-size: 1rem 1rem; } + +progress.study-progress-danger::-moz-progress-bar { + background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-size: 1rem 1rem; } + /*! * Bootstrap v5.2.0 (https://getbootstrap.com/) * Copyright 2011-2022 The Bootstrap Authors diff --git a/web/static/js/study-fields.js b/web/static/js/study-fields.js index de1361e93..2bc60bd1a 100644 --- a/web/static/js/study-fields.js +++ b/web/static/js/study-fields.js @@ -104,4 +104,47 @@ $(document).ready(function () { // Trigger mousedown to populate ui. mustHave.dispatchEvent(new Event('mousedown')); mustNotHave.dispatchEvent(new Event('mousedown')); + + /* + Max Responses validation and checkbox toggle + */ + const maxResponses = document.querySelector('#id_max_responses'); + const setResponseLimit = document.querySelector('#id_set_response_limit'); + + if (maxResponses && setResponseLimit) { + // Function to enable/disable max_responses based on checkbox + function toggleMaxResponses() { + if (setResponseLimit.checked) { + maxResponses.disabled = false; + maxResponses.parentElement.classList.remove('text-muted'); + } else { + // The max responses value is cleared when the setResponseLimit box is unchecked, but this value won't actually be saved with the form data if the form is submitted in this state, because the max_responses field is disabled. This case is handled on the backend in the form's clean method, which checks if setResponseLimit is unchecked, and if so, sets max_responses to None. 
+ maxResponses.disabled = true; + maxResponses.value = ''; + maxResponses.parentElement.classList.add('text-muted'); + } + } + + // On page load, check the checkbox if max_responses has a value + if (maxResponses.value) { + setResponseLimit.checked = true; + } + + // Set initial state + toggleMaxResponses(); + + // Listen for checkbox changes + setResponseLimit.addEventListener('change', toggleMaxResponses); + + // Input validation for max_responses + maxResponses.addEventListener('input', () => { + // Remove non-numeric characters and leading zeros + let value = maxResponses.value.replace(/\D/g, '').replace(/^0+/, ''); + // Ensure minimum value of 1 if not empty + if (value !== '' && Number.parseInt(value) < 1) { + value = '1'; + } + maxResponses.value = value; + }); + } });