diff --git a/exp/tests/test_study_views.py b/exp/tests/test_study_views.py index 4175b5dd3..4bfb911ef 100644 --- a/exp/tests/test_study_views.py +++ b/exp/tests/test_study_views.py @@ -31,7 +31,8 @@ StudyDetailView, StudyPreviewDetailView, ) -from studies.models import Lab, Study, StudyType +from studies.helpers import ResponseEligibility +from studies.models import Lab, Response, Study, StudyType from studies.permissions import LabPermission, StudyPermission @@ -672,6 +673,171 @@ def test_update_trigger_object_no_attr( mock_request.POST.keys.assert_not_called() +@override_settings(CELERY_TASK_ALWAYS_EAGER=True) +@override_settings(CELERY_TASK_EAGER_PROPAGATES=True) +@patch("studies.helpers.send_mail") +class ActivateStudyMaxResponsesTestCase(TestCase): + """Integration tests for the check_if_at_max_responses workflow guard. + + When a researcher tries to activate a study (from approved or paused state), + the transition should be blocked if the study has already reached its + max_responses limit. 
+ """ + + def setUp(self): + self.client = Force2FAClient() + self.user = G(User, is_active=True, is_researcher=True) + self.lab = G(Lab, name="Activation Test Lab", approved_to_test=True) + self.lab.researchers.add(self.user) + + self.study = G( + Study, + image=SimpleUploadedFile( + name="small.gif", + content=( + b"\x47\x49\x46\x38\x39\x61\x01\x00\x01\x00\x00\x00\x00\x21\xf9\x04" + b"\x01\x0a\x00\x01\x00\x2c\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02" + b"\x02\x4c\x01\x00\x3b" + ), + content_type="image/gif", + ), + study_type=StudyType.get_external(), + creator=self.user, + lab=self.lab, + name="Activation Test Study", + built=True, + ) + self.study.admin_group.user_set.add(self.user) + assign_perm( + StudyPermission.CHANGE_STUDY_STATUS.prefixed_codename, + self.user, + self.study, + ) + self.client.force_login(self.user) + + self.change_status_url = reverse( + "exp:change-study-status", kwargs={"pk": self.study.pk} + ) + + def _create_eligible_responses(self, count): + """Create eligible, non-preview responses for the study.""" + participant = G(User, is_active=True) + child = G( + Child, + user=participant, + given_name="Test child", + birthday=datetime.date.today() - datetime.timedelta(days=30), + ) + for _ in range(count): + r = Response.objects.create( + study=self.study, + child=child, + study_type=self.study.study_type, + demographic_snapshot=participant.latest_demographics, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + def _get_error_messages(self, response): + """Extract error-level messages from a followed response.""" + from django.contrib.messages import constants + + return [m for m in response.context["messages"] if m.level == constants.ERROR] + + def _get_success_messages(self, response): + """Extract success-level messages from a followed response.""" + from django.contrib.messages import constants + + return [m for m in response.context["messages"] if 
m.level == constants.SUCCESS] + + def test_activate_blocked_from_approved_when_at_max_responses(self, mock_send_mail): + """Activating an approved study fails when max_responses has been reached.""" + self.study.state = "approved" + self.study.max_responses = 3 + self.study.save() + self._create_eligible_responses(3) + + response = self.client.post( + self.change_status_url, {"trigger": "activate"}, follow=True + ) + + self.study.refresh_from_db() + self.assertNotEqual(self.study.state, "active") + errors = self._get_error_messages(response) + self.assertEqual(len(errors), 1) + self.assertIn("TRANSITION ERROR", str(errors[0])) + self.assertIn("maximum number of responses", str(errors[0])) + + def test_activate_blocked_from_paused_when_at_max_responses(self, mock_send_mail): + """Reactivating a paused study fails when max_responses has been reached.""" + self.study.state = "paused" + self.study.max_responses = 2 + self.study.save() + self._create_eligible_responses(3) + + response = self.client.post( + self.change_status_url, {"trigger": "activate"}, follow=True + ) + + self.study.refresh_from_db() + self.assertNotEqual(self.study.state, "active") + errors = self._get_error_messages(response) + self.assertEqual(len(errors), 1) + self.assertIn("TRANSITION ERROR", str(errors[0])) + self.assertIn("maximum number of responses", str(errors[0])) + + def test_activate_succeeds_when_below_max_responses(self, mock_send_mail): + """Activating an approved study succeeds when below the max_responses limit.""" + self.study.state = "approved" + self.study.max_responses = 10 + self.study.save() + self._create_eligible_responses(3) + + response = self.client.post( + self.change_status_url, {"trigger": "activate"}, follow=True + ) + + self.study.refresh_from_db() + self.assertEqual(self.study.state, "active") + errors = self._get_error_messages(response) + self.assertEqual(len(errors), 0) + + def test_activate_succeeds_when_no_max_responses_set(self, mock_send_mail): + """Activating 
an approved study succeeds when max_responses is not set.""" + self.study.state = "approved" + self.study.max_responses = None + self.study.save() + self._create_eligible_responses(5) + + response = self.client.post( + self.change_status_url, {"trigger": "activate"}, follow=True + ) + + self.study.refresh_from_db() + self.assertEqual(self.study.state, "active") + errors = self._get_error_messages(response) + self.assertEqual(len(errors), 0) + + def test_activate_succeeds_when_exactly_at_limit_minus_one(self, mock_send_mail): + """Activating succeeds when response count is one below max_responses.""" + self.study.state = "approved" + self.study.max_responses = 4 + self.study.save() + self._create_eligible_responses(3) + + response = self.client.post( + self.change_status_url, {"trigger": "activate"}, follow=True + ) + + self.study.refresh_from_db() + self.assertEqual(self.study.state, "active") + errors = self._get_error_messages(response) + self.assertEqual(len(errors), 0) + + class ManageResearcherPermissionsViewTestCase(TestCase): def test_model(self) -> None: manage_researcher_permissions_view = ManageResearcherPermissionsView() @@ -1334,3 +1500,196 @@ def test_must_not_have_participated(self): # TODO: StudyPreviewProxyView # - add checks analogous to preview detail view # - check for correct redirect + + +@override_settings(CELERY_TASK_ALWAYS_EAGER=True) +@override_settings(CELERY_TASK_EAGER_PROPAGATES=True) +@patch("studies.helpers.send_mail") +class StudyUpdateMaxResponsesTestCase(TestCase): + """Tests for banner messages when max_responses is edited via StudyUpdateView.""" + + def setUp(self): + self.client = Force2FAClient() + self.user = G(User, is_active=True, is_researcher=True) + self.lab = G(Lab, name="Max Resp Lab", approved_to_test=True) + self.lab.researchers.add(self.user) + + self.study = G( + Study, + image=SimpleUploadedFile( + name="small.gif", + content=( + b"\x47\x49\x46\x38\x39\x61\x01\x00\x01\x00\x00\x00\x00\x21\xf9\x04" + 
b"\x01\x0a\x00\x01\x00\x2c\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02" + b"\x02\x4c\x01\x00\x3b" + ), + content_type="image/gif", + ), + study_type=StudyType.get_ember_frame_player(), + creator=self.user, + lab=self.lab, + name="Max Resp Study", + short_description="test", + preview_summary="test", + purpose="test", + criteria="test", + duration="test", + contact_info="test", + exit_url="https://mit.edu", + ) + self.study.admin_group.user_set.add(self.user) + assign_perm( + StudyPermission.WRITE_STUDY_DETAILS.prefixed_codename, + self.user, + self.study, + ) + self.client.force_login(self.user) + + def _form_data(self, include_image=True, **overrides): + """Build minimal valid form data for the StudyEditForm. + + Set include_image=False to avoid triggering the pre_save signal that + rejects approved/active studies when monitored fields change. + """ + data = { + "name": self.study.name, + "lab": self.study.lab_id, + "study_type": self.study.study_type_id, + "min_age_years": 0, + "min_age_months": 0, + "min_age_days": 0, + "max_age_years": 1, + "max_age_months": 0, + "max_age_days": 0, + "priority": 1, + "preview_summary": self.study.preview_summary, + "short_description": self.study.short_description, + "purpose": self.study.purpose, + "compensation_description": self.study.compensation_description, + "exit_url": self.study.exit_url, + "criteria": self.study.criteria, + "duration": self.study.duration, + "contact_info": self.study.contact_info, + } + if include_image: + data["image"] = SimpleUploadedFile( + name="test_image.jpg", + content=open("exp/tests/static/study_image.png", "rb").read(), + content_type="image/jpeg", + ) + data.update(overrides) + return data + + def _create_eligible_responses(self, count): + """Create eligible, completed, non-preview responses for the study.""" + participant = G(User, is_active=True) + child = G( + Child, + user=participant, + given_name="Test child", + birthday=datetime.date.today() - datetime.timedelta(days=30), + ) + for _ in 
range(count): + r = Response.objects.create( + study=self.study, + child=child, + study_type=self.study.study_type, + demographic_snapshot=participant.latest_demographics, + completed_consent_frame=True, + completed=True, + is_preview=False, + ) + Response.objects.filter(pk=r.pk).update( + eligibility=[ResponseEligibility.ELIGIBLE] + ) + + def _get_warning_messages(self, response): + """Extract warning-level messages from a followed response.""" + from django.contrib.messages import constants + + return [m for m in response.context["messages"] if m.level == constants.WARNING] + + def test_banner_when_max_responses_reached_non_active_study(self, mock_send_mail): + """Warning banner shown when max_responses is set at/below response count on non-active study.""" + self.assertEqual(self.study.state, "created") + self._create_eligible_responses(3) + data = self._form_data(set_response_limit=True, max_responses=3) + response = self.client.post( + reverse("exp:study-edit", kwargs={"pk": self.study.id}), + data, + follow=True, + ) + warnings = self._get_warning_messages(response) + self.assertEqual(len(warnings), 1) + self.assertIn("reached the response limit", str(warnings[0])) + + # Study should NOT be paused (was not active) + self.study.refresh_from_db() + self.assertNotEqual(self.study.state, "paused") + + def test_study_paused_when_max_responses_reached_active_study(self, mock_send_mail): + """Active study is paused and warning shown when max_responses is set at response count.""" + self.assertEqual(self.study.state, "created") + self.study.state = "active" + self.study.save() + self._create_eligible_responses(3) + # include_image=False to avoid triggering the pre_save signal that + # rejects active studies when monitored fields (like image) change. 
+ data = self._form_data( + include_image=False, set_response_limit=True, max_responses=3 + ) + response = self.client.post( + reverse("exp:study-edit", kwargs={"pk": self.study.id}), + data, + follow=True, + ) + warnings = self._get_warning_messages(response) + self.assertEqual(len(warnings), 1) + self.assertIn("automatically paused", str(warnings[0])) + + self.study.refresh_from_db() + self.assertEqual(self.study.state, "paused") + + def test_no_banner_when_max_responses_not_reached(self, mock_send_mail): + """No warning when max_responses is set above the current response count.""" + self._create_eligible_responses(2) + data = self._form_data(set_response_limit=True, max_responses=10) + response = self.client.post( + reverse("exp:study-edit", kwargs={"pk": self.study.id}), + data, + follow=True, + ) + warnings = self._get_warning_messages(response) + self.assertEqual(len(warnings), 0) + self.assertNotEqual(self.study.state, "paused") + + def test_no_banner_when_max_responses_unchanged(self, mock_send_mail): + """No warning when max_responses is submitted but hasn't changed.""" + self.study.max_responses = 5 + self.study.save() + self._create_eligible_responses(5) + data = self._form_data(set_response_limit=True, max_responses=5) + response = self.client.post( + reverse("exp:study-edit", kwargs={"pk": self.study.id}), + data, + follow=True, + ) + warnings = self._get_warning_messages(response) + self.assertEqual(len(warnings), 0) + + def test_banner_when_max_responses_lowered_below_count(self, mock_send_mail): + """Warning shown when max_responses is lowered below existing response count.""" + self.assertEqual(self.study.state, "created") + self.study.max_responses = 10 + self.study.save() + self._create_eligible_responses(5) + data = self._form_data(set_response_limit=True, max_responses=3) + response = self.client.post( + reverse("exp:study-edit", kwargs={"pk": self.study.id}), + data, + follow=True, + ) + warnings = self._get_warning_messages(response) + 
self.assertEqual(len(warnings), 1) + self.assertIn("reached the response limit", str(warnings[0])) + self.assertEqual(self.study.state, "created") diff --git a/exp/views/study.py b/exp/views/study.py index 16cb39e54..4137a486d 100644 --- a/exp/views/study.py +++ b/exp/views/study.py @@ -226,9 +226,18 @@ def form_valid(self, form: StudyEditForm): ) study.must_have_participated.set(form.cleaned_data["must_have_participated"]) + changed_fields = form.changed_data + messages.success(self.request, f"{study.name} study details saved.") - return super().form_valid(form) + # Save form first so the new max_responses value is persisted before + # check_and_pause_if_at_max_responses (which calls refresh_from_db). + response = super().form_valid(form) + # Now check to see if the study has reached max responses with the new value + if "max_responses" in changed_fields: + study.check_and_pause_if_at_max_responses(request=self.request) + + return response def form_invalid(self, form: StudyEditForm): messages.error(self.request, form.errors) diff --git a/scss/base.scss b/scss/base.scss index 97b51005a..34fe4e3a8 100644 --- a/scss/base.scss +++ b/scss/base.scss @@ -12,6 +12,7 @@ // import study-responses after custom variables because it uses a color variable @import "study-responses"; +@import "study-detail-progress-bar"; // Add all bootstrap features @import "bootstrap-5.2.0/scss/bootstrap"; diff --git a/scss/study-detail-progress-bar.scss b/scss/study-detail-progress-bar.scss new file mode 100644 index 000000000..ef73887a1 --- /dev/null +++ b/scss/study-detail-progress-bar.scss @@ -0,0 +1,54 @@ +progress { + &.study-progress { + --study-progress-color: var(--bs-info); + appearance: none; + -webkit-appearance: none; + border: none; + border-radius: .375rem; + height: 20px; + width: 100%; + overflow: hidden; + background-color: #e9ecef; + } + + &.study-progress::-webkit-progress-bar { + background-color: #e9ecef; + border-radius: .375rem; + } + + 
&.study-progress::-webkit-progress-value { + background-color: var(--study-progress-color); + border-radius: .375rem; + } + + &.study-progress::-moz-progress-bar { + background-color: var(--study-progress-color); + border-radius: .375rem; + } + + &.study-progress-info { + --study-progress-color: var(--bs-info); + } + + &.study-progress-warning { + --study-progress-color: var(--bs-warning); + } + + &.study-progress-success { + --study-progress-color: var(--bs-success); + } + + &.study-progress-danger { + --study-progress-color: var(--bs-danger); + } + + &.study-progress-danger::-webkit-progress-value { + background-image: linear-gradient(45deg, rgba(255,255,255,.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,.15) 50%, rgba(255,255,255,.15) 75%, transparent 75%, transparent); + background-size: 1rem 1rem; + } + + &.study-progress-danger::-moz-progress-bar { + background-image: linear-gradient(45deg, rgba(255,255,255,.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,.15) 50%, rgba(255,255,255,.15) 75%, transparent 75%, transparent); + background-size: 1rem 1rem; + } +} diff --git a/studies/forms.py b/studies/forms.py index 8d68ee41c..104529022 100644 --- a/studies/forms.py +++ b/studies/forms.py @@ -168,9 +168,15 @@ def participated_choices(): must_not_have_participated = forms.MultipleChoiceField( choices=participated_choices, required=False ) + set_response_limit = forms.BooleanField( + required=False, + label="Set a Response Limit", + help_text="Check this box to set a target number of valid responses for this study.", + ) def clean(self): cleaned_data = super().clean() + min_age_days = self.cleaned_data.get("min_age_days") min_age_months = self.cleaned_data.get("min_age_months") min_age_years = self.cleaned_data.get("min_age_years") @@ -205,12 +211,23 @@ def clean_image(self): return cleaned_image + def save(self, commit=True): + instance = super().save(commit=False) + # Explicitly set max_responses to None if set_response_limit is 
unchecked + if not self.cleaned_data.get("set_response_limit"): + instance.max_responses = None + if commit: + instance.save() + self.save_m2m() + return instance + class Meta: model = Study fields = [ "name", "lab", "priority", + "max_responses", "image", "preview_summary", "short_description", @@ -247,6 +264,7 @@ class Meta: "study_type": "Experiment Type", "compensation_description": "Compensation", "priority": "Lab Page Priority", + "max_responses": "Maximum Responses", } widgets = { "preview_summary": Textarea(attrs={"rows": 2}), @@ -277,6 +295,9 @@ class Meta: "priority": forms.TextInput( attrs={"type": "range", "min": "1", "max": "99"} ), + "max_responses": forms.TextInput( + attrs={"min": "1", "placeholder": "Enter a number"} + ), } help_texts = { @@ -300,6 +321,7 @@ class Meta: "shared_preview": "Allow other Lookit researchers to preview your study and give feedback.", "study_type": "Choose the type of experiment you are creating - this will change the fields that appear on the Study Details page.", "priority": "This affects how studies are ordered at your lab's custom URL, not the main study page. If you leave all studies at the highest priority (99), then all of your lab's active/discoverable studies will be shown in a randomized order on your lab page. If you lower the priority of this study to 1, then it will appear last in the list on your lab page. You can find your lab's custom URL from the labs page. For more info, see the documentation on study prioritization.", + "max_responses": "The study will automatically pause when the number of valid responses reaches this limit. You can change this value at any time. Note that participant sessions running when the limit is reached are permitted to continue. See [the documentation] for more information on response limits, valid responses, and changing a response's valid/invalid status. 
For no response limit, leave this field blank.", } diff --git a/studies/migrations/0105_add_max_responses_to_study.py b/studies/migrations/0105_add_max_responses_to_study.py new file mode 100644 index 000000000..284c928da --- /dev/null +++ b/studies/migrations/0105_add_max_responses_to_study.py @@ -0,0 +1,38 @@ +# Generated by Django 5.2.9 on 2026-02-03 22:08 + +import django.core.validators +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("studies", "0104_add_bucket_kwarg_to_video_cleanup"), + ] + + operations = [ + migrations.RenameIndex( + model_name="consentruling", + new_name="studies_con_respons_23a801_idx", + old_fields=("response", "action"), + ), + migrations.RenameIndex( + model_name="consentruling", + new_name="studies_con_respons_df509e_idx", + old_fields=("response", "arbiter"), + ), + migrations.RenameIndex( + model_name="studylog", + new_name="studies_stu_study_i_b16384_idx", + old_fields=("study", "action"), + ), + migrations.AddField( + model_name="study", + name="max_responses", + field=models.IntegerField( + blank=True, + default=None, + null=True, + validators=[django.core.validators.MinValueValidator(1)], + ), + ), + ] diff --git a/studies/models.py b/studies/models.py index ea187be8f..f1afad753 100644 --- a/studies/models.py +++ b/studies/models.py @@ -8,6 +8,7 @@ import fleep from botocore.exceptions import ClientError from django.conf import settings +from django.contrib import messages from django.contrib.auth.models import Group, Permission from django.contrib.postgres.fields import ArrayField from django.core.validators import MaxValueValidator, MinValueValidator @@ -364,6 +365,12 @@ class Study(models.Model): is_building = models.BooleanField(default=False) compensation_description = models.TextField(blank=True) criteria_expression = models.TextField(blank=True, default="") + max_responses = models.IntegerField( + null=True, + blank=True, + default=None, + 
validators=[MinValueValidator(1)], + ) must_have_participated = models.ManyToManyField( "self", blank=True, symmetrical=False, related_name="expected_participation" ) @@ -548,6 +555,108 @@ def videos_for_consented_responses(self): """Gets videos but only for consented responses.""" return Video.objects.filter(response_id__in=self.consented_responses) + @property + def valid_response_count(self) -> int: + """Return the count of valid responses for max_responses limit. + + A response is counted as valid if: + - is_preview is False + - eligibility is "Eligible" or blank/empty (backwards compatibility) + + And for internal studies, responses must also meet the following conditions: + - completed is True + - completed_consent_frame is True + - the consent has not been rejected (must be either pending or accepted) + + For external studies, the completed, completed_consent_frame, and consent requirements are ignored. + + Returns: + int: Count of valid responses + """ + # Filter out preview responses + responses = self.responses.filter(is_preview=False) + + # For internal study types, also require completed_consent_frame=True, completed=True, and consent not rejected + if not self.study_type.is_external: + responses = responses.filter(completed=True, completed_consent_frame=True) + newest_ruling_subquery = models.Subquery( + ConsentRuling.objects.filter(response=models.OuterRef("pk")) + .order_by("-created_at") + .values("action")[:1] + ) + # Filter out responses with rejected consent, and explicitly allow NULL consent rulings (pending, i.e. no judgment has been submitted). 
+ responses = responses.annotate( + current_ruling=newest_ruling_subquery + ).filter( + models.Q(current_ruling__isnull=True) + | ~models.Q(current_ruling=REJECTED) + ) + + # Filter out ineligible responses + return responses.filter( + models.Q(eligibility=[]) + | models.Q(eligibility__contains=[ResponseEligibility.ELIGIBLE]) + ).count() + + @property + def has_reached_max_responses(self) -> bool: + """Check if the study has reached its maximum number of valid responses. + + Returns: + bool: True if max_responses is set and the limit has been reached + """ + if self.max_responses is None: + return False + return self.valid_response_count >= self.max_responses + + def check_and_pause_if_at_max_responses( + self, send_researcher_email=False, request=None + ): + """Check if max responses reached and pause the study if so. + + Only pauses if the study is currently active. Uses the state machine's + pause trigger to properly transition and run callbacks. + If the study is not active, this method is used to optionally display a banner message, with no state transition. + + Args: + send_researcher_email: If True, send notification email to researchers + with CHANGE_STUDY_STATUS permission. + request: If provided, display a Django messages banner to the user. + """ + if self.max_responses is None: + return + + if not self.has_reached_max_responses: + return + + # Refresh from DB to ensure the in-memory study is current before + # the pause transition triggers a save (via _finalize_state_change). + self.refresh_from_db() + + # Use the state machine's pause trigger to properly transition + # and run callbacks (like notify_administrators_of_pause). + # Note: no explicit save() needed here because the state machine's + # _finalize_state_change callback already saves the model. 
+ if self.state == "active": + self.pause() # No user since this is system-triggered + + if send_researcher_email: + self._notify_researchers_of_max_responses_pause() + + if request: + messages.warning( + request, + f'Study "{self.name}" has been automatically paused because it ' + f"reached the response limit ({self.valid_response_count}/{self.max_responses}).", + ) + else: + # Study is not active, so no state transition is needed. Just notify the researcher that they cannot start the study. + if request: + messages.warning( + request, + f'Study "{self.name}" has reached the response limit ({self.valid_response_count}/{self.max_responses}).', + ) + @property + def consent_videos(self): + return self.videos.filter(is_consent_footage=True) @@ -750,7 +859,19 @@ def check_if_built(self, ev): """ if self.needs_to_be_built: raise RuntimeError( - f'Cannot activate study - experiment runner for "{self.name}" ({self.id}) has not been built!' + f'Cannot activate the study "{self.name}" ({self.id}) because the experiment runner has not been built!' + ) + + def check_if_at_max_responses(self, ev): + """Check if study has reached its max responses value before activating/starting. + + :param ev: The event object + :type ev: transitions.core.EventData + :raise: RuntimeError + """ + if self.has_reached_max_responses: + raise RuntimeError( + f'Cannot activate the study "{self.name}" ({self.id}) because it has reached its maximum number of responses. Be sure to handle all pending consents and review existing responses, as this may open up slots. Then increase the response limit in the Study Ad if necessary, and try starting the study again.' 
) def notify_administrators_of_activation(self, ev): @@ -775,12 +896,16 @@ def notify_administrators_of_activation(self, ev): ) def notify_administrators_of_pause(self, ev): + user = ev.kwargs.get("user") + caller_name = ( + user.get_short_name() if user else "System (max responses reached)" + ) context = { "lab_name": self.lab.name, "study_name": self.name, "study_id": self.pk, "study_uuid": str(self.uuid), - "researcher_name": ev.kwargs.get("user").get_short_name(), + "researcher_name": caller_name, "action": ev.transition.dest, } send_mail.delay( @@ -795,6 +920,28 @@ def notify_administrators_of_pause(self, ev): **context, ) + def _notify_researchers_of_max_responses_pause(self): + """Send email to researchers notifying them the study was auto-paused + because it reached the maximum number of responses.""" + context = { + "study_name": self.name, + "study_id": self.pk, + "study_uuid": str(self.uuid), + "max_responses": self.max_responses, + "valid_response_count": self.valid_response_count, + } + send_mail.delay( + "notify_researchers_of_max_responses_pause", + f"{self.name}: Study paused - response limit reached", + settings.EMAIL_FROM_ADDRESS, + bcc=list( + self.users_with_study_perms( + StudyPermission.CHANGE_STUDY_STATUS + ).values_list("username", flat=True) + ), + **context, + ) + def notify_administrators_of_deactivation(self, ev): context = { "lab_name": self.lab.name, @@ -948,6 +1095,12 @@ def check_modification_of_approved_study( ): continue # Skip, since the actual JSON content is the same - only exact_text changing if new != current: + # For file fields (e.g. image), None and "" are equivalent empty + # values that can differ between in-memory defaults and DB-loaded + # values. Treat them as unchanged. 
+ if hasattr(current, "name") and hasattr(new, "name"): + if (current.name or "") == (new.name or ""): + continue important_fields_changed = True break @@ -1202,7 +1355,7 @@ def birthdate_difference(self): @property def normalized_exp_data(self): - # Where study type is jspysch, convert experiment data to resemble EFP exp data. + # Where study type is jspsych, convert experiment data to resemble EFP exp data. if self.study_type.is_jspsych: return {key: value for key, value in zip(self.sequence, self.exp_data)} else: @@ -1289,11 +1442,18 @@ def take_action_on_exp_data(sender, instance, created, **kwargs): """ response = instance # Aliasing because instance is hooked as a kwarg. - if created or not response.sequence: + if response.study.study_type.is_external: + # External studies: check if study has reached max responses and, if so, pause the study and email researchers. + response.study.check_and_pause_if_at_max_responses(send_researcher_email=True) + elif created or not response.sequence: return else: dispatch_frame_action(response) + # Internal studies: if response is complete, check if this study has reached max responses and, if so, pause the study and email researchers. + if response.completed: + response.study.check_and_pause_if_at_max_responses(send_researcher_email=True) + class FeedbackApiManager(models.Manager): """Prefetch all the things.""" diff --git a/studies/templates/emails/notify_researchers_of_max_responses_pause.html b/studies/templates/emails/notify_researchers_of_max_responses_pause.html new file mode 100644 index 000000000..dab7f9c56 --- /dev/null +++ b/studies/templates/emails/notify_researchers_of_max_responses_pause.html @@ -0,0 +1,17 @@ +{% load web_extras %} +
Dear Study Researchers,
++ Your study {{ study_name }} has been automatically paused + because it reached the maximum number of valid responses + ({{ valid_response_count }} valid / {{ max_responses }} limit). +
+
+ Please make sure to handle any pending consents and review your responses, as doing so may open up more slots. Then, if you would like to collect more data, you can increase the study's response limit if necessary, and re-start it. Your study will NOT be restarted automatically if more slots become available and/or you increase the response limit.
+
+ Your study can be found here.
+
+ Best,
+
+ Lookit Bot
+