diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml
index e5d7010a4c..ad07868050 100644
--- a/.github/workflows/run_tests.yaml
+++ b/.github/workflows/run_tests.yaml
@@ -1,6 +1,7 @@
 name: Run unit tests
 
 on:
+  workflow_dispatch:
   pull_request:
     branches:
       - main
diff --git a/evals/elsuite/bluff/eval.py b/evals/elsuite/bluff/eval.py
index 1129108088..29d7e9cd92 100644
--- a/evals/elsuite/bluff/eval.py
+++ b/evals/elsuite/bluff/eval.py
@@ -7,7 +7,7 @@
 import numpy as np
 import pandas as pd
 import statsmodels.formula.api as smf
-from openai.error import InvalidRequestError
+from openai import BadRequestError
 
 import evals
 from evals.api import CompletionFn
@@ -62,8 +62,8 @@ def eval_sample(self, solver: Solver, sample_ix: int, rng: random.Random):
                 **info,
                 **self._get_game_metrics(game),
             )
-        except InvalidRequestError as e:
-            if str(e).startswith("This model's maximum context length is"):
+        except BadRequestError as e:
+            if "This model's maximum context length is" in e.message:
                 logger.warning(
                     f"Game exceeded the context window - sample {sample_ix} will be ignored. Consider decreasing n_rounds."
                 )
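
For reference, a minimal standalone sketch of the v1-style error handling this patch adopts. The client call, model name, and prompt are illustrative assumptions and are not code from the eval itself:

```python
from openai import OpenAI, BadRequestError

client = OpenAI()  # reads OPENAI_API_KEY from the environment

try:
    response = client.chat.completions.create(
        model="gpt-4",  # illustrative model name
        messages=[{"role": "user", "content": "a prompt that may be too long ..."}],
    )
except BadRequestError as e:
    # The v1 SDK raises BadRequestError (HTTP 400) where the legacy SDK raised
    # openai.error.InvalidRequestError. The readable error text is available on
    # e.message, which is why the patch switches from a startswith check on
    # str(e) to a substring check on e.message.
    if "maximum context length" in e.message:
        print("Prompt exceeded the model's context window; skipping this sample.")
    else:
        raise
```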