From e189e515c573f9c214f0b32661874af08883941e Mon Sep 17 00:00:00 2001 From: Andres Tabima Date: Wed, 11 Feb 2026 11:12:12 +0100 Subject: [PATCH 01/10] Tutorial section for FAIR commons --- docs/commons/fair/fair-commons.rst | 1 + .../fair/tutorials/create-benchmark.rst | 2 ++ docs/commons/fair/tutorials/create-metric.rst | 2 ++ .../tutorials/create-test-following-ftr.rst | 2 ++ .../define-benchmark-associated-metrics.rst | 2 ++ .../define-run-scoring-algorithm.rst | 2 ++ .../fair/tutorials/deploy-champion.rst | 2 ++ .../discover-test-CESSDA-benchmark.rst | 2 ++ .../find-test-for-digital-object.rst | 2 ++ .../fair/tutorials/host-deploy-test.rst | 2 ++ .../commons/fair/tutorials/how-comply-ftr.rst | 2 ++ .../fair/tutorials/others-use-my-metrics.rst | 2 ++ .../tutorials/register-curate-metric-fs.rst | 2 ++ docs/commons/fair/tutorials/register-test.rst | 2 ++ .../fair/tutorials/run-existing-test.rst | 2 ++ .../commons/fair/tutorials/tutorial-index.rst | 26 +++++++++++++++++++ 16 files changed, 55 insertions(+) create mode 100644 docs/commons/fair/tutorials/create-benchmark.rst create mode 100644 docs/commons/fair/tutorials/create-metric.rst create mode 100644 docs/commons/fair/tutorials/create-test-following-ftr.rst create mode 100644 docs/commons/fair/tutorials/define-benchmark-associated-metrics.rst create mode 100644 docs/commons/fair/tutorials/define-run-scoring-algorithm.rst create mode 100644 docs/commons/fair/tutorials/deploy-champion.rst create mode 100644 docs/commons/fair/tutorials/discover-test-CESSDA-benchmark.rst create mode 100644 docs/commons/fair/tutorials/find-test-for-digital-object.rst create mode 100644 docs/commons/fair/tutorials/host-deploy-test.rst create mode 100644 docs/commons/fair/tutorials/how-comply-ftr.rst create mode 100644 docs/commons/fair/tutorials/others-use-my-metrics.rst create mode 100644 docs/commons/fair/tutorials/register-curate-metric-fs.rst create mode 100644 docs/commons/fair/tutorials/register-test.rst create mode 100644 
docs/commons/fair/tutorials/run-existing-test.rst create mode 100644 docs/commons/fair/tutorials/tutorial-index.rst diff --git a/docs/commons/fair/fair-commons.rst b/docs/commons/fair/fair-commons.rst index c5e03f7..2de2ce8 100644 --- a/docs/commons/fair/fair-commons.rst +++ b/docs/commons/fair/fair-commons.rst @@ -20,4 +20,5 @@ The following resources in this section are part of the FAIR Commons component. Catalogue of Tests Catalogue of Benchmark Scoring Algorithms Code of Shared Tests + Tutorials diff --git a/docs/commons/fair/tutorials/create-benchmark.rst b/docs/commons/fair/tutorials/create-benchmark.rst new file mode 100644 index 0000000..787f614 --- /dev/null +++ b/docs/commons/fair/tutorials/create-benchmark.rst @@ -0,0 +1,2 @@ +How to create a benchmark +=========================== diff --git a/docs/commons/fair/tutorials/create-metric.rst b/docs/commons/fair/tutorials/create-metric.rst new file mode 100644 index 0000000..eed5ce5 --- /dev/null +++ b/docs/commons/fair/tutorials/create-metric.rst @@ -0,0 +1,2 @@ +How to create a metric +====================== diff --git a/docs/commons/fair/tutorials/create-test-following-ftr.rst b/docs/commons/fair/tutorials/create-test-following-ftr.rst new file mode 100644 index 0000000..c4cc0ac --- /dev/null +++ b/docs/commons/fair/tutorials/create-test-following-ftr.rst @@ -0,0 +1,2 @@ +How create a test (service) following the FTR API +=================================================== diff --git a/docs/commons/fair/tutorials/define-benchmark-associated-metrics.rst b/docs/commons/fair/tutorials/define-benchmark-associated-metrics.rst new file mode 100644 index 0000000..23d8098 --- /dev/null +++ b/docs/commons/fair/tutorials/define-benchmark-associated-metrics.rst @@ -0,0 +1,2 @@ +How define my Benchmark and its associated specialised Metrics +================================================================== diff --git a/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst 
b/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst new file mode 100644 index 0000000..67614b0 --- /dev/null +++ b/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst @@ -0,0 +1,2 @@ +How to define and run a scoring algorithm +=========================================== diff --git a/docs/commons/fair/tutorials/deploy-champion.rst b/docs/commons/fair/tutorials/deploy-champion.rst new file mode 100644 index 0000000..008f619 --- /dev/null +++ b/docs/commons/fair/tutorials/deploy-champion.rst @@ -0,0 +1,2 @@ +How to deploy Champion myself +=============================== diff --git a/docs/commons/fair/tutorials/discover-test-CESSDA-benchmark.rst b/docs/commons/fair/tutorials/discover-test-CESSDA-benchmark.rst new file mode 100644 index 0000000..20be9e3 --- /dev/null +++ b/docs/commons/fair/tutorials/discover-test-CESSDA-benchmark.rst @@ -0,0 +1,2 @@ +How to know what are the tests in the CESSDA benchmark +======================================================== diff --git a/docs/commons/fair/tutorials/find-test-for-digital-object.rst b/docs/commons/fair/tutorials/find-test-for-digital-object.rst new file mode 100644 index 0000000..3e3aa24 --- /dev/null +++ b/docs/commons/fair/tutorials/find-test-for-digital-object.rst @@ -0,0 +1,2 @@ +How find a test for my digital object +====================================== diff --git a/docs/commons/fair/tutorials/host-deploy-test.rst b/docs/commons/fair/tutorials/host-deploy-test.rst new file mode 100644 index 0000000..948083a --- /dev/null +++ b/docs/commons/fair/tutorials/host-deploy-test.rst @@ -0,0 +1,2 @@ +How host/deploy a test +========================= diff --git a/docs/commons/fair/tutorials/how-comply-ftr.rst b/docs/commons/fair/tutorials/how-comply-ftr.rst new file mode 100644 index 0000000..5e214e1 --- /dev/null +++ b/docs/commons/fair/tutorials/how-comply-ftr.rst @@ -0,0 +1,2 @@ +As a fair assessment developer, how comply to FTR spec to interoperate with others 
+======================================================================================= diff --git a/docs/commons/fair/tutorials/others-use-my-metrics.rst b/docs/commons/fair/tutorials/others-use-my-metrics.rst new file mode 100644 index 0000000..00976a4 --- /dev/null +++ b/docs/commons/fair/tutorials/others-use-my-metrics.rst @@ -0,0 +1,2 @@ +How have others use my metrics +================================ diff --git a/docs/commons/fair/tutorials/register-curate-metric-fs.rst b/docs/commons/fair/tutorials/register-curate-metric-fs.rst new file mode 100644 index 0000000..0024637 --- /dev/null +++ b/docs/commons/fair/tutorials/register-curate-metric-fs.rst @@ -0,0 +1,2 @@ +How to register and curate a metric in FS +========================================== diff --git a/docs/commons/fair/tutorials/register-test.rst b/docs/commons/fair/tutorials/register-test.rst new file mode 100644 index 0000000..3ffcf67 --- /dev/null +++ b/docs/commons/fair/tutorials/register-test.rst @@ -0,0 +1,2 @@ +How register a test +====================== diff --git a/docs/commons/fair/tutorials/run-existing-test.rst b/docs/commons/fair/tutorials/run-existing-test.rst new file mode 100644 index 0000000..3e3ee7b --- /dev/null +++ b/docs/commons/fair/tutorials/run-existing-test.rst @@ -0,0 +1,2 @@ +How to run a test that exists +============================== diff --git a/docs/commons/fair/tutorials/tutorial-index.rst b/docs/commons/fair/tutorials/tutorial-index.rst new file mode 100644 index 0000000..4fcb474 --- /dev/null +++ b/docs/commons/fair/tutorials/tutorial-index.rst @@ -0,0 +1,26 @@ +Tutorials for FTR implementation +================================== + +Documentation that explains how to implement different workflows for the FTR. + +The following tutorials in this section are part of the tutorials documentation. + +.. 
toctree:: + :caption: Tutorials + :maxdepth: 1 + :titlesonly: + + How define my Benchmark and its associated specialised Metrics + How find a test for my digital object + How to run a test that exists + How to deploy Champion myself + How to define and run a scoring algorithm + How to create a benchmark + How to create a metric + How to know what are the tests in the CESSDA benchmark + How to register and curate a metric in FS + How create a test (service) following the FTR API + How register a test + How host/deploy a test + As a fair assessment developer, how comply to FTR spec to interoperate with others + How have others use my metrics From 7748906000fdc3cee16a33da314b5cd0e5eead3f Mon Sep 17 00:00:00 2001 From: Andres Tabima Date: Wed, 11 Feb 2026 11:15:37 +0100 Subject: [PATCH 02/10] fix name for a cessda file --- docs/commons/fair/tutorials/tutorial-index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/commons/fair/tutorials/tutorial-index.rst b/docs/commons/fair/tutorials/tutorial-index.rst index 4fcb474..eac70d4 100644 --- a/docs/commons/fair/tutorials/tutorial-index.rst +++ b/docs/commons/fair/tutorials/tutorial-index.rst @@ -17,7 +17,7 @@ The following list in this section are part of the tutorials documentation. 
How to define and run a scoring algorithm How to create a benchmark How to create a metric - How to know what are the tests in the CESSDA benchmark + How to know what are the tests in the CESSDA benchmark How to register and curate a metric in FS How create a test (service) following the FTR API How register a test From 80ecd3431a4bde54e9a877a9e1548f5ae7cf2a6d Mon Sep 17 00:00:00 2001 From: Andres Tabima Date: Wed, 11 Feb 2026 11:20:07 +0100 Subject: [PATCH 03/10] Rename discover-test-CESSDA-benchmark.rst to discover-test-cessda-benchmark.rst --- ...st-CESSDA-benchmark.rst => discover-test-cessda-benchmark.rst} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename docs/commons/fair/tutorials/{discover-test-CESSDA-benchmark.rst => discover-test-cessda-benchmark.rst} (100%) diff --git a/docs/commons/fair/tutorials/discover-test-CESSDA-benchmark.rst b/docs/commons/fair/tutorials/discover-test-cessda-benchmark.rst similarity index 100% rename from docs/commons/fair/tutorials/discover-test-CESSDA-benchmark.rst rename to docs/commons/fair/tutorials/discover-test-cessda-benchmark.rst From d7925535c21e321bdc7d44163d3333fb9e4eba07 Mon Sep 17 00:00:00 2001 From: John Shepherdson <107628491+john-shepherdson@users.noreply.github.com> Date: Tue, 10 Mar 2026 16:37:39 +0000 Subject: [PATCH 04/10] First drafts --- .../fair/tutorials/create-benchmark.rst | 250 +++++++++++++++++- .../tutorials/create-test-following-ftr.rst | 77 ++++++ .../define-run-scoring-algorithm.rst | 225 +++++++++++++++- 3 files changed, 548 insertions(+), 4 deletions(-) diff --git a/docs/commons/fair/tutorials/create-benchmark.rst b/docs/commons/fair/tutorials/create-benchmark.rst index 787f614..d7cb99e 100644 --- a/docs/commons/fair/tutorials/create-benchmark.rst +++ b/docs/commons/fair/tutorials/create-benchmark.rst @@ -1,2 +1,248 @@ -How to create a benchmark -=========================== +.. 
_tutorial_create_fair_benchmark: + +Creating a FAIR Benchmark and Metrics +===================================== + +This tutorial explains how to create a **community FAIR Benchmark** and +any additional **Metrics** using the OSTrails FAIR Assessment framework. + +You will start from the *OSTrails FAIR Assessment – Conceptual Requirements* +template and work through it to produce a **Benchmark narrative** describing +how your community interprets the FAIR Principles for a specific type of +digital object. + +By the end of this tutorial you will have: + +* a completed **community FAIR Benchmark narrative** +* a defined set of **FAIR Metrics** +* any required **community-specific specialised Metrics** + +For an overview of the process, see :ref:`benchmark_workflow`. + +.. _benchmark_prerequisites: + +Prerequisites +------------- + +Before starting you should: + +* Download the + `OSTrails FAIR Assessment – Conceptual Requirements template + `_. +* Be familiar with the **FAIR Principles**. +* Identify the **community or discipline** for which the Benchmark will apply. + +.. _benchmark_workflow: + +Workflow overview +----------------- + +Creating a FAIR Benchmark typically involves the following steps: + +1. :ref:`copy_template` +2. :ref:`define_benchmark` +3. :ref:`review_generic_metrics` +4. :ref:`define_specialised_metrics` +5. :ref:`review_benchmark` +6. :ref:`register_benchmark` + +Each step is described in the sections below. + +.. _copy_template: + +Step 1 – Copy the OSTrails template +----------------------------------- + +Begin by making a working copy of the **OSTrails FAIR Assessment – +Conceptual Requirements template**. 
+ +The template provides a structured format for describing: + +* the **scope of the Benchmark** +* the **digital objects being assessed** +* the **Metrics used to evaluate FAIRness** +* the **community standards and practices** that apply + +Once the template has been downloaded and renamed, you should +work through your Benchmark narrative document sequentially, +completing each section with information relevant to your community. + +Proceed to +:ref:`define_benchmark`. + +.. _define_benchmark: + +Step 2 – Define the community Benchmark +--------------------------------------- + +The **Benchmark** section provides the narrative description of how +FAIR is interpreted for your community. + +Complete the Benchmark description by specifying: + +**Benchmark name** + + A short, descriptive title for the Benchmark. + +**Description** + + A concise explanation of the purpose of the Benchmark and the + community it serves. + +**Applicability** + + Define clearly: + + * the **type of digital object** being assessed + (for example datasets, workflows, software, or metadata records) + * the **disciplinary scope** of the Benchmark + +**Related resources** + + List any standards, repositories, policies, or vocabularies that + support FAIR practice in your community. + +The goal of this section is to describe **what FAIR means in practice +for the community and its digital objects**. + +Next, review the Metrics available in your Benchmark narrative as described in +:ref:`review_generic_metrics`. + +.. _review_generic_metrics: + +Step 3 – Review the generic FAIR Metrics +---------------------------------------- + +Your Benchmark narrative includes **generic Metrics** aligned with the +FAIR Principles. These are designed to be broadly applicable across many +disciplines. + +For each Metric card in your Benchmark narrative: + +1. Read the description of the Metric. +2. Decide whether it satisfies your community requirements. +3. 
Select the appropriate option in your Benchmark narrative: + + * ``This generic Metric is sufficient for our needs`` + * ``This generic Metric is not sufficient for our needs`` + * ``This principle is not applicable to our definition of FAIR`` + +Generic Metrics commonly address topics such as: + +* persistent identifiers +* structured metadata +* links between data and metadata +* indexing for discovery +* open and standardised access protocols + +In many cases these generic Metrics can be adopted without modification. + +If a generic Metric does not fully capture community practice, define a +specialised Metric as described in +:ref:`define_specialised_metrics`. + +.. _define_specialised_metrics: + +Step 4 – Define specialised Metrics where required +-------------------------------------------------- + +Some FAIR principles require **community-specific interpretation**. +Where the generic Metric does not adequately represent community +practice, define a **specialised Metric**. + +Specialised Metrics are commonly required for principles such as: + +* **I2 – Use of FAIR vocabularies** +* **R1.2 – Provenance** +* **R1.3 – Community standards** + +When defining a specialised Metric, include the following elements. + +**Metric name** + + A short descriptive title. + +**Metric description** + + A clear explanation of what is being evaluated and why it supports + FAIR. + +**Assessment criteria** + + The conditions that must be met for the Metric to pass. + +**Related standards or resources** + + References to relevant community standards, vocabularies, or + policies. + +**Examples** + + Where possible, provide: + + * a positive example + * a negative example + * an indeterminate example + +These examples help both implementers and assessment tools understand +how the Metric should be applied. + +Once specialised Metrics have been defined, review the Benchmark as +described in :ref:`review_benchmark`. + +.. 
_review_benchmark: + +Step 5 – Review the completed Benchmark +--------------------------------------- + +After completing all sections of your Benchmark narrative: + +* Review the Benchmark with **community experts or stakeholders**. +* Check that **all relevant FAIR principles are addressed**. +* Ensure that any referenced **standards, vocabularies, or + repositories** are clearly identified. + +The completed document now represents the **conceptual FAIR Benchmark** +for your community. + +The final step is to register the Benchmark and Metrics as described in +:ref:`register_benchmark`. + +.. _register_benchmark: + +Step 6 – Register the Benchmark and Metrics +------------------------------------------- + +To enable reuse and interoperability, the Benchmark and its Metrics +should be registered in community registries such as +`FAIRsharing `_. + +Registration should include: + +* the **Benchmark description** +* each **specialised Metric** +* references to any **standards, databases, or vocabularies** + +Registering these components allows: + +* FAIR assessment tools to discover and implement the Metrics +* other communities to reuse or adapt the Benchmark +* FAIR assessment results to be compared across tools + +Next steps +---------- + +Once the conceptual Benchmark has been created, the next stages +typically include: + +* implementing **machine-actionable Metric tests** +* defining **assessment workflows** +* applying the Benchmark to **evaluate digital objects** + +This converts the conceptual definition into a working +**FAIR assessment framework**. + +Continue with the tutorial: + +:ref:`tutorial_create_metric_tests` + diff --git a/docs/commons/fair/tutorials/create-test-following-ftr.rst b/docs/commons/fair/tutorials/create-test-following-ftr.rst index c4cc0ac..e96d1b7 100644 --- a/docs/commons/fair/tutorials/create-test-following-ftr.rst +++ b/docs/commons/fair/tutorials/create-test-following-ftr.rst @@ -1,2 +1,79 @@ +.. 
_fair_metric_tests: + How create a test (service) following the FTR API =================================================== + +Benchmark Assessment Algorithms rely on **FAIR Metric Tests**. + +Each test evaluates a specific FAIR requirement and returns a result of: + +* ``pass`` +* ``fail`` +* ``indeterminate`` + +A test has three main components. + +**DCAT description** + + A machine-readable metadata record describing the test. + +**API definition** + + An OpenAPI specification describing how to call the test service. + +**Test implementation** + + The executable service that performs the assessment. + +Tests can be written in any programming language provided they: + +* accept the **GUID of a digital object** as input +* return a **JSON result object** containing the outcome + +.. _creating_metric_test: + +Creating a new FAIR Metric Test +------------------------------- + +New tests can be registered using **FAIR Wizard**. + +1. Open FAIR Wizard and create a **new project**. +2. Select a knowledge model. +3. Enable **Filter by question tags**. +4. Choose **Test** as the artefact type. + +Two key fields must be completed. + +**Endpoint URL** + + The service endpoint that executes the test. + +**Endpoint URL Description** + + The location of the **OpenAPI description** of the test API. + +Once the questionnaire has been completed, create and submit the +resulting document. + +After processing, the test record is deposited in the +**OSTrails Assessment Component Metadata Records repository** and +indexed by FAIR Data Point. + +The test will then appear in the **FAIR Champion Test Registry** and +can be referenced in your Benchmark Configuration Spreadsheet. 
+ +Next steps +---------- + +Once your Benchmark Assessment Algorithm and tests are defined you can: + +* integrate additional **FAIR Metric Tests** +* refine scoring conditions and weights +* run assessments across larger collections of digital objects + +This enables automated **community FAIR Benchmark assessments** using +the OSTrails FAIR Champion tool. + +Continue with the tutorial: + +:ref:`tutorial_fair_benchmark_algorithm` diff --git a/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst b/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst index 67614b0..6d1eda2 100644 --- a/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst +++ b/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst @@ -1,2 +1,223 @@ -How to define and run a scoring algorithm -=========================================== +.. _tutorial_fair_benchmark_algorithm: + +Defining and Running a FAIR Benchmark Assessment Algorithm +=========================================================== + +This tutorial explains how to define and run a **FAIR Benchmark Assessment +Algorithm** using the OSTrails tool **FAIR Champion**. + +A Benchmark Assessment Algorithm combines multiple **FAIR Metric Tests** +and scoring rules to assess the FAIRness of a digital object according to +a specific community Benchmark. + +In this tutorial you will: + +* run an existing Benchmark assessment +* create your own **Benchmark Configuration Spreadsheet** +* register the algorithm with FAIR Champion +* run the algorithm on one or more digital objects + +By the end of this tutorial you will have a working **FAIR Benchmark +Assessment Algorithm** that can be executed within FAIR Champion. + +.. _benchmark_algorithm_prerequisites: + +Prerequisites +------------- + +Before starting you should have completed: + +* :ref:`tutorial_create_fair_benchmark` +* :ref:`tutorial_create_metric_tests` + +And have access to: + +* The **FAIR Champion assessment service**. 
+* Your **Benchmark definition** and associated metrics. + +Some steps also require access to: + +* `FAIR Champion `_ +* `FAIR Wizard `_ +* `OSTrails Test Registry `_ + +.. _benchmark_algorithm_workflow: + +Workflow overview +----------------- + +Creating and running a Benchmark Assessment Algorithm (BAA) involves the +following steps: + +1. :ref:`run_existing_algorithm` +2. :ref:`create_configuration_spreadsheet` +3. :ref:`register_algorithm` +4. :ref:`run_single_assessment` +5. :ref:`run_multiple_assessments` + +Each step is described in the sections below. + +.. _run_existing_algorithm: + +Step 1 – Run an existing Benchmark assessment +--------------------------------------------- + +Before creating a new algorithm it is helpful to run an existing one to +confirm that the assessment service is working. + +1. Open the FAIR Champion assessment interface: + + https://tools.ostrails.eu/champion/assess/algorithms/new + +2. Select a **Benchmark Configuration Spreadsheet URI** from the list. + +3. Enter the **GUID of the digital object** to be assessed. + +4. Click **Run Benchmark Quality Assessment**. + +After a few seconds the results will be displayed on screen. + +The output typically shows: + +* individual test results +* weighted scores +* conclusions +* optional links to guidance for Conditions that were not met + +Running an existing algorithm confirms that the FAIR Champion service +is functioning correctly. + +You can now proceed to creating your own configuration spreadsheet as +described in :ref:`create_configuration_spreadsheet`. + +.. _create_configuration_spreadsheet: + +Step 2 – Create a Benchmark Configuration Spreadsheet +----------------------------------------------------- + +Benchmark Assessment Algorithms for FAIR Champion are defined +using a **configuration spreadsheet**. + +You can begin by copying the **Generic Algorithm spreadsheet** available +from the FAIR Champion assessment interface. + +The spreadsheet contains three sections. 
+ +**General metadata** + + Describes the algorithm using DCAT properties. + +**Test references** + + Lists the FAIR Metric Tests used by the algorithm and assigns weights + to their outputs. + +**Conditions and calculations** + + Defines the scoring logic based on the test results. + Links to guidance may be included for some or all of the conditions. + +General rules for configuration spreadsheets: + +* Currently only **Google Sheets** are supported. +* The spreadsheet must be **publicly readable**. +* Headers must be used **exactly as provided in the template**. +* Each section must be separated by **one empty line**. +* The URIs of the tests must resolve to a **DCAT DataService record** + describing the test. + +A list of available tests can be found in the +`OSTrails Test Registry `_. + +Calculations reference tests by their **abbreviation**. Expressions use +Ruby-style syntax, for example:: + + test_identifier_1 + test_identifier_2 > 3 + +Each calculation returns either **true** or **false**, which determines +the narrative result associated with that condition. + +Once the spreadsheet is complete it must be registered with FAIR +Champion as described in :ref:`register_algorithm`. + +.. _register_algorithm: + +Step 3 – Register the Benchmark Assessment Algorithm +---------------------------------------------------- + +After creating your configuration spreadsheet you must register it so +that FAIR Champion can use it. + +1. Ensure the spreadsheet is **publicly accessible**. +2. Copy the **URI of the spreadsheet**. +3. Register the spreadsheet with FAIR Champion via the + 'Register a new Benchmark Quality Assessment Algorithm' option + on the home page. + +FAIR Champion will convert the spreadsheet into a registered +**Benchmark Assessment Algorithm**. + +You can verify that the registration succeeded by checking the +**FAIR Data Point index**, where the algorithm should appear with +status **Active**. 
+ +Once the algorithm is registered it will appear in the list of +available Benchmark algorithms within the FAIR Champion interface. + +You can now run the algorithm as described in +:ref:`run_single_assessment`. + +.. _run_single_assessment: + +Step 4 – Run an assessment using your algorithm +----------------------------------------------- + +To run an assessment using your own algorithm: + +1. Return to the FAIR Champion assessment interface: + + https://tools.ostrails.eu/champion/assess/algorithms/new + +2. Select your **Benchmark Configuration Spreadsheet URI** from the + algorithm list. + +3. Enter the **GUID of a digital object**. + +4. Click **Run Benchmark Quality Assessment**. + +If the configuration spreadsheet has been correctly defined and +registered, the results will be displayed in the same way as when +running an existing algorithm. + +This confirms that your **Benchmark Assessment Algorithm** is working +correctly. + +.. _run_multiple_assessments: + +Step 5 – Run assessments on multiple objects +-------------------------------------------- + +To run assessments on multiple digital objects you can use the +**benchmark runner** application. + +1. Clone the repository: + + https://github.com/cessda/cessda.cmv.benchmark-runner + +2. Build the application:: + + mvn compile + +3. Edit the ``guids.txt`` file so that it contains the GUIDs of the + digital objects to be assessed. + +4. Run the application using:: + + mvn exec:java -Dexec.args= + +where ```` is the URL of your registered Benchmark +algorithm. + +The terminal will display progress as each object is assessed. + +Results for each GUID are written to the ``results`` directory. 
From 01c7850d74dbed12368e3db38929a60355979623 Mon Sep 17 00:00:00 2001 From: John Shepherdson <107628491+john-shepherdson@users.noreply.github.com> Date: Thu, 12 Mar 2026 08:58:21 +0000 Subject: [PATCH 05/10] Fixed missing reference Due to inconsistent naming --- docs/commons/fair/tutorials/create-test-following-ftr.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/commons/fair/tutorials/create-test-following-ftr.rst b/docs/commons/fair/tutorials/create-test-following-ftr.rst index e96d1b7..dc36f44 100644 --- a/docs/commons/fair/tutorials/create-test-following-ftr.rst +++ b/docs/commons/fair/tutorials/create-test-following-ftr.rst @@ -1,4 +1,4 @@ -.. _fair_metric_tests: +.. tutorial_create_metric_tests: How create a test (service) following the FTR API =================================================== From fa4770f662f6d0c01c23373f33230f28acefd228 Mon Sep 17 00:00:00 2001 From: John Shepherdson <107628491+john-shepherdson@users.noreply.github.com> Date: Thu, 12 Mar 2026 11:31:11 +0000 Subject: [PATCH 06/10] Fixed typo --- docs/commons/fair/tutorials/create-test-following-ftr.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/commons/fair/tutorials/create-test-following-ftr.rst b/docs/commons/fair/tutorials/create-test-following-ftr.rst index dc36f44..3b1fbd8 100644 --- a/docs/commons/fair/tutorials/create-test-following-ftr.rst +++ b/docs/commons/fair/tutorials/create-test-following-ftr.rst @@ -1,6 +1,6 @@ .. tutorial_create_metric_tests: -How create a test (service) following the FTR API +How to create a test (service) following the FTR API =================================================== Benchmark Assessment Algorithms rely on **FAIR Metric Tests**. 
From 62f819a5e27bf1229936e2f7680e1393760ca397 Mon Sep 17 00:00:00 2001 From: John Shepherdson <107628491+john-shepherdson@users.noreply.github.com> Date: Thu, 12 Mar 2026 11:52:29 +0000 Subject: [PATCH 07/10] Update create-test-following-ftr.rst Added missing underscore --- docs/commons/fair/tutorials/create-test-following-ftr.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/commons/fair/tutorials/create-test-following-ftr.rst b/docs/commons/fair/tutorials/create-test-following-ftr.rst index 3b1fbd8..54ee67e 100644 --- a/docs/commons/fair/tutorials/create-test-following-ftr.rst +++ b/docs/commons/fair/tutorials/create-test-following-ftr.rst @@ -1,4 +1,4 @@ -.. tutorial_create_metric_tests: +.. _tutorial_create_metric_tests: How to create a test (service) following the FTR API =================================================== From c46b83442c8e81f19f30c645f67d4c53ef5fd849 Mon Sep 17 00:00:00 2001 From: John Shepherdson <107628491+john-shepherdson@users.noreply.github.com> Date: Thu, 12 Mar 2026 11:59:44 +0000 Subject: [PATCH 08/10] Fixed build errors --- docs/commons/fair/tutorials/create-test-following-ftr.rst | 2 +- docs/commons/fair/tutorials/define-run-scoring-algorithm.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/commons/fair/tutorials/create-test-following-ftr.rst b/docs/commons/fair/tutorials/create-test-following-ftr.rst index 54ee67e..d0b7534 100644 --- a/docs/commons/fair/tutorials/create-test-following-ftr.rst +++ b/docs/commons/fair/tutorials/create-test-following-ftr.rst @@ -1,7 +1,7 @@ .. _tutorial_create_metric_tests: How to create a test (service) following the FTR API -=================================================== +==================================================== Benchmark Assessment Algorithms rely on **FAIR Metric Tests**. 
diff --git a/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst b/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst index 6d1eda2..9256f47 100644 --- a/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst +++ b/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst @@ -200,7 +200,7 @@ Step 5 – Run assessments on multiple objects To run assessments on multiple digital objects you can use the **benchmark runner** application. -1. Clone the repository: +1. Clone the repository:: https://github.com/cessda/cessda.cmv.benchmark-runner From 450c4b9486f652950cc1df605045d19df46f5c47 Mon Sep 17 00:00:00 2001 From: John Shepherdson <107628491+john-shepherdson@users.noreply.github.com> Date: Thu, 12 Mar 2026 12:07:03 +0000 Subject: [PATCH 09/10] Update define-run-scoring-algorithm.rst Fixed indentation --- .../commons/fair/tutorials/define-run-scoring-algorithm.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst b/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst index 9256f47..60f3ac5 100644 --- a/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst +++ b/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst @@ -202,18 +202,18 @@ To run assessments on multiple digital objects you can use the 1. Clone the repository:: - https://github.com/cessda/cessda.cmv.benchmark-runner +https://github.com/cessda/cessda.cmv.benchmark-runner 2. Build the application:: - mvn compile +mvn compile 3. Edit the ``guids.txt`` file so that it contains the GUIDs of the digital objects to be assessed. 4. Run the application using:: - mvn exec:java -Dexec.args= +mvn exec:java -Dexec.args= where ```` is the URL of your registered Benchmark algorithm. 
From 9e09aaa3bbc4ee47c7315572403ee0d32d62428c Mon Sep 17 00:00:00 2001 From: John Shepherdson <107628491+john-shepherdson@users.noreply.github.com> Date: Thu, 12 Mar 2026 12:09:43 +0000 Subject: [PATCH 10/10] Update define-run-scoring-algorithm.rst --- .../fair/tutorials/define-run-scoring-algorithm.rst | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst b/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst index 60f3ac5..236627d 100644 --- a/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst +++ b/docs/commons/fair/tutorials/define-run-scoring-algorithm.rst @@ -201,19 +201,16 @@ To run assessments on multiple digital objects you can use the **benchmark runner** application. 1. Clone the repository:: - -https://github.com/cessda/cessda.cmv.benchmark-runner + https://github.com/cessda/cessda.cmv.benchmark-runner 2. Build the application:: - -mvn compile + mvn compile 3. Edit the ``guids.txt`` file so that it contains the GUIDs of the digital objects to be assessed. 4. Run the application using:: - -mvn exec:java -Dexec.args= + mvn exec:java -Dexec.args= where ```` is the URL of your registered Benchmark algorithm.