diff --git a/qcrboxtools/analyse/quality/base.py b/qcrboxtools/analyse/quality/base.py
index c6750d3..dbec85c 100644
--- a/qcrboxtools/analyse/quality/base.py
+++ b/qcrboxtools/analyse/quality/base.py
@@ -79,6 +79,7 @@ def ascending_levels2func(levels: Tuple[float, ...]) -> Callable[[float], int]:
     """
     return lambda x: next((i for i, v in enumerate(levels) if x < v))

+
 def descending_levels2func(levels: Tuple[float, ...]) -> Callable[[float], int]:
     """
     Create a function that maps a value to its corresponding level index in descending order.
diff --git a/qcrboxtools/analyse/quality/precision.py b/qcrboxtools/analyse/quality/precision.py
index 9b96662..ef534bf 100644
--- a/qcrboxtools/analyse/quality/precision.py
+++ b/qcrboxtools/analyse/quality/precision.py
@@ -105,7 +105,7 @@ def precision_all_data(cif_block: block, indicators: Optional[List[str]] = None)
         results_overall["Completeness"] = int_nonsym.completeness()

     if not already_merged:
-        int_merged = int_nonsym.merge_equivalents()
+        int_merged = int_nonsym.merge_equivalents()
         if "Mean Redundancy" in indicators:
             results_overall["Mean Redundancy"] = int_merged.redundancies().as_double().mean()
         if "R_meas" in indicators:
@@ -118,19 +118,12 @@ def precision_all_data(cif_block: block, indicators: Optional[List[str]] = None)
             results_overall["R_sigma"] = int_merged.r_sigma()
         if "CC1/2" in indicators:
             results_overall["CC1/2"] = int_nonsym.cc_one_half()
-    else:
-        non_sensical_entries = [
-            "Mean Redundancy",
-            "R_meas",
-            "R_pim",
-            "R_int",
-            "R_sigma",
-            "CC1/2"
-        ]
+    else:
+        non_sensical_entries = ["Mean Redundancy", "R_meas", "R_pim", "R_int", "R_sigma", "CC1/2"]
         for indicator in non_sensical_entries:
             if indicator in indicators:
-                results_overall[indicator] = 'N/A'
-
+                results_overall[indicator] = "N/A"
+
     return results_overall


@@ -164,7 +157,7 @@ def precision_all_data_quality(results_overall: Dict[str, float]) -> Dict[str, D
     }
     quality_values = {}
     for indicator, value in results_overall.items():
-        if indicator == "d_min lower" or value == 'N/A':
+        if indicator == "d_min lower" or value == "N/A":
             quality_values[indicator] = DataQuality.INFORMATION
         else:
             operation = value2level_dict[indicator]
@@ -222,15 +215,8 @@ def precision_vs_resolution(
     intensity_array = cif_block2intensity_array(cif_block)

     if intensity_array.is_unique_set_under_symmetry():
-        non_sensical_entries = [
-            "Mean Redundancy",
-            "R_meas",
-            "R_pim",
-            "R_int",
-            "R_sigma",
-            "CC1/2"
-        ]
-        indicators = [ind for ind in indicators if ind not in non_sensical_entries]
+        non_sensical_entries = ["Mean Redundancy", "R_meas", "R_pim", "R_int", "R_sigma", "CC1/2"]
+        indicators = [ind for ind in indicators if ind not in non_sensical_entries]

     intensity_array.setup_binner(n_bins=n_bins)

@@ -242,7 +228,7 @@ def precision_vs_resolution(
         bin_array = intensity_array.select(sel)
         bin_merged = bin_array.merge_equivalents()
         lowlim, highlim = intensity_array.binner().bin_d_range(i_bin)
-
+
         if "d_min lower" in indicators:
             results_binned["d_min lower"][array_index] = lowlim
         if "d_min upper" in indicators:
diff --git a/qcrboxtools/cif/cif2cif/base.py b/qcrboxtools/cif/cif2cif/base.py
index 51b0588..f2e2e0a 100644
--- a/qcrboxtools/cif/cif2cif/base.py
+++ b/qcrboxtools/cif/cif2cif/base.py
@@ -53,12 +53,12 @@ def cif_file_to_unified(


 def cif_model_to_specific(
-    cif_model: model,
+    cif_model: model.cif,
     required_entries: Optional[List[str]] = None,
     optional_entries: Optional[List[str]] = None,
     custom_categories: Optional[List[str]] = None,
     merge_su: bool = False,
-) -> model:
+) -> model.cif:
     """
     Filters and processes an iotbx CIF model to include only specific entries.

@@ -116,9 +116,9 @@ def cif_model_to_specific(
 def cif_file_to_specific(
     input_cif_path: Union[str, Path],
     output_cif_path: Union[str, Path],
-    required_entries: List[str] = None,
-    optional_entries: List[str] = None,
-    custom_categories: List[str] = None,
+    required_entries: Optional[List[str]] = None,
+    optional_entries: Optional[List[str]] = None,
+    custom_categories: Optional[List[str]] = None,
     merge_su: bool = False,
 ):
     """
diff --git a/qcrboxtools/cif/cif2cif/yaml.py b/qcrboxtools/cif/cif2cif/yaml.py
index 19f0f34..47f5f8b 100644
--- a/qcrboxtools/cif/cif2cif/yaml.py
+++ b/qcrboxtools/cif/cif2cif/yaml.py
@@ -15,7 +15,7 @@
 from ..read import cifdata_str_or_index, read_cif_safe
 from ..trim import trim_cif_block
 from ..uncertainties import split_su_block
-from .base import cif_file_to_specific
+from .base import cif_model_to_specific


 class NoKeywordsError(BaseException):
@@ -516,6 +516,51 @@ def yml_entries_resolve_special(
     raise ValueError("yml_entry must be of type YmlCifInputSettings or YmlCifOutputSettings.")

+
+def cif_text_to_specific_by_yml(input_cif_text: str, yml_path: Union[str, Path], command: str, parameter: str) -> str:
+    """
+    Processes a CIF text based on instructions defined in a YAML configuration, applying
+    the keyword transformations defined for the given command's parameter as well as its
+    standard uncertainty merge settings.
+
+    Parameters
+    ----------
+    input_cif_text : str
+        The CIF text to be processed.
+    yml_path : Union[str, Path]
+        The file path to the YAML file containing processing instructions.
+    command : str
+        The specific command within the YAML file to follow for processing the CIF text.
+    parameter : str
+        The specific parameter within the command to follow for processing the CIF text.
+
+    Returns
+    -------
+    str
+        The processed CIF text.
+    """
+    with open(yml_path, "r", encoding="UTF-8") as fobj:
+        yml_dict = yaml.safe_load(fobj)
+
+    yml_input_settings = cif_input_entries_from_yml(yml_dict, command, parameter)
+
+    model = cif.reader(input_string=input_cif_text).model()
+    block, _ = cifdata_str_or_index(model, 0)
+
+    yml_input_settings = yml_entries_resolve_special(yml_input_settings, block)
+
+    cif_model = cif.reader(input_string=input_cif_text).model()
+
+    specific_cif_model = cif_model_to_specific(
+        cif_model,
+        yml_input_settings.required_entries,
+        yml_input_settings.optional_entries,
+        yml_input_settings.custom_categories,
+        yml_input_settings.merge_su,
+    )
+
+    return str(specific_cif_model)
+
+
 def cif_file_to_specific_by_yml(
     input_cif_path: Union[str, Path],
     output_cif_path: Union[str, Path],
@@ -546,23 +591,11 @@
     This file was developed for exposing commands within QCrBox. See this project or the test
     of this function for an example of how such a yml file might look like.
""" - with open(yml_path, "r", encoding="UTF-8") as fobj: - yml_dict = yaml.safe_load(fobj) - - yml_input_settings = cif_input_entries_from_yml(yml_dict, command, parameter) - - block, _ = cifdata_str_or_index(read_cif_safe(input_cif_path), "0") + input_cif_text = Path(input_cif_path).read_text(encoding="UTF-8") - yml_input_settings = yml_entries_resolve_special(yml_input_settings, block) + output_cif_text = cif_text_to_specific_by_yml(input_cif_text, yml_path, command, parameter) - cif_file_to_specific( - input_cif_path, - output_cif_path, - yml_input_settings.required_entries, - yml_input_settings.optional_entries, - yml_input_settings.custom_categories, - yml_input_settings.merge_su, - ) + Path(output_cif_path).write_text(output_cif_text, encoding="UTF-8") def cif_file_merge_to_unified_by_yml( @@ -598,18 +631,45 @@ def cif_file_merge_to_unified_by_yml( This file was developed for exposeing commands within QCrBox. See this project or the test of this function for an example of how such a yml file might look like. """ + input_cif_text = Path(input_cif_path).read_text(encoding="UTF-8") + merge_cif_text = Path(merge_cif_path).read_text(encoding="UTF-8") if merge_cif_path else None + + output_cif_text = cif_text_merge_to_unified_by_yml(input_cif_text, merge_cif_text, yml_path, command, parameter) + + Path(output_cif_path).write_text(output_cif_text, encoding="UTF-8") + + +def cif_text_merge_to_unified_by_yml(input_cif_text, merge_cif_text, yml_path, command, parameter): + """ + Merges two CIF texts into a unified format based on YAML configuration. + + Parameters + ---------- + input_cif_text : str + The CIF text to be processed. + merge_cif_text : str + The CIF text to be merged with the input CIF text. + yml_path : str + The file path to the YAML file containing processing instructions. + command : str + The specific command within the YAML file to follow for processing the CIF texts. + parameter : str + The specific parameter within the command to follow for processing the CIF texts. 
+ + """ + with open(yml_path, "r", encoding="UTF-8") as fobj: yml_dict = yaml.safe_load(fobj) yml_output_settings = cif_output_entries_from_yml(yml_dict, command, parameter) - input_cif = read_cif_safe(input_cif_path) + input_cif = cif.reader(input_string=input_cif_text).model() # dataset name will be overwritten if merge_cif is not None input_block, dataset_name = cifdata_str_or_index(input_cif, yml_output_settings.select_block) - if merge_cif_path is None: + if merge_cif_text is None: merge_block = cif.model.block() else: merge_block, dataset_name = cifdata_str_or_index( - read_cif_safe(merge_cif_path), "0" + cif.reader(input_string=merge_cif_text).model(), 0 ) # QCrBox cif files have only one block yml_output_settings = yml_entries_resolve_special(yml_output_settings, input_block) @@ -644,7 +704,7 @@ def cif_file_merge_to_unified_by_yml( output_cif = cif.model.cif() output_cif[dataset_name] = output_cif_block - Path(output_cif_path).write_text(str(output_cif), encoding="UTF-8") + return str(output_cif) def can_run_command(yml_path: Path, command: str, input_cif_path: Path): diff --git a/qcrboxtools/cif/entries/entry_conversion.py b/qcrboxtools/cif/entries/entry_conversion.py index 0c38adf..06e459b 100644 --- a/qcrboxtools/cif/entries/entry_conversion.py +++ b/qcrboxtools/cif/entries/entry_conversion.py @@ -31,7 +31,7 @@ def entry_to_unified_keyword(old_name: str, custom_categories: List[str]) -> str cut_name = old_name[1:] for category in custom_categories: if cut_name.startswith(category): - return f"_{category}.{cut_name[len(category)+1:]}" + return f"_{category}.{cut_name[len(category) + 1 :]}" return "_" + aliases.get(cut_name, cut_name) diff --git a/qcrboxtools/cif/entries/entry_lookup.py b/qcrboxtools/cif/entries/entry_lookup.py index e710f46..551b406 100644 --- a/qcrboxtools/cif/entries/entry_lookup.py +++ b/qcrboxtools/cif/entries/entry_lookup.py @@ -88,9 +88,9 @@ def generate_aliases( aliases = {key: val for key, val in aliases.items() if key != val} # ensure there are no circular translations - assert not any( - val in aliases for val in values - ), "One of the common references has gotten an alias, creating a circle" + assert not any(val in aliases for val in values), ( + "One of the common references has gotten an alias, creating a circle" + ) return aliases diff --git a/qcrboxtools/cif/file_converter/shelxl/cif2shelx_ins.py b/qcrboxtools/cif/file_converter/shelxl/cif2shelx_ins.py index acea026..3c98a97 100644 --- a/qcrboxtools/cif/file_converter/shelxl/cif2shelx_ins.py +++ b/qcrboxtools/cif/file_converter/shelxl/cif2shelx_ins.py @@ -187,7 +187,7 @@ def block2header(cif_block: block) -> str: ins_lines.append("CONF\nBOND $H\nL.S. 
     ins_lines.append("CONF\nBOND $H\nL.S. 10\nLIST 4\nACTA\nBOND\nFMAP 2\nMORE -1")
     ins_lines.append(block2wght(cif_block))
-    ins_lines.append(f'FVAR {cif_block["_qcrbox.shelx.scale_factor"]}')
+    ins_lines.append(f"FVAR {cif_block['_qcrbox.shelx.scale_factor']}")

     return "\n".join(ins_lines)

@@ -227,10 +227,10 @@ def create_atom_string(
         atom_string = f"{start} {float(-uiso_mult): 4.2f}"
     elif (atom_site_aniso_loop is not None) and (label in atom_site_aniso_loop["_atom_site_aniso.label"]):
         index_aniso = list(atom_site_aniso_loop["_atom_site_aniso.label"]).index(label)
-        uijs = [f'{float(atom_site_aniso_loop[f"_atom_site_aniso.u_{ij}"][index_aniso]): 9.5f}' for ij in uij_indexes]
+        uijs = [f"{float(atom_site_aniso_loop[f'_atom_site_aniso.u_{ij}'][index_aniso]): 9.5f}" for ij in uij_indexes]
         atom_string = start + " " + " ".join(uijs)
     else:
-        atom_string = f'{start} {float(atom_site_loop["_atom_site.u_iso_or_equiv"][index]): 9.5f}'
+        atom_string = f"{start} {float(atom_site_loop['_atom_site.u_iso_or_equiv'][index]): 9.5f}"

     return " =\n ".join(wrap(atom_string))

@@ -322,9 +322,9 @@ def create_atom_list(cif_block: block) -> str:
         if attached_atom == ".":
             continue
         indexes = np.nonzero(atom_site_loop["_atom_site.calc_attached_atom"] == attached_atom)[0]
-        assert all(
-            (psn_id[int(i)] == psn_id[int(indexes[0])] for i in indexes[1:])
-        ), f"not all constrain posn ids are equal for {attached_atom}"
+        assert all((psn_id[int(i)] == psn_id[int(indexes[0])] for i in indexes[1:])), (
+            f"not all constrain posn ids are equal for {attached_atom}"
+        )
         attached_collect[attached_atom] = list(
             sorted(indexes, key=lambda x: atom_site_loop["_atom_site.qcrbox_constraint_posn_index"][int(x)])
         )
diff --git a/qcrboxtools/robots/eval/eval_files.py b/qcrboxtools/robots/eval/eval_files.py
index b7698c2..93207b6 100644
--- a/qcrboxtools/robots/eval/eval_files.py
+++ b/qcrboxtools/robots/eval/eval_files.py
@@ -828,7 +828,7 @@ def extract_data(self, text: str):
         for key, pattern in patterns.items():
             matches = re.findall(pattern, text, re.DOTALL)
             for i, match in enumerate(matches):
-                formatted_key = f"{key}_{i+1}" if key == "QVEC" and len(matches) > 1 else key
+                formatted_key = f"{key}_{i + 1}" if key == "QVEC" and len(matches) > 1 else key
                 if key in second_entry:
                     extracted_data[second_entry[key]] = match[0].strip()
                 extracted_data[formatted_key] = self.convert_to_numpy(match[1])
@@ -854,15 +854,15 @@ def value_as_string(self, key: str) -> str:
         value = self[key]
         if isinstance(value, np.ndarray) and value.ndim == 2:
             if key == "RMAT":
-                key_string = f'RMAT {self["CENTRING"]}\n'
+                key_string = f"RMAT {self['CENTRING']}\n"
             elif key == "TMAT":
-                key_string = f'TMAT {self["CENTRING"]} {self["POINTGROUP"]}\n'
+                key_string = f"TMAT {self['CENTRING']} {self['POINTGROUP']}\n"
             else:
                 key_string = key + " "
             return key_string + "\n".join("".join(f"{num: 12.7f}" for num in row) for row in value)
         elif isinstance(value, np.ndarray) and value.ndim == 1:
             key = "QVEC" if key.startswith("QVEC") else key
-            return f'{key} {" ".join(f"{num:.5f}" for num in value)}'
+            return f"{key} {' '.join(f'{num:.5f}' for num in value)}"
         else:
             return key + " " + " ".join(value)

diff --git a/qcrboxtools/robots/eval/eval_robots.py b/qcrboxtools/robots/eval/eval_robots.py
index 76252f1..09f0be3 100644
--- a/qcrboxtools/robots/eval/eval_robots.py
+++ b/qcrboxtools/robots/eval/eval_robots.py
@@ -798,7 +798,7 @@ def run(
         if focus_type is None:
             focus_type = "synchrotron"
         if focus_type not in possible_focusses:
-            raise ValueError(f'Invalid focus type, choose one of: {", ".join(possible_focusses)}')
+            raise ValueError(f"Invalid focus type, choose one of: {', '.join(possible_focusses)}")

         if polarisation_type is None:
             polarisation_type = "none"
@@ -820,7 +820,7 @@
             "o",
         )
         if polarisation_type not in possible_polarisations:
-            raise ValueError(f'Invalid polarisation, choose one of: {", ".join(possible_polarisations)}')
+            raise ValueError(f"Invalid polarisation, choose one of: {', '.join(possible_polarisations)}")

         if self.p4p_file is None:
             command_base = (
diff --git a/qcrboxtools/robots/olex2.py b/qcrboxtools/robots/olex2.py
index 10dd2f7..f6daab5 100644
--- a/qcrboxtools/robots/olex2.py
+++ b/qcrboxtools/robots/olex2.py
@@ -100,9 +100,9 @@ def structure_path(self, path: str):
         self.wait_for_completion(2000, "startup", cmd)

         load_cmds = [
-            f'file {path.with_suffix(".ins").name}',
-            f'export {path.with_suffix(".hkl").name}',
-            f'reap {path.with_suffix(".ins").name}',
+            f"file {path.with_suffix('.ins').name}",
+            f"export {path.with_suffix('.hkl').name}",
+            f"reap {path.with_suffix('.ins').name}",
         ]
         try:
             self.send_command("\n".join(load_cmds))
diff --git a/tests/analyse/quality/test_base.py b/tests/analyse/quality/test_base.py
index 53c78e1..0551088 100644
--- a/tests/analyse/quality/test_base.py
+++ b/tests/analyse/quality/test_base.py
@@ -15,6 +15,7 @@ def test_data_quality_from_level(input_level, result):
     assert data_quality_from_level(input_level) is result

+
 @pytest.mark.parametrize(
     "input_value, levels, expected_index",
     [
@@ -22,11 +23,12 @@ def test_data_quality_from_level(input_level, result):
         (3.5, (1.0, 2.0, 3.0, 4.0, np.inf), 3),
         (5.0, (1.0, 2.0, 3.0, 4.0, np.inf), 4),  # Should return index of last level
         (0.5, (1.0, 2.0, 3.0, 4.0, np.inf), 0),  # Should return index of first level
-    ]
+    ],
 )
 def test_ascending_levels2func(input_value, levels, expected_index):
     func = ascending_levels2func(levels)
-    assert func(input_value) == expected_index
+    assert func(input_value) == expected_index
+

 @pytest.mark.parametrize(
     "input_value, levels, expected_index",
@@ -35,7 +37,7 @@
     [
         (4.5, (4.0, 3.0, 2.0, 1.0, -1.0), 0),
         (3.5, (4.0, 3.0, 2.0, 1.0, -1.0), 1),
         (5.0, (4.0, 3.0, 2.0, 1.0, -1.0), 0),
         (0.5, (4.0, 3.0, 2.0, 1.0, -1.0), 4),
-    ]
+    ],
 )
 def test_descending_levels2func(input_value, levels, expected_index):
     func = descending_levels2func(levels)
diff --git a/tests/analyse/quality/test_precision.py b/tests/analyse/quality/test_precision.py
index d581dca..2cf1ab8 100644
--- a/tests/analyse/quality/test_precision.py
+++ b/tests/analyse/quality/test_precision.py
@@ -50,6 +50,7 @@ def test_cifblock2intensity_array(hkl_cell_cif_block):
     intensity_array = cif_block2intensity_array(hkl_cell_cif_block)
     assert intensity_array.is_xray_intensity_array()

+
 def test_precision_all_data(hkl_cell_cif_block):
     possible_indicators = [
         "d_min lower",
diff --git a/tests/cif/cif2cif/test_cif2cifyaml.py b/tests/cif/cif2cif/test_cif2cifyaml.py
index a115d71..c1d0d1c 100644
--- a/tests/cif/cif2cif/test_cif2cifyaml.py
+++ b/tests/cif/cif2cif/test_cif2cifyaml.py
@@ -450,9 +450,9 @@ def test_cif_output_entries_from_yml():
     assert yml_output_settings.required_entries == ["_cell_length_a"], "Failed to extract required entries"
     assert yml_output_settings.optional_entries == ["_cell_length_b"], "Failed to extract optional entries"
     correct_invalid = ["_cell_length_c", "_cell_volume", "_cell_angle_alpha"]
-    assert sorted(yml_output_settings.invalidated_entries) == sorted(
-        correct_invalid
-    ), "Failed to extract invalid entries"
+    assert sorted(yml_output_settings.invalidated_entries) == sorted(correct_invalid), (
+        "Failed to extract invalid entries"
+    )
     assert yml_output_settings.custom_categories == ["custom"], "Failed to extract custom categories"
     assert yml_output_settings.select_block == "0", "Failed to extract default output block value"

diff --git a/tests/cif/convert/test_cif2shelx_ins.py b/tests/cif/convert/test_cif2shelx_ins.py
index a331a61..a039e7c 100644
--- a/tests/cif/convert/test_cif2shelx_ins.py
+++ b/tests/cif/convert/test_cif2shelx_ins.py
@@ -223,9 +223,9 @@ def test_create_atom_string_with_uiso_mult(atom_site_data, index, uiso_mult, exp

     result = cif2shelx_ins.create_atom_string(index, atom_site_loop, atom_site_aniso_loop, uiso_mult)

-    assert (
-        result == expected_output
-    ), f"For atom at index {index} with uiso_mult {uiso_mult}, expected:\n{expected_output}\nbut got:\n{result}"
+    assert result == expected_output, (
+        f"For atom at index {index} with uiso_mult {uiso_mult}, expected:\n{expected_output}\nbut got:\n{result}"
+    )


 @pytest.fixture(name="afix_objects")
diff --git a/tests/cif/convert/test_shelxl_afix.py b/tests/cif/convert/test_shelxl_afix.py
index 107fac7..7b9cd5b 100644
--- a/tests/cif/convert/test_shelxl_afix.py
+++ b/tests/cif/convert/test_shelxl_afix.py
@@ -188,9 +188,9 @@ def test_afix2cif_update_tables(minimal_cif_block):
         "_atom_site_aniso.u_33",
     ]
     for column in changed_columns:
-        assert all(
-            float(new) != float(old) for new, old in zip(result[column], original[column])
-        ), f"{column} should be different"
+        assert all(float(new) != float(old) for new, old in zip(result[column], original[column])), (
+            f"{column} should be different"
+        )


 def test_afix2cif_add_columns(minimal_cif_block):
diff --git a/tests/cif/test_entrynames.py b/tests/cif/test_entrynames.py
index 69e8f59..cf809b2 100644
--- a/tests/cif/test_entrynames.py
+++ b/tests/cif/test_entrynames.py
@@ -135,9 +135,9 @@ def test_block_to_specific_keywords(unified_block, custom_categories):
     # Ensure all non-optional requested entries are present in the converted block
     for entry_name in requested_entries:
         if entry_name != "_nonexistent_entry":
-            assert (
-                entry_name in converted_block
-            ), f"Requested entry '{entry_name}' was not found in the converted block."
+            assert entry_name in converted_block, (
+                f"Requested entry '{entry_name}' was not found in the converted block."
+            )

     # Ensure the optional, non-existent entry does not cause an error and is rightly not present
     assert "_nonexistent_entry" not in converted_block, "Optional, non-existent entry was generated from nothing."
@@ -195,11 +195,11 @@ def test_cif_entries_present(mock_block):
     absent_entries = ["_missing_entry"]

     # Test with entries that are present
-    assert cif_entries_present(
-        mock_block, custom_categories, present_entries
-    ), "Function should return True when all entries are present."
+    assert cif_entries_present(mock_block, custom_categories, present_entries), (
+        "Function should return True when all entries are present."
+    )

     # Test with at least one absent entry
-    assert not cif_entries_present(
-        mock_block, custom_categories, present_entries + absent_entries
-    ), "Function should return False when any specified entry is absent."
+    assert not cif_entries_present(mock_block, custom_categories, present_entries + absent_entries), (
+        "Function should return False when any specified entry is absent."
+    )
diff --git a/tests/cif/test_merge.py b/tests/cif/test_merge.py
index 15911bf..056fab8 100644
--- a/tests/cif/test_merge.py
+++ b/tests/cif/test_merge.py
@@ -82,23 +82,23 @@ def test_merge_block_conflicts():
         "C1",
         "C2",
     ], "Unique block 1 atom_site.label not correctly copied"
-    assert "?" not in list(
-        merged_block["_atom_site_aniso.u_11"]
-    ), "atom_site_aniso not correctly merged from both blocks"
-    assert "?" not in list(
-        merged_block["_atom_site_aniso.u_23"]
-    ), "atom_site_aniso not correctly merged from both blocks"
-    assert (
-        list(merged_block["_diffrn_refln.test_column"]).count("?") == 3
-    ), "Unknown values in diffrn_refln.test_column not filled as expected"
-    assert (
-        merged_block["_space_group_symop.test_entry"][0] == "copy this"
-    ), "Additional value from block1 (merged by .id) not present"
+    assert "?" not in list(merged_block["_atom_site_aniso.u_11"]), (
+        "atom_site_aniso not correctly merged from both blocks"
+    )
+    assert "?" not in list(merged_block["_atom_site_aniso.u_23"]), (
+        "atom_site_aniso not correctly merged from both blocks"
+    )
+    assert list(merged_block["_diffrn_refln.test_column"]).count("?") == 3, (
+        "Unknown values in diffrn_refln.test_column not filled as expected"
+    )
+    assert merged_block["_space_group_symop.test_entry"][0] == "copy this", (
+        "Additional value from block1 (merged by .id) not present"
+    )
     assert float(merged_block["_cell.length_c"]) == 12.0, "Block2's cell.length_c does not overwrite block1 as expected"
     assert float(merged_block["_cell.volume"]) == 1200.0, "Block2's unique value cell.volume not copied as expected"
-    assert (
-        merged_block["_space_group.name_h-m_alt"] == "P 1"
-    ), "Block1's unique value space_group.name_h-m_alt not correctly copied"
+    assert merged_block["_space_group.name_h-m_alt"] == "P 1", (
+        "Block1's unique value space_group.name_h-m_alt not correctly copied"
+    )


 def test_merge_block_string_marker(loop1, loop2):
@@ -157,20 +157,20 @@ def test_merge_cif_files(selection_markers, tmp_path):
         "C1",
         "C2",
     ], "Unique block 1 atom_site.label not correctly copied"
-    assert "?" not in list(
-        merged_block["_atom_site_aniso.u_11"]
-    ), "atom_site_aniso not correctly merged from both blocks"
-    assert "?" not in list(
-        merged_block["_atom_site_aniso.u_23"]
-    ), "atom_site_aniso not correctly merged from both blocks"
-    assert (
-        list(merged_block["_diffrn_refln.test_column"]).count("?") == 3
-    ), "Unknown values in diffrn_refln.test_column not filled as expected"
-    assert (
-        merged_block["_space_group_symop.test_entry"][0] == "copy this"
-    ), "Additional value from block1 (merged by .id) not present"
+    assert "?" not in list(merged_block["_atom_site_aniso.u_11"]), (
+        "atom_site_aniso not correctly merged from both blocks"
+    )
+    assert "?" not in list(merged_block["_atom_site_aniso.u_23"]), (
+        "atom_site_aniso not correctly merged from both blocks"
+    )
+    assert list(merged_block["_diffrn_refln.test_column"]).count("?") == 3, (
+        "Unknown values in diffrn_refln.test_column not filled as expected"
+    )
+    assert merged_block["_space_group_symop.test_entry"][0] == "copy this", (
+        "Additional value from block1 (merged by .id) not present"
+    )
     assert float(merged_block["_cell.length_c"]) == 12.0, "Block2's cell.length_c does not overwrite block1 as expected"
     assert float(merged_block["_cell.volume"]) == 1200.0, "Block2's unique value cell.volume not copied as expected"
-    assert (
-        merged_block["_space_group.name_h-m_alt"] == "P 1"
-    ), "Block1's unique value space_group.name_h-m_alt not correctly copied"
+    assert merged_block["_space_group.name_h-m_alt"] == "P 1", (
+        "Block1's unique value space_group.name_h-m_alt not correctly copied"
+    )
diff --git a/tests/cif/test_uncertainties.py b/tests/cif/test_uncertainties.py
index 6416a50..ef61d2a 100644
--- a/tests/cif/test_uncertainties.py
+++ b/tests/cif/test_uncertainties.py
@@ -286,20 +286,20 @@ def test_merge_su_block(exclude, sample_block_with_su):
     if exclude is not None:
         # Tests for non-looped entries with exclusion
         assert merged_block["_cell.length_a"] == "10.0", "Failed to exclude _cell.length_a from merging"
-        assert (
-            "_cell.length_a_su" in merged_block
-        ), "_cell.length_a_su should not be deleted when _cell.length_a is excluded"
+        assert "_cell.length_a_su" in merged_block, (
+            "_cell.length_a_su should not be deleted when _cell.length_a is excluded"
+        )

         # Test for looped entries with exclusion
         assert merged_block["_atom_site.fract_x"][0] == "0.234", "Failed to exclude _atom_site.fract_x from merging"
-        assert (
-            "_atom_site.fract_x_su" in merged_block
-        ), "_atom_site.fract_x_su should not be deleted when _atom_site.fract_x is excluded"
+        assert "_atom_site.fract_x_su" in merged_block, (
+            "_atom_site.fract_x_su should not be deleted when _atom_site.fract_x is excluded"
+        )

         # Test for su entry without an existing base entry
-        assert (
-            "_cell.length_c_su" in merged_block
-        ), "_cell.length_c_su should not be deleted when _cell.length_c does not exist"
+        assert "_cell.length_c_su" in merged_block, (
+            "_cell.length_c_su should not be deleted when _cell.length_c does not exist"
+        )

     # Ensure other entries not in exclude list are merged correctly
     assert merged_block["_cell.length_b"] == "20.00(2)", "Failed to merge cell.length.b and its SU correctly"
@@ -307,9 +307,9 @@

     # Additional tests for looped entries not excluded
     assert merged_block["_atom_site.fract_y"][1] == "0.68", "Failed to merge _atom_site.fract_y and format correctly"
-    assert (
-        merged_block["_atom_site.fract_z"][2] == "-0.012(9)"
-    ), "Failed to merge _atom_site.fract_z and its SU correctly"
+    assert merged_block["_atom_site.fract_z"][2] == "-0.012(9)", (
+        "Failed to merge _atom_site.fract_z and its SU correctly"
+    )
     assert "_atom_site.fract_y_su" not in merged_block, "Did not delete SU entry where corresponding entry existed"
     assert "_atom_site.fract_z_su" not in merged_block, "Did not delete SU entry where corresponding entry existed"

@@ -371,12 +371,12 @@ def test_merge_su_cif(cif_model_with_mergable_blocks):
     assert "_cell.length_c" not in block1, "Block1: Unexpectedly found _cell.length_c which shouldn't exist"
     # Check looped entries in block1 for correct merging
     assert block1["_atom_site.fract_x"][0] == "0.234", "Block1: Failed to exclude _atom_site.fract_x from merge"
-    assert (
-        block1["_atom_site.fract_y"][2] == "-0.79(7)"
-    ), "Block1: Failed to merge _atom_site.fract_y and its SU correctly"
-    assert (
-        block1["_atom_site.fract_z"][1] == "-0.90(9)"
-    ), "Block1: Failed to merge _atom_site.fract_z and its SU correctly"
+    assert block1["_atom_site.fract_y"][2] == "-0.79(7)", (
+        "Block1: Failed to merge _atom_site.fract_y and its SU correctly"
+    )
+    assert block1["_atom_site.fract_z"][1] == "-0.90(9)", (
+        "Block1: Failed to merge _atom_site.fract_z and its SU correctly"
+    )

     # Assertions for block2, ensuring modifications are processed and merged correctly
     block2 = processed_cif["block2"]
@@ -385,9 +385,9 @@
     assert block2["_cell.length_c"] == "30.00(4)", "Block2: Failed to add and merge _cell.length_c and its SU correctly"
     # Check looped entries in block2 for correct merging
     assert block2["_atom_site.fract_x"][0] == "0.123", "Block1: Failed to exclude _atom_site.fract_x from merge"
-    assert (
-        block2["_atom_site.fract_y"][2] == "-0.68(7)"
-    ), "Block2: Failed to merge _atom_site.fract_y and its SU correctly"
-    assert (
-        block2["_atom_site.fract_z"][1] == "-0.89(9)"
-    ), "Block2: Failed to merge _atom_site.fract_z and its SU correctly"
+    assert block2["_atom_site.fract_y"][2] == "-0.68(7)", (
+        "Block2: Failed to merge _atom_site.fract_y and its SU correctly"
+    )
+    assert block2["_atom_site.fract_z"][1] == "-0.89(9)", (
+        "Block2: Failed to merge _atom_site.fract_z and its SU correctly"
+    )
diff --git a/tests/robots/eval/test_eval_files.py b/tests/robots/eval/test_eval_files.py
index fe333f5..f736921 100644
--- a/tests/robots/eval/test_eval_files.py
+++ b/tests/robots/eval/test_eval_files.py
@@ -248,9 +248,9 @@ def test_rmat_to_rmat_file(tmp_path):

     # Compare each line of the file content with the expected content
     for line, expected_line in zip(content, expected_content):
-        assert (
-            line == expected_line
-        ), f"Line in file does not match expected line: {line.strip()} != {expected_line.strip()}"
+        assert line == expected_line, (
+            f"Line in file does not match expected line: {line.strip()} != {expected_line.strip()}"
+        )


 def test_rmat_to_cif_file(tmp_path):
diff --git a/tests/robots/eval/test_eval_robots.py b/tests/robots/eval/test_eval_robots.py
index 415399d..152419f 100644
--- a/tests/robots/eval/test_eval_robots.py
+++ b/tests/robots/eval/test_eval_robots.py
@@ -39,14 +39,14 @@ def mocked_subprocess_call(
     Instead of calling the program, check if init file was created.
     """
     init_file = Path(cwd) / f"{program_name}.init"
-    assert (
-        program_name == expected_program_name
-    ), f"Expected program name {expected_program_name}, got {program_name}"
+    assert program_name == expected_program_name, (
+        f"Expected program name {expected_program_name}, got {program_name}"
+    )
     assert init_file.exists(), f"Init file {init_file} was not created."
     if expected_init_content is not None:
-        assert (
-            init_file.read_text(encoding="UTF-8") == expected_init_content
-        ), "Init file content does not match the expected value."
+        assert init_file.read_text(encoding="UTF-8") == expected_init_content, (
+            "Init file content does not match the expected value."
+        )
     if raise_os_error and not shell:
         raise OSError("Mocked OS error")