From c4a115c21f77638cb0e73791c77b335f95ed7034 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 30 May 2016 21:14:53 -0700 Subject: [PATCH 001/204] create NMR FireTasks stub --- mpworks/firetasks/nmr_tasks.py | 336 +++++++++++++++++++++++++++++ mpworks/workflows/snl_to_wf_nmr.py | 79 +++++++ 2 files changed, 415 insertions(+) create mode 100644 mpworks/firetasks/nmr_tasks.py create mode 100644 mpworks/workflows/snl_to_wf_nmr.py diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py new file mode 100644 index 00000000..1578ea46 --- /dev/null +++ b/mpworks/firetasks/nmr_tasks.py @@ -0,0 +1,336 @@ +from monty.os.path import zpath +import os +import json +from pymongo import MongoClient +import numpy as np +from decimal import Decimal + +__author__ = 'Wei Chen' +__credits__ = 'Joseph Montoya' +__maintainer__ = 'Joseph Montoya ', + projects=["Elasticity"]) + tasks = [AddSNLTask()] + snl_priority = fw_spec.get('priority', 1) + spec = {'task_type': 'Add Deformed Struct to SNL database', + 'snl': snl.as_dict(), + '_queueadapter': QA_DB, + '_priority': snl_priority} + if 'snlgroup_id' in fw_spec and isinstance(snl, MPStructureNL): + spec['force_mpsnl'] = snl.as_dict() + spec['force_snlgroup_id'] = fw_spec['snlgroup_id'] + del spec['snl'] + fws.append(Firework(tasks, spec, + name=get_slug(f + '--' + spec['task_type']), + fw_id=-1000+i*10)) + connections[-1000+i*10] = [-999+i*10] + spec = snl_to_wf._snl_to_spec(snl, + parameters={'exact_structure':True}) + spec = update_spec_force_convergence(spec) + spec['deformation_matrix'] = d_struct_set.deformations[i].tolist() + spec['original_task_id'] = fw_spec["task_id"] + spec['_priority'] = fw_spec['_priority']*2 + #Turn off dupefinder for deformed structure + del spec['_dupefinder'] + spec['task_type'] = "Optimize deformed structure" + fws.append(Firework([VaspWriterTask(), SetupElastConstTask(), + get_custodian_task(spec)], + spec, + name=get_slug(f + '--' + spec['task_type']), + fw_id=-999+i*10)) + + 
priority = fw_spec['_priority']*3 + spec = {'task_type': 'VASP db insertion', + '_priority': priority, + '_allow_fizzled_parents': True, + '_queueadapter': QA_DB, + 'elastic_constant':"deformed_structure", + 'clean_task_doc':True, + 'deformation_matrix':d_struct_set.deformations[i].tolist(), + 'original_task_id':fw_spec["task_id"]} + fws.append(Firework([VaspToDBTask(), AddElasticDataToDBTask()], spec, + name=get_slug(f + '--' + spec['task_type']), + fw_id=-998+i*10)) + connections[-999+i*10] = [-998+i*10] + wf.append(Workflow(fws, connections)) + return FWAction(additions=wf) + + +class AddElasticDataToDBTask(FireTaskBase, FWSerializable): + _fw_name = "Add Elastic Data to DB" + + def run_task(self, fw_spec): + db_dir = os.environ['DB_LOC'] + db_path = os.path.join(db_dir, 'tasks_db.json') + i = fw_spec['original_task_id'] + + with open(db_path) as f: + db_creds = json.load(f) + connection = MongoClient(db_creds['host'], db_creds['port']) + tdb = connection[db_creds['database']] + tdb.authenticate(db_creds['admin_user'], db_creds['admin_password']) + tasks = tdb[db_creds['collection']] + elasticity = tdb['elasticity'] + ndocs = tasks.find({"original_task_id": i, + "state":"successful"}).count() + existing_doc = elasticity.find_one({"relaxation_task_id" : i}) + if existing_doc: + print "Updating: " + i + else: + print "New material: " + i + d = {"analysis": {}, "error": [], "warning": []} + d["ndocs"] = ndocs + o = tasks.find_one({"task_id" : i}, + {"pretty_formula" : 1, "spacegroup" : 1, + "snl" : 1, "snl_final" : 1, "run_tags" : 1}) + if not o: + raise ValueError("Cannot find original task id") + # Get stress from deformed structure + d["deformation_tasks"] = {} + ss_dict = {} + for k in tasks.find({"original_task_id": i}, + {"deformation_matrix":1, + "calculations.output":1, + "state":1, "task_id":1}): + defo = k['deformation_matrix'] + d_ind = np.nonzero(defo - np.eye(3)) + delta = Decimal((defo - np.eye(3))[d_ind][0]) + # Normal deformation + if d_ind[0] == 
d_ind[1]: + dtype = "_".join(["d", str(d_ind[0][0]), + "{:.0e}".format(delta)]) + # Shear deformation + else: + dtype = "_".join(["s", str(d_ind[0] + d_ind[1]), + "{:.0e}".format(delta)]) + sm = IndependentStrain(defo) + d["deformation_tasks"][dtype] = {"state" : k["state"], + "deformation_matrix" : defo, + "strain" : sm.tolist(), + "task_id": k["task_id"]} + if k["state"] == "successful": + st = Stress(k["calculations"][-1]["output"] \ + ["ionic_steps"][-1]["stress"]) + ss_dict[sm] = st + d["snl"] = o["snl"] + if "run_tags" in o.keys(): + d["run_tags"] = o["run_tags"] + for tag in o["run_tags"]: + if isinstance(tag, dict): + if "input_id" in tag.keys(): + d["input_mp_id"] = tag["input_id"] + d["snl_final"] = o["snl_final"] + d["pretty_formula"] = o["pretty_formula"] + + # Old input mp-id style + if o["snl"]["about"].get("_mp_id"): + d["material_id"] = o["snl"]["about"]["_mp_id"] + + # New style + elif "input_mp_id" in d: + d["material_id"] = d["input_mp_id"] + else: + d["material_id"] = None + d["relaxation_task_id"] = i + + calc_struct = Structure.from_dict(o["snl_final"]) + # TODO: + # JHM: This test is unnecessary at the moment, but should be redone + """ + conventional = is_conventional(calc_struct) + if conventional: + d["analysis"]["is_conventional"] = True + else: + d["analysis"]["is_conventional"] = False + """ + d["spacegroup"]=o.get("spacegroup", "Unknown") + + if ndocs >= 20: + # Perform Elastic tensor fitting and analysis + result = ElasticTensor.from_stress_dict(ss_dict) + d["elastic_tensor"] = result.tolist() + kg_average = result.kg_average + d.update({"K_Voigt":kg_average[0], "G_Voigt":kg_average[1], + "K_Reuss":kg_average[2], "G_Reuss":kg_average[3], + "K_Voigt_Reuss_Hill":kg_average[4], + "G_Voigt_Reuss_Hill":kg_average[5]}) + d["universal_anisotropy"] = result.universal_anisotropy + d["homogeneous_poisson"] = result.homogeneous_poisson + if ndocs < 24: + d["warning"].append("less than 24 tasks completed") + + # Perform filter checks + symm_t = 
result.symmetrized + d["symmetrized_tensor"] = symm_t.tolist() + d["analysis"]["not_rare_earth"] = True + for s in calc_struct.species: + if s.is_rare_earth_metal: + d["analysis"]["not_rare_earth"] = False + eigvals = np.linalg.eigvals(symm_t) + eig_positive = np.all((eigvals > 0) & np.isreal(eigvals)) + d["analysis"]["eigval_positive"] = bool(eig_positive) + c11 = symm_t[0][0] + c12 = symm_t[0][1] + c13 = symm_t[0][2] + c23 = symm_t[1][2] + d["analysis"]["c11_c12"]= not (abs((c11-c12)/c11) < 0.05 + or c11 < c12) + d["analysis"]["c11_c13"]= not (abs((c11-c13)/c11) < 0.05 + or c11 < c13) + d["analysis"]["c11_c23"]= not (abs((c11-c23)/c11) < 0.1 + or c11 < c23) + d["analysis"]["K_R"] = not (d["K_Reuss"] < 2) + d["analysis"]["G_R"] = not (d["G_Reuss"] < 2) + d["analysis"]["K_V"] = not (d["K_Voigt"] < 2) + d["analysis"]["G_V"] = not (d["G_Voigt"] < 2) + filter_state = np.all(d["analysis"].values()) + d["analysis"]["filter_pass"] = bool(filter_state) + d["analysis"]["eigval"] = list(eigvals) + + # TODO: + # JHM: eventually we can reintroduce the IEEE conversion + # but as of now it's not being used, and it should + # be in pymatgen + """ + # IEEE Conversion + try: + ieee_tensor = IEEE_conversion.get_ieee_tensor(struct_final, result) + d["elastic_tensor_IEEE"] = ieee_tensor[0].tolist() + d["analysis"]["IEEE"] = True + except Exception as e: + d["elastic_tensor_IEEE"] = None + d["analysis"]["IEEE"] = False + d["error"].append("Unable to get IEEE tensor: {}".format(e)) + """ + # Add thermal properties + nsites = calc_struct.num_sites + volume = calc_struct.volume + natoms = calc_struct.composition.num_atoms + weight = calc_struct.composition.weight + num_density = 1e30 * nsites / volume + mass_density = 1.6605e3 * nsites * volume * weight / \ + (natoms * volume) + tot_mass = sum([e.atomic_mass for e in calc_struct.species]) + avg_mass = 1.6605e-27 * tot_mass / natoms + y_mod = 9e9 * result.k_vrh * result.g_vrh / \ + (3. 
* result.k_vrh * result.g_vrh) + trans_v = 1e9 * result.k_vrh / mass_density**0.5 + long_v = 1e9 * result.k_vrh + \ + 4./3. * result.g_vrh / mass_density**0.5 + clarke = 0.87 * 1.3806e-23 * avg_mass**(-2./3.) * \ + mass_density**(1./6.) * y_mod**0.5 + cahill = 1.3806e-23 / 2.48 * num_density**(2./3.) * long_v + \ + 2 * trans_v + snyder_ac = 0.38483 * avg_mass * \ + (long_v + 2./3.*trans_v)**3. / \ + (300. * num_density**(-2./3.) * nsites**(1./3.)) + snyder_opt = 1.66914e-23 * (long_v + 2./3.*trans_v) / \ + num_density**(-2./3.) * \ + (1 - nsites**(-1./3.)) + snyder_total = snyder_ac + snyder_opt + debye = 2.489e-11 * avg_mass**(-1./3.) * \ + mass_density**(-1./6.) * y_mod**0.5 + + d["thermal"]={"num_density" : num_density, + "mass_density" : mass_density, + "avg_mass" : avg_mass, + "num_atom_per_unit_formula" : natoms, + "youngs_modulus" : y_mod, + "trans_velocity" : trans_v, + "long_velocity" : long_v, + "clarke" : clarke, + "cahill" : cahill, + "snyder_acou_300K" : snyder_ac, + "snyder_opt" : snyder_opt, + "snyder_total" : snyder_total, + "debye": debye + } + else: + d['state'] = "Fewer than 20 successful tasks completed" + return FWAction() + + if o["snl"]["about"].get("_kpoint_density"): + d["kpoint_density"]= o["snl"]["about"].get("_kpoint_density") + + if d["error"]: + raise ValueError("Elastic analysis failed: {}".format(d["error"])) + elif d["analysis"]["filter_pass"]: + d["state"] = "successful" + else: + d["state"] = "filter_failed" + elasticity.update({"relaxation_task_id": d["relaxation_task_id"]}, + d, upsert=True) + return FWAction() diff --git a/mpworks/workflows/snl_to_wf_nmr.py b/mpworks/workflows/snl_to_wf_nmr.py new file mode 100644 index 00000000..d6b711f9 --- /dev/null +++ b/mpworks/workflows/snl_to_wf_nmr.py @@ -0,0 +1,79 @@ +from pymatgen.io.vasp import Poscar +from mpworks.firetasks.elastic_tasks import SetupElastConstTask, SetupFConvergenceTask, SetupDeformedStructTask + +__author__ = 'weichen' + +from fireworks.core.firework import 
Firework, Workflow +from fireworks.utilities.fw_utilities import get_slug +from mpworks.firetasks.custodian_task import get_custodian_task +from mpworks.firetasks.snl_tasks import AddSNLTask +from mpworks.firetasks.vasp_io_tasks import VaspCopyTask, VaspWriterTask, \ + VaspToDBTask +from mpworks.firetasks.vasp_setup_tasks import SetupGGAUTask +from mpworks.snl_utils.mpsnl import get_meta_from_structure, MPStructureNL +from mpworks.workflows.wf_settings import QA_DB, QA_VASP, QA_CONTROL +from pymatgen import Composition +from mpworks.workflows import snl_to_wf +from mpworks.firetasks.elastic_tasks import update_spec_force_convergence +from collections import defaultdict + + +def snl_to_wf_elastic(snl, parameters): + # parameters["user_vasp_settings"] specifies user defined incar/kpoints parameters + fws = [] + connections = defaultdict(list) + parameters = parameters if parameters else {} + + snl_priority = parameters.get('priority', 1) + priority = snl_priority * 2 # once we start a job, keep going! 
+ + f = Composition(snl.structure.composition.reduced_formula).alphabetical_formula + + # add the SNL to the SNL DB and figure out duplicate group + tasks = [AddSNLTask()] + spec = {'task_type': 'Add to SNL database', 'snl': snl.as_dict(), + '_queueadapter': QA_DB, '_priority': snl_priority} + if 'snlgroup_id' in parameters and isinstance(snl, MPStructureNL): + spec['force_mpsnl'] = snl.as_dict() + spec['force_snlgroup_id'] = parameters['snlgroup_id'] + del spec['snl'] + fws.append(Firework(tasks, spec, + name=get_slug(f + '--' + spec['task_type']), fw_id=0)) + connections[0] = [1] + + parameters["exact_structure"] = True + # run GGA structure optimization for force convergence + spec = snl_to_wf._snl_to_spec(snl, parameters=parameters) + user_vasp_settings = parameters.get("user_vasp_settings") + spec = update_spec_force_convergence(spec, user_vasp_settings) + spec['run_tags'].append("origin") + spec['_priority'] = priority + spec['_queueadapter'] = QA_VASP + del spec['_dupefinder'] + spec['task_type'] = "Vasp force convergence optimize structure (2x)" + tasks = [VaspWriterTask(), get_custodian_task(spec)] + fws.append(Firework(tasks, spec, + name=get_slug(f + '--' + spec['task_type']), fw_id=1)) + + # insert into DB - GGA structure optimization + spec = {'task_type': 'VASP db insertion', '_priority': priority, + '_allow_fizzled_parents': True, '_queueadapter': QA_DB, + 'clean_task_doc':True, 'elastic_constant':"force_convergence"} + fws.append(Firework([VaspToDBTask()], spec, + name=get_slug(f + '--' + spec['task_type']), fw_id=2)) + connections[1] = [2] + + spec = {'task_type': 'Setup Deformed Struct Task', '_priority': priority, + '_queueadapter': QA_CONTROL} + fws.append(Firework([SetupDeformedStructTask()], spec, + name=get_slug(f + '--' + spec['task_type']),fw_id=3)) + connections[2] = [3] + + wf_meta = get_meta_from_structure(snl.structure) + wf_meta['run_version'] = 'May 2013 (1)' + + if '_materialsproject' in snl.data and 'submission_id' in 
snl.data['_materialsproject']: + wf_meta['submission_id'] = snl.data['_materialsproject']['submission_id'] + + return Workflow(fws, connections, name=Composition( + snl.structure.composition.reduced_formula).alphabetical_formula, metadata=wf_meta) From 09b736b8368ebaafd57ea1b3651f38870551a199 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 30 May 2016 23:14:35 -0700 Subject: [PATCH 002/204] add triple jump structure relaxation strategy parameters --- mpworks/firetasks/triple_jump_relax_set.yaml | 61 ++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 mpworks/firetasks/triple_jump_relax_set.yaml diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml new file mode 100644 index 00000000..3cceaad6 --- /dev/null +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -0,0 +1,61 @@ +STEP1: + INCAR: + EDIFF: -1.0e-06 + EDIFFG: -0.1 + IBRION: 1 + ISIF: 3 + 'ISMEAR:': -5 + LREAL: AUTO + NELMIN: 5 + NSW: 200 + PREC: ACCURATE + SIGMA: 0 + POTCAR: + C: C + H: H + Mg: Mg_sv + O: O +STEP2: + INCAR: + ADDGRID: true + EDIFF: -1.0e-08 + EDIFFG: -0.01 + IBRION: 3 + IOPT: 7 + ISIF: 3 + 'ISMEAR:': -5 + LREAL: AUTO + NELMIN: 5 + NSW: 200 + POTIM: 0 + PREC: HIGH + ROPT_PER_ATOM: -0.0001 + SIGMA: 0 + POTCAR: + C: C_h + H: H_h + Mg: Mg_sv + O: O_h +STEP3: + INCAR: + ADDGRID: true + EDIFF: -1.0e-08 + EDIFFG: -0.002 + ENAUG_ENHANCE_RATIO: 0.75 + ENCUT_ENHANCE_RATIO: 0.75 + IBRION: 3 + IOPT: 7 + ISIF: 3 + 'ISMEAR:': -5 + LREAL: AUTO + NELMIN: 5 + NSW: 100 + POTIM: 0 + PREC: HIGH + ROPT_PER_ATOM: -0.0001 + SIGMA: 0 + POTCAR: + C: C_h + H: H_h + Mg: Mg_sv + O: O_h From d93cb3051d2cfcf15336b2a7219c9cbe31adc90f Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 30 May 2016 23:15:36 -0700 Subject: [PATCH 003/204] fix typo in ISMEAR --- mpworks/firetasks/triple_jump_relax_set.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml 
b/mpworks/firetasks/triple_jump_relax_set.yaml index 3cceaad6..5e176794 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -4,7 +4,7 @@ STEP1: EDIFFG: -0.1 IBRION: 1 ISIF: 3 - 'ISMEAR:': -5 + ISMEAR: -5 LREAL: AUTO NELMIN: 5 NSW: 200 @@ -23,7 +23,7 @@ STEP2: IBRION: 3 IOPT: 7 ISIF: 3 - 'ISMEAR:': -5 + ISMEAR: -5 LREAL: AUTO NELMIN: 5 NSW: 200 @@ -46,7 +46,7 @@ STEP3: IBRION: 3 IOPT: 7 ISIF: 3 - 'ISMEAR:': -5 + ISMEAR: -5 LREAL: AUTO NELMIN: 5 NSW: 100 From dc91100cc3b489868b7dde2984ad21c2b25c05d5 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 31 May 2016 10:51:51 -0700 Subject: [PATCH 004/204] also set ENCUT in step 1 --- mpworks/firetasks/triple_jump_relax_set.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 5e176794..62327ac5 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -2,12 +2,14 @@ STEP1: INCAR: EDIFF: -1.0e-06 EDIFFG: -0.1 + ENCUT_ENHANCE_RATIO: 0.2 IBRION: 1 ISIF: 3 ISMEAR: -5 LREAL: AUTO NELMIN: 5 NSW: 200 + POTIM: 0.3 PREC: ACCURATE SIGMA: 0 POTCAR: From b3a2939671ddf8afaf50ed0b2f4bc80b420a8a02 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 31 May 2016 10:54:23 -0700 Subject: [PATCH 005/204] set EDIFF=1.0E-10 in step 3 --- mpworks/firetasks/triple_jump_relax_set.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 62327ac5..dec86d52 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -41,7 +41,7 @@ STEP2: STEP3: INCAR: ADDGRID: true - EDIFF: -1.0e-08 + EDIFF: -1.0e-10 EDIFFG: -0.002 ENAUG_ENHANCE_RATIO: 0.75 ENCUT_ENHANCE_RATIO: 0.75 From 0cec9e05696bc269a5cda4b048f524c2d4796ed2 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 31 May 2016 
12:11:31 -0700 Subject: [PATCH 006/204] also include K-points in triple jump relaxation settings --- mpworks/firetasks/triple_jump_relax_set.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index dec86d52..76169d91 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -12,6 +12,8 @@ STEP1: POTIM: 0.3 PREC: ACCURATE SIGMA: 0 + KPOINTS: + grid_density: 1000 POTCAR: C: C H: H @@ -33,6 +35,8 @@ STEP2: PREC: HIGH ROPT_PER_ATOM: -0.0001 SIGMA: 0 + KPOINTS: + grid_density: 3000 POTCAR: C: C_h H: H_h @@ -56,6 +60,8 @@ STEP3: PREC: HIGH ROPT_PER_ATOM: -0.0001 SIGMA: 0 + KPOINTS: + grid_density: 6000 POTCAR: C: C_h H: H_h From 276edf9344a573cf25430f1d253364a475d985a4 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 31 May 2016 12:28:53 -0700 Subject: [PATCH 007/204] tune FIRE optimizer parameters in step 3 --- mpworks/firetasks/triple_jump_relax_set.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 76169d91..72b4060d 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -49,17 +49,20 @@ STEP3: EDIFFG: -0.002 ENAUG_ENHANCE_RATIO: 0.75 ENCUT_ENHANCE_RATIO: 0.75 + FTIMEMAX: 0.5 IBRION: 3 IOPT: 7 ISIF: 3 ISMEAR: -5 LREAL: AUTO + MAXMOVE: 0.05 NELMIN: 5 NSW: 100 POTIM: 0 PREC: HIGH ROPT_PER_ATOM: -0.0001 SIGMA: 0 + TIMESTEP: 0.05 KPOINTS: grid_density: 6000 POTCAR: From 8db9fbfc66612a89101dfa2aefaee4dc6ffe69c6 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 31 May 2016 15:39:19 -0700 Subject: [PATCH 008/204] translate config file to VaspInputSet --- mpworks/firetasks/nmr_tasks.py | 422 +++++++---------------------- mpworks/workflows/snl_to_wf_nmr.py | 40 +-- 2 files changed, 125 insertions(+), 337 deletions(-) diff --git 
a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 1578ea46..0417dd0a 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -1,336 +1,118 @@ -from monty.os.path import zpath +import copy import os -import json -from pymongo import MongoClient -import numpy as np -from decimal import Decimal - -__author__ = 'Wei Chen' -__credits__ = 'Joseph Montoya' -__maintainer__ = 'Joseph Montoya ', - projects=["Elasticity"]) - tasks = [AddSNLTask()] - snl_priority = fw_spec.get('priority', 1) - spec = {'task_type': 'Add Deformed Struct to SNL database', - 'snl': snl.as_dict(), - '_queueadapter': QA_DB, - '_priority': snl_priority} - if 'snlgroup_id' in fw_spec and isinstance(snl, MPStructureNL): - spec['force_mpsnl'] = snl.as_dict() - spec['force_snlgroup_id'] = fw_spec['snlgroup_id'] - del spec['snl'] - fws.append(Firework(tasks, spec, - name=get_slug(f + '--' + spec['task_type']), - fw_id=-1000+i*10)) - connections[-1000+i*10] = [-999+i*10] - spec = snl_to_wf._snl_to_spec(snl, - parameters={'exact_structure':True}) - spec = update_spec_force_convergence(spec) - spec['deformation_matrix'] = d_struct_set.deformations[i].tolist() - spec['original_task_id'] = fw_spec["task_id"] - spec['_priority'] = fw_spec['_priority']*2 - #Turn off dupefinder for deformed structure - del spec['_dupefinder'] - spec['task_type'] = "Optimize deformed structure" - fws.append(Firework([VaspWriterTask(), SetupElastConstTask(), - get_custodian_task(spec)], - spec, - name=get_slug(f + '--' + spec['task_type']), - fw_id=-999+i*10)) - - priority = fw_spec['_priority']*3 - spec = {'task_type': 'VASP db insertion', - '_priority': priority, - '_allow_fizzled_parents': True, - '_queueadapter': QA_DB, - 'elastic_constant':"deformed_structure", - 'clean_task_doc':True, - 'deformation_matrix':d_struct_set.deformations[i].tolist(), - 'original_task_id':fw_spec["task_id"]} - fws.append(Firework([VaspToDBTask(), AddElasticDataToDBTask()], spec, - name=get_slug(f + 
'--' + spec['task_type']), - fw_id=-998+i*10)) - connections[-999+i*10] = [-998+i*10] - wf.append(Workflow(fws, connections)) - return FWAction(additions=wf) -class AddElasticDataToDBTask(FireTaskBase, FWSerializable): - _fw_name = "Add Elastic Data to DB" + # mpvis = MPGGAVaspInputSet(user_incar_settings=incar_enforce) if enforce_gga else MPVaspInputSet(user_incar_settings=incar_enforce) - def run_task(self, fw_spec): - db_dir = os.environ['DB_LOC'] - db_path = os.path.join(db_dir, 'tasks_db.json') - i = fw_spec['original_task_id'] + mpvis = None - with open(db_path) as f: - db_creds = json.load(f) - connection = MongoClient(db_creds['host'], db_creds['port']) - tdb = connection[db_creds['database']] - tdb.authenticate(db_creds['admin_user'], db_creds['admin_password']) - tasks = tdb[db_creds['collection']] - elasticity = tdb['elasticity'] - ndocs = tasks.find({"original_task_id": i, - "state":"successful"}).count() - existing_doc = elasticity.find_one({"relaxation_task_id" : i}) - if existing_doc: - print "Updating: " + i - else: - print "New material: " + i - d = {"analysis": {}, "error": [], "warning": []} - d["ndocs"] = ndocs - o = tasks.find_one({"task_id" : i}, - {"pretty_formula" : 1, "spacegroup" : 1, - "snl" : 1, "snl_final" : 1, "run_tags" : 1}) - if not o: - raise ValueError("Cannot find original task id") - # Get stress from deformed structure - d["deformation_tasks"] = {} - ss_dict = {} - for k in tasks.find({"original_task_id": i}, - {"deformation_matrix":1, - "calculations.output":1, - "state":1, "task_id":1}): - defo = k['deformation_matrix'] - d_ind = np.nonzero(defo - np.eye(3)) - delta = Decimal((defo - np.eye(3))[d_ind][0]) - # Normal deformation - if d_ind[0] == d_ind[1]: - dtype = "_".join(["d", str(d_ind[0][0]), - "{:.0e}".format(delta)]) - # Shear deformation - else: - dtype = "_".join(["s", str(d_ind[0] + d_ind[1]), - "{:.0e}".format(delta)]) - sm = IndependentStrain(defo) - d["deformation_tasks"][dtype] = {"state" : k["state"], - 
"deformation_matrix" : defo, - "strain" : sm.tolist(), - "task_id": k["task_id"]} - if k["state"] == "successful": - st = Stress(k["calculations"][-1]["output"] \ - ["ionic_steps"][-1]["stress"]) - ss_dict[sm] = st - d["snl"] = o["snl"] - if "run_tags" in o.keys(): - d["run_tags"] = o["run_tags"] - for tag in o["run_tags"]: - if isinstance(tag, dict): - if "input_id" in tag.keys(): - d["input_mp_id"] = tag["input_id"] - d["snl_final"] = o["snl_final"] - d["pretty_formula"] = o["pretty_formula"] + incar = mpvis.get_incar(structure) + poscar = mpvis.get_poscar(structure) + kpoints = mpvis.get_kpoints(structure) + potcar = mpvis.get_potcar(structure) - # Old input mp-id style - if o["snl"]["about"].get("_mp_id"): - d["material_id"] = o["snl"]["about"]["_mp_id"] + spec['vasp'] = {} + spec['vasp']['incar'] = incar.as_dict() + spec['vasp']['poscar'] = poscar.as_dict() + spec['vasp']['kpoints'] = kpoints.as_dict() + spec['vasp']['potcar'] = potcar.as_dict() - # New style - elif "input_mp_id" in d: - d["material_id"] = d["input_mp_id"] - else: - d["material_id"] = None - d["relaxation_task_id"] = i + # Add run tags of pseudopotential + spec['run_tags'] = spec.get('run_tags', [potcar.functional]) + spec['run_tags'].extend(potcar.symbols) - calc_struct = Structure.from_dict(o["snl_final"]) - # TODO: - # JHM: This test is unnecessary at the moment, but should be redone - """ - conventional = is_conventional(calc_struct) - if conventional: - d["analysis"]["is_conventional"] = True - else: - d["analysis"]["is_conventional"] = False - """ - d["spacegroup"]=o.get("spacegroup", "Unknown") - - if ndocs >= 20: - # Perform Elastic tensor fitting and analysis - result = ElasticTensor.from_stress_dict(ss_dict) - d["elastic_tensor"] = result.tolist() - kg_average = result.kg_average - d.update({"K_Voigt":kg_average[0], "G_Voigt":kg_average[1], - "K_Reuss":kg_average[2], "G_Reuss":kg_average[3], - "K_Voigt_Reuss_Hill":kg_average[4], - "G_Voigt_Reuss_Hill":kg_average[5]}) - 
d["universal_anisotropy"] = result.universal_anisotropy - d["homogeneous_poisson"] = result.homogeneous_poisson - if ndocs < 24: - d["warning"].append("less than 24 tasks completed") + # Add run tags of +U + u_tags = ['%s=%s' % t for t in + zip(poscar.site_symbols, incar.get('LDAUU', [0] * len(poscar.site_symbols)))] + spec['run_tags'].extend(u_tags) - # Perform filter checks - symm_t = result.symmetrized - d["symmetrized_tensor"] = symm_t.tolist() - d["analysis"]["not_rare_earth"] = True - for s in calc_struct.species: - if s.is_rare_earth_metal: - d["analysis"]["not_rare_earth"] = False - eigvals = np.linalg.eigvals(symm_t) - eig_positive = np.all((eigvals > 0) & np.isreal(eigvals)) - d["analysis"]["eigval_positive"] = bool(eig_positive) - c11 = symm_t[0][0] - c12 = symm_t[0][1] - c13 = symm_t[0][2] - c23 = symm_t[1][2] - d["analysis"]["c11_c12"]= not (abs((c11-c12)/c11) < 0.05 - or c11 < c12) - d["analysis"]["c11_c13"]= not (abs((c11-c13)/c11) < 0.05 - or c11 < c13) - d["analysis"]["c11_c23"]= not (abs((c11-c23)/c11) < 0.1 - or c11 < c23) - d["analysis"]["K_R"] = not (d["K_Reuss"] < 2) - d["analysis"]["G_R"] = not (d["G_Reuss"] < 2) - d["analysis"]["K_V"] = not (d["K_Voigt"] < 2) - d["analysis"]["G_V"] = not (d["G_Voigt"] < 2) - filter_state = np.all(d["analysis"].values()) - d["analysis"]["filter_pass"] = bool(filter_state) - d["analysis"]["eigval"] = list(eigvals) + # add user run tags + if 'run_tags' in parameters: + spec['run_tags'].extend(parameters['run_tags']) + del spec['parameters']['run_tags'] - # TODO: - # JHM: eventually we can reintroduce the IEEE conversion - # but as of now it's not being used, and it should - # be in pymatgen - """ - # IEEE Conversion - try: - ieee_tensor = IEEE_conversion.get_ieee_tensor(struct_final, result) - d["elastic_tensor_IEEE"] = ieee_tensor[0].tolist() - d["analysis"]["IEEE"] = True - except Exception as e: - d["elastic_tensor_IEEE"] = None - d["analysis"]["IEEE"] = False - d["error"].append("Unable to get IEEE tensor: 
{}".format(e)) - """ - # Add thermal properties - nsites = calc_struct.num_sites - volume = calc_struct.volume - natoms = calc_struct.composition.num_atoms - weight = calc_struct.composition.weight - num_density = 1e30 * nsites / volume - mass_density = 1.6605e3 * nsites * volume * weight / \ - (natoms * volume) - tot_mass = sum([e.atomic_mass for e in calc_struct.species]) - avg_mass = 1.6605e-27 * tot_mass / natoms - y_mod = 9e9 * result.k_vrh * result.g_vrh / \ - (3. * result.k_vrh * result.g_vrh) - trans_v = 1e9 * result.k_vrh / mass_density**0.5 - long_v = 1e9 * result.k_vrh + \ - 4./3. * result.g_vrh / mass_density**0.5 - clarke = 0.87 * 1.3806e-23 * avg_mass**(-2./3.) * \ - mass_density**(1./6.) * y_mod**0.5 - cahill = 1.3806e-23 / 2.48 * num_density**(2./3.) * long_v + \ - 2 * trans_v - snyder_ac = 0.38483 * avg_mass * \ - (long_v + 2./3.*trans_v)**3. / \ - (300. * num_density**(-2./3.) * nsites**(1./3.)) - snyder_opt = 1.66914e-23 * (long_v + 2./3.*trans_v) / \ - num_density**(-2./3.) * \ - (1 - nsites**(-1./3.)) - snyder_total = snyder_ac + snyder_opt - debye = 2.489e-11 * avg_mass**(-1./3.) * \ - mass_density**(-1./6.) 
* y_mod**0.5 + # add exact structure run tag automatically if we have a unique situation + if 'exact_structure' in parameters and parameters['exact_structure'] and snl.structure != snl.structure.get_primitive_structure(): + spec['run_tags'].extend('exact_structure') - d["thermal"]={"num_density" : num_density, - "mass_density" : mass_density, - "avg_mass" : avg_mass, - "num_atom_per_unit_formula" : natoms, - "youngs_modulus" : y_mod, - "trans_velocity" : trans_v, - "long_velocity" : long_v, - "clarke" : clarke, - "cahill" : cahill, - "snyder_acou_300K" : snyder_ac, - "snyder_opt" : snyder_opt, - "snyder_total" : snyder_total, - "debye": debye - } - else: - d['state'] = "Fewer than 20 successful tasks completed" - return FWAction() + spec['_dupefinder'] = DupeFinderVasp().to_dict() + spec['vaspinputset_name'] = mpvis.__class__.__name__ + spec['task_type'] = 'GGA+U optimize structure (2x)' if spec['vasp'][ + 'incar'].get('LDAU', False) else 'GGA optimize structure (2x)' - if o["snl"]["about"].get("_kpoint_density"): - d["kpoint_density"]= o["snl"]["about"].get("_kpoint_density") + return spec - if d["error"]: - raise ValueError("Elastic analysis failed: {}".format(d["error"])) - elif d["analysis"]["filter_pass"]: - d["state"] = "successful" - else: - d["state"] = "filter_failed" - elasticity.update({"relaxation_task_id": d["relaxation_task_id"]}, - d, upsert=True) - return FWAction() diff --git a/mpworks/workflows/snl_to_wf_nmr.py b/mpworks/workflows/snl_to_wf_nmr.py index d6b711f9..193642c7 100644 --- a/mpworks/workflows/snl_to_wf_nmr.py +++ b/mpworks/workflows/snl_to_wf_nmr.py @@ -1,33 +1,38 @@ -from pymatgen.io.vasp import Poscar -from mpworks.firetasks.elastic_tasks import SetupElastConstTask, SetupFConvergenceTask, SetupDeformedStructTask - -__author__ = 'weichen' +from collections import defaultdict -from fireworks.core.firework import Firework, Workflow +from fireworks import Firework from fireworks.utilities.fw_utilities import get_slug -from 
mpworks.firetasks.custodian_task import get_custodian_task -from mpworks.firetasks.snl_tasks import AddSNLTask -from mpworks.firetasks.vasp_io_tasks import VaspCopyTask, VaspWriterTask, \ - VaspToDBTask -from mpworks.firetasks.vasp_setup_tasks import SetupGGAUTask -from mpworks.snl_utils.mpsnl import get_meta_from_structure, MPStructureNL -from mpworks.workflows.wf_settings import QA_DB, QA_VASP, QA_CONTROL from pymatgen import Composition -from mpworks.workflows import snl_to_wf -from mpworks.firetasks.elastic_tasks import update_spec_force_convergence -from collections import defaultdict + +from mpworks.firetasks.snl_tasks import AddSNLTask +from mpworks.snl_utils.mpsnl import MPStructureNL +from mpworks.workflows.wf_settings import QA_DB + +__author__ = 'Xiaohui Qu' +__copyright__ = 'Copyright 2016, The Materials Project' +__version__ = '0.1' +__maintainer__ = 'Xiaohui Qu' +__email__ = 'xhqu1981@gmail.com' +__date__ = 'May 31, 2016' + + +""" +This is modified from Wei Chen's snl_to_wf_elastic. +""" def snl_to_wf_elastic(snl, parameters): # parameters["user_vasp_settings"] specifies user defined incar/kpoints parameters fws = [] connections = defaultdict(list) + cur_fwid = 0 parameters = parameters if parameters else {} snl_priority = parameters.get('priority', 1) priority = snl_priority * 2 # once we start a job, keep going! 
f = Composition(snl.structure.composition.reduced_formula).alphabetical_formula + nick_name = parameters.get("nick_name", f) # add the SNL to the SNL DB and figure out duplicate group tasks = [AddSNLTask()] @@ -37,9 +42,10 @@ def snl_to_wf_elastic(snl, parameters): spec['force_mpsnl'] = snl.as_dict() spec['force_snlgroup_id'] = parameters['snlgroup_id'] del spec['snl'] + addsnl_fwid = cur_fwid + cur_fwid += 1 fws.append(Firework(tasks, spec, - name=get_slug(f + '--' + spec['task_type']), fw_id=0)) - connections[0] = [1] + name=get_slug(nick_name + '--' + spec['task_type']), fw_id=cur_fwid)) parameters["exact_structure"] = True # run GGA structure optimization for force convergence From e3f22d3c4d6e53fc34c41398c6d369c289de22c2 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 31 May 2016 16:08:34 -0700 Subject: [PATCH 009/204] don't write CHGCAR & WAVCAR in NMR calculations --- mpworks/firetasks/triple_jump_relax_set.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 72b4060d..5766e43c 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -6,7 +6,10 @@ STEP1: IBRION: 1 ISIF: 3 ISMEAR: -5 + ISTART: 0 + LCHARG: false LREAL: AUTO + LWAVE: false NELMIN: 5 NSW: 200 POTIM: 0.3 @@ -28,7 +31,10 @@ STEP2: IOPT: 7 ISIF: 3 ISMEAR: -5 + ISTART: 0 + LCHARG: false LREAL: AUTO + LWAVE: false NELMIN: 5 NSW: 200 POTIM: 0 @@ -54,7 +60,10 @@ STEP3: IOPT: 7 ISIF: 3 ISMEAR: -5 + ISTART: 0 + LCHARG: false LREAL: AUTO + LWAVE: false MAXMOVE: 0.05 NELMIN: 5 NSW: 100 From c954df92dce5eb015251ca846e4ab0994a734c28 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 31 May 2016 21:33:22 -0700 Subject: [PATCH 010/204] add NMR tensor set --- mpworks/firetasks/nmr_tasks.py | 4 +-- mpworks/firetasks/nmr_tensor_set.yaml | 50 +++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 2 deletions(-) create mode 100644 
mpworks/firetasks/nmr_tensor_set.yaml diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 0417dd0a..be026638 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -50,12 +50,12 @@ def snl_to_nmr_spec(snl, istep_triple_jump, parameters=None): config_name = "Triple Jump Relax S1" elif istep_triple_jump == -1: # NMR Chemical Shit calculations - config_file = os.path.join(module_dir, "nmr_set.yaml") + config_file = os.path.join(module_dir, "nmr_tensor_set.yaml") config_key = "CS" config_name = "NMR CS" elif istep_triple_jump == -2: # NMR Chemical Shit calculations - config_file = os.path.join(module_dir, "nmr_set.yaml") + config_file = os.path.join(module_dir, "nmr_tensor_set.yaml") config_key = "EFG" config_name = "NMR EFG" else: diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml new file mode 100644 index 00000000..6bf4928f --- /dev/null +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -0,0 +1,50 @@ +CS: + INCAR: + DQ: 0.001 + EDIFF: -1.0e-10 + ENCUT_ENHANCE_RATIO: 0.75 + ICHIBARE: 1 + ISMEAR: -5 + ISTART: 0 + LCHARG: false + LCHIMAG: true + LNMR_SYM_RED: true + LREAL: AUTO + LWAVE: false + NELMIN: 5 + NSLPLINE: true + PREC: ACCURATE + SIGMA: 0 + KPOINTS: + grid_density: 6000 + POTCAR: + C: C_h + H: H_h + Mg: Mg_sv + O: O_h +EFG: + INCAR: + EDIFF: -1.0e-06 + ENCUT_ENHANCE_RATIO: 0.2 + ISMEAR: -5 + ISTART: 0 + LCHARG: false + LEFG: true + LREAL: AUTO + LWAVE: false + NELMIN: 5 + PREC: ACCURATE + QUAD_EFG: + Al: + Al-27: 146.6 + Ca: + Ca-41: -66.5 + Ca-43: -40.8 + SIGMA: 0 + KPOINTS: + grid_density: 3000 + POTCAR: + C: C + H: H + Mg: Mg_sv + O: O From b6c61550bdbe45ecee6abfd3c013da73b8ac31b9 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 31 May 2016 22:08:09 -0700 Subject: [PATCH 011/204] add nuclear quadrupole moments for more elements --- mpworks/firetasks/nmr_tensor_set.yaml | 78 ++++++++++++++++++++++++++- 1 file changed, 77 insertions(+), 1 deletion(-) diff 
--git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index 6bf4928f..ae873fed 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -34,12 +34,88 @@ EFG: LWAVE: false NELMIN: 5 PREC: ACCURATE - QUAD_EFG: + QUAD_EFG_MAP: + H: + H-2: 2.860 + Li: + Li-6: -0.808 + Li-7: -40.1 + Be: + Be-9: 52.88 + B: + B-10: 84.59 + B-11: 40.59 + C: + C-11: 33.27 + N: + N-14: 20.44 + O: + O-17: -25.58 + F: + F-19: -94.2 + Ne: + Ne-21: 101.55 + Na: + Na-23: 104.1 + Mg: + Mg-25: 199.4 Al: Al-27: 146.6 + S: + S-33: -67.8 + S-35: 47.1 + Cl: + Cl-35: -81.65 + Cl-37: -64.35 + K: + K-39: 58.5 + K-40: -73.0 + K-41: 71.1 Ca: Ca-41: -66.5 Ca-43: -40.8 + Sc: + Sc-45: -220.2 + Ti: + Ti-47: 302.10 + Ti-49: 247.11 + V: + V-50: 210.40 + V-51: -52.10 + Cr: + Cr-53: -150.50 + Mn: + Mn-55: 330.10 + Fe: + Fe-57: 160.0 + Co: + Co-59: 420.30 + Ni: + Ni-61: 162.15 + Cu: + Cu-63: -220.15 + Cu-65: -204.14 + Zn: + Zn-67: 150.15 + Sr: + Sr-87: 305.2 + In: + In-113: 759.8 + In-115: 770.8 + Sn: + Sn-119: -132.1 + Sb: + Sb-121: -543.11 + Sb-123: -692.14 + I: + I-127: -696.12 + I-129: -604.10 + La: + La-139: 200.6 + Hg: + Hg-201: 387.6 + Ra: + Ra-223: 1210.3 SIGMA: 0 KPOINTS: grid_density: 3000 From bb05f86176b6eae5557f5d3f3c06541296ec6c7c Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 31 May 2016 22:50:15 -0700 Subject: [PATCH 012/204] support EFG --- mpworks/firetasks/nmr_tasks.py | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index be026638..1c4cbd6b 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -17,12 +17,31 @@ This is modified from Wei Chen & Joseph Montoya's elastic_tasks. 
""" -def _config_dict_to_input_set(config_dict, config_name, structure, incar_enforce): +def _get_nuclear_quadrupole_moment(element, nqm_dict, parameters): + if element not in nqm_dict: + return 0.0 + d = nqm_dict[element] + if len(d) > 1: + prefered_isotopes = set(parameters.get("isotopes", [])) + pi = prefered_isotopes & set(list(d.keys())) + if len(pi) == 1: + return d[pi[0]] + if len(pi) >= 1: + raise ValueError("Multiple isotope is requested \"{}\", " + "please request only one for each elements".format(list(pi))) + isotopes = list(d.keys()) + isotopes.sort(key=lambda x: int(x.split("-")[1]), reverse=False) + return d[isotopes[0]] + else: + return d.values()[0] + +def _config_dict_to_input_set(config_dict, config_name, structure, incar_enforce, parameters): trial_set = DictSet(structure, name=config_name, config_dict=config_dict, user_incar_settings=incar_enforce) trial_potcar = trial_set.potcar all_enmax = [sp.enmax for sp in trial_potcar] all_eaug = [sp.eaug for sp in trial_potcar] + all_elements = [sp.element for sp in trial_potcar] num_species = len(all_enmax) processed_config_dict = copy.deepcopy(config_dict) for k1, pot_values in [("ENCUT", all_enmax), ("ENAUG", all_eaug)]: @@ -35,8 +54,13 @@ def _config_dict_to_input_set(config_dict, config_name, structure, incar_enforce processed_config_dict["INCAR"].pop("ROPT_PER_ATOM") processed_config_dict["INCAR"]["ROPT"] = \ [config_dict["INCAR"]["ROPT_PER_ATOM"]] * num_species + if "QUAD_EFG_MAP" in config_dict["INCAR"]: + nqm_map = processed_config_dict["INCAR"].pop("QUAD_EFG_MAP") + quad_efg = [_get_nuclear_quadrupole_moment(el, nqm_map, parameters) for el in all_elements] + processed_config_dict["INCAR"]["QUAD_EFG"] = quad_efg vis = DictSet(structure, name=config_name, config_dict=processed_config_dict, user_incar_settings=incar_enforce) + print(vis.incar) return vis def snl_to_nmr_spec(snl, istep_triple_jump, parameters=None): @@ -70,7 +94,7 @@ def snl_to_nmr_spec(snl, istep_triple_jump, parameters=None): 
else: structure = snl.structure.get_primitive_structure() mpvis = _config_dict_to_input_set(config_dict, config_name, structure, - incar_enforce) + incar_enforce, parameters=parameters) exit() From 851f14995dba22992be620917fd6dd4f786f1906 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 31 May 2016 22:52:23 -0700 Subject: [PATCH 013/204] fix return type bug --- mpworks/firetasks/nmr_tasks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 1c4cbd6b..ecfd43c0 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -33,7 +33,7 @@ def _get_nuclear_quadrupole_moment(element, nqm_dict, parameters): isotopes.sort(key=lambda x: int(x.split("-")[1]), reverse=False) return d[isotopes[0]] else: - return d.values()[0] + return list(d.values())[0] def _config_dict_to_input_set(config_dict, config_name, structure, incar_enforce, parameters): trial_set = DictSet(structure, name=config_name, config_dict=config_dict, @@ -60,6 +60,7 @@ def _config_dict_to_input_set(config_dict, config_name, structure, incar_enforce processed_config_dict["INCAR"]["QUAD_EFG"] = quad_efg vis = DictSet(structure, name=config_name, config_dict=processed_config_dict, user_incar_settings=incar_enforce) + print(all_elements) print(vis.incar) return vis From 6493653382daee8243a88e660259be13cb37eeae Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 31 May 2016 22:57:05 -0700 Subject: [PATCH 014/204] correct type should be list rather than set --- mpworks/firetasks/nmr_tasks.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index ecfd43c0..8dc80716 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -25,7 +25,7 @@ def _get_nuclear_quadrupole_moment(element, nqm_dict, parameters): prefered_isotopes = set(parameters.get("isotopes", [])) pi = prefered_isotopes & 
set(list(d.keys())) if len(pi) == 1: - return d[pi[0]] + return d[list(pi)[0]] if len(pi) >= 1: raise ValueError("Multiple isotope is requested \"{}\", " "please request only one for each elements".format(list(pi))) @@ -60,8 +60,6 @@ def _config_dict_to_input_set(config_dict, config_name, structure, incar_enforce processed_config_dict["INCAR"]["QUAD_EFG"] = quad_efg vis = DictSet(structure, name=config_name, config_dict=processed_config_dict, user_incar_settings=incar_enforce) - print(all_elements) - print(vis.incar) return vis def snl_to_nmr_spec(snl, istep_triple_jump, parameters=None): From b6c12f24b9ad3e8dee5300d4dc739f76f51071ba Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 31 May 2016 23:06:18 -0700 Subject: [PATCH 015/204] finalize snl_to_nmr_spec() --- mpworks/firetasks/nmr_tasks.py | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 8dc80716..1560735b 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -92,21 +92,13 @@ def snl_to_nmr_spec(snl, istep_triple_jump, parameters=None): structure = snl.structure else: structure = snl.structure.get_primitive_structure() + mpvis = _config_dict_to_input_set(config_dict, config_name, structure, incar_enforce, parameters=parameters) - exit() - - - - - # mpvis = MPGGAVaspInputSet(user_incar_settings=incar_enforce) if enforce_gga else MPVaspInputSet(user_incar_settings=incar_enforce) - - mpvis = None - - incar = mpvis.get_incar(structure) - poscar = mpvis.get_poscar(structure) - kpoints = mpvis.get_kpoints(structure) - potcar = mpvis.get_potcar(structure) + incar = mpvis.incar + poscar = mpvis.poscar + kpoints = mpvis.kpoints + potcar = mpvis.potcar spec['vasp'] = {} spec['vasp']['incar'] = incar.as_dict() @@ -133,9 +125,8 @@ def snl_to_nmr_spec(snl, istep_triple_jump, parameters=None): spec['run_tags'].extend('exact_structure') spec['_dupefinder'] = 
DupeFinderVasp().to_dict() - spec['vaspinputset_name'] = mpvis.__class__.__name__ - spec['task_type'] = 'GGA+U optimize structure (2x)' if spec['vasp'][ - 'incar'].get('LDAU', False) else 'GGA optimize structure (2x)' + spec['vaspinputset_name'] = mpvis.name + spec['task_type'] = mpvis.name return spec From bc04f0231d56182516c5c9962b6e53f437709c6e Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 1 Jun 2016 13:26:17 -0700 Subject: [PATCH 016/204] add options to remove velocities from CONTCAR --- mpworks/firetasks/vasp_io_tasks.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/vasp_io_tasks.py b/mpworks/firetasks/vasp_io_tasks.py index 82b146e8..9dba9373 100644 --- a/mpworks/firetasks/vasp_io_tasks.py +++ b/mpworks/firetasks/vasp_io_tasks.py @@ -72,6 +72,8 @@ def __init__(self, parameters=None): self.files = parameters.get('files', default_files) # files to move self.use_contcar = parameters.get('use_CONTCAR', True) # whether to move CONTCAR to POSCAR + self.keep_velocities = parameters.get('keep_velocities', True) # whether to keep the + # velocities in POSCAR/CONTCAR, if set to False, velocities will be removed. 
if self.use_contcar: self.files.append('CONTCAR') @@ -103,7 +105,10 @@ def run_task(self, fw_spec): f.close() os.remove(dest_file) - + if self.use_contcar and not self.keep_velocities: + shutil.move("POSCAR", "POSCAR.orig.velocity") + poscar = Poscar.from_file("POSCAR.orig.velocity", read_velocities=False) + poscar.write_file(filename="POSCAR") return FWAction(stored_data={'copied_files': self.files}) From e6b42229fb1cd85184ccc0914106b796bb48e684 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 1 Jun 2016 13:27:02 -0700 Subject: [PATCH 017/204] use Python3 style print --- mpworks/firetasks/vasp_io_tasks.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mpworks/firetasks/vasp_io_tasks.py b/mpworks/firetasks/vasp_io_tasks.py index 9dba9373..667426c6 100644 --- a/mpworks/firetasks/vasp_io_tasks.py +++ b/mpworks/firetasks/vasp_io_tasks.py @@ -91,9 +91,9 @@ def run_task(self, fw_spec): if prev_filename.endswith('.gz'): dest_file += '.gz' - print 'COPYING', prev_filename, dest_file + print('COPYING', prev_filename, dest_file) if self.missing_CHGCAR_OK and 'CHGCAR' in dest_file and not os.path.exists(zpath(prev_filename)): - print 'Skipping missing CHGCAR' + print('Skipping missing CHGCAR') else: shutil.copy2(prev_filename, dest_file) if '.gz' in dest_file: @@ -178,7 +178,7 @@ def run_task(self, fw_spec): snlgroup_id = d['snlgroup_id_final'] if 'snlgroup_id_final' in d else d['snlgroup_id'] update_spec.update({'mpsnl': mpsnl, 'snlgroup_id': snlgroup_id}) - print 'ENTERED task id:', t_id + print('ENTERED task id:', t_id) stored_data = {'task_id': t_id} if d['state'] == 'successful': update_spec['analysis'] = d['analysis'] @@ -194,7 +194,7 @@ def run_task(self, fw_spec): output_dir = last_relax(os.path.join(prev_dir, 'vasprun.xml')) ueh = UnconvergedErrorHandler(output_filename=output_dir) if ueh.check() and unconverged_tag not in fw_spec['run_tags']: - print 'Unconverged run! Creating dynamic FW...' + print('Unconverged run! 
Creating dynamic FW...') spec = {'prev_vasp_dir': prev_dir, 'prev_task_type': fw_spec['task_type'], From 5420ba77961d73e8f0000236daaa4e4701ff4f64 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 1 Jun 2016 14:19:34 -0700 Subject: [PATCH 018/204] add triple jump relaxation to workflows --- mpworks/firetasks/nmr_tasks.py | 5 ++- mpworks/workflows/snl_to_wf_nmr.py | 64 ++++++++++++++++++++---------- 2 files changed, 47 insertions(+), 22 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 1560735b..60ab2cf1 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -87,7 +87,10 @@ def snl_to_nmr_spec(snl, istep_triple_jump, parameters=None): parent_config_dict = yaml.load(stream=f) config_dict = parent_config_dict[config_key] - incar_enforce = {'NPAR': 2} + if "NMR" in config_name: + incar_enforce = {'NPAR': 4} + else: + incar_enforce = {'KPAR': 4} if 'exact_structure' in parameters and parameters['exact_structure']: structure = snl.structure else: diff --git a/mpworks/workflows/snl_to_wf_nmr.py b/mpworks/workflows/snl_to_wf_nmr.py index 193642c7..824e4c05 100644 --- a/mpworks/workflows/snl_to_wf_nmr.py +++ b/mpworks/workflows/snl_to_wf_nmr.py @@ -1,12 +1,16 @@ from collections import defaultdict from fireworks import Firework +from fireworks.core.firework import Tracker from fireworks.utilities.fw_utilities import get_slug from pymatgen import Composition +from mpworks.firetasks.custodian_task import get_custodian_task +from mpworks.firetasks.nmr_tasks import snl_to_nmr_spec from mpworks.firetasks.snl_tasks import AddSNLTask +from mpworks.firetasks.vasp_io_tasks import VaspWriterTask, VaspCopyTask, VaspToDBTask from mpworks.snl_utils.mpsnl import MPStructureNL -from mpworks.workflows.wf_settings import QA_DB +from mpworks.workflows.wf_settings import QA_DB, QA_VASP __author__ = 'Xiaohui Qu' __copyright__ = 'Copyright 2016, The Materials Project' @@ -48,26 +52,44 @@ def snl_to_wf_elastic(snl, 
parameters): name=get_slug(nick_name + '--' + spec['task_type']), fw_id=cur_fwid)) parameters["exact_structure"] = True - # run GGA structure optimization for force convergence - spec = snl_to_wf._snl_to_spec(snl, parameters=parameters) - user_vasp_settings = parameters.get("user_vasp_settings") - spec = update_spec_force_convergence(spec, user_vasp_settings) - spec['run_tags'].append("origin") - spec['_priority'] = priority - spec['_queueadapter'] = QA_VASP - del spec['_dupefinder'] - spec['task_type'] = "Vasp force convergence optimize structure (2x)" - tasks = [VaspWriterTask(), get_custodian_task(spec)] - fws.append(Firework(tasks, spec, - name=get_slug(f + '--' + spec['task_type']), fw_id=1)) - - # insert into DB - GGA structure optimization - spec = {'task_type': 'VASP db insertion', '_priority': priority, - '_allow_fizzled_parents': True, '_queueadapter': QA_DB, - 'clean_task_doc':True, 'elastic_constant':"force_convergence"} - fws.append(Firework([VaspToDBTask()], spec, - name=get_slug(f + '--' + spec['task_type']), fw_id=2)) - connections[1] = [2] + # run Triple Jump Structure Relaxation to Converge to a Very Small Force + geom_fwid = None + db_fwid = None + for istep in [1, 2, 3]: + spec = snl_to_nmr_spec(snl, istep, parameters) + trackers = [Tracker('FW_job.out'), Tracker('FW_job.error'), Tracker('vasp.out'), Tracker('OUTCAR'), + Tracker('OSZICAR'), Tracker('OUTCAR.relax1'), Tracker('OUTCAR.relax2')] + trackers_db = [Tracker('FW_job.out'), Tracker('FW_job.error')] + # run GGA structure optimization + spec['_priority'] = priority + spec['_queueadapter'] = QA_VASP + spec['_trackers'] = trackers + tasks = [VaspWriterTask()] + if istep >= 2: + parameters["use_CONTCAR"] = True + parameters["files"] = "CONTCAR" + parameters["keep_velocities"] = False + tasks.append(VaspCopyTask(parameters=parameters)) + tasks.append(get_custodian_task(spec)) + geom_fwid = cur_fwid + cur_fwid += 1 + fws.append(Firework(tasks, spec, name=get_slug(f + '--' + spec['task_type']), + 
fw_id=geom_fwid)) + geom_task_type = spec['task_type'] + + # insert into DB - GGA structure optimization + spec = {'task_type': 'VASP db insertion', '_priority': priority * 2, + '_allow_fizzled_parents': True, '_queueadapter': QA_DB, "_dupefinder": DupeFinderDB().to_dict(), + '_trackers': trackers_db} + db_fwid = cur_fwid + cur_fwid += 1 + fws.append( + Firework([VaspToDBTask()], spec, name=get_slug(f + '--' + spec['task_type'] + + '--' + geom_task_type), + fw_id=db_fwid)) + connections[geom_fwid] = [db_fwid] + if istep == 1: + connections[addsnl_fwid] = [geom_fwid] spec = {'task_type': 'Setup Deformed Struct Task', '_priority': priority, '_queueadapter': QA_CONTROL} From 82d6b56cbe78a0d28318d9e16f9a919eddb713d3 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 1 Jun 2016 15:21:54 -0700 Subject: [PATCH 019/204] add class NmrVaspToDBTask --- mpworks/firetasks/nmr_tasks.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 60ab2cf1..55acdb45 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -1,9 +1,12 @@ import copy import os import yaml +from pymatgen.io.vasp import Outcar, zpath from pymatgen.io.vasp.sets import DictSet from mpworks.dupefinders.dupefinder_vasp import DupeFinderVasp +from mpworks.firetasks.vasp_io_tasks import VaspToDBTask +from mpworks.workflows.wf_utils import get_loc __author__ = 'Xiaohui Qu' __copyright__ = 'Copyright 2016, The Materials Project' @@ -133,3 +136,27 @@ def snl_to_nmr_spec(snl, istep_triple_jump, parameters=None): return spec + +class NmrVaspToDBTask(VaspToDBTask): + _fw_name = "NMR Tensor to Database Task" + + def __init__(self, parameters=None): + super(NmrVaspToDBTask, self).__init__(parameters) + + def run_task(self, fw_spec): + prev_dir = get_loc(fw_spec['prev_vasp_dir']) + outcar = Outcar(zpath(os.path.join(prev_dir, "vasprun.xml"))) + prev_task_type = fw_spec['prev_task_type'] + nmr_fields = 
dict() + if prev_task_type == "NMR CS": + cs = outcar.read_chemical_shifts() + cs_fiels = {"chemical_shifts": [x.as_dict() for x in cs]} + nmr_fields.update(cs_fiels) + elif prev_task_type == "NMR EFG": + efg = outcar.read_nmr_efg() + efg_fields = {"efg": efg} + nmr_fields.update(efg_fields) + else: + raise ValueError("Unsupported Task Type: \"{}\"".format(prev_task_type)) + self.additional_fields.update(nmr_fields) + super(NmrVaspToDBTask, self).run_task(fw_spec) From 5413b2a117270a39791029f2986003fc8ad607dd Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 1 Jun 2016 22:13:23 -0700 Subject: [PATCH 020/204] refactor FW creation to functions --- mpworks/workflows/snl_to_wf_nmr.py | 123 +++++++++++++++++++---------- 1 file changed, 80 insertions(+), 43 deletions(-) diff --git a/mpworks/workflows/snl_to_wf_nmr.py b/mpworks/workflows/snl_to_wf_nmr.py index 824e4c05..885691a2 100644 --- a/mpworks/workflows/snl_to_wf_nmr.py +++ b/mpworks/workflows/snl_to_wf_nmr.py @@ -5,6 +5,7 @@ from fireworks.utilities.fw_utilities import get_slug from pymatgen import Composition +from mpworks.dupefinders.dupefinder_vasp import DupeFinderDB from mpworks.firetasks.custodian_task import get_custodian_task from mpworks.firetasks.nmr_tasks import snl_to_nmr_spec from mpworks.firetasks.snl_tasks import AddSNLTask @@ -24,6 +25,33 @@ This is modified from Wei Chen's snl_to_wf_elastic. 
""" +def get_nmr_vasp_fw(fwid, copy_contcar, istep, nick_name, parameters, priority, snl): + spec = snl_to_nmr_spec(snl, istep, parameters) + trackers = [Tracker('FW_job.out'), Tracker('FW_job.error'), Tracker('vasp.out'), Tracker('OUTCAR'), + Tracker('OSZICAR'), Tracker('OUTCAR.relax1'), Tracker('OUTCAR.relax2')] + spec['_priority'] = priority + spec['_queueadapter'] = QA_VASP + spec['_trackers'] = trackers + tasks = [VaspWriterTask()] + if copy_contcar: + parameters["use_CONTCAR"] = True + parameters["files"] = "CONTCAR" + parameters["keep_velocities"] = False + tasks.append(VaspCopyTask(parameters=parameters)) + tasks.append(get_custodian_task(spec)) + vasp_fw = Firework(tasks, spec, name=get_slug(nick_name + '--' + spec['task_type']), + fw_id=fwid) + return vasp_fw + +def get_nmr_db_fw(nick_name, fwid, prev_task_type, priority, task_class): + trackers_db = [Tracker('FW_job.out'), Tracker('FW_job.error')] + spec = {'task_type': 'VASP db insertion', '_priority': priority * 2, + '_allow_fizzled_parents': True, '_queueadapter': QA_DB, "_dupefinder": DupeFinderDB().to_dict(), + '_trackers': trackers_db} + db_fw = Firework([task_class()], spec, name=get_slug(nick_name + '--' + spec['task_type'] + + '--' + prev_task_type), + fw_id=fwid) + return db_fw def snl_to_wf_elastic(snl, parameters): # parameters["user_vasp_settings"] specifies user defined incar/kpoints parameters @@ -53,55 +81,64 @@ def snl_to_wf_elastic(snl, parameters): parameters["exact_structure"] = True # run Triple Jump Structure Relaxation to Converge to a Very Small Force - geom_fwid = None - db_fwid = None + geom_calc_fwid = None + geom_db_fwid = None for istep in [1, 2, 3]: - spec = snl_to_nmr_spec(snl, istep, parameters) - trackers = [Tracker('FW_job.out'), Tracker('FW_job.error'), Tracker('vasp.out'), Tracker('OUTCAR'), - Tracker('OSZICAR'), Tracker('OUTCAR.relax1'), Tracker('OUTCAR.relax2')] - trackers_db = [Tracker('FW_job.out'), Tracker('FW_job.error')] - # run GGA structure optimization - 
spec['_priority'] = priority - spec['_queueadapter'] = QA_VASP - spec['_trackers'] = trackers - tasks = [VaspWriterTask()] - if istep >= 2: - parameters["use_CONTCAR"] = True - parameters["files"] = "CONTCAR" - parameters["keep_velocities"] = False - tasks.append(VaspCopyTask(parameters=parameters)) - tasks.append(get_custodian_task(spec)) - geom_fwid = cur_fwid + # Geometry Optimization + copy_contcar = istep >= 2 + geom_calc_fwid = cur_fwid cur_fwid += 1 - fws.append(Firework(tasks, spec, name=get_slug(f + '--' + spec['task_type']), - fw_id=geom_fwid)) - geom_task_type = spec['task_type'] - - # insert into DB - GGA structure optimization - spec = {'task_type': 'VASP db insertion', '_priority': priority * 2, - '_allow_fizzled_parents': True, '_queueadapter': QA_DB, "_dupefinder": DupeFinderDB().to_dict(), - '_trackers': trackers_db} - db_fwid = cur_fwid - cur_fwid += 1 - fws.append( - Firework([VaspToDBTask()], spec, name=get_slug(f + '--' + spec['task_type'] - + '--' + geom_task_type), - fw_id=db_fwid)) - connections[geom_fwid] = [db_fwid] + vasp_fw = get_nmr_vasp_fw(geom_calc_fwid, copy_contcar, istep, nick_name, parameters, priority, snl) + fws.append(vasp_fw) + geom_task_type = vasp_fw.spec['task_type'] if istep == 1: - connections[addsnl_fwid] = [geom_fwid] + connections[addsnl_fwid] = [geom_calc_fwid] + else: + prev_db_fwid = geom_db_fwid + connections[prev_db_fwid] = [geom_calc_fwid] + + # insert into DB + task_class = VaspToDBTask + prev_task_type = geom_task_type + + geom_db_fwid = cur_fwid + cur_fwid += 1 + db_fw = get_nmr_db_fw(nick_name, geom_db_fwid, prev_task_type, priority, task_class) + fws.append(db_fw) + connections[geom_calc_fwid] = [geom_db_fwid] - spec = {'task_type': 'Setup Deformed Struct Task', '_priority': priority, - '_queueadapter': QA_CONTROL} - fws.append(Firework([SetupDeformedStructTask()], spec, - name=get_slug(f + '--' + spec['task_type']),fw_id=3)) - connections[2] = [3] - wf_meta = get_meta_from_structure(snl.structure) - 
wf_meta['run_version'] = 'May 2013 (1)' + # Calculate NMR Tensors + for istep in [-1, -2]: + # -1: Chemical Shift, -2: EFG + # Geometry Optimization + copy_contcar = istep >= 2 + geom_calc_fwid = cur_fwid + cur_fwid += 1 + vasp_fw = get_nmr_vasp_fw(geom_calc_fwid, copy_contcar, istep, nick_name, parameters, priority, snl) + fws.append(vasp_fw) + geom_task_type = vasp_fw.spec['task_type'] + if istep == 1: + connections[addsnl_fwid] = [geom_calc_fwid] + else: + prev_db_fwid = geom_db_fwid + connections[prev_db_fwid] = [geom_calc_fwid] + + # insert into DB + task_class = VaspToDBTask + prev_task_type = geom_task_type - if '_materialsproject' in snl.data and 'submission_id' in snl.data['_materialsproject']: - wf_meta['submission_id'] = snl.data['_materialsproject']['submission_id'] + geom_db_fwid = cur_fwid + cur_fwid += 1 + db_fw = get_nmr_db_fw(nick_name, geom_db_fwid, prev_task_type, priority, task_class) + fws.append(db_fw) + connections[geom_calc_fwid] = [geom_db_fwid] return Workflow(fws, connections, name=Composition( snl.structure.composition.reduced_formula).alphabetical_formula, metadata=wf_meta) + + + + + + From 2e4e8daacacfa03e6d7172cbcba818471fe3bd85 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 1 Jun 2016 22:31:54 -0700 Subject: [PATCH 021/204] add NMR tensor calculation to workflow --- mpworks/workflows/snl_to_wf_nmr.py | 47 +++++++++++++++--------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/mpworks/workflows/snl_to_wf_nmr.py b/mpworks/workflows/snl_to_wf_nmr.py index 885691a2..c5473ff6 100644 --- a/mpworks/workflows/snl_to_wf_nmr.py +++ b/mpworks/workflows/snl_to_wf_nmr.py @@ -1,16 +1,17 @@ +import copy from collections import defaultdict from fireworks import Firework -from fireworks.core.firework import Tracker +from fireworks.core.firework import Tracker, Workflow from fireworks.utilities.fw_utilities import get_slug from pymatgen import Composition from mpworks.dupefinders.dupefinder_vasp import DupeFinderDB from 
mpworks.firetasks.custodian_task import get_custodian_task -from mpworks.firetasks.nmr_tasks import snl_to_nmr_spec +from mpworks.firetasks.nmr_tasks import snl_to_nmr_spec, NmrVaspToDBTask from mpworks.firetasks.snl_tasks import AddSNLTask from mpworks.firetasks.vasp_io_tasks import VaspWriterTask, VaspCopyTask, VaspToDBTask -from mpworks.snl_utils.mpsnl import MPStructureNL +from mpworks.snl_utils.mpsnl import MPStructureNL, get_meta_from_structure from mpworks.workflows.wf_settings import QA_DB, QA_VASP __author__ = 'Xiaohui Qu' @@ -88,7 +89,8 @@ def snl_to_wf_elastic(snl, parameters): copy_contcar = istep >= 2 geom_calc_fwid = cur_fwid cur_fwid += 1 - vasp_fw = get_nmr_vasp_fw(geom_calc_fwid, copy_contcar, istep, nick_name, parameters, priority, snl) + vasp_fw = get_nmr_vasp_fw(geom_calc_fwid, copy_contcar, istep, nick_name, + copy.deepcopy(parameters), priority, copy.deepcopy(snl)) fws.append(vasp_fw) geom_task_type = vasp_fw.spec['task_type'] if istep == 1: @@ -98,9 +100,8 @@ def snl_to_wf_elastic(snl, parameters): connections[prev_db_fwid] = [geom_calc_fwid] # insert into DB - task_class = VaspToDBTask + task_class = VaspToDBTask prev_task_type = geom_task_type - geom_db_fwid = cur_fwid cur_fwid += 1 db_fw = get_nmr_db_fw(nick_name, geom_db_fwid, prev_task_type, priority, task_class) @@ -112,30 +113,30 @@ def snl_to_wf_elastic(snl, parameters): for istep in [-1, -2]: # -1: Chemical Shift, -2: EFG # Geometry Optimization - copy_contcar = istep >= 2 - geom_calc_fwid = cur_fwid + nmr_calc_fwid = cur_fwid cur_fwid += 1 - vasp_fw = get_nmr_vasp_fw(geom_calc_fwid, copy_contcar, istep, nick_name, parameters, priority, snl) + vasp_fw = get_nmr_vasp_fw(nmr_calc_fwid, True, istep, nick_name, copy.deepcopy(parameters), + priority, copy.deepcopy(snl)) fws.append(vasp_fw) - geom_task_type = vasp_fw.spec['task_type'] - if istep == 1: - connections[addsnl_fwid] = [geom_calc_fwid] - else: - prev_db_fwid = geom_db_fwid - connections[prev_db_fwid] = [geom_calc_fwid] + 
nmr_task_type = vasp_fw.spec['task_type'] + connections[geom_calc_fwid] = [nmr_calc_fwid] # insert into DB - task_class = VaspToDBTask - prev_task_type = geom_task_type - - geom_db_fwid = cur_fwid + task_class = NmrVaspToDBTask + prev_task_type = nmr_task_type + nmr_db_fwid = cur_fwid cur_fwid += 1 - db_fw = get_nmr_db_fw(nick_name, geom_db_fwid, prev_task_type, priority, task_class) + db_fw = get_nmr_db_fw(nick_name, nmr_db_fwid, prev_task_type, priority, task_class) fws.append(db_fw) - connections[geom_calc_fwid] = [geom_db_fwid] + connections[nmr_calc_fwid] = [nmr_db_fwid] + + wf_meta = get_meta_from_structure(snl.structure) + wf_meta['run_version'] = 'June 2016 (1)' + + if '_materialsproject' in snl.data and 'submission_id' in snl.data['_materialsproject']: + wf_meta['submission_id'] = snl.data['_materialsproject']['submission_id'] - return Workflow(fws, connections, name=Composition( - snl.structure.composition.reduced_formula).alphabetical_formula, metadata=wf_meta) + return Workflow(fws, connections, name=nick_name, metadata=wf_meta) From a37ae90c894657cea1efccd461b755f47c7524a9 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 1 Jun 2016 22:34:28 -0700 Subject: [PATCH 022/204] add NMR workflow to submission framework --- mpworks/processors/process_submissions.py | 5 ++++- mpworks/workflows/snl_to_wf_nmr.py | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/mpworks/processors/process_submissions.py b/mpworks/processors/process_submissions.py index c7ada0b8..ed2d051f 100644 --- a/mpworks/processors/process_submissions.py +++ b/mpworks/processors/process_submissions.py @@ -5,6 +5,7 @@ from mpworks.submission.submission_mongo import SubmissionMongoAdapter from mpworks.workflows.snl_to_wf import snl_to_wf from mpworks.workflows.snl_to_wf_elastic import snl_to_wf_elastic +from mpworks.workflows.snl_to_wf_nmr import snl_to_wf_nmr from mpworks.workflows.wf_utils import NO_POTCARS from pymatgen.matproj.snl import StructureNL @@ -77,7 +78,9 @@ 
def submit_new_workflow(self): # create a workflow if "Elasticity" in snl.projects: - wf=snl_to_wf_elastic(snl, job['parameters']) + wf = snl_to_wf_elastic(snl, job['parameters']) + elif "NMR" in snl.projects: + wf = snl_to_wf_nmr(snl, job['parameters']) else: wf = snl_to_wf(snl, job['parameters']) self.launchpad.add_wf(wf) diff --git a/mpworks/workflows/snl_to_wf_nmr.py b/mpworks/workflows/snl_to_wf_nmr.py index c5473ff6..60a80efc 100644 --- a/mpworks/workflows/snl_to_wf_nmr.py +++ b/mpworks/workflows/snl_to_wf_nmr.py @@ -54,7 +54,7 @@ def get_nmr_db_fw(nick_name, fwid, prev_task_type, priority, task_class): fw_id=fwid) return db_fw -def snl_to_wf_elastic(snl, parameters): +def snl_to_wf_nmr(snl, parameters): # parameters["user_vasp_settings"] specifies user defined incar/kpoints parameters fws = [] connections = defaultdict(list) From 26631a9eb213cb2ad2e3e9b650380d283220f0ab Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 1 Jun 2016 22:36:36 -0700 Subject: [PATCH 023/204] use Python3 style print in process_submissions --- mpworks/processors/process_submissions.py | 24 +++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/mpworks/processors/process_submissions.py b/mpworks/processors/process_submissions.py index ed2d051f..b000f484 100644 --- a/mpworks/processors/process_submissions.py +++ b/mpworks/processors/process_submissions.py @@ -31,11 +31,11 @@ def run(self, sleep_time=None, infinite=False): sleep_time = sleep_time if sleep_time else 30 while True: self.submit_all_new_workflows() - print "Updating existing workflows..." 
+ print("Updating existing workflows...") self.update_existing_workflows() # for updating the display if not infinite: break - print 'sleeping', sleep_time + print('sleeping', sleep_time) time.sleep(sleep_time) def submit_all_new_workflows(self): @@ -55,23 +55,23 @@ def submit_new_workflow(self): snl = StructureNL.from_dict(job) if len(snl.structure.sites) > SubmissionProcessor.MAX_SITES: self.sma.update_state(submission_id, 'REJECTED', 'too many sites', {}) - print 'REJECTED WORKFLOW FOR {} - too many sites ({})'.format( - snl.structure.formula, len(snl.structure.sites)) + print('REJECTED WORKFLOW FOR {} - too many sites ({})'.format( + snl.structure.formula, len(snl.structure.sites))) elif not job['is_valid']: self.sma.update_state(submission_id, 'REJECTED', 'invalid structure (atoms too close)', {}) - print 'REJECTED WORKFLOW FOR {} - invalid structure'.format( - snl.structure.formula) + print('REJECTED WORKFLOW FOR {} - invalid structure'.format( + snl.structure.formula)) elif len(set(NO_POTCARS) & set(job['elements'])) > 0: self.sma.update_state(submission_id, 'REJECTED', 'invalid structure (no POTCAR)', {}) - print 'REJECTED WORKFLOW FOR {} - invalid element (No POTCAR)'.format( - snl.structure.formula) + print('REJECTED WORKFLOW FOR {} - invalid element (No POTCAR)'.format( + snl.structure.formula)) elif not job['is_ordered']: self.sma.update_state(submission_id, 'REJECTED', 'invalid structure (disordered)', {}) - print 'REJECTED WORKFLOW FOR {} - invalid structure'.format( - snl.structure.formula) + print('REJECTED WORKFLOW FOR {} - invalid structure'.format( + snl.structure.formula)) else: snl.data['_materialsproject'] = snl.data.get('_materialsproject', {}) snl.data['_materialsproject']['submission_id'] = submission_id @@ -84,7 +84,7 @@ def submit_new_workflow(self): else: wf = snl_to_wf(snl, job['parameters']) self.launchpad.add_wf(wf) - print 'ADDED WORKFLOW FOR {}'.format(snl.structure.formula) + print('ADDED WORKFLOW FOR 
{}'.format(snl.structure.formula)) except: self.jobs.find_and_modify({'submission_id': submission_id}, {'$set': {'state': 'ERROR'}}) @@ -102,7 +102,7 @@ def update_existing_workflows(self): try: self.update_wf_state(submission_id) except: - print 'ERROR while processing s_id', submission_id + print('ERROR while processing s_id', submission_id) traceback.print_exc() From bce99f520cba37396d63690438862ee6f877dcec Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 1 Jun 2016 22:49:01 -0700 Subject: [PATCH 024/204] conform to PEP8 --- mpworks/firetasks/nmr_tasks.py | 8 ++++++-- mpworks/workflows/snl_to_wf_nmr.py | 13 +++++-------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 55acdb45..f6ce283a 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -20,6 +20,7 @@ This is modified from Wei Chen & Joseph Montoya's elastic_tasks. """ + def _get_nuclear_quadrupole_moment(element, nqm_dict, parameters): if element not in nqm_dict: return 0.0 @@ -38,6 +39,7 @@ def _get_nuclear_quadrupole_moment(element, nqm_dict, parameters): else: return list(d.values())[0] + def _config_dict_to_input_set(config_dict, config_name, structure, incar_enforce, parameters): trial_set = DictSet(structure, name=config_name, config_dict=config_dict, user_incar_settings=incar_enforce) @@ -65,12 +67,13 @@ def _config_dict_to_input_set(config_dict, config_name, structure, incar_enforce user_incar_settings=incar_enforce) return vis + def snl_to_nmr_spec(snl, istep_triple_jump, parameters=None): parameters = parameters if parameters else {} spec = {'parameters': parameters} module_dir = os.path.abspath(os.path.dirname(__file__)) - if 1<= istep_triple_jump <= 3: + if 1 <= istep_triple_jump <= 3: config_file = os.path.join(module_dir, "triple_jump_relax_set.yaml") config_key = "STEP{}".format(istep_triple_jump) config_name = "Triple Jump Relax S1" @@ -127,7 +130,8 @@ def 
snl_to_nmr_spec(snl, istep_triple_jump, parameters=None): del spec['parameters']['run_tags'] # add exact structure run tag automatically if we have a unique situation - if 'exact_structure' in parameters and parameters['exact_structure'] and snl.structure != snl.structure.get_primitive_structure(): + if 'exact_structure' in parameters and parameters['exact_structure'] and \ + snl.structure != snl.structure.get_primitive_structure(): spec['run_tags'].extend('exact_structure') spec['_dupefinder'] = DupeFinderVasp().to_dict() diff --git a/mpworks/workflows/snl_to_wf_nmr.py b/mpworks/workflows/snl_to_wf_nmr.py index 60a80efc..7dfe6dce 100644 --- a/mpworks/workflows/snl_to_wf_nmr.py +++ b/mpworks/workflows/snl_to_wf_nmr.py @@ -26,6 +26,7 @@ This is modified from Wei Chen's snl_to_wf_elastic. """ + def get_nmr_vasp_fw(fwid, copy_contcar, istep, nick_name, parameters, priority, snl): spec = snl_to_nmr_spec(snl, istep, parameters) trackers = [Tracker('FW_job.out'), Tracker('FW_job.error'), Tracker('vasp.out'), Tracker('OUTCAR'), @@ -33,7 +34,8 @@ def get_nmr_vasp_fw(fwid, copy_contcar, istep, nick_name, parameters, priority, spec['_priority'] = priority spec['_queueadapter'] = QA_VASP spec['_trackers'] = trackers - tasks = [VaspWriterTask()] + tasks = list() + tasks.append(VaspWriterTask()) if copy_contcar: parameters["use_CONTCAR"] = True parameters["files"] = "CONTCAR" @@ -44,6 +46,7 @@ def get_nmr_vasp_fw(fwid, copy_contcar, istep, nick_name, parameters, priority, fw_id=fwid) return vasp_fw + def get_nmr_db_fw(nick_name, fwid, prev_task_type, priority, task_class): trackers_db = [Tracker('FW_job.out'), Tracker('FW_job.error')] spec = {'task_type': 'VASP db insertion', '_priority': priority * 2, @@ -54,6 +57,7 @@ def get_nmr_db_fw(nick_name, fwid, prev_task_type, priority, task_class): fw_id=fwid) return db_fw + def snl_to_wf_nmr(snl, parameters): # parameters["user_vasp_settings"] specifies user defined incar/kpoints parameters fws = [] @@ -108,7 +112,6 @@ def 
snl_to_wf_nmr(snl, parameters): fws.append(db_fw) connections[geom_calc_fwid] = [geom_db_fwid] - # Calculate NMR Tensors for istep in [-1, -2]: # -1: Chemical Shift, -2: EFG @@ -137,9 +140,3 @@ def snl_to_wf_nmr(snl, parameters): wf_meta['submission_id'] = snl.data['_materialsproject']['submission_id'] return Workflow(fws, connections, name=nick_name, metadata=wf_meta) - - - - - - From cc3649dc71b4ebea7200f5a21232a7e4cf97865d Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 2 Jun 2016 00:09:18 -0700 Subject: [PATCH 025/204] use Algo = Fast for triple jump relaxations --- mpworks/firetasks/triple_jump_relax_set.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 5766e43c..6837006f 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -1,5 +1,6 @@ STEP1: INCAR: + ALGO: FAST EDIFF: -1.0e-06 EDIFFG: -0.1 ENCUT_ENHANCE_RATIO: 0.2 @@ -25,6 +26,7 @@ STEP1: STEP2: INCAR: ADDGRID: true + ALGO: FAST EDIFF: -1.0e-08 EDIFFG: -0.01 IBRION: 3 @@ -51,6 +53,7 @@ STEP2: STEP3: INCAR: ADDGRID: true + ALGO: FAST EDIFF: -1.0e-10 EDIFFG: -0.002 ENAUG_ENHANCE_RATIO: 0.75 From 83fc651aebfbac382fd743193da857a2150286ad Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 2 Jun 2016 12:09:43 -0700 Subject: [PATCH 026/204] go through DB authentication only when the user/password pair is present --- mpworks/processors/submit_canonical.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mpworks/processors/submit_canonical.py b/mpworks/processors/submit_canonical.py index e3e57e72..c6b1549f 100644 --- a/mpworks/processors/submit_canonical.py +++ b/mpworks/processors/submit_canonical.py @@ -36,7 +36,8 @@ def clear_env(): conn = MongoClient(db_creds['host'], db_creds['port']) db = conn[db_creds['database']] - db.authenticate(db_creds['admin_user'], db_creds['admin_password']) + if db_creds['admin_user'] is not 
None: + db.authenticate(db_creds['admin_user'], db_creds['admin_password']) db.tasks.remove() db.boltztrap.remove() db.counter.remove() From e9fe0e90583668634e00de12da5bdc411ffa561e Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 2 Jun 2016 14:01:47 -0700 Subject: [PATCH 027/204] use deferred import to fix the import error from pymatgen.io.sets --- mpworks/processors/process_submissions.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/mpworks/processors/process_submissions.py b/mpworks/processors/process_submissions.py index b000f484..e3b88df2 100644 --- a/mpworks/processors/process_submissions.py +++ b/mpworks/processors/process_submissions.py @@ -3,9 +3,6 @@ from fireworks.core.launchpad import LaunchPad from mpworks.snl_utils.mpsnl import MPStructureNL from mpworks.submission.submission_mongo import SubmissionMongoAdapter -from mpworks.workflows.snl_to_wf import snl_to_wf -from mpworks.workflows.snl_to_wf_elastic import snl_to_wf_elastic -from mpworks.workflows.snl_to_wf_nmr import snl_to_wf_nmr from mpworks.workflows.wf_utils import NO_POTCARS from pymatgen.matproj.snl import StructureNL @@ -78,10 +75,18 @@ def submit_new_workflow(self): # create a workflow if "Elasticity" in snl.projects: + from mpworks.workflows.snl_to_wf_elastic import snl_to_wf_elastic wf = snl_to_wf_elastic(snl, job['parameters']) elif "NMR" in snl.projects: + # The deferred import is a dirty fix to avoid the import + # error from pymatgen.io.vasp.sets, although pointing to + # sets_deprecated would make things work temporarily, the + # ultimate solution would be to use the new style VaspInputSets + # in pymatgen. NMR workflow uses the new style API.
+ from mpworks.workflows.snl_to_wf_nmr import snl_to_wf_nmr wf = snl_to_wf_nmr(snl, job['parameters']) else: + from mpworks.workflows.snl_to_wf import snl_to_wf wf = snl_to_wf(snl, job['parameters']) self.launchpad.add_wf(wf) print('ADDED WORKFLOW FOR {}'.format(snl.structure.formula)) From 99bcfef59ef894c0297322daa8a6d621d467ccec Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 2 Jun 2016 14:09:46 -0700 Subject: [PATCH 028/204] fix the file list type bug --- mpworks/workflows/snl_to_wf_nmr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/workflows/snl_to_wf_nmr.py b/mpworks/workflows/snl_to_wf_nmr.py index 7dfe6dce..f0569da8 100644 --- a/mpworks/workflows/snl_to_wf_nmr.py +++ b/mpworks/workflows/snl_to_wf_nmr.py @@ -38,7 +38,7 @@ def get_nmr_vasp_fw(fwid, copy_contcar, istep, nick_name, parameters, priority, tasks.append(VaspWriterTask()) if copy_contcar: parameters["use_CONTCAR"] = True - parameters["files"] = "CONTCAR" + parameters["files"] = ["CONTCAR"] parameters["keep_velocities"] = False tasks.append(VaspCopyTask(parameters=parameters)) tasks.append(get_custodian_task(spec)) From 7ca459216d84f7c8a7f9ae7b8a5142ee2b87d0ff Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 2 Jun 2016 14:18:08 -0700 Subject: [PATCH 029/204] fix fw_id bug --- mpworks/workflows/snl_to_wf_nmr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/workflows/snl_to_wf_nmr.py b/mpworks/workflows/snl_to_wf_nmr.py index f0569da8..5ac01ffb 100644 --- a/mpworks/workflows/snl_to_wf_nmr.py +++ b/mpworks/workflows/snl_to_wf_nmr.py @@ -82,7 +82,7 @@ def snl_to_wf_nmr(snl, parameters): addsnl_fwid = cur_fwid cur_fwid += 1 fws.append(Firework(tasks, spec, - name=get_slug(nick_name + '--' + spec['task_type']), fw_id=cur_fwid)) + name=get_slug(nick_name + '--' + spec['task_type']), fw_id=addsnl_fwid)) parameters["exact_structure"] = True # run Triple Jump Structure Relaxation to Converge to a Very Small Force From 
84ffcd1f738bb185a8e2c8c0734799a991678f02 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 2 Jun 2016 14:39:07 -0700 Subject: [PATCH 030/204] pointing to sets_deprecated to avoid the import error --- mpworks/firetasks/nmr_tasks.py | 2 +- mpworks/firetasks/vasp_setup_tasks.py | 2 +- mpworks/workflows/snl_to_wf.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index f6ce283a..cdbef0ec 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -2,7 +2,7 @@ import os import yaml from pymatgen.io.vasp import Outcar, zpath -from pymatgen.io.vasp.sets import DictSet +from pymatgen.io.vasp.sets_deprecated import DictSet from mpworks.dupefinders.dupefinder_vasp import DupeFinderVasp from mpworks.firetasks.vasp_io_tasks import VaspToDBTask diff --git a/mpworks/firetasks/vasp_setup_tasks.py b/mpworks/firetasks/vasp_setup_tasks.py index f39e55c4..f7d9a75e 100644 --- a/mpworks/firetasks/vasp_setup_tasks.py +++ b/mpworks/firetasks/vasp_setup_tasks.py @@ -5,7 +5,7 @@ from fireworks.core.firework import FireTaskBase, FWAction from pymatgen.io.vasp.outputs import Vasprun, Outcar from pymatgen.io.vasp.inputs import VaspInput, Incar, Poscar, Kpoints, Potcar -from pymatgen.io.vasp.sets import MPVaspInputSet, MPStaticVaspInputSet, \ +from pymatgen.io.vasp.sets_deprecated import MPVaspInputSet, MPStaticVaspInputSet, \ MPNonSCFVaspInputSet from pymatgen.symmetry.bandstructure import HighSymmKpath diff --git a/mpworks/workflows/snl_to_wf.py b/mpworks/workflows/snl_to_wf.py index 812d9628..56f7270b 100644 --- a/mpworks/workflows/snl_to_wf.py +++ b/mpworks/workflows/snl_to_wf.py @@ -12,7 +12,7 @@ from mpworks.workflows.wf_settings import QA_DB, QA_VASP, QA_CONTROL from pymatgen import Composition from pymatgen.io.cif import CifParser -from pymatgen.io.vasp.sets import MPVaspInputSet, MPGGAVaspInputSet +from pymatgen.io.vasp.sets_deprecated import MPVaspInputSet, 
MPGGAVaspInputSet from pymatgen.matproj.snl import StructureNL __author__ = 'Anubhav Jain' From 6835ca6a169dd0cda4b881bfe2ca8a2dafab977c Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 2 Jun 2016 14:44:44 -0700 Subject: [PATCH 031/204] fix typo. DictSet is in new style InputSet API --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index cdbef0ec..f6ce283a 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -2,7 +2,7 @@ import os import yaml from pymatgen.io.vasp import Outcar, zpath -from pymatgen.io.vasp.sets_deprecated import DictSet +from pymatgen.io.vasp.sets import DictSet from mpworks.dupefinders.dupefinder_vasp import DupeFinderVasp from mpworks.firetasks.vasp_io_tasks import VaspToDBTask From ecb2ee60b04dfb563e70800f0523e14e6c6676b4 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 2 Jun 2016 14:47:54 -0700 Subject: [PATCH 032/204] fix triple jump relax FireWork name --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index f6ce283a..f7008cd6 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -76,7 +76,7 @@ def snl_to_nmr_spec(snl, istep_triple_jump, parameters=None): if 1 <= istep_triple_jump <= 3: config_file = os.path.join(module_dir, "triple_jump_relax_set.yaml") config_key = "STEP{}".format(istep_triple_jump) - config_name = "Triple Jump Relax S1" + config_name = "Triple Jump Relax S{}".format(istep_triple_jump) elif istep_triple_jump == -1: # NMR Chemical Shit calculations config_file = os.path.join(module_dir, "nmr_tensor_set.yaml") From 71fd3ada8e0dccad62faf4a9deaf46555d68be6f Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 2 Jun 2016 15:08:41 -0700 Subject: [PATCH 033/204] fix FireWorks links dict --- mpworks/workflows/snl_to_wf_nmr.py | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/workflows/snl_to_wf_nmr.py b/mpworks/workflows/snl_to_wf_nmr.py index 5ac01ffb..2f153cc1 100644 --- a/mpworks/workflows/snl_to_wf_nmr.py +++ b/mpworks/workflows/snl_to_wf_nmr.py @@ -122,7 +122,7 @@ def snl_to_wf_nmr(snl, parameters): priority, copy.deepcopy(snl)) fws.append(vasp_fw) nmr_task_type = vasp_fw.spec['task_type'] - connections[geom_calc_fwid] = [nmr_calc_fwid] + connections[geom_calc_fwid].extend([nmr_calc_fwid]) # insert into DB task_class = NmrVaspToDBTask From e6849d1c7d8159a8079f97f386ef4597eaa9d801 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 2 Jun 2016 16:04:52 -0700 Subject: [PATCH 034/204] pymongo find_and_modify() is deprecated, replace with find_one_and_update() --- mpworks/submission/submission_mongo.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mpworks/submission/submission_mongo.py b/mpworks/submission/submission_mongo.py index 3cfb2523..083d6f98 100644 --- a/mpworks/submission/submission_mongo.py +++ b/mpworks/submission/submission_mongo.py @@ -93,7 +93,7 @@ def _update_indices(self): self.jobs.ensure_index('submitter_email') def _get_next_submission_id(self): - return self.id_assigner.find_and_modify( + return self.id_assigner.find_one_and_update( query={}, update={'$inc': {'next_submission_id': 1}})[ 'next_submission_id'] @@ -140,7 +140,7 @@ def resubmit(self, submission_id, snl_db=None): updates['parameters'] = self.jobs.find_one({'submission_id': submission_id}, {'parameters': 1})['parameters'] updates['parameters'].update({"mpsnl": mpsnl.as_dict(), "snlgroup_id": snlgroup_id}) - self.jobs.find_and_modify({'submission_id': submission_id}, {'$set': updates}) + self.jobs.find_one_and_update({'submission_id': submission_id}, {'$set': updates}) def cancel_submission(self, submission_id): @@ -165,8 +165,8 @@ def to_dict(self): return d def update_state(self, submission_id, state, state_details, task_dict): - 
self.jobs.find_and_modify({'submission_id': submission_id}, - {'$set': {'state': state, 'state_details': state_details, 'task_dict': task_dict}}) + self.jobs.find_one_and_update({'submission_id': submission_id}, + {'$set': {'state': state, 'state_details': state_details, 'task_dict': task_dict}}) @classmethod def from_dict(cls, d): From 98a76f7199eb5fb56398ef71bae69c2e1da96589 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 2 Jun 2016 16:12:09 -0700 Subject: [PATCH 035/204] fix parameter bugs in find_one_and_update() --- mpworks/submission/submission_mongo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/submission/submission_mongo.py b/mpworks/submission/submission_mongo.py index 083d6f98..9fc0bf13 100644 --- a/mpworks/submission/submission_mongo.py +++ b/mpworks/submission/submission_mongo.py @@ -94,7 +94,7 @@ def _update_indices(self): def _get_next_submission_id(self): return self.id_assigner.find_one_and_update( - query={}, update={'$inc': {'next_submission_id': 1}})[ + filter={}, update={'$inc': {'next_submission_id': 1}})[ 'next_submission_id'] def _restart_id_assigner_at(self, next_submission_id): From 67a2b6141b31249cee664cd0670e9d7cb1dfba45 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 2 Jun 2016 16:18:11 -0700 Subject: [PATCH 036/204] ensure_index() is deprecated, replace with create_index() --- mpworks/processors/process_submissions.py | 6 +++--- mpworks/submission/submission_mongo.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/mpworks/processors/process_submissions.py b/mpworks/processors/process_submissions.py index e3b88df2..d0551f7c 100644 --- a/mpworks/processors/process_submissions.py +++ b/mpworks/processors/process_submissions.py @@ -42,7 +42,7 @@ def submit_all_new_workflows(self): def submit_new_workflow(self): # finds a submitted job, creates a workflow, and submits it to FireWorks - job = self.jobs.find_and_modify({'state': 'SUBMITTED'}, {'$set': {'state': 'WAITING'}}) + 
job = self.jobs.find_one_and_update({'state': 'SUBMITTED'}, {'$set': {'state': 'WAITING'}}) if job: submission_id = job['submission_id'] try: @@ -91,8 +91,8 @@ def submit_new_workflow(self): self.launchpad.add_wf(wf) print('ADDED WORKFLOW FOR {}'.format(snl.structure.formula)) except: - self.jobs.find_and_modify({'submission_id': submission_id}, - {'$set': {'state': 'ERROR'}}) + self.jobs.find_one_and_update({'submission_id': submission_id}, + {'$set': {'state': 'ERROR'}}) traceback.print_exc() return submission_id diff --git a/mpworks/submission/submission_mongo.py b/mpworks/submission/submission_mongo.py index 9fc0bf13..e3d9d578 100644 --- a/mpworks/submission/submission_mongo.py +++ b/mpworks/submission/submission_mongo.py @@ -88,9 +88,9 @@ def _reset(self): self.jobs.remove() def _update_indices(self): - self.jobs.ensure_index('submission_id', unique=True) - self.jobs.ensure_index('state') - self.jobs.ensure_index('submitter_email') + self.jobs.create_index('submission_id', unique=True) + self.jobs.create_index('state') + self.jobs.create_index('submitter_email') def _get_next_submission_id(self): return self.id_assigner.find_one_and_update( From 20ac71f020b90deddd943c7a79e725c6c83b951b Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 2 Jun 2016 21:05:44 -0700 Subject: [PATCH 037/204] reduce structure to primitive cell from the beginning therefore reduce only once in the full workflow creation --- mpworks/firetasks/nmr_tasks.py | 14 +++----------- mpworks/workflows/snl_to_wf_nmr.py | 20 ++++++++++++++++---- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index f7008cd6..fdea4297 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -68,7 +68,7 @@ def _config_dict_to_input_set(config_dict, config_name, structure, incar_enforce
parameters=None, additional_run_tags=()): parameters = parameters if parameters else {} spec = {'parameters': parameters} @@ -92,15 +92,12 @@ def snl_to_nmr_spec(snl, istep_triple_jump, parameters=None): with open(config_file) as f: parent_config_dict = yaml.load(stream=f) config_dict = parent_config_dict[config_key] - if "NMR" in config_name: incar_enforce = {'NPAR': 4} else: incar_enforce = {'KPAR': 4} - if 'exact_structure' in parameters and parameters['exact_structure']: - structure = snl.structure - else: - structure = snl.structure.get_primitive_structure() + spec['run_tags'] = spec.get('run_tags', []) + spec['run_tags'].extend(additional_run_tags) mpvis = _config_dict_to_input_set(config_dict, config_name, structure, incar_enforce, parameters=parameters) @@ -129,11 +126,6 @@ def snl_to_nmr_spec(snl, istep_triple_jump, parameters=None): spec['run_tags'].extend(parameters['run_tags']) del spec['parameters']['run_tags'] - # add exact structure run tag automatically if we have a unique situation - if 'exact_structure' in parameters and parameters['exact_structure'] and \ - snl.structure != snl.structure.get_primitive_structure(): - spec['run_tags'].extend('exact_structure') - spec['_dupefinder'] = DupeFinderVasp().to_dict() spec['vaspinputset_name'] = mpvis.name spec['task_type'] = mpvis.name diff --git a/mpworks/workflows/snl_to_wf_nmr.py b/mpworks/workflows/snl_to_wf_nmr.py index 2f153cc1..573b3c07 100644 --- a/mpworks/workflows/snl_to_wf_nmr.py +++ b/mpworks/workflows/snl_to_wf_nmr.py @@ -27,8 +27,8 @@ """ -def get_nmr_vasp_fw(fwid, copy_contcar, istep, nick_name, parameters, priority, snl): - spec = snl_to_nmr_spec(snl, istep, parameters) +def get_nmr_vasp_fw(fwid, copy_contcar, istep, nick_name, parameters, priority, structure, additional_run_tags): + spec = snl_to_nmr_spec(structure, istep, parameters, additional_run_tags) trackers = [Tracker('FW_job.out'), Tracker('FW_job.error'), Tracker('vasp.out'), Tracker('OUTCAR'), Tracker('OSZICAR'), 
Tracker('OUTCAR.relax1'), Tracker('OUTCAR.relax2')] spec['_priority'] = priority @@ -71,6 +71,17 @@ def snl_to_wf_nmr(snl, parameters): f = Composition(snl.structure.composition.reduced_formula).alphabetical_formula nick_name = parameters.get("nick_name", f) + if 'exact_structure' in parameters and parameters['exact_structure']: + structure = snl.structure + else: + structure = snl.structure.get_primitive_structure() + + additional_run_tags = [] + # add exact structure run tag automatically if we have a unique situation + if 'exact_structure' in parameters and parameters['exact_structure'] and \ + snl.structure != structure: + additional_run_tags.append('exact_structure') + # add the SNL to the SNL DB and figure out duplicate group tasks = [AddSNLTask()] spec = {'task_type': 'Add to SNL database', 'snl': snl.as_dict(), @@ -94,7 +105,8 @@ def snl_to_wf_nmr(snl, parameters): geom_calc_fwid = cur_fwid cur_fwid += 1 vasp_fw = get_nmr_vasp_fw(geom_calc_fwid, copy_contcar, istep, nick_name, - copy.deepcopy(parameters), priority, copy.deepcopy(snl)) + copy.deepcopy(parameters), priority, copy.deepcopy(structure), + additional_run_tags) fws.append(vasp_fw) geom_task_type = vasp_fw.spec['task_type'] if istep == 1: @@ -119,7 +131,7 @@ def snl_to_wf_nmr(snl, parameters): nmr_calc_fwid = cur_fwid cur_fwid += 1 vasp_fw = get_nmr_vasp_fw(nmr_calc_fwid, True, istep, nick_name, copy.deepcopy(parameters), - priority, copy.deepcopy(snl)) + priority, copy.deepcopy(structure), additional_run_tags) fws.append(vasp_fw) nmr_task_type = vasp_fw.spec['task_type'] connections[geom_calc_fwid].extend([nmr_calc_fwid]) From b62f9a92a21a4bd3a2c879b4f864eb652d93ca70 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 2 Jun 2016 22:59:57 -0700 Subject: [PATCH 038/204] add options to control whether to update vasp in vasp_io_tasks --- mpworks/firetasks/vasp_io_tasks.py | 5 +++-- mpworks/workflows/snl_to_wf_nmr.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git 
a/mpworks/firetasks/vasp_io_tasks.py b/mpworks/firetasks/vasp_io_tasks.py index 667426c6..652f903d 100644 --- a/mpworks/firetasks/vasp_io_tasks.py +++ b/mpworks/firetasks/vasp_io_tasks.py @@ -183,8 +183,9 @@ def run_task(self, fw_spec): if d['state'] == 'successful': update_spec['analysis'] = d['analysis'] update_spec['output'] = d['output'] - update_spec['vasp']={'incar':d['calculations'][-1]['input']['incar'], - 'kpoints':d['calculations'][-1]['input']['kpoints']} + if self.get("update_input", True): + update_spec['vasp']={'incar':d['calculations'][-1]['input']['incar'], + 'kpoints':d['calculations'][-1]['input']['kpoints']} update_spec["task_id"]=t_id return FWAction(stored_data=stored_data, update_spec=update_spec) diff --git a/mpworks/workflows/snl_to_wf_nmr.py b/mpworks/workflows/snl_to_wf_nmr.py index 573b3c07..c6ca3a9c 100644 --- a/mpworks/workflows/snl_to_wf_nmr.py +++ b/mpworks/workflows/snl_to_wf_nmr.py @@ -52,7 +52,7 @@ def get_nmr_db_fw(nick_name, fwid, prev_task_type, priority, task_class): spec = {'task_type': 'VASP db insertion', '_priority': priority * 2, '_allow_fizzled_parents': True, '_queueadapter': QA_DB, "_dupefinder": DupeFinderDB().to_dict(), '_trackers': trackers_db} - db_fw = Firework([task_class()], spec, name=get_slug(nick_name + '--' + spec['task_type'] + + db_fw = Firework([task_class(parameters={"update_input": False})], spec, name=get_slug(nick_name + '--' + spec['task_type'] + '--' + prev_task_type), fw_id=fwid) return db_fw From fe0293d96e8ced4875d3a898ff6c9e02755cb9be Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 3 Jun 2016 11:13:24 -0700 Subject: [PATCH 039/204] fix OUTCAR file name bug --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index fdea4297..485282fb 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -141,7 +141,7 @@ def __init__(self, parameters=None): def 
run_task(self, fw_spec): prev_dir = get_loc(fw_spec['prev_vasp_dir']) - outcar = Outcar(zpath(os.path.join(prev_dir, "vasprun.xml"))) + outcar = Outcar(zpath(os.path.join(prev_dir, "OUTCAR"))) prev_task_type = fw_spec['prev_task_type'] nmr_fields = dict() if prev_task_type == "NMR CS": From 4a5c1a495c3270f6e74b3c278a6994c91d108999 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 3 Jun 2016 11:17:15 -0700 Subject: [PATCH 040/204] fix KPAR/NPAR settings --- mpworks/firetasks/nmr_tasks.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 485282fb..5b9c22b4 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -92,10 +92,10 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru with open(config_file) as f: parent_config_dict = yaml.load(stream=f) config_dict = parent_config_dict[config_key] - if "NMR" in config_name: - incar_enforce = {'NPAR': 4} - else: + if config_name == "NMR CS": incar_enforce = {'KPAR': 4} + else: + incar_enforce = {'NPAR': 4} spec['run_tags'] = spec.get('run_tags', []) spec['run_tags'].extend(additional_run_tags) From aec3a01a42e665880fc41b63ae1dcd804559e18f Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 3 Jun 2016 12:32:06 -0700 Subject: [PATCH 041/204] fix NMR tensor retrieving method --- mpworks/firetasks/nmr_tasks.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 5b9c22b4..b4afc916 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -1,7 +1,8 @@ import copy import os import yaml -from pymatgen.io.vasp import Outcar, zpath +from monty.os.path import zpath +from pymatgen.io.vasp import Outcar from pymatgen.io.vasp.sets import DictSet from mpworks.dupefinders.dupefinder_vasp import DupeFinderVasp @@ -145,12 +146,12 @@ def run_task(self, fw_spec):
prev_task_type = fw_spec['prev_task_type'] nmr_fields = dict() if prev_task_type == "NMR CS": - cs = outcar.read_chemical_shifts() - cs_fiels = {"chemical_shifts": [x.as_dict() for x in cs]} + outcar.read_chemical_shifts() + cs_fiels = {"chemical_shifts": [x.as_dict() for x in outcar.data["chemical_shifts"]]} nmr_fields.update(cs_fiels) elif prev_task_type == "NMR EFG": - efg = outcar.read_nmr_efg() - efg_fields = {"efg": efg} + outcar.read_nmr_efg() + efg_fields = {"efg": outcar.data["efg"]} nmr_fields.update(efg_fields) else: raise ValueError("Unsupported Task Type: \"{}\"".format(prev_task_type)) From 5e7c8340b503770cd201a343f2a7e71ec0a3c491 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 3 Jun 2016 20:49:02 -0700 Subject: [PATCH 042/204] use prec=accurate throught the triple jump relaxation --- mpworks/firetasks/triple_jump_relax_set.yaml | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 6837006f..1865871b 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -15,7 +15,7 @@ STEP1: NSW: 200 POTIM: 0.3 PREC: ACCURATE - SIGMA: 0 + SIGMA: 0.1 KPOINTS: grid_density: 1000 POTCAR: @@ -29,6 +29,7 @@ STEP2: ALGO: FAST EDIFF: -1.0e-08 EDIFFG: -0.01 + ENAUG_ENHANCE_RATIO: 0.25 IBRION: 3 IOPT: 7 ISIF: 3 @@ -40,9 +41,8 @@ STEP2: NELMIN: 5 NSW: 200 POTIM: 0 - PREC: HIGH - ROPT_PER_ATOM: -0.0001 - SIGMA: 0 + PREC: ACCURATE + SIGMA: 0.03 KPOINTS: grid_density: 3000 POTCAR: @@ -57,7 +57,6 @@ STEP3: EDIFF: -1.0e-10 EDIFFG: -0.002 ENAUG_ENHANCE_RATIO: 0.75 - ENCUT_ENHANCE_RATIO: 0.75 FTIMEMAX: 0.5 IBRION: 3 IOPT: 7 @@ -65,15 +64,14 @@ STEP3: ISMEAR: -5 ISTART: 0 LCHARG: false - LREAL: AUTO + LREAL: false LWAVE: false MAXMOVE: 0.05 - NELMIN: 5 + NELMIN: 10 NSW: 100 POTIM: 0 - PREC: HIGH - ROPT_PER_ATOM: -0.0001 - SIGMA: 0 + PREC: ACCURATE + SIGMA: 0.01 TIMESTEP: 0.05 KPOINTS: 
grid_density: 6000 From 3114d70d6d56811a82eb7c5cafc9bf5b2f565bcd Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sat, 4 Jun 2016 19:25:36 -0700 Subject: [PATCH 043/204] Shyue Ping removed name parameter from DictSet, refactor to adapt this change --- mpworks/firetasks/nmr_tasks.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index b4afc916..681ef6f1 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -41,8 +41,8 @@ def _get_nuclear_quadrupole_moment(element, nqm_dict, parameters): return list(d.values())[0] -def _config_dict_to_input_set(config_dict, config_name, structure, incar_enforce, parameters): - trial_set = DictSet(structure, name=config_name, config_dict=config_dict, +def _config_dict_to_input_set(config_dict, structure, incar_enforce, parameters): + trial_set = DictSet(structure, config_dict=config_dict, user_incar_settings=incar_enforce) trial_potcar = trial_set.potcar all_enmax = [sp.enmax for sp in trial_potcar] @@ -64,7 +64,7 @@ def _config_dict_to_input_set(config_dict, config_name, structure, incar_enforce nqm_map = processed_config_dict["INCAR"].pop("QUAD_EFG_MAP") quad_efg = [_get_nuclear_quadrupole_moment(el, nqm_map, parameters) for el in all_elements] processed_config_dict["INCAR"]["QUAD_EFG"] = quad_efg - vis = DictSet(structure, name=config_name, config_dict=processed_config_dict, + vis = DictSet(structure, config_dict=processed_config_dict, user_incar_settings=incar_enforce) return vis @@ -100,7 +100,7 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru spec['run_tags'] = spec.get('run_tags', []) spec['run_tags'].extend(additional_run_tags) - mpvis = _config_dict_to_input_set(config_dict, config_name, structure, + mpvis = _config_dict_to_input_set(config_dict, structure, incar_enforce, parameters=parameters) incar = mpvis.incar poscar = mpvis.poscar @@ -128,8 +128,7 @@ def 
snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru del spec['parameters']['run_tags'] spec['_dupefinder'] = DupeFinderVasp().to_dict() - spec['vaspinputset_name'] = mpvis.name - spec['task_type'] = mpvis.name + spec['task_type'] = config_name return spec From e4c66ddea83f971c92d6973771c51c8e2f9664a1 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sat, 4 Jun 2016 19:53:44 -0700 Subject: [PATCH 044/204] change the default input set to the ongoing one in the task --- mpworks/firetasks/custodian_task.py | 2 ++ mpworks/firetasks/nmr_tasks.py | 1 + 2 files changed, 3 insertions(+) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index e3b7a8e7..ed005a5c 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -155,6 +155,8 @@ def get_custodian_task(spec): jobs = VaspJob.double_relaxation_run(v_exe) elif 'static' in task_type or 'deformed' in task_type: jobs = [VaspJob(v_exe)] + elif 'NMR' in task_type or "Triple Jump Relax" in task_type: + jobs = [VaspJob(v_exe, default_vasp_input_set=spec["custodian_default_input_set"])] else: # non-SCF runs jobs = [VaspJob(v_exe)] diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 681ef6f1..e4eaf0ec 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -112,6 +112,7 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru spec['vasp']['poscar'] = poscar.as_dict() spec['vasp']['kpoints'] = kpoints.as_dict() spec['vasp']['potcar'] = potcar.as_dict() + mpvis["custodian_default_input_set"] = mpvis # Add run tags of pseudopotential spec['run_tags'] = spec.get('run_tags', [potcar.functional]) From ecf42050f96791cd47ec4b20256e07c9238b56cf Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sat, 4 Jun 2016 22:28:16 -0700 Subject: [PATCH 045/204] fix bugs in setting default custodian input set --- mpworks/firetasks/nmr_tasks.py | 10 +++++----- 1 file changed, 5 
insertions(+), 5 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index e4eaf0ec..45026db8 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -108,11 +108,11 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru potcar = mpvis.potcar spec['vasp'] = {} - spec['vasp']['incar'] = incar.as_dict() - spec['vasp']['poscar'] = poscar.as_dict() - spec['vasp']['kpoints'] = kpoints.as_dict() - spec['vasp']['potcar'] = potcar.as_dict() - mpvis["custodian_default_input_set"] = mpvis + spec['vasp']['incar'] = incar + spec['vasp']['poscar'] = poscar + spec['vasp']['kpoints'] = kpoints + spec['vasp']['potcar'] = potcar + spec["custodian_default_input_set"] = mpvis # Add run tags of pseudopotential spec['run_tags'] = spec.get('run_tags', [potcar.functional]) From 96764ab2ddf9a8fdc2eec82028cf0e3fbbb000a8 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sat, 4 Jun 2016 22:50:47 -0700 Subject: [PATCH 046/204] fix INCAR keyword --- mpworks/firetasks/nmr_tensor_set.yaml | 2 +- mpworks/firetasks/triple_jump_relax_set.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index ae873fed..c64fb041 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -14,7 +14,7 @@ CS: NELMIN: 5 NSLPLINE: true PREC: ACCURATE - SIGMA: 0 + SIGMA: 0.01 KPOINTS: grid_density: 6000 POTCAR: diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 1865871b..96754505 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -29,7 +29,7 @@ STEP2: ALGO: FAST EDIFF: -1.0e-08 EDIFFG: -0.01 - ENAUG_ENHANCE_RATIO: 0.25 + ENCUT_ENHANCE_RATIO: 0.25 IBRION: 3 IOPT: 7 ISIF: 3 @@ -56,7 +56,7 @@ STEP3: ALGO: FAST EDIFF: -1.0e-10 EDIFFG: -0.002 - ENAUG_ENHANCE_RATIO: 0.75 + 
ENCUT_ENHANCE_RATIO: 0.75 FTIMEMAX: 0.5 IBRION: 3 IOPT: 7 From a92752e59743e7ecaf4ed7ec760ebfdd3313adb0 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sat, 4 Jun 2016 23:00:12 -0700 Subject: [PATCH 047/204] use LREAL=Auto for all steps --- mpworks/firetasks/triple_jump_relax_set.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 96754505..5c4ceac9 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -64,7 +64,7 @@ STEP3: ISMEAR: -5 ISTART: 0 LCHARG: false - LREAL: false + LREAL: AUTO LWAVE: false MAXMOVE: 0.05 NELMIN: 10 From 239658263d57f938af5165fb1a54d2635ac459b3 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 5 Jun 2016 18:23:09 -0700 Subject: [PATCH 048/204] use SetupTask style FireTask scheme in NMR calculations --- mpworks/firetasks/nmr_tasks.py | 24 ++++++++++++++++++------ mpworks/workflows/snl_to_wf_nmr.py | 11 ++--------- 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 45026db8..cdbe850a 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -104,14 +104,10 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru incar_enforce, parameters=parameters) incar = mpvis.incar poscar = mpvis.poscar - kpoints = mpvis.kpoints potcar = mpvis.potcar - spec['vasp'] = {} - spec['vasp']['incar'] = incar - spec['vasp']['poscar'] = poscar - spec['vasp']['kpoints'] = kpoints - spec['vasp']['potcar'] = potcar + spec["input_set_config_dict"] = config_dict + spec["input_set_incar_enforce"] = incar_enforce spec["custodian_default_input_set"] = mpvis # Add run tags of pseudopotential @@ -157,3 +153,19 @@ def run_task(self, fw_spec): raise ValueError("Unsupported Task Type: \"{}\"".format(prev_task_type)) self.additional_fields.update(nmr_fields) 
super(NmrVaspToDBTask, self).run_task(fw_spec) + +class DictVaspSetupTask(): + _fw_name = "Dict Vasp Input Setup Task" + + def run_task(self, fw_spec): + config_dict = fw_spec["input_set_config_dict"] + incar_enforce = fw_spec["input_set_incar_enforce"] + mpsnl = fw_spec["mpsnl"] + structure = mpsnl.structure + vis = DictSet(structure, config_dict=config_dict, + user_incar_settings=incar_enforce) + + vis.incar.write_file("INCAR") + vis.poscar.write_file("POSCAR") + vis.potcar.write_file("POTCAR") + vis.kpoints.write_file("KPOINTS") diff --git a/mpworks/workflows/snl_to_wf_nmr.py b/mpworks/workflows/snl_to_wf_nmr.py index c6ca3a9c..8ff82428 100644 --- a/mpworks/workflows/snl_to_wf_nmr.py +++ b/mpworks/workflows/snl_to_wf_nmr.py @@ -8,7 +8,7 @@ from mpworks.dupefinders.dupefinder_vasp import DupeFinderDB from mpworks.firetasks.custodian_task import get_custodian_task -from mpworks.firetasks.nmr_tasks import snl_to_nmr_spec, NmrVaspToDBTask +from mpworks.firetasks.nmr_tasks import snl_to_nmr_spec, NmrVaspToDBTask, DictVaspSetupTask from mpworks.firetasks.snl_tasks import AddSNLTask from mpworks.firetasks.vasp_io_tasks import VaspWriterTask, VaspCopyTask, VaspToDBTask from mpworks.snl_utils.mpsnl import MPStructureNL, get_meta_from_structure @@ -34,14 +34,7 @@ def get_nmr_vasp_fw(fwid, copy_contcar, istep, nick_name, parameters, priority, spec['_priority'] = priority spec['_queueadapter'] = QA_VASP spec['_trackers'] = trackers - tasks = list() - tasks.append(VaspWriterTask()) - if copy_contcar: - parameters["use_CONTCAR"] = True - parameters["files"] = ["CONTCAR"] - parameters["keep_velocities"] = False - tasks.append(VaspCopyTask(parameters=parameters)) - tasks.append(get_custodian_task(spec)) + tasks = [DictVaspSetupTask(), get_custodian_task(spec)] vasp_fw = Firework(tasks, spec, name=get_slug(nick_name + '--' + spec['task_type']), fw_id=fwid) return vasp_fw From b7f5095bec6982099c004bc506682667393ef2fe Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 5 Jun 2016 
18:29:24 -0700 Subject: [PATCH 049/204] fix bugs in FireTask implementation --- mpworks/firetasks/nmr_tasks.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index cdbe850a..1d838f3c 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -1,6 +1,8 @@ import copy import os import yaml +from fireworks import FireTaskBase +from fireworks.utilities.fw_serializers import FWSerializable from monty.os.path import zpath from pymatgen.io.vasp import Outcar from pymatgen.io.vasp.sets import DictSet @@ -154,7 +156,8 @@ def run_task(self, fw_spec): self.additional_fields.update(nmr_fields) super(NmrVaspToDBTask, self).run_task(fw_spec) -class DictVaspSetupTask(): + +class DictVaspSetupTask(FireTaskBase, FWSerializable): _fw_name = "Dict Vasp Input Setup Task" def run_task(self, fw_spec): From 7beb21b6d6aa6fc9b2408c7712699ffb524a933d Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 5 Jun 2016 18:41:53 -0700 Subject: [PATCH 050/204] fix bugs in input set dict setup --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 1d838f3c..9d16c310 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -108,7 +108,7 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru poscar = mpvis.poscar potcar = mpvis.potcar - spec["input_set_config_dict"] = config_dict + spec["input_set_config_dict"] = mpvis.config_dict spec["input_set_incar_enforce"] = incar_enforce spec["custodian_default_input_set"] = mpvis From c2cf7d74893e39b9964e3412f3af3db813685c59 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 5 Jun 2016 21:22:41 -0700 Subject: [PATCH 051/204] add triple jump relax to mpsnl update_spec --- mpworks/drones/mp_vaspdrone.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git 
a/mpworks/drones/mp_vaspdrone.py b/mpworks/drones/mp_vaspdrone.py index cfd0f2f3..2a2071eb 100644 --- a/mpworks/drones/mp_vaspdrone.py +++ b/mpworks/drones/mp_vaspdrone.py @@ -250,7 +250,9 @@ def process_fw(self, dir_name, d): d['deformation_matrix'] = fw_dict['spec']['deformation_matrix'] d['original_task_id'] = fw_dict['spec']['original_task_id'] if not self.update_duplicates: - if 'optimize structure' in d['task_type'] and 'output' in d: + if ('optimize structure' in d['task_type'] or + 'Triple Jump Relax' in d['task_type']) \ + and 'output' in d: # create a new SNL based on optimized structure new_s = Structure.from_dict(d['output']['crystal']) old_snl = StructureNL.from_dict(d['snl']) From 6d307da6a75133bf51cbb4910c8c6a3789dbb1be Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 5 Jun 2016 21:42:17 -0700 Subject: [PATCH 052/204] let the NMR tensor depends on DB task such that mpsnl can be updated correctly --- mpworks/workflows/snl_to_wf_nmr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/workflows/snl_to_wf_nmr.py b/mpworks/workflows/snl_to_wf_nmr.py index 8ff82428..ea76eeb5 100644 --- a/mpworks/workflows/snl_to_wf_nmr.py +++ b/mpworks/workflows/snl_to_wf_nmr.py @@ -127,7 +127,7 @@ def snl_to_wf_nmr(snl, parameters): priority, copy.deepcopy(structure), additional_run_tags) fws.append(vasp_fw) nmr_task_type = vasp_fw.spec['task_type'] - connections[geom_calc_fwid].extend([nmr_calc_fwid]) + connections[geom_db_fwid].extend([nmr_calc_fwid]) # insert into DB task_class = NmrVaspToDBTask From 07c0e21f01fb0c28d17b6592050b6c20db5e0405 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 5 Jun 2016 21:54:58 -0700 Subject: [PATCH 053/204] also set vaspinputset_name --- mpworks/firetasks/nmr_tasks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 9d16c310..23af3191 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -128,6 +128,7 @@ 
def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru spec['_dupefinder'] = DupeFinderVasp().to_dict() spec['task_type'] = config_name + spec['vaspinputset_name'] = config_name + ' DictSet' return spec From 0d1dc77c2e9d1838ef08cae41012286ffc9bbda1 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 6 Jun 2016 12:05:14 -0700 Subject: [PATCH 054/204] update POTCAR strategy --- mpworks/firetasks/nmr_tensor_set.yaml | 19 ++++++++++++ mpworks/firetasks/triple_jump_relax_set.yaml | 31 +++++++++++++++++++- 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index c64fb041..5542d2a5 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -22,6 +22,16 @@ CS: H: H_h Mg: Mg_sv O: O_h + Na: Na_sv + Al: Al + Si: Si + P: P_h + Cl: Cl_h + K: K_pv + Ca: Ca_pv + Y: Y_sv + Gd: Gd + EFG: INCAR: EDIFF: -1.0e-06 @@ -124,3 +134,12 @@ EFG: H: H Mg: Mg_sv O: O + Na: Na_sv + Al: Al + Si: Si + P: P + Cl: Cl + K: K_pv + Ca: Ca_pv + Y: Y_sv + Gd: Gd diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 5c4ceac9..f6639009 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -21,8 +21,18 @@ STEP1: POTCAR: C: C H: H - Mg: Mg_sv + Mg: Mg O: O + Na: Na + Al: Al + Si: Si + P: P + Cl: Cl + K: K_pv + Ca: Ca_pv + Y: Y_sv + Gd: Gd + STEP2: INCAR: ADDGRID: true @@ -50,6 +60,16 @@ STEP2: H: H_h Mg: Mg_sv O: O_h + Na: Na_sv + Al: Al + Si: Si + P: P_h + Cl: Cl_h + K: K_pv + Ca: Ca_pv + Y: Y_sv + Gd: Gd + STEP3: INCAR: ADDGRID: true @@ -80,3 +100,12 @@ STEP3: H: H_h Mg: Mg_sv O: O_h + Na: Na_sv + Al: Al + Si: Si + P: P_h + Cl: Cl_h + K: K_pv + Ca: Ca_pv + Y: Y_sv + Gd: Gd From d2af2dbdbb8678424eb5952e80a10b9682344a42 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 6 Jun 2016 13:46:43 -0700 Subject: [PATCH 055/204] use symprec=1.0E-8 
for all the triple jump relax and chemical shift calculations --- mpworks/firetasks/nmr_tensor_set.yaml | 1 + mpworks/firetasks/triple_jump_relax_set.yaml | 2 ++ 2 files changed, 3 insertions(+) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index 5542d2a5..eaf547dd 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -15,6 +15,7 @@ CS: NSLPLINE: true PREC: ACCURATE SIGMA: 0.01 + SYMPREC: 1.0E-8 KPOINTS: grid_density: 6000 POTCAR: diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index f6639009..922dbb5c 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -53,6 +53,7 @@ STEP2: POTIM: 0 PREC: ACCURATE SIGMA: 0.03 + SYMPREC: 1.0E-8 KPOINTS: grid_density: 3000 POTCAR: @@ -92,6 +93,7 @@ STEP3: POTIM: 0 PREC: ACCURATE SIGMA: 0.01 + SYMPREC: 1.0E-8 TIMESTEP: 0.05 KPOINTS: grid_density: 6000 From c83c1bd9ea983a4ab0f0b26aced1a79c698aa3cb Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 7 Jun 2016 11:16:35 -0700 Subject: [PATCH 056/204] also use symprec in EFG calculations --- mpworks/firetasks/nmr_tensor_set.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index eaf547dd..a95ac963 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -127,7 +127,8 @@ EFG: Hg-201: 387.6 Ra: Ra-223: 1210.3 - SIGMA: 0 + SIGMA: 0.05 + SYMPREC: 1.0E-8 KPOINTS: grid_density: 3000 POTCAR: From 2004f3db295e18fcf257442c34c318297ac239f8 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sat, 11 Jun 2016 23:04:43 -0700 Subject: [PATCH 057/204] update chemical shift parsing --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 
23af3191..ba0c6309 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -146,7 +146,7 @@ def run_task(self, fw_spec): nmr_fields = dict() if prev_task_type == "NMR CS": outcar.read_chemical_shifts() - cs_fiels = {"chemical_shifts": [x.as_dict() for x in outcar.data["chemical_shifts"]]} + cs_fiels = {"chemical_shifts": [x.as_dict() for x in outcar.data["chemical_shifts"]["valence_only"]]} nmr_fields.update(cs_fiels) elif prev_task_type == "NMR EFG": outcar.read_nmr_efg() From 082d811799e3fa41229cc4ec2bc42ea66a4d2447 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 12 Jun 2016 13:42:09 -0700 Subject: [PATCH 058/204] setup MOVE_TO_GARDEN for nmr workflows --- mpworks/firetasks/nmr_tasks.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index ba0c6309..c84c1bdb 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -1,4 +1,5 @@ import copy +import json import os import yaml from fireworks import FireTaskBase @@ -9,6 +10,7 @@ from mpworks.dupefinders.dupefinder_vasp import DupeFinderVasp from mpworks.firetasks.vasp_io_tasks import VaspToDBTask +from mpworks.workflows import wf_settings from mpworks.workflows.wf_utils import get_loc __author__ = 'Xiaohui Qu' @@ -140,6 +142,16 @@ def __init__(self, parameters=None): super(NmrVaspToDBTask, self).__init__(parameters) def run_task(self, fw_spec): + db_dir = os.environ['DB_LOC'] + db_path = os.path.join(db_dir, 'tasks_db.json') + with open(db_path) as f: + db_creds = json.load(f) + if 'prod' in db_creds['database']: + wf_settings.MOVE_TO_GARDEN_PROD = True + elif 'test' in db_creds['database']: + wf_settings.MOVE_TO_GARDEN_DEV = True + if 'nmr' not in wf_settings.GARDEN: + wf_settings.GARDEN = os.path.join(wf_settings.GARDEN, 'nmr') prev_dir = get_loc(fw_spec['prev_vasp_dir']) outcar = Outcar(zpath(os.path.join(prev_dir, "OUTCAR"))) prev_task_type = fw_spec['prev_task_type'] From 
d025ff4f05fe523572cb5da36d7cb570b96eb5f3 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 12 Jun 2016 14:38:03 -0700 Subject: [PATCH 059/204] add support for job packing --- mpworks/firetasks/custodian_task.py | 37 ++++++++++++++++++++++++++--- 1 file changed, 34 insertions(+), 3 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index ed005a5c..613e8012 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -1,6 +1,8 @@ from gzip import GzipFile import logging import socket + +from fireworks.fw_config import FWData from monty.os.path import which from custodian.vasp.handlers import VaspErrorHandler, NonConvergingErrorHandler, \ FrozenJobErrorHandler, MeshSymmetryErrorHandler, PositiveEnergyErrorHandler @@ -97,10 +99,14 @@ def run_task(self, fw_spec): if nproc is None: raise ValueError("None of the env vars {} found to set nproc!".format(env_vars)) - v_exe = shlex.split('{} -n {} {}'.format(mpi_cmd, nproc, fw_env.get("vasp_cmd", "vasp"))) - gv_exe = shlex.split('{} -n {} {}'.format(mpi_cmd, nproc, fw_env.get("gvasp_cmd", "gvasp"))) + fw_data = FWData() + if (not fw_data.MULTIPROCESSING) or (fw_data.NODE_LIST is None): + v_exe = shlex.split('{} -n {} {}'.format(mpi_cmd, nproc, fw_env.get("vasp_cmd", "vasp"))) + gv_exe = shlex.split('{} -n {} {}'.format(mpi_cmd, nproc, fw_env.get("gvasp_cmd", "gvasp"))) + else: + v_exe, gv_exe = self._get_vasp_cmd_in_job_packing(fw_data, fw_env, mpi_cmd, nproc) - print 'host:', os.environ['HOSTNAME'] + print('host:', os.environ['HOSTNAME']) for job in self.jobs: job.vasp_cmd = v_exe @@ -137,6 +143,31 @@ def run_task(self, fw_spec): return FWAction(stored_data=stored_data, update_spec=update_spec) + def _get_vasp_cmd_in_job_packing(self, fw_data, fw_env, mpi_cmd, nproc): + tasks_per_node_flag = {"srun": "--ntasks-per-node", + "mpirun": "--npernode", + "aprun": "-N"} + nodelist_flag = {"srun": "--nodelist", + "mpirun": "--host", + "aprun": "-L"} 
+ v_exe = shlex.split('{mpi_cmd} -n {nproc} {tpn_flag} {tpn} {nl_flag} {nl} {vasp_cmd}'.format( + mpi_cmd=mpi_cmd, + nproc=nproc, + tpn_flag=tasks_per_node_flag[mpi_cmd], + tpn=fw_data.SUB_NPROCS, + nl_flag=nodelist_flag[mpi_cmd], + nl=','.join(fw_data.NODE_LIST), + vasp_cmd=fw_env.get("vasp_cmd", "vasp"))) + gv_exe = shlex.split('{mpi_cmd} -n {nproc} {tpn_flag} {tpn} {nl_flag} {nl} {vasp_cmd}'.format( + mpi_cmd=mpi_cmd, + nproc=nproc, + tpn_flag=tasks_per_node_flag[mpi_cmd], + tpn=fw_data.SUB_NPROCS, + nl_flag=nodelist_flag[mpi_cmd], + nl=','.join(fw_data.NODE_LIST), + vasp_cmd=fw_env.get("gvasp_cmd", "gvasp"))) + return v_exe, gv_exe + def _write_formula_file(self, fw_spec): filename = get_slug( 'JOB--' + fw_spec['mpsnl'].structure.composition.reduced_formula + '--' From 0d075a815bf5dee0d30d8eb0bc210beee3f81c66 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 12 Jun 2016 15:18:56 -0700 Subject: [PATCH 060/204] prefix GARDEN variables with module name --- mpworks/firetasks/vasp_io_tasks.py | 6 +++--- mpworks/workflows/wf_utils.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/mpworks/firetasks/vasp_io_tasks.py b/mpworks/firetasks/vasp_io_tasks.py index 652f903d..11283109 100644 --- a/mpworks/firetasks/vasp_io_tasks.py +++ b/mpworks/firetasks/vasp_io_tasks.py @@ -20,7 +20,7 @@ from mpworks.dupefinders.dupefinder_vasp import DupeFinderVasp from mpworks.firetasks.custodian_task import get_custodian_task from mpworks.firetasks.vasp_setup_tasks import SetupUnconvergedHandlerTask -from mpworks.workflows.wf_settings import QA_VASP, QA_DB, MOVE_TO_GARDEN_PROD, MOVE_TO_GARDEN_DEV +from mpworks.workflows import wf_settings from mpworks.workflows.wf_utils import last_relax, get_loc, move_to_garden from pymatgen import Composition from pymatgen.io.vasp.inputs import Incar, Poscar, Potcar, Kpoints @@ -148,10 +148,10 @@ def run_task(self, fw_spec): else: self.additional_fields['run_tags'] = fw_spec['_fizzled_parents'][0]['spec']['run_tags'] - if 
MOVE_TO_GARDEN_DEV: + if wf_settings.MOVE_TO_GARDEN_DEV: prev_dir = move_to_garden(prev_dir, prod=False) - elif MOVE_TO_GARDEN_PROD: + elif wf_settings.MOVE_TO_GARDEN_PROD: prev_dir = move_to_garden(prev_dir, prod=True) # get the directory containing the db file diff --git a/mpworks/workflows/wf_utils.py b/mpworks/workflows/wf_utils.py index 4c04c5dc..3a2fe668 100644 --- a/mpworks/workflows/wf_utils.py +++ b/mpworks/workflows/wf_utils.py @@ -4,7 +4,7 @@ import time import traceback from monty.os.path import zpath -from mpworks.workflows.wf_settings import RUN_LOCS, GARDEN +from mpworks.workflows import wf_settings __author__ = 'Anubhav Jain' @@ -64,7 +64,7 @@ def get_loc(m_dir): return m_dir block_part = get_block_part(m_dir) - for preamble in RUN_LOCS: + for preamble in wf_settings.RUN_LOCS: new_loc = os.path.join(preamble, block_part) if os.path.exists(new_loc): return new_loc @@ -74,7 +74,7 @@ def get_loc(m_dir): def move_to_garden(m_dir, prod=False): block_part = get_block_part(m_dir) - garden_part = GARDEN if prod else GARDEN+'/dev' + garden_part = wf_settings.GARDEN if prod else wf_settings.GARDEN+'/dev' f_dir = os.path.join(garden_part, block_part) if os.path.exists(m_dir) and not os.path.exists(f_dir) and m_dir != f_dir: try: From 9dd67acc6f05825510e977338c479b37be8c1114 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 12 Jun 2016 15:28:58 -0700 Subject: [PATCH 061/204] fix nprocs per node --- mpworks/firetasks/custodian_task.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 613e8012..d76d7b1f 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -154,7 +154,7 @@ def _get_vasp_cmd_in_job_packing(self, fw_data, fw_env, mpi_cmd, nproc): mpi_cmd=mpi_cmd, nproc=nproc, tpn_flag=tasks_per_node_flag[mpi_cmd], - tpn=fw_data.SUB_NPROCS, + tpn=int(fw_data.SUB_NPROCS)/len(fw_data.NODE_LIST), nl_flag=nodelist_flag[mpi_cmd], 
nl=','.join(fw_data.NODE_LIST), vasp_cmd=fw_env.get("vasp_cmd", "vasp"))) @@ -162,7 +162,7 @@ def _get_vasp_cmd_in_job_packing(self, fw_data, fw_env, mpi_cmd, nproc): mpi_cmd=mpi_cmd, nproc=nproc, tpn_flag=tasks_per_node_flag[mpi_cmd], - tpn=fw_data.SUB_NPROCS, + tpn=int(fw_data.SUB_NPROCS)/len(fw_data.NODE_LIST), nl_flag=nodelist_flag[mpi_cmd], nl=','.join(fw_data.NODE_LIST), vasp_cmd=fw_env.get("gvasp_cmd", "gvasp"))) From 5add4aefc01c882500189931c2f7f38533658bc6 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 12 Jun 2016 16:22:08 -0700 Subject: [PATCH 062/204] refactor GARDEN variable to singleton to unable dynamic adjustment --- mpworks/firetasks/nmr_tasks.py | 10 ++++---- mpworks/firetasks/vasp_io_tasks.py | 6 ++--- mpworks/workflows/wf_settings.py | 37 +++++++++++++++++++----------- mpworks/workflows/wf_utils.py | 6 ++--- 4 files changed, 35 insertions(+), 24 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index c84c1bdb..e15c84b0 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -10,7 +10,7 @@ from mpworks.dupefinders.dupefinder_vasp import DupeFinderVasp from mpworks.firetasks.vasp_io_tasks import VaspToDBTask -from mpworks.workflows import wf_settings +from mpworks.workflows.wf_settings import WFSettings from mpworks.workflows.wf_utils import get_loc __author__ = 'Xiaohui Qu' @@ -147,11 +147,11 @@ def run_task(self, fw_spec): with open(db_path) as f: db_creds = json.load(f) if 'prod' in db_creds['database']: - wf_settings.MOVE_TO_GARDEN_PROD = True + WFSettings().MOVE_TO_GARDEN_PROD = True elif 'test' in db_creds['database']: - wf_settings.MOVE_TO_GARDEN_DEV = True - if 'nmr' not in wf_settings.GARDEN: - wf_settings.GARDEN = os.path.join(wf_settings.GARDEN, 'nmr') + WFSettings().MOVE_TO_GARDEN_DEV = True + if 'nmr' not in WFSettings.GARDEN: + WFSettings().GARDEN = os.path.join(WFSettings().GARDEN, 'nmr') prev_dir = get_loc(fw_spec['prev_vasp_dir']) outcar = 
Outcar(zpath(os.path.join(prev_dir, "OUTCAR"))) prev_task_type = fw_spec['prev_task_type'] diff --git a/mpworks/firetasks/vasp_io_tasks.py b/mpworks/firetasks/vasp_io_tasks.py index 11283109..c01567d3 100644 --- a/mpworks/firetasks/vasp_io_tasks.py +++ b/mpworks/firetasks/vasp_io_tasks.py @@ -20,7 +20,7 @@ from mpworks.dupefinders.dupefinder_vasp import DupeFinderVasp from mpworks.firetasks.custodian_task import get_custodian_task from mpworks.firetasks.vasp_setup_tasks import SetupUnconvergedHandlerTask -from mpworks.workflows import wf_settings +from mpworks.workflows.wf_settings import WFSettings, QA_VASP, QA_DB from mpworks.workflows.wf_utils import last_relax, get_loc, move_to_garden from pymatgen import Composition from pymatgen.io.vasp.inputs import Incar, Poscar, Potcar, Kpoints @@ -148,10 +148,10 @@ def run_task(self, fw_spec): else: self.additional_fields['run_tags'] = fw_spec['_fizzled_parents'][0]['spec']['run_tags'] - if wf_settings.MOVE_TO_GARDEN_DEV: + if WFSettings().MOVE_TO_GARDEN_DEV: prev_dir = move_to_garden(prev_dir, prod=False) - elif wf_settings.MOVE_TO_GARDEN_PROD: + elif WFSettings.MOVE_TO_GARDEN_PROD: prev_dir = move_to_garden(prev_dir, prod=True) # get the directory containing the db file diff --git a/mpworks/workflows/wf_settings.py b/mpworks/workflows/wf_settings.py index 01ea9e39..57ff8c2a 100644 --- a/mpworks/workflows/wf_settings.py +++ b/mpworks/workflows/wf_settings.py @@ -1,3 +1,5 @@ +from monty.design_patterns import singleton + __author__ = 'Anubhav Jain' __copyright__ = 'Copyright 2013, The Materials Project' __version__ = '0.1' @@ -12,18 +14,27 @@ QA_DB = {'nnodes': 1, 'nodes' : 1, 'walltime': '2:00:00'} QA_CONTROL = {'nnodes': 1, 'nodes': 1, 'walltime': '00:30:00'} -MOVE_TO_GARDEN_DEV = False -MOVE_TO_GARDEN_PROD = False +@singleton +class WFSettings(object): + """ + This class stores settings for the Workflows. 
Use Singleton to enable runtime dynamic chage + """ + + def __init__(self): + self.MOVE_TO_GARDEN_DEV = False + self.MOVE_TO_GARDEN_PROD = False + self.GARDEN = '/project/projectdirs/matgen/garden' -GARDEN = '/project/projectdirs/matgen/garden' + @property + def RUN_LOCS(self): + return [self.GARDEN, self.GARDEN + '/dev', + '/project/projectdirs/matgen/garden/control_blocks', + '/project/projectdirs/matgen/scratch', + '/global/scratch/sd/matcomp/', '/global/homes/m/matcomp', + '/scratch/scratchdirs/matcomp/', '/scratch2/scratchdirs/matcomp/', + '/global/scratch/sd/matcomp/aj_tests/', + '/global/scratch/sd/matcomp/wc_tests/', + '/global/scratch/sd/matcomp/aj_prod/', + '/global/scratch2/sd/matcomp/mp_prod/', + '/global/scratch2/sd/matcomp/mp_prod_hopper/'] -RUN_LOCS = [GARDEN, GARDEN+'/dev', - '/project/projectdirs/matgen/garden/control_blocks', - '/project/projectdirs/matgen/scratch', - '/global/scratch/sd/matcomp/', '/global/homes/m/matcomp', - '/scratch/scratchdirs/matcomp/', '/scratch2/scratchdirs/matcomp/', - '/global/scratch/sd/matcomp/aj_tests/', - '/global/scratch/sd/matcomp/wc_tests/', - '/global/scratch/sd/matcomp/aj_prod/', - '/global/scratch2/sd/matcomp/mp_prod/', - '/global/scratch2/sd/matcomp/mp_prod_hopper/'] diff --git a/mpworks/workflows/wf_utils.py b/mpworks/workflows/wf_utils.py index 3a2fe668..b1f958ec 100644 --- a/mpworks/workflows/wf_utils.py +++ b/mpworks/workflows/wf_utils.py @@ -4,7 +4,7 @@ import time import traceback from monty.os.path import zpath -from mpworks.workflows import wf_settings +from mpworks.workflows.wf_settings import WFSettings __author__ = 'Anubhav Jain' @@ -64,7 +64,7 @@ def get_loc(m_dir): return m_dir block_part = get_block_part(m_dir) - for preamble in wf_settings.RUN_LOCS: + for preamble in WFSettings().RUN_LOCS: new_loc = os.path.join(preamble, block_part) if os.path.exists(new_loc): return new_loc @@ -74,7 +74,7 @@ def get_loc(m_dir): def move_to_garden(m_dir, prod=False): block_part = get_block_part(m_dir) - 
garden_part = wf_settings.GARDEN if prod else wf_settings.GARDEN+'/dev' + garden_part = WFSettings().GARDEN if prod else WFSettings().GARDEN+'/dev' f_dir = os.path.join(garden_part, block_part) if os.path.exists(m_dir) and not os.path.exists(f_dir) and m_dir != f_dir: try: From 60094df28f451475a28abea073a8ddeb0b2faf0c Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 12 Jun 2016 16:41:28 -0700 Subject: [PATCH 063/204] fix typos in accessing WFSettings --- mpworks/firetasks/nmr_tasks.py | 2 +- mpworks/firetasks/vasp_io_tasks.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index e15c84b0..dd45f7f0 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -150,7 +150,7 @@ def run_task(self, fw_spec): WFSettings().MOVE_TO_GARDEN_PROD = True elif 'test' in db_creds['database']: WFSettings().MOVE_TO_GARDEN_DEV = True - if 'nmr' not in WFSettings.GARDEN: + if 'nmr' not in WFSettings().GARDEN: WFSettings().GARDEN = os.path.join(WFSettings().GARDEN, 'nmr') prev_dir = get_loc(fw_spec['prev_vasp_dir']) outcar = Outcar(zpath(os.path.join(prev_dir, "OUTCAR"))) diff --git a/mpworks/firetasks/vasp_io_tasks.py b/mpworks/firetasks/vasp_io_tasks.py index c01567d3..99ee40db 100644 --- a/mpworks/firetasks/vasp_io_tasks.py +++ b/mpworks/firetasks/vasp_io_tasks.py @@ -151,7 +151,7 @@ def run_task(self, fw_spec): if WFSettings().MOVE_TO_GARDEN_DEV: prev_dir = move_to_garden(prev_dir, prod=False) - elif WFSettings.MOVE_TO_GARDEN_PROD: + elif WFSettings().MOVE_TO_GARDEN_PROD: prev_dir = move_to_garden(prev_dir, prod=True) # get the directory containing the db file From 94a615a1c4738ec85db8c79828a58cb4ff8fe1b8 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 12 Jun 2016 17:15:51 -0700 Subject: [PATCH 064/204] add TripleJumpRelaxVaspToDBTask to setup NMR only Garden locations --- mpworks/firetasks/nmr_tasks.py | 20 ++++++++++++++++++++ mpworks/workflows/snl_to_wf_nmr.py | 
4 ++-- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index dd45f7f0..1589c20d 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -170,6 +170,26 @@ def run_task(self, fw_spec): super(NmrVaspToDBTask, self).run_task(fw_spec) +class TripleJumpRelaxVaspToDBTask(VaspToDBTask): + _fw_name = "Triple Jump Relax to Database Task" + + def __init__(self, parameters=None): + super(TripleJumpRelaxVaspToDBTask, self).__init__(parameters) + + def run_task(self, fw_spec): + db_dir = os.environ['DB_LOC'] + db_path = os.path.join(db_dir, 'tasks_db.json') + with open(db_path) as f: + db_creds = json.load(f) + if 'prod' in db_creds['database']: + WFSettings().MOVE_TO_GARDEN_PROD = True + elif 'test' in db_creds['database']: + WFSettings().MOVE_TO_GARDEN_DEV = True + if 'nmr' not in WFSettings().GARDEN: + WFSettings().GARDEN = os.path.join(WFSettings().GARDEN, 'nmr') + super(TripleJumpRelaxVaspToDBTask, self).run_task(fw_spec) + + class DictVaspSetupTask(FireTaskBase, FWSerializable): _fw_name = "Dict Vasp Input Setup Task" diff --git a/mpworks/workflows/snl_to_wf_nmr.py b/mpworks/workflows/snl_to_wf_nmr.py index ea76eeb5..c1a4659a 100644 --- a/mpworks/workflows/snl_to_wf_nmr.py +++ b/mpworks/workflows/snl_to_wf_nmr.py @@ -8,7 +8,7 @@ from mpworks.dupefinders.dupefinder_vasp import DupeFinderDB from mpworks.firetasks.custodian_task import get_custodian_task -from mpworks.firetasks.nmr_tasks import snl_to_nmr_spec, NmrVaspToDBTask, DictVaspSetupTask +from mpworks.firetasks.nmr_tasks import snl_to_nmr_spec, NmrVaspToDBTask, DictVaspSetupTask, TripleJumpRelaxVaspToDBTask from mpworks.firetasks.snl_tasks import AddSNLTask from mpworks.firetasks.vasp_io_tasks import VaspWriterTask, VaspCopyTask, VaspToDBTask from mpworks.snl_utils.mpsnl import MPStructureNL, get_meta_from_structure @@ -109,7 +109,7 @@ def snl_to_wf_nmr(snl, parameters): connections[prev_db_fwid] = 
[geom_calc_fwid] # insert into DB - task_class = VaspToDBTask + task_class = TripleJumpRelaxVaspToDBTask prev_task_type = geom_task_type geom_db_fwid = cur_fwid cur_fwid += 1 From 966b722e20f279e479c7e25e6e761e82a522578b Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 12 Jun 2016 20:17:05 -0700 Subject: [PATCH 065/204] fix return value bug --- mpworks/firetasks/nmr_tasks.py | 41 ++++++++++++++++------------------ 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 1589c20d..d97b35d7 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -3,6 +3,7 @@ import os import yaml from fireworks import FireTaskBase +from fireworks.core.firework import FWAction from fireworks.utilities.fw_serializers import FWSerializable from monty.os.path import zpath from pymatgen.io.vasp import Outcar @@ -73,6 +74,19 @@ def _config_dict_to_input_set(config_dict, structure, incar_enforce, parameters) return vis +def _change_garden_setting(self): + db_dir = os.environ['DB_LOC'] + db_path = os.path.join(db_dir, 'tasks_db.json') + with open(db_path) as f: + db_creds = json.load(f) + if 'prod' in db_creds['database']: + WFSettings().MOVE_TO_GARDEN_PROD = True + elif 'test' in db_creds['database']: + WFSettings().MOVE_TO_GARDEN_DEV = True + if 'nmr' not in WFSettings().GARDEN: + WFSettings().GARDEN = os.path.join(WFSettings().GARDEN, 'nmr') + + def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_run_tags=()): parameters = parameters if parameters else {} spec = {'parameters': parameters} @@ -142,16 +156,7 @@ def __init__(self, parameters=None): super(NmrVaspToDBTask, self).__init__(parameters) def run_task(self, fw_spec): - db_dir = os.environ['DB_LOC'] - db_path = os.path.join(db_dir, 'tasks_db.json') - with open(db_path) as f: - db_creds = json.load(f) - if 'prod' in db_creds['database']: - WFSettings().MOVE_TO_GARDEN_PROD = True - elif 'test' in 
db_creds['database']: - WFSettings().MOVE_TO_GARDEN_DEV = True - if 'nmr' not in WFSettings().GARDEN: - WFSettings().GARDEN = os.path.join(WFSettings().GARDEN, 'nmr') + _change_garden_setting() prev_dir = get_loc(fw_spec['prev_vasp_dir']) outcar = Outcar(zpath(os.path.join(prev_dir, "OUTCAR"))) prev_task_type = fw_spec['prev_task_type'] @@ -167,7 +172,7 @@ def run_task(self, fw_spec): else: raise ValueError("Unsupported Task Type: \"{}\"".format(prev_task_type)) self.additional_fields.update(nmr_fields) - super(NmrVaspToDBTask, self).run_task(fw_spec) + return super(NmrVaspToDBTask, self).run_task(fw_spec) class TripleJumpRelaxVaspToDBTask(VaspToDBTask): @@ -177,17 +182,8 @@ def __init__(self, parameters=None): super(TripleJumpRelaxVaspToDBTask, self).__init__(parameters) def run_task(self, fw_spec): - db_dir = os.environ['DB_LOC'] - db_path = os.path.join(db_dir, 'tasks_db.json') - with open(db_path) as f: - db_creds = json.load(f) - if 'prod' in db_creds['database']: - WFSettings().MOVE_TO_GARDEN_PROD = True - elif 'test' in db_creds['database']: - WFSettings().MOVE_TO_GARDEN_DEV = True - if 'nmr' not in WFSettings().GARDEN: - WFSettings().GARDEN = os.path.join(WFSettings().GARDEN, 'nmr') - super(TripleJumpRelaxVaspToDBTask, self).run_task(fw_spec) + _change_garden_setting() + return super(TripleJumpRelaxVaspToDBTask, self).run_task(fw_spec) class DictVaspSetupTask(FireTaskBase, FWSerializable): @@ -205,3 +201,4 @@ def run_task(self, fw_spec): vis.poscar.write_file("POSCAR") vis.potcar.write_file("POTCAR") vis.kpoints.write_file("KPOINTS") + return FWAction(stored_data={"vasp_input_set": vis.as_dict()}) From 87b20ca22237ffde75d36c40de25e1844ce9c777 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 12 Jun 2016 22:47:22 -0700 Subject: [PATCH 066/204] fix parameter typo --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index d97b35d7..129c6d5d 100644 
--- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -74,7 +74,7 @@ def _config_dict_to_input_set(config_dict, structure, incar_enforce, parameters) return vis -def _change_garden_setting(self): +def _change_garden_setting(): db_dir = os.environ['DB_LOC'] db_path = os.path.join(db_dir, 'tasks_db.json') with open(db_path) as f: From 1177cc742c478e114459b10b00b20a0669f8fff0 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 22 Jun 2016 12:45:49 -0700 Subject: [PATCH 067/204] fix mpirun name --- mpworks/firetasks/custodian_task.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index d76d7b1f..13c98e92 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -150,20 +150,21 @@ def _get_vasp_cmd_in_job_packing(self, fw_data, fw_env, mpi_cmd, nproc): nodelist_flag = {"srun": "--nodelist", "mpirun": "--host", "aprun": "-L"} + mpirun = mpi_cmd.split()[0] v_exe = shlex.split('{mpi_cmd} -n {nproc} {tpn_flag} {tpn} {nl_flag} {nl} {vasp_cmd}'.format( mpi_cmd=mpi_cmd, nproc=nproc, - tpn_flag=tasks_per_node_flag[mpi_cmd], + tpn_flag=tasks_per_node_flag[mpirun], tpn=int(fw_data.SUB_NPROCS)/len(fw_data.NODE_LIST), - nl_flag=nodelist_flag[mpi_cmd], + nl_flag=nodelist_flag[mpirun], nl=','.join(fw_data.NODE_LIST), vasp_cmd=fw_env.get("vasp_cmd", "vasp"))) gv_exe = shlex.split('{mpi_cmd} -n {nproc} {tpn_flag} {tpn} {nl_flag} {nl} {vasp_cmd}'.format( mpi_cmd=mpi_cmd, nproc=nproc, - tpn_flag=tasks_per_node_flag[mpi_cmd], + tpn_flag=tasks_per_node_flag[mpirun], tpn=int(fw_data.SUB_NPROCS)/len(fw_data.NODE_LIST), - nl_flag=nodelist_flag[mpi_cmd], + nl_flag=nodelist_flag[mpirun], nl=','.join(fw_data.NODE_LIST), vasp_cmd=fw_env.get("gvasp_cmd", "gvasp"))) return v_exe, gv_exe From 282be4a52e559f041728d37a6ce1841451a2eecc Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 1 Jul 2016 11:37:59 -0700 Subject: [PATCH 068/204] also 
specify number of node for srun --- mpworks/firetasks/custodian_task.py | 36 ++++++++++++++++------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 13c98e92..85eb8b49 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -150,23 +150,27 @@ def _get_vasp_cmd_in_job_packing(self, fw_data, fw_env, mpi_cmd, nproc): nodelist_flag = {"srun": "--nodelist", "mpirun": "--host", "aprun": "-L"} + ranks_num_flag = {"srun": "--ntasks", + "mpirun": "-n", + "aprun": "-n"} + nodes_spec = {"srun": "--nodes {}".format(len(fw_data.NODE_LIST)), + "mpirun": "", + "aprun": ""} mpirun = mpi_cmd.split()[0] - v_exe = shlex.split('{mpi_cmd} -n {nproc} {tpn_flag} {tpn} {nl_flag} {nl} {vasp_cmd}'.format( - mpi_cmd=mpi_cmd, - nproc=nproc, - tpn_flag=tasks_per_node_flag[mpirun], - tpn=int(fw_data.SUB_NPROCS)/len(fw_data.NODE_LIST), - nl_flag=nodelist_flag[mpirun], - nl=','.join(fw_data.NODE_LIST), - vasp_cmd=fw_env.get("vasp_cmd", "vasp"))) - gv_exe = shlex.split('{mpi_cmd} -n {nproc} {tpn_flag} {tpn} {nl_flag} {nl} {vasp_cmd}'.format( - mpi_cmd=mpi_cmd, - nproc=nproc, - tpn_flag=tasks_per_node_flag[mpirun], - tpn=int(fw_data.SUB_NPROCS)/len(fw_data.NODE_LIST), - nl_flag=nodelist_flag[mpirun], - nl=','.join(fw_data.NODE_LIST), - vasp_cmd=fw_env.get("gvasp_cmd", "gvasp"))) + vasp_cmds = [fw_env.get("vasp_cmd", "vasp"), fw_env.get("gvasp_cmd", "gvasp")] + vasp_exes = [shlex.split('{mpi_cmd} {nodes_spec} {ranks_flag} {nproc} {tpn_flag} {tpn} ' + '{nl_flag} {nl} {vasp_cmd}'. 
+ format(mpi_cmd=mpi_cmd, + nodes_spec=nodes_spec, + ranks_flag=ranks_num_flag, + nproc=nproc, + tpn_flag=tasks_per_node_flag[mpirun], + tpn=int(fw_data.SUB_NPROCS)/len(fw_data.NODE_LIST), + nl_flag=nodelist_flag[mpirun], + nl=','.join(fw_data.NODE_LIST), + vasp_cmd=vasp_cmd)) + for vasp_cmd in vasp_cmds] + v_exe, gv_exe = vasp_exes return v_exe, gv_exe def _write_formula_file(self, fw_spec): From ad8e9a3be8c9736262b9f1930c2a403cc439ad2f Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 1 Jul 2016 11:52:26 -0700 Subject: [PATCH 069/204] fix nodes and ranks flag --- mpworks/firetasks/custodian_task.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 85eb8b49..d889660f 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -160,15 +160,15 @@ def _get_vasp_cmd_in_job_packing(self, fw_data, fw_env, mpi_cmd, nproc): vasp_cmds = [fw_env.get("vasp_cmd", "vasp"), fw_env.get("gvasp_cmd", "gvasp")] vasp_exes = [shlex.split('{mpi_cmd} {nodes_spec} {ranks_flag} {nproc} {tpn_flag} {tpn} ' '{nl_flag} {nl} {vasp_cmd}'. 
- format(mpi_cmd=mpi_cmd, - nodes_spec=nodes_spec, - ranks_flag=ranks_num_flag, - nproc=nproc, - tpn_flag=tasks_per_node_flag[mpirun], - tpn=int(fw_data.SUB_NPROCS)/len(fw_data.NODE_LIST), - nl_flag=nodelist_flag[mpirun], - nl=','.join(fw_data.NODE_LIST), - vasp_cmd=vasp_cmd)) + format(mpi_cmd=mpi_cmd, + nodes_spec=nodes_spec[mpirun], + ranks_flag=ranks_num_flag[mpirun], + nproc=nproc, + tpn_flag=tasks_per_node_flag[mpirun], + tpn=int(fw_data.SUB_NPROCS)/len(fw_data.NODE_LIST), + nl_flag=nodelist_flag[mpirun], + nl=','.join(fw_data.NODE_LIST), + vasp_cmd=vasp_cmd)) for vasp_cmd in vasp_cmds] v_exe, gv_exe = vasp_exes return v_exe, gv_exe From 524ea161c351159015e28addd6f9cf1c59eb3ba7 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 7 Jul 2016 21:33:11 -0700 Subject: [PATCH 070/204] Don't honor the SLURM_NTASKS in case of job packing, Because SLURM_NTASKS is referring to total number of processes of the parent job, query the number of processor for the sub job through job packing central management variables. --- mpworks/firetasks/custodian_task.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index d889660f..15f84cba 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -93,9 +93,11 @@ def run_task(self, fw_spec): # TODO: last two env vars, i.e. 
SGE and LoadLeveler, are untested env_vars = ['PBS_NP', 'SLURM_NTASKS', 'NSLOTS', 'LOADL_TOTAL_TASKS'] + nproc = None for env_var in env_vars: nproc = os.environ.get(env_var, None) - if nproc is not None: break + if nproc is not None: + break if nproc is None: raise ValueError("None of the env vars {} found to set nproc!".format(env_vars)) @@ -104,7 +106,7 @@ def run_task(self, fw_spec): v_exe = shlex.split('{} -n {} {}'.format(mpi_cmd, nproc, fw_env.get("vasp_cmd", "vasp"))) gv_exe = shlex.split('{} -n {} {}'.format(mpi_cmd, nproc, fw_env.get("gvasp_cmd", "gvasp"))) else: - v_exe, gv_exe = self._get_vasp_cmd_in_job_packing(fw_data, fw_env, mpi_cmd, nproc) + v_exe, gv_exe = self._get_vasp_cmd_in_job_packing(fw_data, fw_env, mpi_cmd) print('host:', os.environ['HOSTNAME']) @@ -143,7 +145,7 @@ def run_task(self, fw_spec): return FWAction(stored_data=stored_data, update_spec=update_spec) - def _get_vasp_cmd_in_job_packing(self, fw_data, fw_env, mpi_cmd, nproc): + def _get_vasp_cmd_in_job_packing(self, fw_data, fw_env, mpi_cmd): tasks_per_node_flag = {"srun": "--ntasks-per-node", "mpirun": "--npernode", "aprun": "-N"} @@ -157,13 +159,17 @@ def _get_vasp_cmd_in_job_packing(self, fw_data, fw_env, mpi_cmd, nproc): "mpirun": "", "aprun": ""} mpirun = mpi_cmd.split()[0] + fw_data = FWData() + # Don't honor the SLURM_NTASKS in case of job packing, Because SLURM_NTASKS is referring + # to total number of processes of the parent job + sub_nproc = fw_data.SUB_NPROCS vasp_cmds = [fw_env.get("vasp_cmd", "vasp"), fw_env.get("gvasp_cmd", "gvasp")] vasp_exes = [shlex.split('{mpi_cmd} {nodes_spec} {ranks_flag} {nproc} {tpn_flag} {tpn} ' '{nl_flag} {nl} {vasp_cmd}'. 
format(mpi_cmd=mpi_cmd, nodes_spec=nodes_spec[mpirun], ranks_flag=ranks_num_flag[mpirun], - nproc=nproc, + nproc=sub_nproc, tpn_flag=tasks_per_node_flag[mpirun], tpn=int(fw_data.SUB_NPROCS)/len(fw_data.NODE_LIST), nl_flag=nodelist_flag[mpirun], From 2c6a11d06d5560f9413dc6b6a379965f1a64fba4 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 7 Jul 2016 21:37:37 -0700 Subject: [PATCH 071/204] conform to PEP8 --- mpworks/firetasks/custodian_task.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 15f84cba..a8412441 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -25,6 +25,7 @@ __email__ = 'ajain@lbl.gov' __date__ = 'Mar 15, 2013' + def check_incar(task_type): errors = [] incar = Incar.from_file("INCAR") @@ -43,10 +44,10 @@ def check_incar(task_type): if 'static' in task_type and not incar["LCHARG"]: errors.append("LCHARG must be True for static runs") - if 'Uniform' in task_type and incar["ICHARG"]!=11: + if 'Uniform' in task_type and incar["ICHARG"] != 11: errors.append("ICHARG must be 11 for Uniform runs") - if 'band structure' in task_type and incar["ICHARG"]!=11: + if 'band structure' in task_type and incar["ICHARG"] != 11: errors.append("ICHARG must be 11 for band structure runs") if 'GGA+U' in task_type: @@ -119,7 +120,8 @@ def run_task(self, fw_spec): raise ValueError("Critical error: INCAR does not pass checks: {}".format(incar_errors)) logging.basicConfig(level=logging.DEBUG) - c = Custodian(self.handlers, self.jobs, max_errors=self.max_errors, gzipped_output=False, validators=[VasprunXMLValidator()]) # manual gzip + c = Custodian(self.handlers, self.jobs, max_errors=self.max_errors, gzipped_output=False, + validators=[VasprunXMLValidator()]) # manual gzip custodian_out = c.run() if self.gzip_output: @@ -145,7 +147,8 @@ def run_task(self, fw_spec): return FWAction(stored_data=stored_data, 
update_spec=update_spec) - def _get_vasp_cmd_in_job_packing(self, fw_data, fw_env, mpi_cmd): + @staticmethod + def _get_vasp_cmd_in_job_packing(fw_data, fw_env, mpi_cmd): tasks_per_node_flag = {"srun": "--ntasks-per-node", "mpirun": "--npernode", "aprun": "-N"} @@ -179,10 +182,11 @@ def _get_vasp_cmd_in_job_packing(self, fw_data, fw_env, mpi_cmd): v_exe, gv_exe = vasp_exes return v_exe, gv_exe - def _write_formula_file(self, fw_spec): + @staticmethod + def _write_formula_file(fw_spec): filename = get_slug( - 'JOB--' + fw_spec['mpsnl'].structure.composition.reduced_formula + '--' - + fw_spec['task_type']) + 'JOB--' + fw_spec['mpsnl'].structure.composition.reduced_formula + + '--' + fw_spec['task_type']) with open(filename, 'w+') as f: f.write('') From 29e9491f0cac39cf0160e40107028052ae677152 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 17 Jul 2016 19:22:32 -0700 Subject: [PATCH 072/204] use ISYM=0 instead of SYMPREC --- mpworks/firetasks/triple_jump_relax_set.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 922dbb5c..56a792e8 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -3,7 +3,7 @@ STEP1: ALGO: FAST EDIFF: -1.0e-06 EDIFFG: -0.1 - ENCUT_ENHANCE_RATIO: 0.2 + ENCUT_ENHANCE_RATIO: 0.1 IBRION: 1 ISIF: 3 ISMEAR: -5 @@ -39,12 +39,13 @@ STEP2: ALGO: FAST EDIFF: -1.0e-08 EDIFFG: -0.01 - ENCUT_ENHANCE_RATIO: 0.25 + ENCUT_ENHANCE_RATIO: 0.2 IBRION: 3 IOPT: 7 ISIF: 3 ISMEAR: -5 ISTART: 0 + ISYM: 0 LCHARG: false LREAL: AUTO LWAVE: false @@ -53,7 +54,6 @@ STEP2: POTIM: 0 PREC: ACCURATE SIGMA: 0.03 - SYMPREC: 1.0E-8 KPOINTS: grid_density: 3000 POTCAR: @@ -77,13 +77,14 @@ STEP3: ALGO: FAST EDIFF: -1.0e-10 EDIFFG: -0.002 - ENCUT_ENHANCE_RATIO: 0.75 + ENCUT_ENHANCE_RATIO: 0.4 FTIMEMAX: 0.5 IBRION: 3 IOPT: 7 ISIF: 3 ISMEAR: -5 ISTART: 0 + ISYM: 0 LCHARG: false LREAL: AUTO LWAVE: 
false @@ -93,7 +94,6 @@ STEP3: POTIM: 0 PREC: ACCURATE SIGMA: 0.01 - SYMPREC: 1.0E-8 TIMESTEP: 0.05 KPOINTS: grid_density: 6000 From 52e030e7c5f543c65b6d3bc7e45ea3b17b3489d0 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 17 Jul 2016 20:20:33 -0700 Subject: [PATCH 073/204] use tighter tolerance in SNLGroup search for NMR --- mpworks/snl_utils/mpsnl.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/mpworks/snl_utils/mpsnl.py b/mpworks/snl_utils/mpsnl.py index cee62fa7..eefbcbc9 100644 --- a/mpworks/snl_utils/mpsnl.py +++ b/mpworks/snl_utils/mpsnl.py @@ -181,9 +181,17 @@ def add_if_belongs(self, cand_snl): #try a structure fit to the canonical structure - # use default Structure Matcher params from April 24, 2013, as suggested by Shyue - # we are using the ElementComparator() because this is how we want to group results - sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5, primitive_cell=True, scale=True, + if "NMR" not in cand_snl.projects: + # use default Structure Matcher params from April 24, 2013, as suggested by Shyue + # we are using the ElementComparator() because this is how we want to group results + ltol = 0.2 + stol = 0.3 + angle_tol = 5.0 + else: + ltol = 0.02 + stol = 0.03 + angle_tol = 0.5 + sm = StructureMatcher(ltol=ltol, stol=stol, angle_tol=angle_tol, primitive_cell=True, scale=True, attempt_supercell=False, comparator=ElementComparator()) if not sm.fit(cand_snl.structure, self.canonical_structure): @@ -198,8 +206,9 @@ def add_if_belongs(self, cand_snl): if has_species_properties(cand_snl.structure): for snl in self.species_snl: - sms = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5, primitive_cell=True, scale=True, - attempt_supercell=False, comparator=SpeciesComparator()) + sms = StructureMatcher(ltol=ltol, stol=stol, angle_tol=angle_tol, primitive_cell=True, + scale=True, attempt_supercell=False, + comparator=SpeciesComparator()) if sms.fit(cand_snl.structure, snl.structure): spec_group = 
snl.snl_id self.species_groups[snl.snl_id].append(cand_snl.snl_id) From eee23dc2d57a03b8ecf1ea29cc1c1576807e1aa4 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 17 Jul 2016 20:34:38 -0700 Subject: [PATCH 074/204] use the same settings in NMR tensor calculations --- mpworks/firetasks/nmr_tensor_set.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index a95ac963..49bd1b6a 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -2,10 +2,11 @@ CS: INCAR: DQ: 0.001 EDIFF: -1.0e-10 - ENCUT_ENHANCE_RATIO: 0.75 + ENCUT_ENHANCE_RATIO: 0.4 ICHIBARE: 1 ISMEAR: -5 ISTART: 0 + ISYM: 0 LCHARG: false LCHIMAG: true LNMR_SYM_RED: true @@ -15,7 +16,6 @@ CS: NSLPLINE: true PREC: ACCURATE SIGMA: 0.01 - SYMPREC: 1.0E-8 KPOINTS: grid_density: 6000 POTCAR: @@ -39,6 +39,7 @@ EFG: ENCUT_ENHANCE_RATIO: 0.2 ISMEAR: -5 ISTART: 0 + ISYM: 0 LCHARG: false LEFG: true LREAL: AUTO @@ -128,7 +129,6 @@ EFG: Ra: Ra-223: 1210.3 SIGMA: 0.05 - SYMPREC: 1.0E-8 KPOINTS: grid_density: 3000 POTCAR: From d6ccbd7eb5289e34c1c43f8af0169dbd3adbaad2 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 20 Jul 2016 12:20:18 -0700 Subject: [PATCH 075/204] don't use default input set in NMR VaspJob setup --- mpworks/firetasks/custodian_task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index a8412441..b7ce2369 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -202,7 +202,7 @@ def get_custodian_task(spec): elif 'static' in task_type or 'deformed' in task_type: jobs = [VaspJob(v_exe)] elif 'NMR' in task_type or "Triple Jump Relax" in task_type: - jobs = [VaspJob(v_exe, default_vasp_input_set=spec["custodian_default_input_set"])] + jobs = [VaspJob(v_exe)] else: # non-SCF runs jobs = [VaspJob(v_exe)] From 
3064013b2e916751f7853a3dfcd37c23f9b00fa5 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 22 Jul 2016 16:34:23 -0700 Subject: [PATCH 076/204] use set operation to simplify the logic of task type --- mpworks/firetasks/custodian_task.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 829bd423..1d517f7a 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -201,9 +201,7 @@ def get_custodian_task(spec): if 'optimize structure (2x)' in task_type: jobs = VaspJob.double_relaxation_run(v_exe) - elif 'static' in task_type or 'deformed' in task_type: - jobs = [VaspJob(v_exe)] - elif 'NMR' in task_type or "Triple Jump Relax" in task_type: + elif {'static', 'deformed', 'NMR', 'Triple Jump Relax'} & set(task_type): jobs = [VaspJob(v_exe)] else: # non-SCF runs From a4294f18c8a7059f6c98933ed78b3dbc600d4df6 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 22 Jul 2016 16:36:27 -0700 Subject: [PATCH 077/204] change requirements back to pmg4+ --- requirements.txt | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 02cdd954..78067974 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,4 +5,4 @@ PyYAML==3.11 requests==2.6.0 six==1.9.0 xmltodict==0.9.2 -pymatgen<=3.7.1 +pymatgen>=4.0.0 diff --git a/setup.py b/setup.py index 785592ce..3eb14f93 100644 --- a/setup.py +++ b/setup.py @@ -25,7 +25,7 @@ license='modified BSD', packages=find_packages(), zip_safe=False, - install_requires=["pymatgen>=3.0,<4.0", "FireWorks>=0.9", "custodian>=0.7"], + install_requires=["pymatgen>=4.0", "FireWorks>=0.9", "custodian>=0.7"], classifiers=["Programming Language :: Python :: 2.7", "Development Status :: 2 - Pre-Alpha", "Intended Audience :: Science/Research", "Intended Audience :: System Administrators", "Intended Audience :: Information Technology", From 
2bd185f171ece530262bf170fdd9875b740e0c3d Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 24 Jul 2016 23:33:52 -0700 Subject: [PATCH 078/204] change all the print to Python3 style --- mpworks/check_snl/builders/init_plotly.py | 2 +- mpworks/check_snl/check_snl.py | 34 ++++----- .../check_snl/scripts/sg_changes_examples.py | 10 +-- .../scripts/sg_default_bad_snls_check.py | 2 +- mpworks/drones/mp_vaspdrone.py | 8 +- mpworks/examples/firetasks_ex.py | 2 +- mpworks/firetasks/boltztrap_tasks.py | 10 +-- mpworks/firetasks/controller_tasks.py | 20 ++--- mpworks/firetasks/elastic_tasks.py | 8 +- mpworks/firetasks_staging/surface_tasks.py | 74 +++++++++---------- mpworks/fix_scripts/add_icsd_materials.py | 14 ++-- mpworks/fix_scripts/add_old_taskids.py | 10 +-- mpworks/fix_scripts/clear_FWs.py | 8 +- mpworks/fix_scripts/find_missing_snl.py | 16 ++-- mpworks/fix_scripts/fix_bad_crystals.py | 14 ++-- .../fix_scripts/fix_bs_controller_tasks.py | 64 ++++++++-------- mpworks/fix_scripts/fix_float_priorities.py | 2 +- mpworks/fix_scripts/fix_mpcomplete.py | 10 +-- mpworks/fix_scripts/fix_unmoved_dirs.py | 2 +- .../legacy/actions/add_snl_final.py | 2 +- .../legacy/actions/do_fw_conversion.py | 2 +- .../legacy/actions/do_icsd_to_snl.py | 4 +- .../legacy/actions/do_mps_to_snl.py | 10 +-- .../legacy/actions/do_task_conversion.py | 10 +-- .../actions/do_task_conversion_fixes.py | 6 +- mpworks/fix_scripts/legacy/mps_to_snl.py | 2 +- mpworks/fix_scripts/legacy/old_task_drone.py | 2 +- mpworks/fix_scripts/legacy/task_to_fw.py | 2 +- mpworks/fix_scripts/reparse_old_tasks.py | 14 ++-- .../fix_scripts/reparse_old_tasks_again.py | 14 ++-- mpworks/fix_scripts/rerun_boltztrap.py | 8 +- mpworks/fix_scripts/submit_bo_jobs.py | 6 +- .../maintenance_scripts/classify_fizzled.py | 6 +- mpworks/maintenance_scripts/modify_snl.py | 22 +++--- mpworks/maintenance_scripts/reparse_tasks.py | 12 +-- mpworks/osti_doi/__main__.py | 8 +- mpworks/snl_utils/mpsnl.py | 4 +- mpworks/snl_utils/snl_mongo.py | 2 
+- mpworks/workflows/surface_wf.py | 14 ++-- 39 files changed, 230 insertions(+), 230 deletions(-) diff --git a/mpworks/check_snl/builders/init_plotly.py b/mpworks/check_snl/builders/init_plotly.py index 263d1bed..da8a6fa7 100644 --- a/mpworks/check_snl/builders/init_plotly.py +++ b/mpworks/check_snl/builders/init_plotly.py @@ -90,4 +90,4 @@ fig['layout'] = layout py.plot(fig, filename='builder_stream', auto_open=False) else: - print 'plotly ImportError' + print('plotly ImportError') diff --git a/mpworks/check_snl/check_snl.py b/mpworks/check_snl/check_snl.py index 989df0d3..d12b9f28 100644 --- a/mpworks/check_snl/check_snl.py +++ b/mpworks/check_snl/check_snl.py @@ -45,7 +45,7 @@ num_snl_streams = div_plus_mod(num_snls, num_ids_per_stream) num_snlgroup_streams = div_plus_mod(num_snlgroups, num_ids_per_stream) num_jobs = div_plus_mod(num_pairs_max, num_pairs_per_job) -print num_snl_streams, num_snlgroup_streams, num_jobs +print(num_snl_streams, num_snlgroup_streams, num_jobs) checks = ['spacegroups', 'groupmembers', 'canonicals'] categories = [ 'SG Change', 'SG Default', 'PybTeX', 'Others' ] @@ -326,7 +326,7 @@ def analyze(args): snlgroup_keys = {} for d in snlgrp_cursor: snlgroup_keys[d['snlgroup_id']] = d['canonical_snl']['snlgroup_key'] - print snlgroup_keys[40890] + print(snlgroup_keys[40890]) sma2 = SNLMongoAdapter.from_file( os.path.join(os.environ['DB_LOC'], 'materials_db.yaml') ) @@ -353,7 +353,7 @@ def analyze(args): 'band_gap': band_gap, 'task_id': material['task_id'], 'volume_per_atom': volume_per_atom } - print snlgroup_data[40890] + print(snlgroup_data[40890]) filestem = 'mpworks/check_snl/results/bad_snlgroups_2_' with open(filestem+'in_matdb.csv', 'wb') as f, \ open(filestem+'notin_matdb.csv', 'wb') as g: @@ -402,7 +402,7 @@ def analyze(args): rms_dist = matcher.get_rms_dist(primary_structure, secondary_structure) if rms_dist is not None: rms_dist_str = "({0:.3g},{1:.3g})".format(*rms_dist) - print rms_dist_str + print(rms_dist_str) row = [ category, 
composition, primary_id, primary_sg_num, @@ -420,13 +420,13 @@ def analyze(args): out_fig = Figure() badsnls_trace = Scatter(x=[], y=[], text=[], mode='markers', name='SG Changes') bisectrix = Scatter(x=[0,230], y=[0,230], mode='lines', name='bisectrix') - print 'pulling bad snls from plotly ...' + print('pulling bad snls from plotly ...') bad_snls = OrderedDict() for category, text in zip(fig['data'][2]['y'], fig['data'][2]['text']): for snl_id in map(int, text.split('
')): bad_snls[snl_id] = category with open('mpworks/check_snl/results/bad_snls.csv', 'wb') as f: - print 'pulling bad snls from database ...' + print('pulling bad snls from database ...') mpsnl_cursor = sma.snl.find({ 'snl_id': { '$in': bad_snls.keys() }, 'about.projects': {'$ne': 'CederDahn Challenge'} @@ -435,7 +435,7 @@ def analyze(args): writer.writerow([ 'snl_id', 'category', 'snlgroup_key', 'nsites', 'remarks', 'projects', 'authors' ]) - print 'writing bad snls to file ...' + print('writing bad snls to file ...') for mpsnl_dict in mpsnl_cursor: mpsnl = MPStructureNL.from_dict(mpsnl_dict) row = [ mpsnl.snl_id, bad_snls[mpsnl.snl_id], mpsnl.snlgroup_key ] @@ -450,8 +450,8 @@ def analyze(args): badsnls_trace['y'].append(sf.get_spacegroup_number()) badsnls_trace['text'].append(mpsnl.snl_id) if bad_snls[mpsnl.snl_id] == 'SG default': - print sg_num, sf.get_spacegroup_number() - print 'plotting out-fig ...' + print(sg_num, sf.get_spacegroup_number()) + print('plotting out-fig ...') out_fig['data'] = Data([bisectrix, badsnls_trace]) out_fig['layout'] = Layout( showlegend=False, hovermode='closest', @@ -467,7 +467,7 @@ def analyze(args): ltol=0.2, stol=0.3, angle_tol=5, primitive_cell=False, scale=True, attempt_supercell=True, comparator=ElementComparator() ) - print 'pulling data from plotly ...' + print('pulling data from plotly ...') trace = Scatter(x=[], y=[], text=[], mode='markers', name='mismatches') bad_snls = OrderedDict() # snlgroup_id : [ mismatching snl_ids ] for category, text in zip(fig['data'][2]['y'], fig['data'][2]['text']): @@ -475,7 +475,7 @@ def analyze(args): for entry in text.split('
'): fields = entry.split(':') snlgroup_id = int(fields[0].split(',')[0]) - print snlgroup_id + print(snlgroup_id) snlgrp_dict = sma.snlgroups.find_one({ 'snlgroup_id': snlgroup_id }) snlgrp = SNLGroup.from_dict(snlgrp_dict) s1 = snlgrp.canonical_structure.get_primitive_structure() @@ -483,7 +483,7 @@ def analyze(args): for i, snl_id in enumerate(fields[1].split(',')): mpsnl_dict = sma.snl.find_one({ 'snl_id': int(snl_id) }) if 'CederDahn Challenge' in mpsnl_dict['about']['projects']: - print 'skip CederDahn: %s' % snl_id + print('skip CederDahn: %s' % snl_id) continue mpsnl = MPStructureNL.from_dict(mpsnl_dict) s2 = mpsnl.structure.get_primitive_structure() @@ -496,13 +496,13 @@ def analyze(args): if len(bad_snls[snlgroup_id]) < 1: bad_snls.pop(snlgroup_id, None) with open('mpworks/check_snl/results/bad_snlgroups.csv', 'wb') as f: - print 'pulling bad snlgroups from database ...' + print('pulling bad snlgroups from database ...') snlgroup_cursor = sma.snlgroups.find({ 'snlgroup_id': { '$in': bad_snls.keys() }, }) writer = csv.writer(f) writer.writerow(['snlgroup_id', 'snlgroup_key', 'mismatching snl_ids']) - print 'writing bad snlgroups to file ...' + print('writing bad snlgroups to file ...') for snlgroup_dict in snlgroup_cursor: snlgroup = SNLGroup.from_dict(snlgroup_dict) row = [ @@ -510,7 +510,7 @@ def analyze(args): ' '.join(bad_snls[snlgroup.snlgroup_id]) ] writer.writerow(row) - print 'plotting out-fig ...' 
+ print('plotting out-fig ...') out_fig = Figure() out_fig['data'] = Data([trace]) out_fig['layout'] = Layout( @@ -544,11 +544,11 @@ def analyze(args): snlgroup_id = start_id + d['x'][idx] mismatch_snl_id, canonical_snl_id = d['text'][idx].split(' != ') bad_snlgroups[snlgroup_id] = int(mismatch_snl_id) - print errors + print(errors) fig_data = fig['data'][-1] fig_data['x'] = [ errors[color] for color in fig_data['marker']['color'] ] filename = _get_filename() - print filename + print(filename) #py.plot(fig, filename=filename) with open('mpworks/check_snl/results/bad_snls.csv', 'wb') as f: mpsnl_cursor = sma.snl.find({ 'snl_id': { '$in': bad_snls.keys() } }) diff --git a/mpworks/check_snl/scripts/sg_changes_examples.py b/mpworks/check_snl/scripts/sg_changes_examples.py index 466c2626..e3dad567 100644 --- a/mpworks/check_snl/scripts/sg_changes_examples.py +++ b/mpworks/check_snl/scripts/sg_changes_examples.py @@ -34,8 +34,8 @@ def _get_mp_link(mp_id): fig = py.get_figure('tschaume',11) df = DataFrame.from_dict(fig['data'][1]).filter(['x','y','text']) grouped_x = df.groupby('x') -print '|===============================' -print '| old SG | close to bisectrix | far from bisectrix' +print('|===============================') +print('| old SG | close to bisectrix | far from bisectrix') for n,g in grouped_x: if g.shape[0] < 2: continue # at least two entries at same old SG grouped_y = g.groupby('y') @@ -50,9 +50,9 @@ def _get_mp_link(mp_id): if ratios[0] > 0.2 or ratios[1] < 0.8: continue snlgroup_ids = _get_snlgroup_id(first['text']), _get_snlgroup_id(last['text']) mp_ids = _get_mp_id(snlgroup_ids[0]), _get_mp_id(snlgroup_ids[1]) - print '| %d | %d (%d) -> %d -> %s | %d (%d) -> %d -> %s' % ( + print('| %d | %d (%d) -> %d -> %s | %d (%d) -> %d -> %s' % ( first['x'], first['text'], first['y'], snlgroup_ids[0], _get_mp_link(mp_ids[0]), last['text'], last['y'], snlgroup_ids[1], _get_mp_link(mp_ids[1]) - ) -print '|===============================' + )) 
+print('|===============================') diff --git a/mpworks/check_snl/scripts/sg_default_bad_snls_check.py b/mpworks/check_snl/scripts/sg_default_bad_snls_check.py index 24345e9a..034fd592 100644 --- a/mpworks/check_snl/scripts/sg_default_bad_snls_check.py +++ b/mpworks/check_snl/scripts/sg_default_bad_snls_check.py @@ -119,4 +119,4 @@ nonvalid_snlids.append(snl['snl_id']) else: valid_snlids.append(snl['snl_id']) -print len(valid_snlids), len(nonvalid_snlids) +print(len(valid_snlids), len(nonvalid_snlids)) diff --git a/mpworks/drones/mp_vaspdrone.py b/mpworks/drones/mp_vaspdrone.py index 2a2071eb..8bcc63fb 100644 --- a/mpworks/drones/mp_vaspdrone.py +++ b/mpworks/drones/mp_vaspdrone.py @@ -66,9 +66,9 @@ def assimilate(self, path, launches_coll=None): d["dir_name"] = get_block_part(d["dir_name_full"]) d["stored_data"] = {} except: - print 'COULD NOT GET DIR NAME' + print('COULD NOT GET DIR NAME') pprint.pprint(d) - print traceback.format_exc() + print(traceback.format_exc()) raise ValueError('IMPROPER PARSING OF {}'.format(path)) if not self.simulate: @@ -166,7 +166,7 @@ def assimilate(self, path, launches_coll=None): d['is_compatible'] = bool(mpc.process_entry(entry)) except: traceback.print_exc() - print 'ERROR in getting compatibility' + print('ERROR in getting compatibility') d['is_compatible'] = None @@ -311,7 +311,7 @@ def process_fw(self, dir_name, d): vasp_signals['last_relax_dir'] = last_relax_dir ## see what error signals are present - print "getting signals for dir :{}".format(last_relax_dir) + print("getting signals for dir :{}".format(last_relax_dir)) sl = SignalDetectorList() sl.append(VASPInputsExistSignal()) diff --git a/mpworks/examples/firetasks_ex.py b/mpworks/examples/firetasks_ex.py index 39d6068a..d1f1fcbf 100644 --- a/mpworks/examples/firetasks_ex.py +++ b/mpworks/examples/firetasks_ex.py @@ -89,7 +89,7 @@ def run_task(self, fw_spec): t_id = drone.assimilate(prev_dir) if t_id: - print 'ENTERED task id:', t_id + print('ENTERED task id:', 
t_id) stored_data = {'task_id': t_id} update_spec = {'prev_vasp_dir': prev_dir, 'prev_task_type': fw_spec['prev_task_type']} return FWAction(stored_data=stored_data, update_spec=update_spec) diff --git a/mpworks/firetasks/boltztrap_tasks.py b/mpworks/firetasks/boltztrap_tasks.py index 9538fdad..402adf8b 100644 --- a/mpworks/firetasks/boltztrap_tasks.py +++ b/mpworks/firetasks/boltztrap_tasks.py @@ -162,13 +162,13 @@ def run_task(self, fw_spec): nelect = m_task['calculations'][0]['input']['parameters']['NELECT'] bs_id = m_task['calculations'][0]['band_structure_fs_id'] - print bs_id, type(bs_id) + print(bs_id, type(bs_id)) fs = gridfs.GridFS(tdb, 'band_structure_fs') bs_dict = json.loads(fs.get(bs_id).read()) bs_dict['structure'] = m_task['calculations'][0]['output']['crystal'] bs = BandStructure.from_dict(bs_dict) - print 'Band Structure found:', bool(bs) - print nelect + print('Band Structure found:', bool(bs)) + print(nelect) # run Boltztrap runner = BoltztrapRunner(bs, nelect) @@ -254,7 +254,7 @@ def run_task(self, fw_spec): except: import traceback traceback.print_exc() - print 'COULD NOT GET FINE MESH DATA' + print('COULD NOT GET FINE MESH DATA') # add is_compatible mpc = MaterialsProjectCompatibility("Advanced") @@ -273,7 +273,7 @@ def run_task(self, fw_spec): ted["is_compatible"] = bool(mpc.process_entry(entry)) except: traceback.print_exc() - print 'ERROR in getting compatibility, task_id: {}'.format(m_task["task_id"]) + print('ERROR in getting compatibility, task_id: {}'.format(m_task["task_id"])) ted["is_compatible"] = None tdb.boltztrap.insert(jsanitize(ted)) diff --git a/mpworks/firetasks/controller_tasks.py b/mpworks/firetasks/controller_tasks.py index 55425211..77072563 100644 --- a/mpworks/firetasks/controller_tasks.py +++ b/mpworks/firetasks/controller_tasks.py @@ -33,10 +33,10 @@ def __init__(self, parameters=None): self.metal_cutoff = parameters.get('metal_cutoff', 0.05) def run_task(self, fw_spec): - print 'sleeping 10s for Mongo' + 
print('sleeping 10s for Mongo') time.sleep(10) - print 'done sleeping' - print 'the gap is {}, the cutoff is {}'.format(fw_spec['analysis']['bandgap'], self.gap_cutoff) + print('done sleeping') + print('the gap is {}, the cutoff is {}'.format(fw_spec['analysis']['bandgap'], self.gap_cutoff)) if fw_spec['analysis']['bandgap'] >= self.gap_cutoff: static_dens = 90 uniform_dens = 1000 @@ -51,7 +51,7 @@ def run_task(self, fw_spec): else: user_incar_settings = {} - print 'Adding more runs...' + print('Adding more runs...') type_name = 'GGA+U' if 'GGA+U' in fw_spec['prev_task_type'] else 'GGA' @@ -123,7 +123,7 @@ def run_task(self, fw_spec): wf = Workflow(fws, connections) - print 'Done adding more runs...' + print('Done adding more runs...') return FWAction(additions=wf) @@ -145,13 +145,13 @@ def __init__(self, parameters=None): self.gap_cutoff = parameters.get('gap_cutoff', 0.5) # see e-mail from Geoffroy, 5/1/2013 def run_task(self, fw_spec): - print 'sleeping 10s for Mongo' + print('sleeping 10s for Mongo') time.sleep(10) - print 'done sleeping' - print 'the gap is {}, the cutoff is {}'.format(fw_spec['analysis']['bandgap'], self.gap_cutoff) + print('done sleeping') + print('the gap is {}, the cutoff is {}'.format(fw_spec['analysis']['bandgap'], self.gap_cutoff)) if fw_spec['analysis']['bandgap'] >= self.gap_cutoff: - print 'Adding more runs...' + print('Adding more runs...') type_name = 'GGA+U' if 'GGA+U' in fw_spec['prev_task_type'] else 'GGA' snl = fw_spec['mpsnl'] @@ -211,7 +211,7 @@ def run_task(self, fw_spec): wf = Workflow(fws, connections) - print 'Done adding more runs...' 
+ print('Done adding more runs...') return FWAction(additions=wf) return FWAction() diff --git a/mpworks/firetasks/elastic_tasks.py b/mpworks/firetasks/elastic_tasks.py index 33bd1a4f..12372eae 100644 --- a/mpworks/firetasks/elastic_tasks.py +++ b/mpworks/firetasks/elastic_tasks.py @@ -148,9 +148,9 @@ def run_task(self, fw_spec): "state":"successful"}).count() existing_doc = elasticity.find_one({"relaxation_task_id" : i}) if existing_doc: - print "Updating: " + i + print("Updating: " + i) else: - print "New material: " + i + print("New material: " + i) d = {"analysis": {}, "error": [], "warning": []} d["ndocs"] = ndocs o = tasks.find_one({"task_id" : i}, @@ -178,8 +178,8 @@ def run_task(self, fw_spec): "{:.0e}".format(delta)]) sm = IndependentStrain(defo) if dtype in d["deformation_tasks"].keys(): - print "old_task: {}".format(d["deformation_tasks"][dtype]["task_id"]) - print "new_task: {}".format(k["task_id"]) + print("old_task: {}".format(d["deformation_tasks"][dtype]["task_id"])) + print("new_task: {}".format(k["task_id"])) raise ValueError("Duplicate deformation task in database.") d["deformation_tasks"][dtype] = {"state" : k["state"], "deformation_matrix" : defo, diff --git a/mpworks/firetasks_staging/surface_tasks.py b/mpworks/firetasks_staging/surface_tasks.py index 7ea82491..2d1dea5d 100644 --- a/mpworks/firetasks_staging/surface_tasks.py +++ b/mpworks/firetasks_staging/surface_tasks.py @@ -154,10 +154,10 @@ def run_task(self, fw_spec): dec.process_decoded(self.get("potcar_fuctional", 'PBE')) # Will continue an incomplete job from a previous contcar file if it exists - print 'cwd is %s' %(os.getcwd()) - print 'the folder is %s' %(folder) - print os.path.join(os.getcwd(), folder) - print cwd+'/'+folder + print('cwd is %s' %(os.getcwd())) + print('the folder is %s' %(folder)) + print(os.path.join(os.getcwd(), folder)) + print(cwd+'/'+folder) path = cwd+'/'+folder # path = os.path.join(os.getcwd(), folder) @@ -170,17 +170,17 @@ def run_task(self, fw_spec): # 
print os.stat(os.path.join(path, 'CONTCAR.gz')).st_size !=0 def continue_vasp(contcar): - print folder, 'already exists, will now continue calculation' - print 'making prev_run folder' + print(folder, 'already exists, will now continue calculation') + print('making prev_run folder') os.system('mkdir %s' %(newfolder)) - print 'moving outputs to prev_run' + print('moving outputs to prev_run') os.system('mv %s/* %s/prev_run' %(path, path)) - print 'moving outputs as inputs for next calculation' + print('moving outputs as inputs for next calculation') os.system('cp %s/%s %s/INCAR %s/POTCAR %s/KPOINTS %s' %(newfolder, contcar, newfolder, newfolder, newfolder, path)) - print 'unzipping new inputs' + print('unzipping new inputs') os.system('gunzip %s/*' %(path)) - print 'copying contcar as new poscar' + print('copying contcar as new poscar') if contcar == 'CONTCAR.relax1.gz': os.system('mv %s/CONTCAR.relax1 %s/POSCAR' %(path , path)) else: @@ -273,7 +273,7 @@ def run_task(self, fw_spec): min_vacuum_size = dec.process_decoded(self.get("min_vacuum_size", 10)) miller_index = dec.process_decoded(self.get("miller_index")) - print 'about to make mplb' + print('about to make mplb') mplb = MPSlabVaspInputSet(user_incar_settings=user_incar_settings, k_product=k_product, @@ -284,12 +284,12 @@ def run_task(self, fw_spec): # cell is already oriented with the miller index, entering (0,0,1) # into SlabGenerator is the same as obtaining a slab in the # orienetation of the original miller index. 
- print 'about to copy contcar' + print('about to copy contcar') contcar = Poscar.from_file("%s/CONTCAR.relax2.gz" %(cwd+folder)) relax_orient_uc = contcar.structure - print 'made relaxed oriented structure' - print relax_orient_uc - print 'making slab' + print('made relaxed oriented structure') + print(relax_orient_uc) + print('making slab') slabs = SlabGenerator(relax_orient_uc, (0,0,1), min_slab_size=min_slab_size, @@ -298,43 +298,43 @@ def run_task(self, fw_spec): # Whether or not to create a list of Fireworks # based on different slab terminations - print 'deciding terminations' + print('deciding terminations') slab_list = slabs.get_slabs() if terminations else [slabs.get_slab()] qe = QueryEngine(**vaspdbinsert_parameters) optional_data = ["state"] - print 'query bulk entry for job completion' + print('query bulk entry for job completion') bulk_entry = qe.get_entries({'chemsys': relax_orient_uc.composition.reduced_formula, 'structure_type': 'oriented_unit_cell', 'miller_index': miller_index}, optional_data=optional_data) - print 'chemical formula', relax_orient_uc.composition.reduced_formula - print 'fomular data type is ', type(relax_orient_uc.composition.reduced_formula) - print 'checking job completion' - print bulk_entry + print('chemical formula', relax_orient_uc.composition.reduced_formula) + print('fomular data type is ', type(relax_orient_uc.composition.reduced_formula)) + print('checking job completion') + print(bulk_entry) for entry in bulk_entry: - print 'for loop' - print entry.data['state'] + print('for loop') + print(entry.data['state']) if entry.data['state'] != 'successful': - print "%s bulk calculations were incomplete, cancelling FW" \ - %(relax_orient_uc.composition.reduced_formula) + print("%s bulk calculations were incomplete, cancelling FW" \ + %(relax_orient_uc.composition.reduced_formula)) return FWAction() else: - print entry.data['state'] + print(entry.data['state']) FWs = [] for slab in slab_list: - print slab + print(slab) 
new_folder = folder.replace('bulk', 'slab')+'_shift%s' \ %(slab.shift) # Will continue an incomplete job from a previous contcar file if it exists - print 'cwd is %s' %(os.getcwd()) - print 'the folder is %s' %(new_folder) - print os.path.join(os.getcwd(), new_folder) - print cwd+'/'+new_folder + print('cwd is %s' %(os.getcwd())) + print('the folder is %s' %(new_folder)) + print(os.path.join(os.getcwd(), new_folder)) + print(cwd+'/'+new_folder) path = cwd+'/'+new_folder # path = os.path.join(os.getcwd(), folder) @@ -347,17 +347,17 @@ def run_task(self, fw_spec): # print os.stat(os.path.join(path, 'CONTCAR.gz')).st_size !=0 def continue_vasp(contcar): - print folder, 'already exists, will now continue calculation' - print 'making prev_run folder' + print(folder, 'already exists, will now continue calculation') + print('making prev_run folder') os.system('mkdir %s' %(newfolder)) - print 'moving outputs to prev_run' + print('moving outputs to prev_run') os.system('mv %s/* %s/prev_run' %(path, path)) - print 'moving outputs as inputs for next calculation' + print('moving outputs as inputs for next calculation') os.system('cp %s/%s %s/INCAR %s/POTCAR %s/KPOINTS %s' %(newfolder, contcar, newfolder, newfolder, newfolder, path)) - print 'unzipping new inputs' + print('unzipping new inputs') os.system('gunzip %s/*' %(path)) - print 'copying contcar as new poscar' + print('copying contcar as new poscar') if contcar == 'CONTCAR.relax1.gz': os.system('mv %s/CONTCAR.relax1 %s/POSCAR' %(path , path)) else: diff --git a/mpworks/fix_scripts/add_icsd_materials.py b/mpworks/fix_scripts/add_icsd_materials.py index 39a8e652..a44f459a 100644 --- a/mpworks/fix_scripts/add_icsd_materials.py +++ b/mpworks/fix_scripts/add_icsd_materials.py @@ -62,13 +62,13 @@ def process_material(self, material_id): icsd_ids = self.get_icsd_ids_from_snlgroup(snlgroup_id) self.materials.find_and_modify({"task_ids": material_id}, {"$set": {"icsd_id": icsd_ids}}) - print material_id, icsd_ids - print 
'FINISHED', material_id + print(material_id, icsd_ids) + print('FINISHED', material_id) except: - print '-----' - print 'ENCOUNTERED AN EXCEPTION!!!', material_id + print('-----') + print('ENCOUNTERED AN EXCEPTION!!!', material_id) traceback.print_exc() - print '-----' + print('-----') def get_icsd_ids_from_snlgroup(self, snlgroup_id): @@ -93,11 +93,11 @@ def _analyze(data): o = ICSDBuilder() o.setup() materials = o.materials - print materials.count() + print(materials.count()) m_data = [] for d in materials.find({}, {'task_id': 1}, timeout=False): m_data.append(d['task_id']) pool = multiprocessing.Pool(8) pool.map(_analyze, m_data) - print 'DONE' \ No newline at end of file + print('DONE') \ No newline at end of file diff --git a/mpworks/fix_scripts/add_old_taskids.py b/mpworks/fix_scripts/add_old_taskids.py index 13324763..a3c5662e 100644 --- a/mpworks/fix_scripts/add_old_taskids.py +++ b/mpworks/fix_scripts/add_old_taskids.py @@ -51,12 +51,12 @@ def process_task(self, task_id): try: task_id_deprecated = int(task_id.split('-')[-1]) self.tasks.update({"task_id": task_id}, {"$set": {"task_id_deprecated": task_id_deprecated}}) - print 'FINISHED', task_id + print('FINISHED', task_id) except: - print '-----' - print 'ENCOUNTERED AN EXCEPTION!!!', task_id + print('-----') + print('ENCOUNTERED AN EXCEPTION!!!', task_id) traceback.print_exc() - print '-----' + print('-----') def _analyze(data): @@ -80,4 +80,4 @@ def _analyze(data): q = {} for d in tasks.find(q, {'task_id': 1}, timeout=False): o.process_task(d['task_id']) - print 'DONE' \ No newline at end of file + print('DONE') \ No newline at end of file diff --git a/mpworks/fix_scripts/clear_FWs.py b/mpworks/fix_scripts/clear_FWs.py index ad510447..96c25ee8 100644 --- a/mpworks/fix_scripts/clear_FWs.py +++ b/mpworks/fix_scripts/clear_FWs.py @@ -45,15 +45,15 @@ def _defuse_fw(data): # archive READY WORKFLOWS for d in lp.workflows.find({"state": "READY"}, {'nodes': 1}): fw_ids.append(d['nodes'][0]) - print 'GOT all 
fw_ids...' + print('GOT all fw_ids...') pool = multiprocessing.Pool(8) states = pool.map(_archive_fw, fw_ids) - print 'DONE', all(states) + print('DONE', all(states)) # defuse any READY/WAITING FWs for d in lp.fireworks.find({"state": {"$in":["READY", "WAITING"]}}, {'fw_id': 1}): fw_ids.append(d['fw_id']) - print 'GOT all fw_ids...' + print('GOT all fw_ids...') pool = multiprocessing.Pool(8) states = pool.map(_defuse_fw, fw_ids) - print 'DONE', all(states) \ No newline at end of file + print('DONE', all(states)) \ No newline at end of file diff --git a/mpworks/fix_scripts/find_missing_snl.py b/mpworks/fix_scripts/find_missing_snl.py index d2e15824..bfa254b8 100644 --- a/mpworks/fix_scripts/find_missing_snl.py +++ b/mpworks/fix_scripts/find_missing_snl.py @@ -22,26 +22,26 @@ all_snl_ids = [] # snl ids that have a group all_missing_ids = [] # snl ids missing a group idx = 0 - print 'GETTING GROUPS' + print('GETTING GROUPS') for x in snldb.snlgroups.find({}, {"all_snl_ids": 1}): all_snl_ids.extend(x['all_snl_ids']) - print 'CHECKING SNL' + print('CHECKING SNL') for x in snldb.snl.find({}, {'snl_id': 1}, timeout=False): - print x['snl_id'] + print(x['snl_id']) if x['snl_id'] not in all_snl_ids: - print x['snl_id'], '*********' + print(x['snl_id'], '*********') all_missing_ids.append(x['snl_id']) - print 'FIXING / ADDING GROUPS' - print all_missing_ids + print('FIXING / ADDING GROUPS') + print(all_missing_ids) for snl_id in all_missing_ids: try: mpsnl = MPStructureNL.from_dict(snldb.snl.find_one({"snl_id": snl_id})) snldb.build_groups(mpsnl) - print 'SUCCESSFUL', snl_id + print('SUCCESSFUL', snl_id) except: - print 'ERROR with snl_id', snl_id + print('ERROR with snl_id', snl_id) traceback.print_exc() diff --git a/mpworks/fix_scripts/fix_bad_crystals.py b/mpworks/fix_scripts/fix_bad_crystals.py index e6ba53d3..dfa41acf 100644 --- a/mpworks/fix_scripts/fix_bad_crystals.py +++ b/mpworks/fix_scripts/fix_bad_crystals.py @@ -40,7 +40,7 @@ def detect(): n_groups = 
snlgroups.find({"all_snl_ids":{"$in":[old_s['snl_id'], new_s['snl_id']]}}).count() if n_groups != 1: # The crystal_id is bad - print crystal_id + print(crystal_id) def fix(): @@ -90,7 +90,7 @@ def fix(): for c_id in bad_crystal_ids: if c_id == 100892 or c_id == 100202: - print 'SKIP' + print('SKIP') else: # FIX SNL @@ -122,7 +122,7 @@ def fix(): for s in submissions.find({'about._materialsproject.deprecated.crystal_id_deprecated': c_id}, {'submission_id': 1}): submissions.update({'submission_id': s['submission_id']}, {'$pushAll': {"about.remarks": ['DEPRECATED', 'SEVERE BUG IN ICSD CONVERSION']}}) - print 'FIXED', c_id + print('FIXED', c_id) def find_alternate_canonical(): @@ -139,10 +139,10 @@ def find_alternate_canonical(): for s in snl.find({"snl_id": {"$in": g['all_snl_ids']}, "about.remarks": {"$ne": "DEPRECATED"}}): canonical_mpsnl = MPStructureNL.from_dict(s) snldb.switch_canonical_snl(g['snlgroup_id'], canonical_mpsnl) - print g['snlgroup_id'] + print(g['snlgroup_id']) break - print 'DONE' + print('DONE') def archive_deprecated_fws(): # find all snlgroups that are deprecated, and archive all WFs that have deprecated fw_ids so we don't run them @@ -157,11 +157,11 @@ def archive_deprecated_fws(): for g in snlgroups.find({'canonical_snl.about.remarks':'DEPRECATED'}, {'snlgroup_id': 1}): while lpdb.fireworks.find_one({'spec.snlgroup_id': g['snlgroup_id'], 'state': {'$ne': 'ARCHIVED'}}, {'fw_id': 1}): fw = lpdb.fireworks.find_one({'spec.snlgroup_id': g['snlgroup_id'], 'state': {'$ne': 'ARCHIVED'}}, {'fw_id': 1}) - print fw['fw_id'] + print(fw['fw_id']) lpdb.archive_wf(fw['fw_id']) - print 'DONE' + print('DONE') diff --git a/mpworks/fix_scripts/fix_bs_controller_tasks.py b/mpworks/fix_scripts/fix_bs_controller_tasks.py index 33144ef9..623c41a9 100644 --- a/mpworks/fix_scripts/fix_bs_controller_tasks.py +++ b/mpworks/fix_scripts/fix_bs_controller_tasks.py @@ -24,7 +24,7 @@ db.authenticate(creds['username'], creds['password']) materials = db['materials'] tasks = 
db['tasks'] -print materials.count() +print(materials.count()) def append_wf(fw_id, parent_fw_id=None): wf = lpdb.workflows.find_one({'nodes':fw_id}, {'parent_links':1,'links':1,'name':1}) @@ -38,25 +38,25 @@ def append_wf(fw_id, parent_fw_id=None): if child_fw['spec']['task_type'] == 'Controller: add Electronic Structure v2': if child_fw['state'] == 'DEFUSED': lpdb.reignite_fw(child_fw_id) - print 'AddEStructureTask v2', child_fw_id , 'reignited for', fw_id + print('AddEStructureTask v2', child_fw_id , 'reignited for', fw_id) elif child_fw['state'] == 'FIZZLED': lpdb.rerun_fw(child_fw_id) - print 'AddEStructureTask v2', child_fw_id , 'marked for rerun for', fw_id + print('AddEStructureTask v2', child_fw_id , 'marked for rerun for', fw_id) elif child_fw['state'] == 'COMPLETED': - print 'AddEStructureTask v2 already successfully run for', fw_id + print('AddEStructureTask v2 already successfully run for', fw_id) sec_child_fw_id = wf['links'][str(child_fw_id)][0] sec_child_fw = lpdb.fireworks.find_one({'fw_id': sec_child_fw_id}, {'spec.task_type':1, 'state':1}) if sec_child_fw['state'] == 'FIZZLED': lpdb.rerun_fw(sec_child_fw_id) - print 'FIZZLED -> marked for rerun:', sec_child_fw_id, sec_child_fw['spec']['task_type'] + print('FIZZLED -> marked for rerun:', sec_child_fw_id, sec_child_fw['spec']['task_type']) else: - print 'AddEStructureTask v2 added but neither DEFUSED, FIZZLED, or COMPLETED for', fw_id + print('AddEStructureTask v2 added but neither DEFUSED, FIZZLED, or COMPLETED for', fw_id) return f = lpdb.get_wf_summary_dict(fw_id)['name'].replace(' ', '_') name = get_slug(f + '--' + spec['task_type']) fw = Firework([AddEStructureTask()], spec, name=name) lpdb.append_wf(Workflow([fw]), [parent_fw_id]) - print name, 'added for', fw_id + print(name, 'added for', fw_id) except ValueError: raise ValueError('could not append controller task to wf', wf['name']) @@ -140,7 +140,7 @@ def append_wf(fw_id, parent_fw_id=None): ] mp_ids = [ "mp-134", "mp-127", "mp-58", 
"mp-135", "mp-70", "mp-1" ] mp_ids = [doc['task_id'] for doc in materials.find({'has_bandstructure': False}, {'task_id':1})] - print '#mp_ids =', len(mp_ids) + print('#mp_ids =', len(mp_ids)) counter = Counter() materials_wBS = [] @@ -150,7 +150,7 @@ def append_wf(fw_id, parent_fw_id=None): if material['has_bandstructure']: materials_wBS.append((mp_id, material['pretty_formula'])) counter['has_bandstructure'] += 1 - print matidx, '========', mp_id, snlgroup_id, '=============' + print(matidx, '========', mp_id, snlgroup_id, '=============') fw_list = list(lpdb.fireworks.find( {'spec.snlgroup_id': snlgroup_id}, {'_id': 0, 'state': 1, 'name': 1, 'fw_id': 1, 'spec.snlgroup_id': 1, 'spec.task_type': 1, 'launches': 1} @@ -162,7 +162,7 @@ def append_wf(fw_id, parent_fw_id=None): has_gga_static = True if fw['state'] == 'FIZZLED': #counter[fw['spec']['task_type']] += 1 - print '--'.join([fw['name'], str(fw['fw_id'])]), fw['state'] + print('--'.join([fw['name'], str(fw['fw_id'])]), fw['state']) launch_dir = lpdb.launches.find_one({'launch_id': fw['launches'][0]}, {'launch_dir':1})['launch_dir'] launch_subdir = '/'.join(launch_dir.split('/')[-2:]) if 'oasis' in launch_dir: @@ -180,12 +180,12 @@ def append_wf(fw_id, parent_fw_id=None): try: os.chdir(launch_dir) except: - print ' |===> could not find launch directory in usual locations' + print(' |===> could not find launch directory in usual locations') lpdb.rerun_fw(fw['fw_id']) - print ' |===> marked for RERUN' + print(' |===> marked for RERUN') counter['LOCATION_NOT_FOUND'] += 1 continue - print ' |===>', launch_dir + print(' |===>', launch_dir) vaspout = os.path.join(launch_dir, "vasp.out") if not os.path.exists(vaspout): vaspout = os.path.join(launch_dir, "vasp.out.gz") @@ -200,9 +200,9 @@ def append_wf(fw_id, parent_fw_id=None): counter['GGA_static_' + err] += 1 if 'brmix' in d['errors']: #lpdb.rerun_fw(fw['fw_id']) - print ' |===> BRMIX error -> marked for RERUN with alternative strategy' + print(' |===> BRMIX error -> 
marked for RERUN with alternative strategy') else: - print ' |===> no vasp error indicated -> TODO' + print(' |===> no vasp error indicated -> TODO') counter['GGA_STATIC_NO_VASP_ERROR'] += 1 os.chdir(cwd) else: @@ -211,7 +211,7 @@ def append_wf(fw_id, parent_fw_id=None): {'state': 1, '_id': 0, 'fw_states': 1, 'nodes': 1, 'updated_on': 1, 'parent_links': 1} ) if workflow is None: - print ' |==> workflow not found', fw['fw_id'] + print(' |==> workflow not found', fw['fw_id']) counter['WF_NOT_FOUND'] += 1 continue is_new = bool(datetime(2016, 1, 1) < workflow['updated_on']) @@ -220,11 +220,11 @@ def append_wf(fw_id, parent_fw_id=None): if fw_state == 'FIZZLED': fw_fizzled = lpdb.fireworks.find_one({'fw_id': int(fw_id_fizzled)}, {'_id': 0, 'name': 1, 'fw_id': 1, 'spec.task_type': 1}) counter[fw_fizzled['spec']['task_type']] += 1 - print url, is_new, material['has_bandstructure'], fw_id_fizzled - print 'http://fireworks.dash.materialsproject.org/wf/'+str(fw['fw_id']), workflow['state'] - print ' |==>', '--'.join([fw_fizzled['name'], fw_id_fizzled]) + print(url, is_new, material['has_bandstructure'], fw_id_fizzled) + print('http://fireworks.dash.materialsproject.org/wf/'+str(fw['fw_id']), workflow['state']) + print(' |==>', '--'.join([fw_fizzled['name'], fw_id_fizzled])) if fnmatch(fw_fizzled['spec']['task_type'], '*Boltztrap*'): - print ' |====> marked for RERUN (Boltztrap, physical constants from scipy, missing libmkl_lapack.so, BoltzTrap_TE -> pymatgen)' + print(' |====> marked for RERUN (Boltztrap, physical constants from scipy, missing libmkl_lapack.so, BoltzTrap_TE -> pymatgen)') #lpdb.rerun_fw(fw_fizzled['fw_id']) continue elif fw_fizzled['spec']['task_type'] == 'GGA Uniform v2': @@ -233,42 +233,42 @@ def append_wf(fw_id, parent_fw_id=None): fw_id_rerun = str(workflow['parent_links'][fw_id_rerun][-1]) fw_rerun = lpdb.fireworks.find_one({'fw_id': int(fw_id_rerun)}, {'_id': 0, 'spec.task_type': 1}) if fw_rerun['spec']['task_type'] != 'VASP db insertion': - print 
'http://fireworks.dash.materialsproject.org/wf/'+fw_id_rerun + print('http://fireworks.dash.materialsproject.org/wf/'+fw_id_rerun) break #lpdb.rerun_fw(int(fw_id_rerun)) - print ' |====> marked for RERUN (could not get valid results from prev_vasp_dir, GGAstatic vasprun.xml validation error)' + print(' |====> marked for RERUN (could not get valid results from prev_vasp_dir, GGAstatic vasprun.xml validation error)') elif fw_fizzled['spec']['task_type'] == 'GGA band structure v2': - print ' |===> marked for RERUN (trial & error)' + print(' |===> marked for RERUN (trial & error)') #try: # lpdb.rerun_fw(fw_fizzled['fw_id']) #except: # print ' |===> could not rerun firework' # counter['WF_LOCKED'] += 1 elif fw_fizzled['spec']['task_type'] == 'VASP db insertion': - print ' |===> marked for RERUN (trial & error)' + print(' |===> marked for RERUN (trial & error)') #lpdb.rerun_fw(fw_fizzled['fw_id']) #sys.exit(0) break elif workflow['state'] == 'COMPLETED': - print url, is_new, material['has_bandstructure'], workflow['nodes'][0] + print(url, is_new, material['has_bandstructure'], workflow['nodes'][0]) if not is_new and not material['has_bandstructure']: #lpdb.rerun_fw(fw['fw_id']) - print ' |===> marked for RERUN with alternative brmix strategy (WF completed but BS missing)' + print(' |===> marked for RERUN with alternative brmix strategy (WF completed but BS missing)') counter['WF_COMPLETED_MISSING_BS'] += 1 #sys.exit(0) else: counter['COMPLETED'] += 1 if not has_gga_static: - print 'ERROR: no GGA static run found!' - print '\n'.join([ + print('ERROR: no GGA static run found!') + print('\n'.join([ '--'.join([fw['name'], str(fw['fw_id']), fw['state']]) for fw in fw_list - ]) + ])) counter['NO_GGA_STATIC'] += 1 #break else: - print 'ERROR: no fireworks found!' 
+ print('ERROR: no fireworks found!') counter['NO_FWS'] += 1 #break - print '#mp_ids =', len(mp_ids) - print counter + print('#mp_ids =', len(mp_ids)) + print(counter) #print materials_wBS diff --git a/mpworks/fix_scripts/fix_float_priorities.py b/mpworks/fix_scripts/fix_float_priorities.py index edfad619..f9f9595b 100644 --- a/mpworks/fix_scripts/fix_float_priorities.py +++ b/mpworks/fix_scripts/fix_float_priorities.py @@ -17,7 +17,7 @@ for fw in lpdb.fireworks.find({"spec._tasks.1.max_errors":{"$type": 1}}, {"fw_id": 1, "state": 1, "spec._tasks": 1}, timeout=False): - print fw['fw_id'], fw['state'] + print(fw['fw_id'], fw['state']) lpdb.fireworks.find_and_modify({"fw_id": fw['fw_id']}, {"$set": {"spec._tasks.1.max_errors": int(5)}}) if fw['state'] == 'FIZZLED': lpdb.rerun_fw(fw['fw_id']) \ No newline at end of file diff --git a/mpworks/fix_scripts/fix_mpcomplete.py b/mpworks/fix_scripts/fix_mpcomplete.py index 83f8534a..a58513cb 100644 --- a/mpworks/fix_scripts/fix_mpcomplete.py +++ b/mpworks/fix_scripts/fix_mpcomplete.py @@ -34,7 +34,7 @@ projection=projection, return_document=ReturnDocument.AFTER ) - print doc['fw_id'], '----> walltime updated' + print(doc['fw_id'], '----> walltime updated') if 'nnodes' in doc['spec']['_queueadapter'] and not 'nodes' in doc['spec']['_queueadapter']: launchpad.fireworks.find_one_and_update( {'fw_id': doc['fw_id']}, @@ -42,7 +42,7 @@ projection=projection, return_document=ReturnDocument.AFTER ) - print doc['fw_id'], '----> nodes key renamed' + print(doc['fw_id'], '----> nodes key renamed') if 'pre_rocket' in doc['spec']['_queueadapter']: launchpad.fireworks.find_one_and_update( m_query, @@ -50,12 +50,12 @@ projection=projection, return_document=ReturnDocument.AFTER ) - print doc['fw_id'], '----> pre_rocket dropped' + print(doc['fw_id'], '----> pre_rocket dropped') if 'prev_vasp_dir' in doc['spec'] and not os.path.exists(doc['spec']['prev_vasp_dir']): block_dir = doc['spec']['prev_vasp_dir'].split('/')[-2:] launch_dir = 
'/'.join('/oasis/projects/nsf/csd436/phuck/garden'.split('/') + block_dir) if not os.path.exists(launch_dir): - print doc['fw_id'], '---->', '/'.join(block_dir), 'does not exists!' + print(doc['fw_id'], '---->', '/'.join(block_dir), 'does not exists!') continue fw_ids.append(doc['fw_id']) -print 'fixed', fw_ids +print('fixed', fw_ids) diff --git a/mpworks/fix_scripts/fix_unmoved_dirs.py b/mpworks/fix_scripts/fix_unmoved_dirs.py index 17ea77bf..b44cde51 100644 --- a/mpworks/fix_scripts/fix_unmoved_dirs.py +++ b/mpworks/fix_scripts/fix_unmoved_dirs.py @@ -26,7 +26,7 @@ def detect(): block_part = get_block_part(d) garden_dir = os.path.join(GARDEN_PATH, block_part) if os.path.exists(garden_dir): - print garden_dir + print(garden_dir) if __name__ == '__main__': diff --git a/mpworks/fix_scripts/legacy/actions/add_snl_final.py b/mpworks/fix_scripts/legacy/actions/add_snl_final.py index 9ce82df0..e2a99241 100644 --- a/mpworks/fix_scripts/legacy/actions/add_snl_final.py +++ b/mpworks/fix_scripts/legacy/actions/add_snl_final.py @@ -24,4 +24,4 @@ for d in new_tasks.find({'snlgroup_id_final': {'$exists': False}}, {'task_id': 1, 'snl': 1, 'snlgroup_id': 1, 'snlgroup_changed': 1}): new_tasks.update({'task_id': d['task_id']}, {'$set': {'snl_final': d['snl'], 'snlgroup_id_final': d['snlgroup_id'], 'snlgroup_changed': False}}) count+=1 - print count + print(count) diff --git a/mpworks/fix_scripts/legacy/actions/do_fw_conversion.py b/mpworks/fix_scripts/legacy/actions/do_fw_conversion.py index 38e6aab6..ac0310fe 100644 --- a/mpworks/fix_scripts/legacy/actions/do_fw_conversion.py +++ b/mpworks/fix_scripts/legacy/actions/do_fw_conversion.py @@ -28,7 +28,7 @@ db2.authenticate(db_creds['admin_user'], db_creds['admin_password']) new_tasks = db2['tasks'] - print new_tasks.count() + print(new_tasks.count()) new_tasks.ensure_index("task_id", unique=True) new_tasks.ensure_index("task_id_deprecated", unique=True) diff --git a/mpworks/fix_scripts/legacy/actions/do_icsd_to_snl.py 
b/mpworks/fix_scripts/legacy/actions/do_icsd_to_snl.py index a0201334..e2ad719f 100644 --- a/mpworks/fix_scripts/legacy/actions/do_icsd_to_snl.py +++ b/mpworks/fix_scripts/legacy/actions/do_icsd_to_snl.py @@ -40,6 +40,6 @@ snldb.add_snl(snl) except: traceback.print_exc() - print 'ERROR - icsd id:', icsd_dict['icsd_id'] + print('ERROR - icsd id:', icsd_dict['icsd_id']) - print 'DONE' + print('DONE') diff --git a/mpworks/fix_scripts/legacy/actions/do_mps_to_snl.py b/mpworks/fix_scripts/legacy/actions/do_mps_to_snl.py index 8b4d4223..72794873 100644 --- a/mpworks/fix_scripts/legacy/actions/do_mps_to_snl.py +++ b/mpworks/fix_scripts/legacy/actions/do_mps_to_snl.py @@ -36,7 +36,7 @@ prev_ids = [] # MPS ids that we already took care of - print 'INITIALIZING' + print('INITIALIZING') if RESET: snldb._reset() time.sleep(10) # makes me sleep better at night @@ -45,7 +45,7 @@ for mps in snldb.snl.find({}, {"about._materialsproject.deprecated.mps_ids": 1}): prev_ids.extend(mps['about']['_materialsproject']['deprecated']['mps_ids']) - print 'PROCESSING' + print('PROCESSING') for mps in db.mps.find(timeout=False): try: if not mps['mps_id'] in prev_ids: @@ -53,9 +53,9 @@ if snl: snldb.add_snl(snl) else: - print 'SKIPPING', mps['mps_id'] + print('SKIPPING', mps['mps_id']) except: traceback.print_exc() - print 'ERROR - mps id:', mps['mps_id'] + print('ERROR - mps id:', mps['mps_id']) - print 'DONE' + print('DONE') diff --git a/mpworks/fix_scripts/legacy/actions/do_task_conversion.py b/mpworks/fix_scripts/legacy/actions/do_task_conversion.py index 9bc0670e..37d8b7b5 100644 --- a/mpworks/fix_scripts/legacy/actions/do_task_conversion.py +++ b/mpworks/fix_scripts/legacy/actions/do_task_conversion.py @@ -58,12 +58,12 @@ def process_task(self, task_id): t = self.old_tasks.find_one({'task_id': task_id}) try: t_id, d = self.drone.assimilate(t) - print 'ENTERED', t_id + print('ENTERED', t_id) except: - print 'ERROR entering', t['task_id'] + print('ERROR entering', t['task_id']) 
traceback.print_exc() else: - print 'skip' + print('skip') def _analyze(task_id): @@ -77,10 +77,10 @@ def parallel_build(min, max): for i in tasks_old.find({'task_id': {'$gte': min, '$lt': max}}, {'task_id': 1}): task_ids.append(i['task_id']) - print 'GOT all tasks...' + print('GOT all tasks...') pool = multiprocessing.Pool(16) pool.map(_analyze, task_ids) - print 'DONE' + print('DONE') if __name__ == '__main__': o = OldTaskBuilder() diff --git a/mpworks/fix_scripts/legacy/actions/do_task_conversion_fixes.py b/mpworks/fix_scripts/legacy/actions/do_task_conversion_fixes.py index b01dc0bb..17f1bb3c 100644 --- a/mpworks/fix_scripts/legacy/actions/do_task_conversion_fixes.py +++ b/mpworks/fix_scripts/legacy/actions/do_task_conversion_fixes.py @@ -54,9 +54,9 @@ def process_task(self, task_id): t = self.old_tasks.find_one({'task_id': task_id}) try: t_id, d = self.drone.assimilate(t) - print 'ENTERED', t_id + print('ENTERED', t_id) except: - print 'ERROR entering', t['task_id'] + print('ERROR entering', t['task_id']) traceback.print_exc() @@ -83,5 +83,5 @@ def process_task(self, task_id): t = o.new_tasks.find_one({"task_id": new_task_id}, {"state": 1}) if t: o.new_tasks.remove({'task_id': new_task_id}) - print 'REPARSING', old_task_id + print('REPARSING', old_task_id) o.process_task(old_task_id) diff --git a/mpworks/fix_scripts/legacy/mps_to_snl.py b/mpworks/fix_scripts/legacy/mps_to_snl.py index 1257a53f..f11b8ce0 100644 --- a/mpworks/fix_scripts/legacy/mps_to_snl.py +++ b/mpworks/fix_scripts/legacy/mps_to_snl.py @@ -19,7 +19,7 @@ def mps_dict_to_snl(mps_dict): return None if 'Carbon Capture Storage Initiative (CCSI)' in m['about']['metadata']['project_names']: - print 'rejected old CCSI' + print('rejected old CCSI') return None mps_ids = [m['mps_id']] diff --git a/mpworks/fix_scripts/legacy/old_task_drone.py b/mpworks/fix_scripts/legacy/old_task_drone.py index 3611924f..b30ef79f 100644 --- a/mpworks/fix_scripts/legacy/old_task_drone.py +++ 
b/mpworks/fix_scripts/legacy/old_task_drone.py @@ -193,7 +193,7 @@ def process_fw(self, old_task, d): vasp_signals['last_relax_dir'] = last_relax_dir ## see what error signals are present - print "getting signals for dir :{}".format(last_relax_dir) + print("getting signals for dir :{}".format(last_relax_dir)) sl = SignalDetectorList() sl.append(VASPInputsExistSignal()) diff --git a/mpworks/fix_scripts/legacy/task_to_fw.py b/mpworks/fix_scripts/legacy/task_to_fw.py index 0bc3c71f..e3bcfc35 100644 --- a/mpworks/fix_scripts/legacy/task_to_fw.py +++ b/mpworks/fix_scripts/legacy/task_to_fw.py @@ -57,6 +57,6 @@ def task_dict_to_wf(task_dict, launchpad): launchpad.add_wf(wf, reassign_all=False) launchpad._upsert_launch(launches[0]) - print 'ADDED', fw_id + print('ADDED', fw_id) # return fw_id return fw_id \ No newline at end of file diff --git a/mpworks/fix_scripts/reparse_old_tasks.py b/mpworks/fix_scripts/reparse_old_tasks.py index 4adb4d91..2eec8efc 100644 --- a/mpworks/fix_scripts/reparse_old_tasks.py +++ b/mpworks/fix_scripts/reparse_old_tasks.py @@ -73,14 +73,14 @@ def process_task(self, path): logger.error("Bad run stats for {}.".format(path)) self.tasks.update({'dir_name_full': path}, {'$set': {"run_stats": run_stats}}) - print 'FINISHED', path + print('FINISHED', path) else: - print 'SKIPPING', path + print('SKIPPING', path) except: - print '-----' - print 'ENCOUNTERED AN EXCEPTION!!!', path + print('-----') + print('ENCOUNTERED AN EXCEPTION!!!', path) traceback.print_exc() - print '-----' + print('-----') def _analyze(data): @@ -104,7 +104,7 @@ def _analyze(data): q = {'submission_id': {'$exists': True}} # these are all old-style tasks for d in tasks.find(q, {'dir_name_full': 1}): m_data.append(d['dir_name_full']) - print 'GOT all tasks...' 
+ print('GOT all tasks...') pool = multiprocessing.Pool(16) pool.map(_analyze, m_data) - print 'DONE' + print('DONE') diff --git a/mpworks/fix_scripts/reparse_old_tasks_again.py b/mpworks/fix_scripts/reparse_old_tasks_again.py index 232d14cd..186c3ba0 100644 --- a/mpworks/fix_scripts/reparse_old_tasks_again.py +++ b/mpworks/fix_scripts/reparse_old_tasks_again.py @@ -73,14 +73,14 @@ def process_task(self, path): logger.error("Bad run stats for {}.".format(path)) self.tasks.update({'dir_name_full': path}, {'$set': {"run_stats": run_stats}}) - print 'FINISHED', path + print('FINISHED', path) else: - print 'SKIPPING', path + print('SKIPPING', path) except: - print '-----' - print 'ENCOUNTERED AN EXCEPTION!!!', path + print('-----') + print('ENCOUNTERED AN EXCEPTION!!!', path) traceback.print_exc() - print '-----' + print('-----') def _analyze(data): @@ -105,9 +105,9 @@ def _analyze(data): for line in f: old_task = line.split(' ')[1].strip() m_data.append(tasks.find_one({"task_id":old_task}, {'dir_name_full': 1})["dir_name_full"]) - print 'GOT all tasks...' 
+ print('GOT all tasks...') # print len(m_data) # print m_data[1] pool = multiprocessing.Pool(2) pool.map(_analyze, m_data) - print 'DONE' + print('DONE') diff --git a/mpworks/fix_scripts/rerun_boltztrap.py b/mpworks/fix_scripts/rerun_boltztrap.py index 67cd9873..06912dd9 100644 --- a/mpworks/fix_scripts/rerun_boltztrap.py +++ b/mpworks/fix_scripts/rerun_boltztrap.py @@ -62,7 +62,7 @@ last_line = ferr.readlines()[-1].strip() if 'TIME LIMIT' in last_line: lpdb.rerun_fw(fw_doc['fw_id']) - print '[{}] rerun due to TIME LIMIT'.format(fw_doc['fw_id']) + print('[{}] rerun due to TIME LIMIT'.format(fw_doc['fw_id'])) else: counter['RECENT_BTZ_FWS_' + fw_doc['state']] += 1 else: @@ -71,7 +71,7 @@ #parent_fw = lpdb.fireworks.find_one({'fw_id': parent_fw_id}, {'state':1}) #if parent_fw['state'] == 'COMPLETED': counter['RECENT_BTZ_FWS_' + fw_doc['state']] += 1 -print counter +print(counter) nfws = 0 for fw_doc in lpdb.fireworks.find( @@ -85,8 +85,8 @@ if 'parent job unsuccessful' in last_line or 'Could not find task' in last_line: parent_fw_id = wf['parent_links'][str(fw_doc['fw_id'])][-1] lpdb.rerun_fw(parent_fw_id) - print '[{}] {} --> marked parent {} for rerun'.format(nfws, fw_doc['fw_id'], parent_fw_id) + print('[{}] {} --> marked parent {} for rerun'.format(nfws, fw_doc['fw_id'], parent_fw_id)) else: #lpdb.rerun_fw(fw_doc['fw_id']) - print '[{}] {} --> {}'.format(nfws, fw_doc['fw_id'], last_line) + print('[{}] {} --> {}'.format(nfws, fw_doc['fw_id'], last_line)) nfws += 1 diff --git a/mpworks/fix_scripts/submit_bo_jobs.py b/mpworks/fix_scripts/submit_bo_jobs.py index 2f2c5105..8f115067 100644 --- a/mpworks/fix_scripts/submit_bo_jobs.py +++ b/mpworks/fix_scripts/submit_bo_jobs.py @@ -20,11 +20,11 @@ for s in os.listdir(os.path.join(module_dir, "snls")): if '.json' in s: - print 'submitting', s + print('submitting', s) with open(os.path.join(module_dir, "snls",s)) as f: snl = StructureNL.from_dict(json.load(f)) sma.submit_snl(snl, 'anubhavster@gmail.com', {"priority": 10}) 
- print 'DONE submitting', s + print('DONE submitting', s) -print 'DONE!' \ No newline at end of file +print('DONE!') \ No newline at end of file diff --git a/mpworks/maintenance_scripts/classify_fizzled.py b/mpworks/maintenance_scripts/classify_fizzled.py index 2f644071..a3ded4ef 100644 --- a/mpworks/maintenance_scripts/classify_fizzled.py +++ b/mpworks/maintenance_scripts/classify_fizzled.py @@ -69,7 +69,7 @@ def get_task_info(fw_id, tdb): except_str = l['action']['stored_data'].get('_exception') if 'Disk quota exceeded' in except_str: except_dict['DISK_QUOTA_EXCEEDED'] = except_dict['DISK_QUOTA_EXCEEDED']+1 - print l['fw_id'], '*' + print(l['fw_id'], '*') lpdb.rerun_fw(l['fw_id']) elif 'No such file' in except_str: # this is due to missing CHGCAR from Michael's old runs @@ -110,7 +110,7 @@ def get_task_info(fw_id, tdb): else: except_dict[except_str] = except_dict[except_str]+1 - print '-----' + print('-----') for k, v in except_dict.iteritems(): - print {"{}\t{}".format(v, k)} + print({"{}\t{}".format(v, k)}) diff --git a/mpworks/maintenance_scripts/modify_snl.py b/mpworks/maintenance_scripts/modify_snl.py index 08044fb4..8863dd81 100644 --- a/mpworks/maintenance_scripts/modify_snl.py +++ b/mpworks/maintenance_scripts/modify_snl.py @@ -66,35 +66,35 @@ def modify_snl(snl_id, new_snl, colls, reject_bad_tasks=False): snl_d['snl_timestamp'] = snl_old['snl_timestamp'] # insert the new SNL into the snl collection - print 'INSERTING SNL_ID', {'snl_id': snl_id}, snl_d + print('INSERTING SNL_ID', {'snl_id': snl_id}, snl_d) colls.snl.update({'snl_id': snl_id}, snl_d) # update the canonical SNL of the group for s in colls.snlgroups.find({'canonical_snl.about._materialsproject.snl_id': snl_id}, {'snlgroup_id': 1}): - print 'CHANGING SNLGROUP_ID', s['snlgroup_id'] + print('CHANGING SNLGROUP_ID', s['snlgroup_id']) colls.snlgroups.find_and_modify({'snlgroup_id': s['snlgroup_id']}, {'$set': {'canonical_snl': snl_d}}) # update FWs pt 1 for f in 
colls.fireworks.find({'spec.mpsnl.about._materialsproject.snl_id': snl_id}, {'fw_id': 1}): - print 'CHANGING FW_ID', f['fw_id'] + print('CHANGING FW_ID', f['fw_id']) colls.fireworks.find_and_modify({'fw_id': f['fw_id']}, {'$set': {'spec.mpsnl': snl_d}}) # update FWs pt 2 for f in colls.fireworks.find({'spec.force_mpsnl.about._materialsproject.snl_id': snl_id}, {'fw_id': 1}): - print 'CHANGING FW_ID', f['fw_id'] + print('CHANGING FW_ID', f['fw_id']) colls.fireworks.find_and_modify({'fw_id': f['fw_id']}, {'$set': {'spec.force_mpsnl': snl_d}}) # update Launches for l in colls.launches.find({'action.update_spec.mpsnl.about._materialsproject.snl_id': snl_id}, {'launch_id': 1}): - print 'CHANGING LAUNCH_ID', l['launch_id'] + print('CHANGING LAUNCH_ID', l['launch_id']) colls.launches.find_and_modify({'launch_id': l['launch_id']}, {'$set': {'action.update_spec.mpsnl': snl_d}}) # update tasks initial for t in colls.tasks.find({'snl.about._materialsproject.snl_id': snl_id}, {'task_id': 1}): - print 'CHANGING init TASK_ID', t['task_id'] + print('CHANGING init TASK_ID', t['task_id']) colls.tasks.find_and_modify({'task_id': t['task_id']}, {'$set': {'snl': snl_d}}) if reject_bad_tasks: - print 'REJECTING TASK_ID', t['task_id'] + print('REJECTING TASK_ID', t['task_id']) colls.tasks.find_and_modify({'task_id': t['task_id']}, {'$set': {'state': 'rejected'}}) colls.tasks.find_and_modify({'task_id': t['task_id']}, {'$push': {'analysis.errors_MP.critical_signals': 'BAD STRUCTURE SNL'}}) colls.tasks.find_and_modify({'task_id': t['task_id']}, {'$inc': {'analysis.errors_MP.num_critical': 1}}) @@ -102,17 +102,17 @@ def modify_snl(snl_id, new_snl, colls, reject_bad_tasks=False): # update tasks final for t in colls.tasks.find({'snl_final.about._materialsproject.snl_id': snl_id}, {'task_id': 1}): - print 'CHANGING final TASK_ID', t['task_id'] + print('CHANGING final TASK_ID', t['task_id']) colls.tasks.find_and_modify({'task_id': t['task_id']}, {'$set': {'snl_final': snl_d}}) if 
reject_bad_tasks: - print 'REJECTING TASK_ID', t['task_id'] + print('REJECTING TASK_ID', t['task_id']) colls.tasks.find_and_modify({'task_id': t['task_id']}, {'$set': {'state': 'rejected'}}) colls.tasks.find_and_modify({'task_id': t['task_id']}, {'$push': {'analysis.errors_MP.critical_signals': 'BAD STRUCTURE SNL'}}) colls.tasks.find_and_modify({'task_id': t['task_id']}, {'$inc': {'analysis.errors_MP.num_critical': 1}}) # note: for now we are not fixing submissions in order to keep a record of submissions accurate, and also because the SNL assignment comes after submission - print 'DONE PROCESSING', snl_id + print('DONE PROCESSING', snl_id) def get_deprecated_snl(snl_id, colls): @@ -128,6 +128,6 @@ def get_deprecated_snl(snl_id, colls): snl_id = 1579 snl_new = get_deprecated_snl(snl_id, colls) - print snl_new.as_dict() + print(snl_new.as_dict()) modify_snl(snl_id, snl_new, colls, reject_bad_tasks=True) \ No newline at end of file diff --git a/mpworks/maintenance_scripts/reparse_tasks.py b/mpworks/maintenance_scripts/reparse_tasks.py index f8c7f057..6e7c171f 100644 --- a/mpworks/maintenance_scripts/reparse_tasks.py +++ b/mpworks/maintenance_scripts/reparse_tasks.py @@ -63,12 +63,12 @@ def process_task(self, data): self.tasks.update({"task_id": t_id}, {"$set": {"snl_final": prev_info['snl_final'], "snlgroup_id_final": prev_info['snlgroup_id_final'], "snlgroup_changed": prev_info['snlgroup_changed']}}) - print 'FINISHED', t_id + print('FINISHED', t_id) except: - print '-----' - print 'ENCOUNTERED AN EXCEPTION!!!', data[0] + print('-----') + print('ENCOUNTERED AN EXCEPTION!!!', data[0]) traceback.print_exc() - print '-----' + print('-----') def _analyze(data): @@ -108,8 +108,8 @@ def _analyze(data): for d in tasks.find(q, {'dir_name_full': 1, 'task_type': 1, 'task_id': 1}, timeout=False): if d['task_id'] in finished_tasks: - print 'DUPLICATE', d['task_id'] + print('DUPLICATE', d['task_id']) else: o.process_task((d['dir_name_full'], 'Uniform' in d['task_type'])) # 
m_data.append((d['dir_name_full'], 'Uniform' in d['task_type'])) - print 'DONE' \ No newline at end of file + print('DONE') \ No newline at end of file diff --git a/mpworks/osti_doi/__main__.py b/mpworks/osti_doi/__main__.py index 4adbda01..b67a74d1 100644 --- a/mpworks/osti_doi/__main__.py +++ b/mpworks/osti_doi/__main__.py @@ -24,15 +24,15 @@ logger.setLevel(getattr(logging, loglevel)) db_yaml = 'materials_db_{}.yaml'.format('prod' if args.prod else 'dev') -print db_yaml +print(db_yaml) if args.reset or args.info or args.plotly: matad = OstiMongoAdapter.from_config(db_yaml=db_yaml) if args.reset: matad._reset() elif args.info: - print '{} DOIs in DOI collection.'.format(matad.doicoll.count()) + print('{} DOIs in DOI collection.'.format(matad.doicoll.count())) dois = matad.get_all_dois() - print '{}/{} materials have DOIs.'.format(len(dois), matad.matcoll.count()) + print('{}/{} materials have DOIs.'.format(len(dois), matad.matcoll.count())) elif args.plotly: import os, datetime import plotly.plotly as py @@ -56,7 +56,7 @@ ) for idx,count in enumerate(counts) ]) filename = 'dois_{}'.format(today) - print py.plot(data, filename=filename, auto_open=False) + print(py.plot(data, filename=filename, auto_open=False)) else: # generate records for either n or all (n=0) not-yet-submitted materials # OR generate records for specific materials (submitted or not) diff --git a/mpworks/snl_utils/mpsnl.py b/mpworks/snl_utils/mpsnl.py index eefbcbc9..e0665c33 100644 --- a/mpworks/snl_utils/mpsnl.py +++ b/mpworks/snl_utils/mpsnl.py @@ -171,12 +171,12 @@ def add_if_belongs(self, cand_snl): chemsys = '-'.join(elsyms) if ( cand_snl.structure.num_sites > 1500 or self.canonical_structure.num_sites > 1500) and chemsys == 'C-Ce': - print 'SKIPPING LARGE C-Ce' + print('SKIPPING LARGE C-Ce') return False, None # make sure the structure is not already in all_structures if cand_snl.snl_id in self.all_snl_ids: - print 'WARNING: add_if_belongs() has detected that you are trying to add the same 
SNL id twice!' + print('WARNING: add_if_belongs() has detected that you are trying to add the same SNL id twice!') return False, None #try a structure fit to the canonical structure diff --git a/mpworks/snl_utils/snl_mongo.py b/mpworks/snl_utils/snl_mongo.py index 961cf417..af377d1f 100644 --- a/mpworks/snl_utils/snl_mongo.py +++ b/mpworks/snl_utils/snl_mongo.py @@ -122,7 +122,7 @@ def add_mpsnl(self, mpsnl, force_new=False, snlgroup_guess=None): def _add_if_belongs(self, snlgroup, mpsnl, testing_mode): match_found, spec_group = snlgroup.add_if_belongs(mpsnl) if match_found: - print 'MATCH FOUND, grouping (snl_id, snlgroup): {}'.format((mpsnl.snl_id, snlgroup.snlgroup_id)) + print('MATCH FOUND, grouping (snl_id, snlgroup): {}'.format((mpsnl.snl_id, snlgroup.snlgroup_id))) if not testing_mode: self.snlgroups.update({'snlgroup_id': snlgroup.snlgroup_id}, snlgroup.as_dict()) diff --git a/mpworks/workflows/surface_wf.py b/mpworks/workflows/surface_wf.py index 51038b19..16a1be8b 100644 --- a/mpworks/workflows/surface_wf.py +++ b/mpworks/workflows/surface_wf.py @@ -103,9 +103,9 @@ def __init__(self, api_key, list_of_elements=[], indices_dict=None, spa = SpacegroupAnalyzer(prim_unit_cell, symprec=symprec, angle_tolerance=angle_tolerance) conv_unit_cell = spa.get_conventional_standard_structure() - print conv_unit_cell + print(conv_unit_cell) unit_cells_dict[el] = [conv_unit_cell, min(e_per_atom)] - print el + print(el) self.api_key = api_key @@ -146,9 +146,9 @@ def from_max_index(self, max_index, max_normal_search=True, get_symmetrically_distinct_miller_indices(self.unit_cells_dict[el][0], max_index) - print 'surface ', el + print('surface ', el) - print '# ', el + print('# ', el) if max_only: for hkl in list_of_indices: @@ -310,17 +310,17 @@ def launch_workflow(self, launchpad_dir="", k_product=50, job=None, for key in self.miller_dict.keys(): # Enumerate through all compounds in the dictionary, # the key is the compositional formula of the compound - print key + 
print(key) for miller_index in self.miller_dict[key]: # Enumerates through all miller indices we # want to create slabs of that compound from - print str(miller_index) + print(str(miller_index)) max_norm = max(miller_index) if self.max_normal_search else None # Whether or not we want to use the # max_normal_search algorithm from surface.py - print 'true or false max norm is ', max_norm, self.max_normal_search + print('true or false max norm is ', max_norm, self.max_normal_search) slab = SlabGenerator(self.unit_cells_dict[key][0], miller_index, self.ssize, self.vsize, max_normal_search=max_norm) From 05381c08d6fce4028a01bd55ae80981786c1d2b7 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 25 Jul 2016 15:44:17 -0700 Subject: [PATCH 079/204] explicitly use UTF-16 for README.rst --- README.rst | Bin 46168 -> 91818 bytes setup.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 3e8f3cbc338dd1786953000b6d84f06be7baea13..e224ceb329e5d9315ed4ebabdb07ff1fde0db3e9 100644 GIT binary patch literal 91818 zcmdVD?UP*Bk>1%~x4-(6BX%QNnu$>hHUWv`NVaCAU5Fw@X!rsH&A~Ek2TgzgDG>xP z1Vsq5_WbGg=I{RM={)zPs=G-@6AlD`?y9=yCqC9R2%x?ZMF> z>%DJk)X$E7U+?)U|9@8RzEiJ#U03(4C-usc>GLP`>GArn>bDm==^hMH*&#JjrvretUZKhkErvy?4DneXIUw2BiIV`t-~C>De?E*FGv~A5WinQvZH) z^u;tPSM+tP=iwxM=4L*w;`0yd{|A!3f*)-3KdM!Hodb$=woJ?0U`=|AOG>%+P zOD62WD%BpZJ*szFOCwfKN((RRFY<#EKl^n_@TC4Q_UHBLKD&VIpX~5~ozN`O_Uubv zn0wB)Z>R;=g@0hDCnW>x1KHn4;%FKyYH76hY1s}hYn`d(=nN}rkC*k+AM1)IC5;yM zli(fvAl<{AhoB4m3k-i=|NiFa*Y*1nfp}CvZ^iT1^HO!-L zf*fXkSf6@cSEDnodQrcFHYG%#Z6W3qSB;M7ozcBUk3Bg$EqT7KSHG(N(bJusVqR_ieZBil{drb1|KjLI{rP;-@)xDU`-L05<3^3URlUT(F z`JXrgkMS=!m(f10pJ6qAkCcg6%q=I(t)b>vw>-sq?(b#;d+^$`>3?hmPCx=!&bpWn zG>mK=R+mH3H0wCT&mAW~wXsCmp4Qk8CwqVz zCr3ZrWkkj_`1Dz={Od^qwEDw(rPb=JN12(YuAixYnzlx5+(XklbWYze=CS0m2Ih+Om86xsD&YJtR?_W0#$ z`TbFi2N$C$al{N<9n{DBV0ZJTFwW<7C1b$%;1kXvQW3AdDSfcoe6|@R{%H1rf8(WH z2PE^*n?yqE1SH^1uF%oRCiZ2OG7dRwBv~@zyc%Gps|MgVGjq_62INVsK`w~}VsHLedMt&Nz!Fjs 
zslGV*x96YK56IoMN+Xa@FTL`g>S}T?-kb1y@;5_d6F(28tMF7aSn@15dOaF>9NGeX zpVi3GJ~E+6GA!Qjcm7bX->=`9e-;gEMdqVsP^4Zq>==8;#`EW84dNM!N@BYcFNx!8IO$Ug6=c zI(Y@nlWlW7NaOuPGg;);bmg*gVe~K~I=F}{VE*}Pu=V4@UP}o#P`?nlu&GutDs?sk zo}XucMupNB>-s_G)EYItSmBjOys!k{FwI zWe<}X&Aj&0>e!$sBiTMaMx*-1VV%aV6E=Wr$w~3%wkJPf-)4QW01CD8 zvl@kb3$)uC`FipoEiLyax0Y-0o7O651hMGVx%gDCJcxO}Ud2jQFI&AJx+S3Di^-S1 z*kOX-!&drlDmT0rj@Zg}qOFcIOGX_vVs7Uv>K2XOpEL?u?oa3hkLGP77m-{1VyZ^2 z)%#~9E&9J!f0j7_ucD*;oOt zqQM{^Z=iPO-!TgCv-NoT0{UfJoGUu&$>N~F{EgLsL~;3#Rj=Kx>K%RI!ygp$y)|j* zt7$b1Q;hY&z?xIbazvN*td$h7zuS+IKWaHHgyXPJ(F;{a0hcF@B*wlM&| zEwQ}LQm=yM!>d@2eP{*COo*{rP3i|QfF}f#_>A}>hNNqiEH+V+Jm61tRmVlpL{$2E z$85O*tHVk7jkznBV>Y-*hEoD6G1$%s5IN17)*4<{2d_@U>#^0aY(^Zz7hsie_@@&# z=%3iFAajBDm-#n!!l$+RFYCRI4%(aXK z(?>s#ra{26Hrnn-#QNyLp3#T{#%bPLcH{TsLyTGX=xKA7;mY)I$cM=f!SM50wb!{) zkWFO_&R^F5)>jYe_jO*>m7vjLS&27e$@c19KD1{C+fccPS=SJMO5DPxnOoFQgLws9 zpGcz&*Ggt~E#MXGgrAUik5P=*z!Dv_#P61bEXKGOx!T7^nHZgjK(8AQ6yq-^ZvJ)6 z&>5P1Z|;MSlHXbdD09{_UbC!U_ze1Ni9ro>Tl+-|T|JmtU~@c+HK_-9hWMlOa)v(B zT3Om4+=jeQ3&U+$)*_;jP|wg}=>(dkfpF|g|DK<_9wWEqJ!C>hcE`vt<7e}c$^AwI zh(fwkKdo!m48(ZoioffA)N7RkUi}XpsOyuZ$!y@P*QT*S-owh$eo>PA_dU`a@{;g` zd=G-O8Yo9I>--Jf?2p@PZQPhL$iw&)_ZRszmLF}K{!M#Sc9YP&StHU~ff3m{%jI=Y za7Z^Y#(G7~0N)4u>UYe{BNJA@!Z1}Ml8lC!o&Dy{%>Pr_43A?MJ(xBM|EAUm?|0wv zcQpo`1Q{ozXN_m2wdd0~UjNpFVSP`(y*J75WsRzQbXZ}&-z>O$-{;Os8uqn18y{o9 zetEEcq|Dg%L&8IOb(G9~l#6hMJ?RhG(M2{ap?%iaTH9^Od!Luq7?Ey3?-PPHSkB%e zc{Vfk`|fuz_tMf^a2cxK*zi`K~=U?g`8JC~V9qC`=#KRu7n^SS4Y zf-To}8g-Kuz%Ng#ODC6xpX?@(5im;PxIPij7lq7(KG7@s?v8*swF9=?qkdAWKr69? 
zm~9<%euEX!$NFl@_3ezX@AP)f_HDi2{RP%A`b4Tz*ZAY1M-U$*_R%qzBUz?+uhwgC z13tb}W5ZnJSMj_#UweY&VO{fZ$2E8ACt^CeW>&amXEJFyoG9!F#Cw5uTP^nmgffW z(SOaHVyk*N6gOXWPO+ZfoZ@Ch@AUt%Q~U@&jQ{QPVn5qHc4$3fU@Zle-k4Pr2eQ*f zT*qea-~9O8x^Eo)ANA_Ix6%2;@*TS#hi!kkyl^@4VHSMV7F^Db{VtQuF0VuC`=!@I z)-u}5cvsC{m-D6Tgm1p840aotok_w8u!{NW_I{)648wzsC7OsXogMD$mp&@&f>O9D zjGjoEYOMWexqGwhRh`yI*GuN>Q>}SXchsyy*1b&M?o~)gq)6OdpD#N-`d#&NKde43 z{TTH@t?|v|x$l%liD=P}^{8CWr;TT1KH(vFVZJXscGs32d-Fn~1^X~P&(gNxgV-nC zfv!m3tgAoV$%(|Tt`fxuKAw;X%Z+(iV>kM_RyD!=3=6NUyBd1(i?R&W1QnYZtS6%y zec>abzPT6G+i}T(r+7}FNB{AJ!QfGCjyVOA;bmkw%gl&e``*#R^Lwwj6^-Z~o*X>huMIe*)i4x+PosU9SNr>mVvkp4#uz(&J=D2K> zeA6h!Z=7QQy*WXPRbMZ!rGkSC=BHHd){5x&;5nRMw6Bvaud9N1OuDFDZ!YzF(aF}! zoyWQCr?NWwH1C#{>HIBg8m!VQXC-J$-99PpT)kH_UV#L+YRp*0yc;}|=eMVC<*)mr z97xdDKqe^mf7Sttv{&*WKFg{>51eKFEf+3pxISw%T70Q9w6nA<*$}G$uS)aN;!@*~ zu^z0N581EcOh`t4Uiq@?<)=>zkF6&F!5dn_SGbjB!@pfTl(b zs5K?@tQkIl{TsjcYRq{B9N*ru*cdm)td2SeiR__hInJK&N-TL?Kh>X;|Fh14c7=4i ze*Wy_|EhOR>!)?xTAn*}G;Je)z|^j~_Ks?F7=*gderQ>$MmIl|2kw;p##8KD8c0>t zXIhnRsS|26;;APA)47Zux(C2{3#3};Vy%gDTe>!|-WBoKU%>wqnb z8&3XB{SOPwZ69ooJeSPH{pE{nda|(sxE3_Jf|djHn~k}j0DhMJ0sP92GCv(pQSe9Eo?5}40`N2=ki$h3vZWU=q>?B2c7lgjN9$*8)&FRPCXzx;V*8_xg0 z!egIr8@ax!?Z~aiyHwEeiz%m>pDLisg9SNJfQM(MuWNnm_LALGwemj^BeoqgQ7kd< zE@9bic3tYcqdud+=I~r&ktljxuj1SCGVhAEL2vI<-(_6E4@e7yj~yZT51hk5#h)c&=_|}g!|I#-eTCSEtxkm zji(nKi!2sBVu|C~-4%4sfiHXld88yoJ zKI&U`fI#^c%YY;@6Dke&>sVuFN>~ZaY|jDj?VF?bw$RGkr8j4Yo%4P$yFTM7B8``5 z9j}=xqROg_)f1*FpHlBs4_0=_iwESM3i41fI@d$X&>U@k`dZ06M^gJcBgws1FN_I} z6A!Ufa7T1~BP+x0=>)`!qeo%^Ii>6l3iD1lpG=yciExgJo<1+V@AGFZ*`C_|3&+b9}V5Q;p?4Epn|9E|ET(2s&dKSs8{Tr$T?hvFjkRHM7B$EyX-bcZTCt z3}yz#a;4_mzQRtgt6HCBqSAV!-o00nf%*ONbIu6Q$$hU&O}xs=3LM`mX%dm6|F$H) z0CKRotkHG@OK1UPAaVCm+p{iVuU$oOe17^Qwlt<=F=wKm2X2xjtj~NW!onSAWh3GY zJqKgMlDGa)D*`3-@Sjc=;pcen@wo+(S=ih9nPuTeet+`JQUN9nR9c&nXftCF182{#3ceJd>prLUk+I6h&Z`UXK*&DFC ztpB?@mH&1z^_{xjN;LOapl|daNs$Fl$BJzsEJ{pgHIAs3$VQgCGraci3$y&coxhA8 z&!@rbfp{#xFttIK?$?a9Lpxj{AXhh+9xE5U`gKoxgj+eCxu;ZG-mciUSf;i7{ 
z`NsStBEN6;AkK3XWz!F*I-@)leQaw-$&fkm1zT!WEy2k?{eJyq9eYlU49j!{fZb|8 zti}{o3wLC1d(Mc<+-PeR>PMZUPj$3TCSHffyAr3$t?yIe}wUD!8jcJ|Qz zaa!lI^E?5(M1#ji|8l86FxRxxalv@e4LieFl1Pp=K}U~(wQo##kWn}Tdt>JVqQ~>B zHwvrR=-Fg1s;<#)pbIuy3OPd&!8@R*7}^8`dPNA_v)>bDOxx*W`fa zrOvMC@9PVUAjc*4f(COVea*Xt`LuTR>_YdrU^w$XS9YyF_hQmP;vcvTgOPcdLx3K1 zPj6>?Y%`K%QfTc~MW(lkC+{^9i6S;b-agx`4*NO2nZ(WZ7;W`5SNy%R|<2i;>YPvDqUj0kL>j=ZDa9Iw0t z=6FzJa(+KKthxDbOLJtdjRmt8{9-NB{CfP6`CZo<@!2<^=WlI@-Xbv;+R|`yC>l`9 z_CD=LB~M7t%Z#bXSXFO)oxZ{>MjH?$mk@1IccM@B zky4XKPCm)0qPryvc$Jr}rO8;xPT~Hy>pibwMAlBf0&eAJYC6v3XVm42zAH-NU-VR| z{ODZ4@9e^MRcTKzwU|||Iwto*mq@An%M&QD={wV>jI`YqTYF;f;nXB0U<^BX%W{wX1pRt3N^cc^hPay-ulUpLI=PNv^M=t()jT!HKYllf{ z4Q6|C<%~Re`qxKBg?I5o{ZR?u{<-2T^PL(KR6!p z>C``5m;63!!grcKwxeXSzcHr>U&1_a3Ne#BF1e44ZtgMqYVUc%mt1jLS6{Cj-ShC_ z0k|Lma-PmQ~_39rU3 z@zTaesu*~CZKXztNcZ1hi&!W>r*?H*ZGEF`z52`g zyVee8$?57oDqFzej76lU!$^$kcXM8C{wi$!r->Q*sTu5O>fLL3KdBXgVdddTcgjs~ z2zfuA-ls>@PusA2MZCdEXin|$liFF_H1eX7}j{ zY{UxV=b|y*PtKDafI~Q&*C2Y5D+Vd^ifQ!E%-5?4(~!-uF3&%+9`VPT^jCPzXzV*( zpYFVGI#EwvyrrLOWk(q2$@$1xF_khFCOUxhkynuC*#;)#@8>zv`R?N!3G~*}14<>| zNMIf9MNf-AST~>dd>8M7p1G&Am7`6O3eKMuZ-r-mSF7P^!|9!VGVu+oMCvUa{pX_} zoj=v&M@Rp4hxfB{42$IMb^3c4Ah}Si6FE-m&Kx9RZOMl0s*bkUk|!E-#PyzlC>r-R zGUu8_Vlg=O+-fBenKcLq-oS6_T@Ydvf#v-KYhFt-=GR&OwVf{KzPDsB-rZh1cJ8cp zs~2$DIw@Mw4$TC4U4DCa#}IPsvQL3mqzAd?%#I-NvL1`QyY46M;Ah5&oS_3Vtyg|B zj@o(Qshwo(L6+Cjl@HUhSIZd%ji}21j;*sb2se5b2xVpA|+X?xN6ieR3t6FPho7k2s`6TbqyM^!6!I%?7 z|MrIY{3z{wcC|({p6}C$!L9BEb94W9`T_l{Ex5z+#wa!Fu`_G3!oz5p^R#;iL=E$8%F(6>)hqou5K>4D_g zpy?A}Wp_c+zp;MYwm!L-d_*>gJoLA>nXMz-$dSh0Yg0@db3FFs8(Dai(UDO&#e)8N zzXlY%Tax2x)P$F5WrEfoB&U*XxDoDK~_zJ&EJq3m%BQ&}tA%jNx=7C*=){^4t=gr}Bql<$DtxG~ z4)5CsC+>*iRH*WUwaIa5fx4J_lj{1frd6Bc5%-ZQ5-Y)e1iG)Gt=y+1K6*AJw(F;& zrd#Q*>~_8&GP0i$E`4*#t-_{fB_H2Rb5%C|m*R}XZ5U=`oy&Jwm$Q>o*)3NO`=q3L zkqKIct#0Z*&Ym;H_wAy#4Ey8_Yi-HcSJP>@Tn}~hwnOm+&%0MB>K>|`v+kX?6l3&A zR0)ptsXp~97sQj9XMKkNbrjY*+>I5-_{fMx?JU>hcg{yXHQ#;Q_dJP20Qax(rXFMN 
ziTIOe>AR*0v*LYvZ|2|L)i74U`Q}=1Q0k9S`krAf!&da{YI8UDf_KBUTir0Me>O!L zzda6SGY%M+#!3YFi21#ny7bN8l=k4VhxM6xWhCbkC5=3#v!H!vtM^TUUu4JF_<@JEJ*s#6EY8 zmd)~0dlR?(CPH^GTjK7$#sps-PQ`uBxxHJjzzIFd_??3K+4d;6>ulYJ)faucMmak$N@M{IL>a7t zmE?0QBe${I*oH1xI%mZRm6^FKb#tX{K4=BojB zk0z@kC0TvWQidh46;IM(MUFroiie8@b8^^+Pn=Hm7z}Q=kI(eohxosi_Eejwv46+) zcnVy?9{wo19{O6nP8QelUIlB&frP0@je1CGmfCAycTn)1SXVz{G<1;K$LNBGrEj%` z@A;FQv>;L?hQks;J8=%1bKWKOPwozjEJR2;n#9m|>Zff!@F_!7P}(qzJ2C?3;S=Y$ ztkr=9;}ql5t7FY(zVow_k%y|rCn2C!&u{Q~v;#AG2FG<fjz@D8k?XU;98Rj^OA=lhJh9VIzY;<-|#Zzc)D zS{3zoRV#@yVFi4N`I3i4Gm-ZYHpevtQ^81>KbF>$?@#iofk=4ByXSpqdnaQrYuS}e z#-%ss>NZm-v6| z2%WF?;99Y**#gWH4*)|!!M+X-xh7Y)M&@_Mz>$--_x9fL;)}G5J(#7X+3Mh2MMhY2 zY1fN)2h@|YP)C6hd~!efMlVB0eUbu=&#P&@h{%(AlIRV`_HN*DamKp0AGDIu@IK!R zP1b6DGv^rc!<@30_vcv=-st@*$-TTH%(Z@64;Fl<<|0qw+GX#bd=Z9ZUUMZzh8L{o zqgB};e6hV(7ae`qeF1Z0;}+~|99y5OK;qU@dLia9_y@hW=#^77W=2HT2laZ^&{gwz z)*A~`HIesjuaXW9HZ|JJPVp@m3@h_w*d>EurEog>rK>%+;}U&cHD+C5{xuqVePbTg zF{(YplgOOT9W(!Q2k`rG578&B2F>#dD)=)3X3v_Lz&hh#71+z17yGQO>wRb2J5O^z zSniOyX5-LV*{2Xvw%BH_vy!8#z-bpk#NnCZ9Ol{D#|CY#(U@!{9ow-QKaq+M3=x&S zD%~FDjK*U$JAX!tF&{0Ce!z^0%$+4#+4DMi27mGXo2BX4hptYt`_2p0Le9Oi62C<# z7~A4wMl>RmQwPnXjAJjWySXhbOsK`tCZ|2faopR$zGi5R7gmI&JR>5SV1h^q_BJ(~EJ`)Xsg5)}hfMuuD?O^Fe6xl6Y{r0Jw_=&PNM@Tc$DOt}OQH`;5;{YCZ7VZDLT!ex zwVjz~J>Fp(Guxd>w{qfml2~iV;nB$Y*;4jtS|)nkdK?UhrDSpAJ{AlQ+2tXwWTvdk zXvMSWumC*0Z(Z05?TT@8f_yfJ#^%hW&dh}1QFmnyJxh-on$hw^=Hq(CPx|CneIoN0 z#dwFFviU|yc)MhRf3(H-TQ!<@-JGwWhtLrVU8M3OmKp(gwichcjAs7tsb`Ky2dhi1 zg9xyIwZpN*hxXt@xDXM@iAGpQ;t#QkJmjoi8NUaMI)Q$jozeZAhhEmQ@@l+2xJhUB zd1W=c-%*9pSzqImxzTBMVvv&O$IA{PuRQ}#9i{Ck!n(~I){IwCqdHP{E$Hl~Z-Jd* zig25{bMMjIOZ}TiC33|t`80Of(seXKbfS+}n&0(w^-!#7Yzbmxw`jN{lv&B;{FTlX z`z%O>MbXuKb?6U7_)TGaWAJD}r9fIx-&wMw$GhgrID>dK0 zX3vI|-VOO9NO1V;Pl#Ne zQQKej#HP766=wE0mQVFru@^z63J z2Z8M09(`0#$fv5=o}bT*->iP3FJ^${!%X$cG zjh3;Fdon!n8ji>}bCX%l*_uw&5;d&ZYzI{4xtNTc{sHTYtoSvZ;Ou|*dgo6Xy;E1H zV?N)}qY;pp7kP%K6;1LWK`K=%cGgOa>)k+P53}&DmcU=oc)G#Moj~#jJk*FKQxTiD 
zcO1d-&+Cfp{&^oq<8OZU9t2WfP*N0jKVu@d}JYWk$Y0eB-BWgu(+k zD>!-qYh=ts^FzBWu#jgF?oC=eBuRRKtO|{9&+p72+5WVrM%0s; zBfNp8;7pKVJp^)j&V)O=Ti0%NP|;!be8fK;jBUxLZ|tbq#&5zvg6=+?OwUKa?s{9K zX=HkGiFMdNvL~7EHQhJw>ov>ad4F0xqMy{KdhXF5&=)w2Jmc)FI#u|K39+6`NFEj( z$ys_Z)*E|g`>eI}Y_vs1pHW_(OT@|4!TceN*jR=ZP z@O~n$r@hR3WNz`jkH_Xcuch~4wa5f3skL*RpVuWzZI9RKFOgGEaA#*-ky1fX$#W_l4W5y$_K8y*~e zd_-*|;VO6TVS=15VO8^QJlpSCNV0`fqPTVaXp2O@F;YjCF-ne_hf%>cdNxvzv3SJT z-FhDe^^7M`0qIzYod{MzZi$A_4fi@QHg^ThSFb%q&U$^s?eucN#ofs#?Sadm6C)Hl zbE0PcREi*apH`{VddzP_ZKY_#~)1Z zTt<7rBD`TVc8!r>zm#MJ+--wKeYQ#1bgO@~_8jzNrVq=nS4t*)l_;wwx^J5E6X86u zvfKlUy{Kb``;zPD(eJmlU?#GEUw8W|W967H#Fo}lFyeCWZbaz5QNiA$34LZ#GDt^Y zEKST0Z-D<>wN5k;HiTp0NOB9%%WgCsHCX9!{f@nY46E++Q${ae<^^lpjx5+6_;@X= z@Xn1jF3iNkpHI%E0Zm{Z^mgB0iTyrnGkmkA5gSU*YRLk|P5^$@=S;aCJ<(yuLi8E! zjYT8K4<~o-3x-(3z3ShLQjJgJxID@;nlUgQg-m4DaBJ$(tlMrLog(WmeY-P#G=_zn zH~Q3KDpssH+RHpaJ0!D|o52hx_&xmy8rAk8#D;(NeTE)b^m}$5NBDGepnik@<2&G) zTy`sB1?%w&xq4YOZJ=&?dTcl{@&vDWmY8)Eno+A*COY7=#-^n=m_yj5btR)7-9Ls0 zZ=~ah1shl*vTZj^w z?iuex*sJcR9+yX%|HY@jUqAKM5_cj2`BMW1ZJ`e=(-O z!>6zYd8Ba*onjH?+`iV1(wHYMu(CTzKT1YFjFI7L^dURHJ6STZp0#!L(NaV|ONPfr zc`Ai>wS&Z1!T35qiI(U(-ure|IzXS7(LOjIJE!yexxOr|iOiM(O`1ixo0WLoRky24 zR`RIEPxcI2yx(EJFM!UgOEvdVE#mnIvqdt$NHnfkOOX#CnfpFwC%yJqi!K%;lQ}b< zy?u1etK@P>F8UImSiKdgcFd>1;gfTT@DWZ`jLOmYm&7lu+LEg=CVhd%2s=b2+Ja-~!`wymH;`w#JF3#0JtSL0<3s z{N~+wv}~=+=c9?`b8eP$%e0XRT+P{P%#)i(WLx-*SjtJ;rQR6FPf=4x8rEQ^Xyi9I z*lN$GzM{>nmaF+@K=-sioU%GB34)n_WYfgGj>nu+9KAhq5ciB6uv8=s@-p|*0++#} z`}I_U8Rbz)kXX9LMI@TrlHE}|5Unxw798$kNaZ+G4eTW6o)`D4pYC#-vnj7YQ}!)7 z3(lF6?l_o92eD{2{bsbq4wA9dnw~xxaScy5w=j~9+-h&f*Eu5|og@}U^A{lri~Afc z;y75*pP0*>3k;dn=m%EgjLWs1L(i*sPRoh;?pMm9K7Yde(`wzP!F^Sz8}l}BZ(ocN z==ihK;vRBx_vJ|sEJ4QwSt2#-Fe91Yk6~;xhmQ6bAFCS;IE`RbTKZsfPOjW$Uv~hA zxH%ba*T?+}X2ajyi(l3q^X?R5^iCJiWv=gL1+3Cf2$=ivJlAurvP|lKw1w@_<8mbO zF?(k%30{LFcme*n&tmfD8)_ag7B7>yx>9AELuNV{UTsDGND`oxw&C)@XjcoaXRqc)*Ap{qK{tKaF*j9FONI z4Av{Kn|bqAq$T@GZKhsC2?&7QuzOC$V~f$hh&x6Ou91~SRvx8EOmlSXinQnls}SwO 
z@K}?zdxy-@reGGS98Ydb1|o*j_m#WxSg@6oIq4~kYu)0SvGc+;@|jo7C0w%_pW2p; z)uTn@23C<7{XOgGTq#k26(5&9;Zo`&8LYi@zjl$sommkiDx5F#;Dzx0*@P1@i}ig* zKVBHCx_-KU>1v-&vJg?wbnYGnb946gsVZY%joNM8?UMP@?S7+1{7ZJb-zfcVccT9i zuiEV(op9s)rm>Uxwjc_^MNNA zYxL{nBsm9{)$#m{Lmm#=mUprItYX7=>!K5Z4t3FgRV>U!?;%pa65(vNnpr7WGRy^`4Gt_{mOB7%)40$ySUe@IBch2$uO-2gr|X z>7}n{MhAW>k+ppRJ;TJ*2X>hpQ%?!rI}b?y?fc~T?iQd6-@~HY(q_HM6ty({zT;gr zh3E719U}d+Vqf|tdbizhZAsPxgIMVpJIyXS_w_!GSaV*ZnD^k<`?`%|7LeYMa})nP znG?N-m53k9GbX{i@w+>X$$;J|3D8>*F+UaH$a;c0>rQXiXFW$`!vfjm$j{ix6P+(g z+w*g_+`*H+K;mbjW1>5ndUQu`N=-lu*XN`uozkx8im{= z$4DMM$1f;L_D4jYKNmtPjv3$BcLI5lCwlEj!1~u*Wml(9%w^T^%EcWL;sPswx!c)q z3!}Tp0tVQ9^(-HVhaJe@V4!FSFGeRzZ>RQw*{u1D$7Wk{AJ$}~cn_oSyS2;MrHeeH z&eC7eV&hpxG_f&j^-j}SLJBhaAV<99)fpmoJ=Q5-+?{ldG)v6dpP;V{51}pA#~n&U z0lH^F_uBv3H{LFtxu?4Pl-Vj3C%t(_1A(b;vt#!{0o&j& zK^wSS%Zhg1uTRTyOC2ybS_KdOoKMc{_~p2<3fB=Qnkz@S=l565`!=#|&LG8>`0BD= zaqqM}l^&F*{_!9($<{*iXV=f+wEemR!Cm)x2BY(ooanq+Hhx^6%#H{$!xHu^dCw}X zp9YIYJ72+ObQC&bj&V1umZ{eJjLf92>AqnzVkB_|31rBu6raRL=1+bQN8l6OpuxKigR`CU} zV3y9v@se0@-C5ky@w}Sh{H;hlE6Mx3W+D>nas4nVdBAM_=V!y`%x#VrCLsr5_4oJx{~A%_%QkLsCTZ3-z_EK1iJ6* zKkKJm#j5y1PGE_GUSH!$+rj&JHZ?OE@!U^9f3nZ=NnzR?H1mBt#Kgo5qPf3;EO?d~ zf&_+hkn4z^QS?~IjV6S z^fi|##{oN(bo938&)esTcWc{Rt51$+u){2%-u?XNB{3Lahmq_;4wJ36V*j-jw=d_w zUq7xf#}1;agf&thMgzws_Yygb==!6bElJk3Kv*+*80#@E-K-Jk$e-IT^Nn#09tjGP z&7&h2VQuH_ReP34c@lqQ9LFIRvPL)}wn0+%eG{#sDQ&XU)zZF>%eAnMkxK@brFrG} zh_e=VYX-2&TFx`X%IK{y&L-CEGcR*Vo#omkear93qIcto$Sw4tRV$yRx5}5|fltH( z!&zt^X=9lsqshDSIFTS+I*&(V#~6JPh3GojmBu1)u6?ME3G6)+b+q0RV{2QrEkRVW z!|{BJ5ri}6Ydtgy`w3^kJR_4Wzzk$D?bXpLoQD3Fr&sV2avm}gA`V@O%*3-}xNhvd zf-UsX-9fJH`$xKdVhzN6EcD(q4p$}qA#uEbRTwcl`)Uoz8Kd89X*PQ`Xz=^+h1mv@ z8)1$1u9{bo#+;sKxS?}arFPXG8c$Wz>mh>EEf6a?joI1$STR}gb;fYJ*dBeE5p+(W zSC~&Dg*rm=wT4)ME4r+PkUgg>#M6jyumpDK74+Cx6e*}$@Wh;0HluEvy0(@l`m(xo ztyJpd_hf%ddw>{pl0D2+CauqFWfML_G+9T>b*yBy86|yR_NnPt&!xpX|4Y4kt>#ZQ zwm-8NquDy%=4?UBL;tA`_5*(dyfc+uJ!DZFUYsyJ*qG&WKZF5Ap$ORFB&hO 
z*97D;+p>~j`aRF@PKdI`vDO@UaLfkeMzWUAYc1)Mxi0ykdnho%@+2#9UUZFqzm9Ei z3L2rFL392*KDB1#!FUL}D}L`vvPNSoD9uT-cqcm>)}Lf0`t;cpuU6PCPD#Dgo)Mj! zBiNl>R?_GNX61Ekh?SF5EYWWkcNuH#%i8Z2C*fVEB`pzc%_h9>&M8v#-3Fj$$wtQ; z*LMuoGUQ*x+0W_^8aETMivas3-W!LE6a0+ssPD}L$0y%&@)aGk0`@q3E?Pg0qj#Pd zgXa?A4kb|=LC%Yj>PjAh3sZ}XyvpdC7<(oK*n_QiNucaY^=WZ zc#{8+Ea;{4eP@(knC;tIq4z`e)6DS-SYrNM?ed^0nqAxBJ6Px^2efC&lc1S(Vf8R5 zT=1;;m#F25x105e-_{@g-_p>YGw1c`?20({$TsG6BO*pF407knuOLhAQkDiWV8oN#EZ21WsKLTd(Sy!%=jzpl_yV$v%jmEjUnpv z)3W*I)3hhQiT;@bAHW0foJe<>=UI=t5B80>&PTu@+1*FaE`g?#LOtfRScI z!1henYt-m%$xv@i^Wnev3%LogX2cw5L&InS-vwPCm8|F(PheLmvV>pQJ8gR}xnWyM z=3z|zN;ZW>T5rY|_N+N+zwe$cWL?g<-`B(gxmzMS>(}yd-1yXNn9RP(C}VO7wGmBh z>GiB6C#T8YjXVL?J!_Ve9n8-9w>>92R-QcJ+UEap-*x_6hSXN*2HOxl;?I%U{)}KkadyYwnQWbPBN_7b&Z3dq%nt3NyYjLf1OB^0j9xU_v*TC%%o?cUQZzBQpE%gI z^N63Y?>63$56mH@_?F@H!rm_49D_b?1S>Y(7E8XwOEk%4d^Ka>UTeG>NEzxn9zgTy`<;1q+j_z@T z<&hEPZtDU8P1^ctiONK)R3VQWXBS!YTBik}B#W%xaK_R}F&%;q*Dr;gb z)W7_X=6zbYHLGzJHuVXTS|*%eZ3q61vaJ>J!}WQTNI<0k`iW0mb6P*cl4fFLbubd# zMGnaBU*7i&7WF5Q5~Sg4oe$Z~$vm*ZItPkQ(u>84TtOG`Ui}O2-LL;yD{(aMc7D1( z@$8f6?B1`n(j9Wy!&nf;wsNz#>ytz1Af;}m-s1Od%^$Bc-*~)USKZp-$h`?7e$F4E zH?7+}wyVxQ+FcEDa^Tv|^4KotuCX`$aro{@Wf|K4+O)33ikYkXNRqI%=RVCz$v$1@zQ+t&74r^L9d)NER<(bFY<;6u(hWaacf zz54R!NJfV4>zixG6Nao|#1$H|dqUSOxi|b~JVdURa>NHn>aN`q0mr*lEd7f4qv;rnNdZ{8W23@m2EYdh`mPa#;4`eKHU~ZfYq{No!3M;zrnpkuKBqo znI^j$K_mC=gV)@D=04AEP3LE%(7o0m+n?53@9ACJ8j3H(ny=S&R2Jx_Yb)z$vX$00y^wTrViPE9J zUa!NbtU$jaO5$H+VqPJCtz{m0ll+F98QEY9pD+2aR@B|VIl7pSHG;itJ zGLpKNAVR`rV0OG0pmRIq@Fcf=kZm+&>}Ua*kiWYu%pd(V3(h}xTsB^F1Zz`s^G{<{ z_gS{s)RB_^$c=BGe|~!v@dz?-a|!Bi)}5I-%gbwtcUd2wM#k`7SbiDTw`{Nf+0ktO z12Kt@eZt@}F%KDJYotN~qHQ?zw6qp0ZFv;cjjN$+%sNK@k+(jswT>svp6?_DyXLNa zhtIyeh4oF}@mw7c^MT(N&Rl=_*whJ;IXET9Paq>t8}J?Ow1d0MZ7MR=SaLf%+Vc_ z;Cgk3^_>*A%4Qw4&!6lwThTa9WC0`cY}X8pm)#ZTxZIW#Ef9NOOt!Y?obh>*ri%5-xO?97qNy9Ykad9@xfg3&OQrWZrr=I z0#7=Vy)y&oSn3H>6mo0Ek+bI!0fu9Y`HmZ7p&4~<{-E6Z^uhf7>F=gVy@$+?+5HB9 zC12UmLgO+W+(G2H>Nf|mdwu9m-JjQbZcetGu|U0wPWNdc`B8 
z73LwXj?rIT5J5ZhAP)rlbV$ka!s$z-Gu~<0lwCyb#pAO^Xy*r+AD2ucV=6H#qc*T6 z0;{fwY~T2~?XMjf7?m8D{5;+eZ;MvCwjv5%+wIBH)nWADh?7-w9)4J_9M(T5%q-vo z9)d+80UlwVOl^*qV2<8v{dwtXnPJ{8Tr(ybAhV<{@00!e^pQ-K>#!@=#$L-+$rW?H zU4ABg^{g&DvJZ)9BDugij${;&3C84vt_k)2d}=(CNKVn;=fT-GdEv%{a>jZ-S!o#s zM4l@>-F3y&5%zzy6fP2ZNY^r%6Gx_&+*L+Doq=sz=5WSMo}rcCL@LYeQNRJ8ARqB* zOZsrH#M89hUFJ**OoFbn23E_@T~p|#OMk{XjEr_=N4w~aD48SKEV4YR+065=*KfbB zE61)uw8tmYXO9LK=LmP4bu~?79aK4R39(B4KqIVrtB223kpcafCDNsE7gfkPIscV0YWF4XRX{obuw7vB2)REVUCyBfENnDrRVyuP_$sqf18)k~lmWW4} zVra`I@OEm3KH0EO)*Gd{`=xJWMzf5K-OW+6LxyFx^T@TOluSmusw{)8rV8NQvT!;# z(JH%!beEYKsfZ!&7AfUE+a2O0GNyP6y=U>v{kY)D74?p~uW;_4oU%^?Zu@QG+FDc1 zLU=B^c0OZitiQb;@CW{R-g&-p(B5D&gvz4|{nvLRQ;(6=5$&vOGCRIE)%hO zzkjIz(LcZcq5e%C;o2SXMqQcDP(>v3e0cPC=f1}3uoF25c)eFuD^l4ozmFt^0n_K$9%bqrBFH7s*<%}G}FybzI{4h0~kPK^m*B0}~ zYU8);CF^#tgnZ3UnuCmyqqBH0`9|UMjT-Y#G2MKx5Pll(-st)eoQc*$8u;$}4Bzz$ z0`5@Qb5cID)OcJxSDov5Xq)|{_#Z1GV?A{LcuU_W44p_mRi9v!I*i~FvdDzE<``o1)PxWrE>E0Ck+qcN< z?^mQqofK9e&pIv^r&9>yE${D5hwtw0%IGwAl%a930HzadU>>3`=XW>00g)7;3}t+{N=idqUYUt)vb z&Hj9f-dnYxd4NpZTDn3ezS9PxUM zw`7UvEccEhnFtvP_V2!Ncbn;yM0yZE@ zCHEsnHBxxzTG{C3Ia5%Hl2s-8g#m(MPo3!fjUYII4q9@_?Yzn=DxW#z!DHvO`{8pB z#=}mkR@sV4*U#&nb2>dJt z$+=)~cxtIh&`wNmJ+1MVtSmM-AX|;(c+ni)jrOhM^EHnr;>OI?3S7VS-)l$vd~x)= zwI9xV7w1)WNz*&`KDR!7Pr4e^A|TQT0gv zj;u7iJ!0K@ICtdEb0Vr1EivkK>uz$N0mRcy$!$BzffXuzM#4TsbP<|-iT_(We8vhZ zr!B3;t&i!DqBH&2&JxU=3}*gJ)Q{^OcIvFXdIfjumF1nL@7HI^jGj(eaX-x~C~YfI zyRhqv-)(6P`9wBnPh`Try6j55~-!( z{$_2GmFVe89h3$&t}JwBf8d96ca#Xq&5c45^0cl}3$j069@oK!;Jx61&)ojoSTG#sL)wPtVMzW`+^^IQW47rzaX>N;1yL=jn zJsIUAdSri)4Dwp_GjA43Z%;p8PO3-)X5dE7St5y%M=9VPI|R9BC^3#bXghk5L+m9+ z2Ivn%k=p!{nkF@QXYwtTP&+r7H3_z_D=AU~iK9ibb2IBw zR^UD;qptJ9k(m+`ksl=Qn3)a$?;?stxf+F4s{G= zIn4E7EwhM5Rvli4jl^IxdHmsG3%yn+j}PmE{gixWloMNq_d2$K`Z4e5E}@Yqr(r$4 z!`W#@ZpA+)o6j1Qr>d)Vw@27uIS;PDF+piW~Q@W`1iEeD_`jhFs z^X}I)CLC(VY`HdS4)hkhMVfx1B-mE>(Ku*{C1FeIyZJNDJG0Mttd5-SPT3BA6ZJRi zm0R@={)8sj^~@FUC$nHR?csf@^vAH`I{H}4fDh)9cRbaHjO%+=jTE3<4|bLFZYn28 
z$32E|j%CNbk=Dq+UNfgw=~qA5Wfau*teTNv1J-(X#Y+*SS*&RaL|4&&K%klH|N%LP7|Nm%# z1wJt{%QrQK9?5J-Wytz%>opj`;QdGeu-L%*VX!=$@69$fwRK zTVPN1tzwhkOm@v)$_}x$UuJg1r*`wy{#bVl?o1YI@5F!m4mkES$We$}Aa2Wwyw7-` zg>~{-X50Ssx+Z9BC6C+RiH&V#sIDNa;abN$jZ;==D=jUZdaX}mkb|P%`7?~*Mldn< z_{~PBC_(7YCU*Tv{nWVyntA(*3WNf>}*vyBRZC= zOkS;Zkf%G7u!>J&#g{>H0Z}P22#g`*X zE^{h8A*l5|P@WDFn0l||_VmxyxB%H%shQY(S4Cc=#~WQ7V#xWmVHiB$3Tb^3MczBx z@wXXU-!=1PH>7(p zYnHZOvaes0CuP9XoZzE-Wy~f25Fh*S^S?*#a`K(B7f$>dF=K<#HenDDC+`uXtd4$v z^v@FmaxMl$(DB2T9ch;}K%>*lS2WA5#7+8PdICsgXMQVsaTUAk^KRraE1KtB;p(gh zl(f`zpMFq(=$?FCvCmJ<`J_L6c#Y4ek$yboe4zhystA}Li9W9NjdvZ7YtTpL*&`Vi z8pp%kV~LfPD;Tp4kbPFyP|1?5ICA%$KCRcdKBo@nxk~)Mbq`y0)snb^&7*ZH+0-rW z_c*&A1;b!_cMbogqBNP_n%j7u`pM*#?$XT90;adM6{BQSbmLmPvPJ^;6PT~>s0CmR{~p>k2a~2OR15O(C`NhqH+aTe<~#+8?{hUw=Ez z`)XP24J;OrH(;IX^7VeozuvJ)C=DMgCMS3xBJpm!JF3bSl^VP*8y#z(#gz z=1-&QuEF+M${*Bq@u{wYtPZjJ(_*7WURxRcC2LK7|FcPx>(OEjJuuI?=D9QDp5)So z{EmJ+l>wI}zV)G0S=Y1LUE#}wq#2RAk@V>~} z4;sgvIqLcD?}qEddRP2t;a|(s_hk|5)&2Rre3?Hjy_|}MJS*%F?Vdl8W{Nd^(%!zR zr*l0YWyGW=HFJADU4dWl4mRp1QtXQbGE(lXqT9Sg%UYM~I#Y)E`Q&Nu6<)uY`qt+R zGDQ*Ax;2)kU5HtI55}CuVG-{`1Y_nCWP5l!y(IEg`M?g_SoH_<^kV+0^wekGcqP%w zdf)1veV4VI%e@cpu=wlRE%zQgI!sC&4XYwfelVQ<*J_2$u`=e=9 z`#&{b0nr|<5}Db-|7>?Xd|@6it`}z1w#-5{gdBX5U8298VmDVkpU^}H=cH=ypX^@0 zfBqYO^!dD0n9ANG zjapb+T>eUSNM>jK6JA44L^1ad?Av!`OJ?J)AM?q9Id{Q0T~YHnG;U`sSrVSht7JdP zj$0?;gq);Fmob{u@BZT~!sVLYp-&9d&vkBC{>}*r-yKj$g zF;3&lr)T>~<=Ku^!KD`98AW&x_s%*t^M=7YQ(2$mg0Z>dUkS_%d}ilq$&E#p=X9tI z$S?E@xGsncKA4-F2>if<@Q3>)o`}h_0`2PgT<>?Maq#<84`_b8yHI2{BW88MRch+= zJ;$DAV@{hdS@UwyjNLKT3(LSjWD@tvyU=*F4exDXcFV7_f;m$p{|isdbH^oK$c_)6 z(||d;Vz3&(qqWSw3Ib1LEqQ;rXZ>1P7?gV_muM!+<(oX-DDCiz&`#+#9ZqU1cbwr#WmL|1!EA8(#huioSg$R_;a%rJK@~17SxYtOIRUS zz#`V{_E^S_fz0;t^eH}V-p&(KL_NskTKgkdg077bFJ3se4-(z0@rZ25&WOmHYFxI; zN+S1`x31%+^G3>Lb-`?MICo}yLgbA}H#x7hm1(5U!s=nroOcGJjKbZ5GN@gy_F=S6 zj`+Mj@t4_S{{QL>!Zu3C8DCC0$PcSh|6ygC+{=1a6~z0MPx8}8m9xH6@BEL#3sNt? 
z#Q{$vr!s3sm*W`(*%~YAzimHWF{?>k%C&bVdymgMUG6CcBlD{G*5pTwncA4x2M2_s z6T|T{y3LR4y{*n=B)zDw^XKR+t0S@&A}^gqG{h=oCx82Vs!3dDyaFravr7#T3HqD{ zS{`ehoGj4W(ydSnu#uiIYGs|5+2L8%z~G!341chvV2+is+NB;Pz z@a(t^W61jXo;cR|bdsMv{KMS|>XWTgn{fDtWe0a;BKgG{jx66#d%y(!Ov@T?^f@wbqGL=`m4x&83$6 z=Jh7LV$A7t5LO`Q9{e|Ns8OOBIX>C%vx#ZRU=mm7JCI`@`Jz5Q)=qc>E0>9udQM)3 z-ttML0qH>uQP3HK(LTB1U61xytK!ZOtR= z{tuatXa@sm6w5P~drDX(^};yvy?TAkBU*9plVHy?Tw^s4XK}wop2Z+S^F&HmXIzau z*HMmK1}VR-&oPd7RxPceG|AC=mF8;MV?9EbZ7Tx*YquGGvjO3PuXq~O|rENSRlJSu&G zLn2e-c5S8aS3&iDwfzw1Jv^hQ$>7rVUo`l?r6)Sb8udr4 zJf5F2vfJ_OJ#V$I9m8BB$l&KFFS6Lqk`$+4$f`SyuYS!~4&*-y%Sn%nTO~_gFtg>60SZ$=Hhl01W zV*WM9B2HkV&np_Q*L-95Tmv)%2EAtsB!GeIQ#QcUeu7BZBDn1CapJ?P;|OLXzcRON zByQ{*kep>Z>YyE@Lx}D)<#F1^={;-hW4R0@U zy*hN94#Pfiy#JmTz2G? zQIFhm>rwOFFXZ6-AM=nif*CwI&uga&LLyd5hU4$nLga{^$0Y*=KV-UQ^pAJr@PE*~ z-49Lt%PtqWHD_3P-Zo>xv+16YuZhFtY3dQQX4j?9BU7=mCZxu0y*HS3jIXrDh=*9r zQy+Ymv!lc!vw!r%t9{ZP?YWD1fr1=vVJ3bO9>Oi^5Hu3x6VW75nvqelk?U3yp1qb=4Li`$;Yeg zH-J<8iW6dbUo>Y&h@5>sbDnOQr)@9q4o2&Jw!%-(Yv`QOu&2Xb02m~@Bdg)}&LHDE zNY1l`K*zkVmF`DSKCeuCHT7|-T&scQcP(HKVmh{F4Mu%%2FA#?*h3D&;7%klA2z$j zpTHd!HiwsumOJQVcc9&UoU483Azj$3{{i0d_MLu8X0R*ky*pVgIzkG6 zhU@1HBJzp-=$Wz&xSx;MpIuR(U`PgI4Z7!9K}QtV+lY$2FOAJeXc(tYwVl)7Z~v z-kYT&*X`-m;Mz2lFQ~VAim_uSwkW4DjOP`c$u`bJ-~) G`u_np_&5Fl literal 46168 zcmcJY>v9}NdZvGGUqwlFgg_W*NR%{VYeut+p-4$%iK0blxC(OvET9WOi+#X!HwePJ z>zI4+uZ>v$-3yy5+2?(~%&hDKDasRONF>l*m6iD&-|vuFpMUN@gKwWan=f822l4CY zgZx1=?G~5avbnySwyVo-cF>&PELZcn^LBE6Gig`j`HTs1%X~3Sm+0%Qhcu&h)t?l;X9kKkDcX=r9LZ~k;MUNPu= zdOaDRGr^J9Gn=pCvu@Q*?wX59_j-Ib>4we874yBk8IQW=cB~mOa~9mazMiz6c-PFC zl&9CsFFfN!)n<9OTy;}@y11Fm#St=uS$7VtS~xZ8uDjU?sxd`PCvoPWIlCE8AOIJ)*NlD5vXF`_IS@}m&KiBmP6`a0 zA7+T+>So5Ble+`P)>7s(#7A1d^Pqoo(cLC?L9-iX+%B8P55IrlR5x_>y8-60(3|)U} zDQI+qtcvtv-&~WHZMl?TylAK6$ry97LN)ZM1w?$re2DM%0Wu!21tNGR({`o@a`|G- zRKk@D50V8jxdE9%bd(9op5LsJldv?mNbRjZpChZ)9F~Y5&&)%0uez{LpJf3W zokK5tA$F^ql?$(!+M*lTHlcnuOP=pvewv^0prndC3?;-&ok2XD)0j!(EKR}L7^*V^ 
z8*=Dw+Kt+~!`1wde=Ik$Ik(+pGHjmla2fmHNYcQdohTrR6~+e1WL-vcJ5YEuna(lw zGn@&^hM;`iyO;xu8C3 z(O!>7+>5MC?*2u#qiYb}o5@O|`&D-NXC2DMx9v+YYE7DLE;eQ=5uJi@bc0AS8Lw6n zRMsi-hUFUz!OWWLcCi`@lYlHFDqPS1$j)8J+YGyCmp6@v@7C3;@#WRw>IzC+UP-u; zi<4>XjZe{>EUqyT)QumQZQGn73GLaO0gB7*rk@Q4`}WTJ``Hfc?=!iu4$YoiEuJ=i zH=Hjn?@5tuFIn!QYXcYEqQI!QrC++ibBZDur3n$hOD{YrAVRJuVS^@PX!*zEr%xK} z-s5Kvux9s~%ke8Tpj|NWg?w}oxL~hM~ry=K_e-E|oj+yoOea2Wt32EWGt zVl!|r;7Lz_!_nDp8lZSHTpzAebSOmO`lB_Xz;+-^O@U#da)HP1^oVlx`UA2llDx=E zU>QtFBe?)rGPF|3i}7oK6avsK7V|~2z*8h@B`kpdKwt2B3^2M`WW$Y_EfX$I;hiR2 zu4clB+5A?bBE4vDR`V%1TFy0FaoBqPGSp$qrZsyR1Qi-IvUT?wtNro@%N{(}q(5S124K)V=TItDV&e7z~7!Xn9^>+I1CSw z@t&Saey*f#yqr(GSe%_A2d-mbf`XVd1q?HmY)^K}Z<`P6Y=Jv-(L( z#KBxsoVI8o6;u`?L8y$eg*NmxJ_Bc)F}}=?WF}!aO??RzBRsT~NlLWNo1VSvoZqYN zPJ&XcsQ^0UWd^82P9;$?qd;lf&|trbqo`IG+j$SJDN*k1&N^1+{2OF)+D*@t==k=@ zLxNCiDyYqtKn|5@-Ln{phghYPBwL8ZH!XPMs`-9?{!(zONH&R*?xy2{?=f$SS6Rd~ z03vsdjQfHh`*O)Fx%TTSozXl-Yi%V9MwkGwusqEXcdZ%vb9_Bt$X49%Q%!K`udo%p zgIVX%yV{C!|IuJ@obo8*QQ+bvA_0`Ji#x4A8KT*X`=84fDkt=I31(m~TIN>(FN#qhloBVqo)_0#EN|Vk?cYHgq!q;yQjroJnZp zkx>^*a*a$Km)VYmRY^&%+Z1_ZT(#E?FNX(PfE43_=x(FCpC1srq}*rDo9+GK>35Is z={BPI!Qc^!r}^MX5$=NPTSAK@L;=_`v_B`oNfhII@@gdq*)iLde$sr$m@;7Rpc;e0 zkG(YuTf=NTSv>&0qfff5R<3X-9wg^S?KHy^iHvBA3EGq$DCY-J*Oc zm~?^s{X4c5S@=Gs-TR5)wj$ldkaqH39<(%N&m=K0$^711xoeM1lNe4M~pghZh+KO&k z45sr58N3O(U#&Jws!XZD6V^Gid0DK0jX)n|qO1ee06O9-D4zA>{@e6pw4opOn=f~* zL1N$#8H^l3^0}b7La{QfK^6~B%SoiNIAZ)*oK)>QC0L2K#N6U!bfckx5L+*F-*?6pO-ce;OdlVi}lB#>m)S#?&UhEmbz( zG3w4;TfK^O$y7PpP)*AvV3QAokYJDoqoNr@VH=WtE-qnwk+0kfsoYHH^g^lnpG!=0 zt+Q|&xAA*&`DgFRhPO*<$m)9ty zAhW7uq!suyG7wnDJn2Xo!IjGUKZyeUK#%*wgNIMQSBgbFA5_IU4Z0%juQVo0%5{0+ zr9MBuzkdMUtn5rMUl26)nh>ur!})^jrPeMVg+o!A0iGjs%!oDhvplDQM>#RDTq@*5 zm6!-e?FbGtD2}y*2xZK-VRKCN{h;J%cnnpFD6y<(PE|?MS|D74FFe1nc4-i3c99E4 z+~OILJGE@2Io}c7VB|*dltLBsORBQ;S9(n zTBcbVO7ynr%+g_4`!Vs`AIV3`I&=#?W$>@te+Dv&8`sIJPv+4-M4+b&_V3AM%TMn=9a?rH?T#c8icpzZwo~pKFjl5hLjmRKbo@}}XL@NQW!d76J 
zl#P-BV6_R>6%OoQ+&({*9`0MjT)+_Z2ldrbDi{2|e*ED0`-A2mpM3ZAKqLm|Hw%~R0!hplq2~hcD1HH$ncKL$*Q+8E$zTn#SBmVV@TMw< z@abD&f^3%}183HX!lIj29np~HB3LD=D{g#=sfkqcpjyADaSL_nXj6qOHHDxVOW8<< zvn$eAk`{RwnJu2gc-c_O-zbmKlRANvXi~9B()XoWYRe}V4riE@HECn2JR#?Oz+{Go zDv&NZl0BG>$`b3@!J|QD4c!LK{7e=XWbvhItXzeWt42!SE1AHvvMQM1gJ66ts-Lv+ zdUiqJD_I92GE9_6UIfPQ!Ij=>$)$r}V5u$cF;mqr$#kq?%6>>G0s01cWcCJ6Qb%IF ztQ`cZW)XEh76uv$@Hp6CS3Xoid1`mMI8e@%k$LOgi`E|DH(X&Yx0nd3Fm0Ig>YphP@S(z>QxgWC{Y3< zrIT&+H2{P;mxef)_KK7%$7NS!k+D?+qJL{i+EO>@eX?*5R^ON=SBhp^N3 zdQXG@VSY9gS~``#Jm~G!@}ED@qCWT^&_2t9aj5k%kcTF$F`^!?fC}&$jk5jPG%+qL z@d0FtC}eAC4_}ADjefKo0tPaPprs7byU=}RnK+n*%tfuRx=O3s-1cZ-mtRVRuvgH} z$lO;Uu^C%LG*wh6tOl9^HH-m3b_*rzqR1nf(@_UbTXdHgcSksMwa&$_vu<_Uq3_Cr z=}90qhmZU|y&lI0Ty)+>s3z@;2F*&JZA7BWTQ1zfl@j3taw{?~4CG!3bwB?eT?*;q z3-g2dr4T_grO^+)u9O<7@SHZOfGlrFX_cyQrJl-@6TM*-%=tn!se*22YJ2z6^Vgt8 zD-9OHh^Bh33-yXQrViE(vLxA#BALqa1mNm}mo@2s8`4sI{_<2tZ||RbfF+DgT3 zPAnFN4{kLZ8xtkya>EriB2w+Np&cMop%InXTlqpd(c}FhoH4HZi6-L<5Wb_HVc)?C z+=PXru@ZhPoSy^vKpW>uQX_FDgN^4QW?F6o4^C#J&p;Q_CyelM7?k<--J9S+AN4&* zSFOi{bb}?zc21NROp7quxx! 
zHhe)pujEm%1NQOwj#q9Um4w^ekpy$y3=5i+BxA}x1T@TcQbLOyAjKNz`DqZJho#tW zm}=!XD(PE`S(jFYSunx00OVJzA7GW{?xeDH57M$8C%Wa^M`Y<_zpMcu^YfQN|9leu zMbj86>-qX`^myzbO>!QbLF%SaMIjM&o|B7F&Mn$n;inNu5mnJo5eWIq;U_~2>nD5~zbFmhC9lH?{c5u6DMRa3Ks)=6@GTLmFVtWwt` zYyu_fYu}K>pjU^Io+&aM(%XYHE6^;OJ=I(lZbit`L2TyU7wpjL_LGw)_+oF=!5})V z^TdQ7*b1e_0Whb^dKhUZLCezmiAmtOHdc;RcYFPg#nxd9>Mg(2!IfBH zX`IsEK7H{0@yUa)2yT2)>_KFLYZpEF{<}Xsd~mXMkw)5s$nM(Fk`Q2m36TZy6nyK1 z(FnQZo@AU#?Fy?cJITgPe38aUlh7#XQB8XxaWZu1BphVeRRjid!FsiJF0Wfq;hr|V z01dfV0}-&k%)35Y(z`T@&+_Q77V;f?+}+fZu@~h~#IjAO2QE}f^#o-lrqNGJ)`I0q z-MPzjPknwYkcKPtJqnAlRYsXXwV7%ZHQ|smsU?D4UWyq#Rw@$6%Zi>;>R)7ZXptlx z=$=AdrEzYrsQA0aL3cyo9iAPa$*IcqSPbt0+GZ~ZJL7~FhZ=*Hq}cYVN13{jg2lsZ z+%4`B#4!c6y|6?}rYAB#C#h>^m;CUONPd5jny{4j!j4DssqhN zrbH9b11_kPzz%clSHn*+ns?ZujiCH60;n{ZAc6*R&#!D+9?lZK=*F{icBjP>KTLy4 zu9B!xM*CJfB@Fk?@u&eY9~{3*H{`LfCz@x%T;Khp?ia9JrzA!2C2b(Z&d zCHfR|o1c^kR=aZ$Yx1;P-6%zWxLn<7o0Yyb2Of}eSkMD6CDezz3NqUzns@mm0KFf) z{#10dhdkoR+n_yR^bH8#b4&TP%_+-Ed6<6G-6IMN$6D(FQ_9`G&U?z%NEiz1CLeJt zAqv>kswFhQv|;4BFON;v5hWy^Iz`@>a$S>=6-&6gvo5=tFu`|V+JI1$de>5E|XBct3qN-4og$e3vnvVkRDVQN}uCSiEg zM4LiNN&H^eD;BQpM9I?)!Vw50$44GmcIqHs}j!y2yWjh=Hi|}0eiuJM7j4YP#)|PU32w1jLRTD^5 z<`AVK#i?;1vWf}OB!_)Dr_fH?r`0qS`Igio_84K8fOr-88tqoADq$`c`+^9``r943 zAUu?M!>m~H*2DYFzbupA_4@U8X0%hUu%R};ar5$we@!Q%#uQu92KW?6YV$rBI#%@2Fo>lg* zstL^tVlNPckc}JCSGnORp_!{(^rwVo6yyFXlL&zwF zLH%NDXAW{mv`Hi&pc$g*M-BKXk&lTe!k0Uk(jKQ&aAl6fmkJC)GzM`tTSg~k?vlc@ zj-Kae)Q*-lkyk4$^T(uYlbF4q-3213)Mm2o9Am>av}v-}41ra_%iMmDq*8~p8<%`YPPBF8(_=pR>#&Y@$x+ob$#xLh8I56G7?eRlp7@Upuw(J%%_XN42|cGodDsNBhl*;{ zGqWX?mB_@2*wxLJ-cZh?*PM`6s&8{lsmypJG%yDeTPI7^~P<2^--$RmHK~` zH#%s({NbA?_5BBFgZ%nun1^q_P;w)kfP{;CcEu?arAO60XkvzT1MHA~sD(RJYj>fM z^%HcKN#o?w@p~X1`pP_JxwdK`KoHQ4IIfUy=!=0oqB`t4lto zma&AvGW#M0(`ssU`8Q2GzGdnf715x7z`nuq8FJ0mf0{>(W%Q}D zr(`jdR7<5f9UgSeiYBv?p5H4 zBu}bYUvb|VpZo-G9?>N#9%|1682WtI1Z8lY-QkIv$=Z<3-p$MhSqXLaSgEkbfcbt| z*76=U5;59RTP9(gs?0@dAR<(Yv=KDT)#xNY)f6x!k6>b{j#XcwZfiEGkN1M4^`##r 
zLq&8I77?>7=>RFYr<@0?%N?O5mvtla8>SBTEwmC$kEpX+Z3v;kyqJ68-rvjsL{m=N zG{uMI?c@SAItEXo8q~00+$JE9@^dnY(G5f3Bd+wl$OzH1V;5Od#bGDOD_M))6y^2w zY$fL8!{D}qNtX*H8@6zAt~$O>LZQa#WhaA4f&f3tv0;(1SSPG5tv|W%G^o)6!C>+q zt{0%_y=ns4*afV_EICU?+awH7Sv2lS05qhH zdZE$}5JDW|t8@0nt6I{#)xHZ!wr-F@HY6cVfYM{o%{2GyyYf_q%{N`tj`L#~ne1{D zJF^ZuFiWtPXM=$h)@H3@_5!o_|HEMR{$CAdUHz9cGSEoSU9ha;bm8A74V;iD2nR~R ziphNbGFpKW5d8ElaP?vXu727)B}hTe)=2iQh;<+t6t{l{v3^}Z>ZjyNHxTAJ#>(6l zY}ZMMoY?XT)1pIx28meSX6I;1tj+lr!|e@|HUm&>J}xq4(!fmI8rE4o^A{PyxfmcL z8-VmuHM3g^e3#kvnFK+{ii-|bB5NbDPCc-=tLhqN6ID9&D>Z0W-vXJ!GG?lz(tw7` zZ6JvBSGh3K?|agWve4uB05+88Q!kSX2uiIf^AokY6Y+3s0oe&8Ao5b=jHF4`A8sek z<77}SY^GKDTz4(l$MK$hp1(kJ~`00TKpv18l-NA*Dby}JJ!sa3~DvSEjPj- zVYW+$kp>rz1hk?1>gc~7E2}DtRX@+Ih6)$mRHoVV4>V+Aw^E#T%UaVpXoRw`l}o`3 zyC|m5N)k=d4|J%nSL2403M3f)@8Nj&@ zFCxt_EtxOdvsxdNg^44#b^`0qGD~_QlvSe?+U%S5s7m++&XxOKmM;`Uaww$hz z&tNQI9NiLupbN~1pIrt^130%8$RRh#=dtnb5)X3AAzSwQ^#ojN~tghUK4Ewo1aKuV@6daGPN(%$~4X4MPW4o`eutu6_DEws=; zX_d5mr{gLFCKMO5GOQ09=ymv==Z@t$i4Iupdro;(9Fk17I*sF8pcM15Ky^@d7vLZi zHt4GSCIaT9hcr^-Zjvc=zG;sgZUievc12Zo+5Tj*itpm^j8r~Ayy?BbLJM55;V;>_ zN5W?+=_?;@Tc495C|yd1I1`kr9fVCOyhwG)Ry4}OXTKmi)+SpW6_G9n4iR;v_&)oR zTo8#e=|=fP)`7xl@HyE)d~@1)%Y2z-F8>;_nfGA^Rp{xL2H4!OGI-e!0Q=CzuP=68 z7zORr$Uv&i9W{paF}DOi9n^^;ac)&Qpt>Su+L0H50mBfbLQ<(La{VK^;^w$AjA*$T z;qEcx30pwns&b_X@0;$wePGcd4wLg%Psf}@$~oaUskiMarP zDJ!wLO3C`{;i`k=EX)by$K@&*hy_GId@v@0CaaVOreR*tIaE#lOF2TYStNwwn;52- zngBC7OiBY5QCk}fy^>{8qD21|Kc*mKIUnfv9;?*TjcxtpwkQ+!RWrLY0e<W<6(Q+_`@Ck^2{2vD5AOF=rTqX#G;I9RT1C>c*`66kKjY^ZwKnjDl znT&H6^_uD(6-f(jE3X_KYuhaE%P_xsTN&nZ1GN82V1h}nP4s6ly8}N>e^AM~lBg6) zxAZn_`5R~LEc9dC&~2)jBO0mh#$nPok$VWMq|CZ3^|bv-O5=*|L|a}Qr+6xWoIP013zPb2=tj<29jY0)J)9X#*&&l^X7_NI~Uz|0=t+# z7jrw)G7NSG#dl~lS;I?jtVT)!1UWU-8WZG6NaYc4Wh-$ZSQn!s13qD;R5iQg z1v%ZzmPLusW0@6u6rAuGCtM-vZl~2|OF$P8)W`K;36#&3b5mTA85jA(0q1%ybuc?( zDIu>~C7@B?D&cJ1>gHUhGTf&lBfEpmvn1Zy)G!#N0zeAdT=tC(JPBw&xZ)fuMWv{d zfQwW134Qt`DedhBIq)5*IYFxaz$iErXDXQ`f~y5K_A~Aeff|C{eR4yz2xDgGGs@48 
ze0dUY8BbxB{%BDF8+Cvv$ER~n2gs4!51%W@LVeA9^?|BaB@hi`N=nlC`7wW}9$7>W zbKp2&v$MkKErzIq_`IRi+p*kk1yOSpj6iY%SR4$(gp@0cBeUXR8GuBy%BQYtM~g>a zzt;MCA}eG1)I-Re^IhiuUyDG{0iKiIH$bZ#rzzZCuyG1r>~66udlhak(hggdgX_YQ z#MrXJ80)J5#OE#M`(-tyvtC5QRnC^^EvM|@Heh9C1NQTp^BsZStP+3mHPw z=?zKo2(lTnWSl#XDo2kyRzJNGg!@9{5OZ|Eg3fZvnyW|a6oJ?Vhv;xk+VZCHiJ>E8 zKy<3&du;L)iG=|e9wFz%T=qUIr}103EI_%LelOkr4vy`oWMxl6+1zrW z0Y4vP(My0M|EgS+U}&vR0hUXSCiR+_r}2sxhL6)UGGju|oXkr37z$ohr=rQ|Mm14~ zr_lhBI{GwK3u-WJwyAaZ9WxKcDeP)zbRD2+UrY_F83nUk7V5*kGOaXHFI*h6jRa!8)%KA*6k zHLGUP%o1h`-DwdTcCdWj>~9+kZ};xC*9OEBP7B|reJt9-w06pSM*KIFu-Lw;%LPQDb$3$A4Ud}>o3ZQM1wdi z4ql(|d8Tj3oBW?J!o&PVvGV_~3Wzxcy{{EN^Fxgc#ak=LX>{G<{(rS9oM~(x z)5m@tOb)$6=({q7NM+<|N%%}QM^S=Qr7ByW@$8iruU4p0#45FWr;_>Pc84RPQ~lMuGybSet|%HhayPn+JF zE8^K6Fy&VJ5j~uST^mvIj=GVJFnbuMgmu3Wkl+KKc=Mn%QHw)*r0o4+i@@h3*k zz1m4l*PE#I)-xIY6!`SJ1P}-f6GW&I9O1#UX!Ua_su!N`fITW>K_4?lQgf!idInTO zf*is-=g-g_b!*pku>Qt1&OwVyH=?JgGupfRe824l)6i1-xvDchHGe$P1_dQTtN>B3 zGugR`jV59AsmUs?O&t)DIQs^t%{4Lti44zQ6S<}q5U$6;#M}2!bIhL5St}v@yC2q) z+vb_@iU!goGS;Yxm9l0M;Y=g70U)SG8h*`3L~BD06CK_fZ2NhVp0plb;1{we&ZG}@ zI{-iLtT8D#%#JRgwZ=Qe|X){B? 
zpOB2Cg=nx|T*i(?gz4lJy6g5z79(lE_bs9(rdX`cvq&A&YYv({)<=k>jHhhjAPFMx z=F-XG2VswZJ%QZ{t~58&!KFn0$REaGK9`pY%UY7takf*LiUB%YO7J)!7)>S7$)6(9 zJoy1O2Wjf}_tow7PT(8Cz3KK?J2PrI*ad?=*H)qG4T^v;IeL$wY=p1^xtAz1^aC?&OSwPN}~E7p)sdPa58Lq!Sv5cDADtUOjGz@Xw)vXqu1 zF#w(Ri=`^*@S)VAjf9y*ic}mfI$`5b$~RYIT7sCFKev11jD>jbpI)z4LI4q*rn~Cp zquhjDZL8&di{#ab>6D0*Q4mH0Oz;yV zmk}#DCxiwIYcg`AzhBm*Yz$9|T$NJY+!Md^MKvkOI%}k-v2+_Zd(-Rd+wo|1_4y}% zFEsJhoZY0H%EmLxIjmCJva`!Vd%t;D@h$x2POK2@y;liyE^NT>Z z$MPFQ-%fLrEX}91NsxE*HnYf1c=ro27m)xnWRD6da1s*X4GIqp2%qM5W~XntUa!C~ zzshEOWULR1!@KH85%`zhr2}7*lko2M=!|+(xH(~!B>wkBvlrbeK}+~06t0gG>Fd5 z%+JgE-mhL!V|g8?lOwx1M0i!dN;Hls&#@Uhj`AUB!5?Jn$D}b!1|^cny+zfmm)ar( z5WyOZUm-!WOMwv{uMXM>r}!nGM4>C8_VZH`2ES#L=cl{^uKV5obH3nE_D3RDJ_8YF zu7l#OwPp*=h-+5nK%73F(LSX21?%$B_$+JsIEjozB8JM{c|abCju!QkxT-!NXKQzrm{F$G!vZ~wX1(VvJ#o9(yvM) zMlS+5lXL#jrS%m?Yh#dBTBWJji^WE!XBt34vY(`yqh8kMACXRfHVGvPV0S5-H@{A2 zjU49#{&}ajUOBcl4;fyaOQ512p+nG0QjuggeP!TK6-Zg;Je3nA5~86!P-dA=A^r{&=o!sh)SF}Y%9obX;Tp=o~_ zzW)JGE&9WxVu*!|FgDb5M*eM{_+2ubD3+BKjs9w=H zYz}HaPvm}XdLKa7=UU&vK-7vKEmpB~e;_HW9(b7BF`|m9CU@|V`(*GuNO5*?H@CJj(h>iJ1| zIr#uOr5Pv~A?E9qVnM*CxG-fp=n!G@+NUS=nUU?fEOjkujVgn|_jQ3xCec|w@LDvh z;|{goK0&7*2iZP?ey7*vQ07r1pdUCtkjGIX)KPwaL+&4ty_#H8P8I*3j&sR73YDLc zP0+iL-R$7n6@H2pr$I2qt-6$#>wb}yu_#aL_=_)|pZ@Ul;rGu^pBz8<$Kx*_K6RRy z1?Ed0J$s;(z~%6+opRJ%kfe%Jj^s4?e#t`g@AK3D&L6Okg^dbkl8pOJ7L6(xK;5)cO4o=2Y{&^t0Fj)i5mcza+0YRX(8Z(bH}5mx;@sT77V`b%Jl77Qbt z+rfxorjuNKj9mSIXAnR7I-{~)MA5qI7fV%6WIl7DX6Kj@CrDOE+m;l^p|>-a#Bop^ z!=gfPlE+?l)(Ya~&8$$xr{gfWT&l2vQ# zCo?gg>Sfd}y83+z$0N6liKJcR9& zESH|5w6h)(;e$x(w_1>JzNbo_~!}nqWD17Z5xjp(G^CsEQLf z>!neE7qGE9@|81xV`P4|5t;ve(-W+co_vVMEY7)?Er^%ocz%7ak=CQ^J{-zkTpk^% z&Yg?{`dKwOzazYE3Q%IKGz6xT?d`Xfdqslvii|Vrr)4iG`cmcj6cUs$^^?8#II}`U zrOY`YCrG`Pp62T?2K?Z46lDozw=vxj10h+TR0I2NwNPLkZD)iVqXMsW%OS$k__`f& zz9mz|-AbI-U#XSnn&U47J7rX*z?Hyg*xZH!s$o1{u^Qtk{mWUR+e2v%)L+=a<`|P9 zKo3=|+EqNp5XjbpsiEjJ$Vv<9VZ$D49!A_7%uH%d(gU;|uT!ry%sWnVB z?Sn!~DAcR8cWYB2Z3rbaZ;}Dy^Ehz?itneoRyWeiD$3gdAQ}-AK*H}bt%OOMINy-} 
zUdl~A;5yHrhGb{aRS@XoG$3YvhsFN>hSbL`Q5L~b^qY?)+FJ`c{N2Vp4*f}tS-P>+ zD;T9ML1qc360{15wg%Mfx8@}Jf0eY$jXjR(vIf~(6_=D=W-{%~qn*lIm=V3|&LU@t z@IGRz`HL67Rowpg_}hoSOR$EQOP?L}>^L2r8GyZb5ztmHjHfxrq~&Uw3ZE=jns~JB z+e)OmNG@h^OwyC@NhK1T96$Nmt*9v{oh=qsSPmtsx)=hMZPj8?ifot@g1?tQWkBW( z@B3v8`yWqrbQ_+=FT|^SS`7l^L}RF2=*ZXSC2U_Y#J!?FKMirm`Rb>+>*?_~soAh# zj^(LCh0mK0s;MPnf8f!V(-wsMwA@iou!76|?J*?oUbFw301uU*gy-~^)9VF8=e6+4 z8r`wvBfb~{I!=oPJ3YHQWnnIca~k;G`_t>gk3RW__wVTmW;z1@bj)^NlIH2b$I@y( zPG^E!lHJmpPB@hSl?UWniDUpph^Z&rybcYeJYH-ceN<~N?jhC!51?I7{ymCuAN*l{ zHEX_@cg=5!9)LsR$$z`l-$U%u?>Q znoU5!U%4~Ad^aoVt~ryw*9klTeJuCj4%D~holrbEbiI%!eJ3)Od})||?Legx3x@cH z0K$eeI&B04j#IH9A_<+Hr#I_u7J7h`^0>zjPQL!*@ySC{Sl@mTQHlslY=%LiUlu(k zCNnhnKFo18?w>U;UR=&kt7X1;5fQ^Bn*yqj^9?~U*|{|!D|%%ok&xnIAh6(N<%O4E zg1lI6zS4n{f%*|MZBqT!;YS~a#i;sXbEAzP6ML0Q<;upfcRE3m{7BjBlL9Dxcx5|! zrGwAO1@|wj5X^PC_j)m=4!fe$jRY_N{MjQ~Xve(G%^q9c@i1R%%_s_AM7mEjS7Em6 ztwZ(J3A))TTcim$`|Un9`Pu*&{s`News$Ei(_V7c5_t%YI*km~CqEZ2Hh;%HBwo#Y zC>yLYA+!I*G32fU+F+2;p7ue-%b&{=Lcm=paX(y@o>TJnlTWL{E+>kH#R9I@O*N>6 zHvA`8UOJ?cBnY;kzbRxv9^+*K>q42nmC$TVBcMet91!L(mLV5A7ouAs;v#06O#;&c zv@nmus-`D%$vH?=1=cVe{nkG2*=iAwmNLkRINW)b0)hBMlhO0Vo)iFELN+1WTM>?% z07v6X2?=J$NTjFGe}bK9-fn0f$JT>7tpw>(4P6KF%eMa@bEBd`FroZcgsQc)?7==t|u?D zfaJD%c&Mg_oO?w4JbN8JA^gvL$i!x`THi=Yp!lz! zd|19*4(~M%a8h1Sez(^x0%0#J6~c(DF~KTmiU>T_hH+Df3ZsD8Dnt z#n{JfGrr_o^|?G-X$j~T8T-`DqL$NjE>xU?t^pFNR~qYW#*bBxF^O?eax|H9>6nlNf|CwX7xSu%T62@NjEOZg~(Gdd_>92;8&3c@MT&|>fT0sxyP9tE+ zTJa@GG1K}=aHLacCv2WkdPq(g*S;yDc6()Cy2Ndp_AYd155Uplfgjg75ERa zzIeep4E{xo&z34Po-?*B6q>4AZ(h7`LSH!aHgX$>#Pqb)5rhE!Q@?br3vfWytPfIL zP~oEDLt)yled-z)i5kjsl(i@old2m@(D9UU5M@cjx-~kXBk||Oi$6t9nOS9aVp+Umx_hDFy3LQ$1DZobue~0R_+FM`7SF}*)0-vp z$A{DA(+~M-IUzgH=seNGGyU`7hl9c2Cz(qp)J0TQ@)3?W-A;^pYhCUPg33i&ew4yu zs5t0XxrewYrcf3RMcq!S?L49`32BOM<2kP~lwjX0jkn5YWKR{~Om-j+iYW)$C!q}! 
zr=F+5;2%Qa^o@gj;TQHFaIO_Jw^ho+Dk*9vvX+8V3Rg%Z`rw3M^L(N%&s56w>rvIK z4Sej+X<@4)E!@0yp1>CB=d-*~(gY)D^48f#Yg#2!iPmvIQ0%!018Zwh@?PExY-3BW zr&3rs&y#z-{Vp4Eh66+W1`@w$gyak#q}@w7ZGf~!IWY^OjI3xSpDlP4$kUW#bUoefxxS&qZPDs$SK}fw8V^r1+v&h7JsY z;}h<|!Z&9qo?FFZJ-NCp2t?*HLaN-7lG$2xlY5iqY4(w1d7}OKA4jgM=wk#E`MiUw zT_AN2y?8%%d6K7@_TS|N0T50pY^VC8%xq7w_`h;`xe z@D(qY0SW0uS&j?xo3O7|tLx=wM@N^a=FORMOGhOhO)Z_1_RKFB@5jhe?J3&Jt%sL| z)}B?SW|0(FixseZ^IeT>dT=h*5wx!L?Zw4-u-rI_WF}6&c(GN52kE49lpG1{C}8Df zFpO#tex%w<@k8uVyW6q3L*5Aj$7YnQn$2FeTuzE<%~(X$dKj2!A`a?3bLb=V;L@tA zo^B#_ktHP?HNICVF~Lj!m^`EMA2e&-ZJ*DuWwxg`1dX<;9H?~ul6#!ow{Q|6Sf!Jb z;=T63o(o3-erge?pK(x*unA$fH>O)|51 zLKX{qYauGDF$7Ke`HdT7p0vCeIO~rMD;k37a9Plp(47gJ>0nNCdR%pLwfS|8eY6v) z7`U$ZC510X%A0%hg=8cuy?m55WMuQ$HyE4RwqqQ9BA1i%oUyxUv9@`^Cxzz5Gv_1` z8O?LE5iqSkouygN*wJIyobnD+lGeW{jJm%wjFO1!AlW=rAC931S9J};$E~Sf$KBzY zOwI}G>?y$PNhuOws{mIKNeRAeMPW^qWym$M6oRqs?Uqwtpbag;s1Rt)r=0LFo@hBH zx9hVNbbLGA)m!R_6^*cq8U3IWkXJW?D6t<~P9XNaYbzIeLPHZp{}d7GS<%^Ng_m|H zi`5P=QS>m-6dN0a`B4=IoiQRHb3W0=oZwERl=QauW88!QY!GSCF{%08n|uY6lDnQ$ zztzq&U`|T0BTt}Y1q>7rWt;~wL9VXDP8?hp;2RMdKoMjhWat@y~KV0Kz#cH|vUD{8pjyG4Ea;X~(5mMYx<*536 z-Hiqm#N-`q45{i9WoIagg{xH{t~>QL{SGZxY10r06TITwk{Agv)Gbq-hy_?H6Z0J9 zG59!nY5fM;0jtKDa$zFHottZpNXd9B^(>{CNyOxnVYY1pme$(^;}wBRl%H2>Q^%V7 z{TS;lO(9S;6Q#pcX^U$?W52x9*tCdUmmFo0a{6vsD|?M!cjrESZJhzpl~oM$AaV}c zHG~Z0=I|&M;K(HZ@xB07D-|d=Zk3jv1p}pmYa8!^=SfNtNi~{F8*+DrB-FBJE5b>8 zGQOfO!uM4^Q(a=4MT93zy4mIGim;F(iFP}?aYMl{|3_U*<)UZ{l1%XmzU;eL1@^$9 zvw+^Y9Hly1PFUQTst)OGPdarQZ6K%uwZU?BSCU>c^e+d zZo_KK*3I Date: Mon, 25 Jul 2016 15:45:06 -0700 Subject: [PATCH 080/204] fix typo --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 48f2a36e..c2054a15 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ setup(name='MPWorks', version=__version__, description='Materials Project codes', - long_description=open(os.path.join(module_dir, 'README.rst', encoding="UTF-16")).read(), + long_description=open(os.path.join(module_dir, 
'README.rst'), encoding="UTF-16").read(), url='https://github.com/materialsproject/MPWorks', author='Anubhav Jain', author_email='anubhavster@gmail.com', From 1a218b340e8368bcaa79b802b5ea6cf9eee24273 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 25 Jul 2016 15:51:38 -0700 Subject: [PATCH 081/204] don't use encoding parameter in Python2 --- setup.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index c2054a15..1fa73930 100644 --- a/setup.py +++ b/setup.py @@ -7,18 +7,22 @@ __email__ = "ajain@lbl.gov" __date__ = "Mar 15, 2013" -from setuptools import setup, find_packages +import os, sys + from mpworks import __version__ -import os -import multiprocessing, logging # AJ: for some reason this is needed to not have "python setup.py test" freak out +from setuptools import setup, find_packages module_dir = os.path.dirname(os.path.abspath(__file__)) if __name__ == "__main__": + if sys.version_info[0] > 2: + readme_text = open(os.path.join(module_dir, 'README.rst'), encoding="UTF-16").read() + else: + readme_text = open(os.path.join(module_dir, 'README.rst')).read() setup(name='MPWorks', version=__version__, description='Materials Project codes', - long_description=open(os.path.join(module_dir, 'README.rst'), encoding="UTF-16").read(), + long_description=readme_text, url='https://github.com/materialsproject/MPWorks', author='Anubhav Jain', author_email='anubhavster@gmail.com', From 1be2585bc19056544357a33f0dfb74259daaafdf Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 25 Jul 2016 16:39:17 -0700 Subject: [PATCH 082/204] replace basestring with str in Python3 --- mpworks/submission/submission_mongo.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/mpworks/submission/submission_mongo.py b/mpworks/submission/submission_mongo.py index e3d9d578..99ebc32c 100644 --- a/mpworks/submission/submission_mongo.py +++ b/mpworks/submission/submission_mongo.py @@ -1,6 +1,7 @@ import json import os 
import datetime +import sys from pymongo import MongoClient, DESCENDING from mpworks.snl_utils.mpsnl import MPStructureNL @@ -34,7 +35,11 @@ def reconstitute_dates(obj_dict): if isinstance(obj_dict, list): return [reconstitute_dates(v) for v in obj_dict] - if isinstance(obj_dict, basestring): + if sys.version_info[0] > 2: + str_type = str + else: + str_type = basestring + if isinstance(obj_dict, str_type): try: return datetime.datetime.strptime(obj_dict, "%Y-%m-%dT%H:%M:%S.%f") except ValueError: From 94f0e5d612e5f08a992e25872e946dadbcbacb0e Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 25 Jul 2016 16:51:47 -0700 Subject: [PATCH 083/204] fix indent bug --- mpworks/firetasks/boltztrap_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/boltztrap_tasks.py b/mpworks/firetasks/boltztrap_tasks.py index 402adf8b..46f3c642 100644 --- a/mpworks/firetasks/boltztrap_tasks.py +++ b/mpworks/firetasks/boltztrap_tasks.py @@ -233,7 +233,7 @@ def run_task(self, fw_spec): ted['kappa_best_dope19'] = self.get_extreme(ted, 'kappa_eigs', maximize=False, max_didx=4) try: - from mpcollab.thermoelectrics.boltztrap_TE import BoltzSPB + from mpcollab.thermoelectrics.boltztrap_TE import BoltzSPB bzspb = BoltzSPB(ted) maxpf_p = bzspb.get_maximum_power_factor('p', temperature=0, tau=1E-14, ZT=False, kappal=0.5,\ otherprops=('get_seebeck_mu_eig', 'get_conductivity_mu_eig', \ From d8e6fb4776d3a7487accef3ec4e5ed323374f637 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 25 Jul 2016 22:09:59 -0700 Subject: [PATCH 084/204] make iterate dict items Python3 compatible --- mpworks/check_snl/builders/base.py | 4 ++-- mpworks/fix_scripts/fix_bs_controller_tasks.py | 2 +- mpworks/fix_scripts/legacy/mps_to_snl.py | 2 +- mpworks/maintenance_scripts/classify_fizzled.py | 2 +- mpworks/maintenance_scripts/icsd2012_to_snl.py | 2 +- mpworks/osti_doi/osti_record.py | 4 ++-- mpworks/processors/submit_canonical.py | 2 +- mpworks/snl_utils/mpsnl.py | 4 ++-- 8 files 
changed, 11 insertions(+), 11 deletions(-) diff --git a/mpworks/check_snl/builders/base.py b/mpworks/check_snl/builders/base.py index 5d8bd641..3ae79b56 100644 --- a/mpworks/check_snl/builders/base.py +++ b/mpworks/check_snl/builders/base.py @@ -95,7 +95,7 @@ def _push_to_plotly(self): exc_type, exc_value, exc_traceback = sys.exc_info() _log.info('%r %r', exc_type, exc_value) _log.info('_push_to_plotly ERROR: bar=%r', bar_x) - for k,v in md.iteritems(): + for k,v in md.items(): if len(v) < 1: continue try: self._streams[2].write(Scatter( @@ -122,7 +122,7 @@ def _increase_counter(self, nrow, ncol, mismatch_dict): for k in categories[self.checker_name]: mc[categories[self.checker_name].index(k)] += len(mismatch_dict[k]) self._mismatch_counter = mc - for k,v in mismatch_dict.iteritems(): + for k,v in mismatch_dict.items(): self._mismatch_dict[k] += v currow = self._counter[nrow] currow[ncol] += 1 diff --git a/mpworks/fix_scripts/fix_bs_controller_tasks.py b/mpworks/fix_scripts/fix_bs_controller_tasks.py index 623c41a9..b5ea168e 100644 --- a/mpworks/fix_scripts/fix_bs_controller_tasks.py +++ b/mpworks/fix_scripts/fix_bs_controller_tasks.py @@ -216,7 +216,7 @@ def append_wf(fw_id, parent_fw_id=None): continue is_new = bool(datetime(2016, 1, 1) < workflow['updated_on']) if workflow['state'] == 'FIZZLED': - for fw_id_fizzled, fw_state in workflow['fw_states'].iteritems(): + for fw_id_fizzled, fw_state in workflow['fw_states'].items(): if fw_state == 'FIZZLED': fw_fizzled = lpdb.fireworks.find_one({'fw_id': int(fw_id_fizzled)}, {'_id': 0, 'name': 1, 'fw_id': 1, 'spec.task_type': 1}) counter[fw_fizzled['spec']['task_type']] += 1 diff --git a/mpworks/fix_scripts/legacy/mps_to_snl.py b/mpworks/fix_scripts/legacy/mps_to_snl.py index f11b8ce0..563ff741 100644 --- a/mpworks/fix_scripts/legacy/mps_to_snl.py +++ b/mpworks/fix_scripts/legacy/mps_to_snl.py @@ -38,7 +38,7 @@ def mps_dict_to_snl(mps_dict): projects.append(project) data = {'_materialsproject': {'deprecated': 
{'mps_ids': mps_ids}}, '_icsd': {}} - for k, v in m['about']['metadata']['info'].iteritems(): + for k, v in m['about']['metadata']['info'].items(): if k == 'icsd_comments': data['_icsd']['comments'] = v elif k == 'icsd_id': diff --git a/mpworks/maintenance_scripts/classify_fizzled.py b/mpworks/maintenance_scripts/classify_fizzled.py index a3ded4ef..243ea3f3 100644 --- a/mpworks/maintenance_scripts/classify_fizzled.py +++ b/mpworks/maintenance_scripts/classify_fizzled.py @@ -111,6 +111,6 @@ def get_task_info(fw_id, tdb): except_dict[except_str] = except_dict[except_str]+1 print('-----') - for k, v in except_dict.iteritems(): + for k, v in except_dict.items(): print({"{}\t{}".format(v, k)}) diff --git a/mpworks/maintenance_scripts/icsd2012_to_snl.py b/mpworks/maintenance_scripts/icsd2012_to_snl.py index c7b3c79b..c5751d8a 100644 --- a/mpworks/maintenance_scripts/icsd2012_to_snl.py +++ b/mpworks/maintenance_scripts/icsd2012_to_snl.py @@ -23,7 +23,7 @@ def icsd_dict_to_snl(icsd_dict): data = {'_icsd': {}} excluded_data = ['_id', 'a_len', 'b_len', 'c_len', 'alpha', 'beta', 'gamma', 'compostion', 'composition', 'created_at', 'crystal_id', 'idnum', 'journal', 'tstruct', 'updated_at', 'username'] - for k, v in icsd_dict.iteritems(): + for k, v in icsd_dict.items(): if k not in excluded_data: if isinstance(v, datetime.datetime): v = v.strftime(format='%Y-%m-%d %H:%M:%S') diff --git a/mpworks/osti_doi/osti_record.py b/mpworks/osti_doi/osti_record.py index cc9547d3..3d14871c 100644 --- a/mpworks/osti_doi/osti_record.py +++ b/mpworks/osti_doi/osti_record.py @@ -93,10 +93,10 @@ def insert_dois(self, dois): dois_insert = [ {'_id': mpid, 'doi': d['doi'], 'valid': False, 'created_at': datetime.datetime.now().isoformat()} - for mpid,d in dois.iteritems() if not d['updated'] + for mpid,d in dois.items() if not d['updated'] ] if dois_insert: logger.info(self.doicoll.insert(dois_insert)) - dois_update = [ mpid for mpid,d in dois.iteritems() if d['updated'] ] + dois_update = [ mpid for 
mpid,d in dois.items() if d['updated'] ] if dois_update: logger.info(self.doicoll.update( {'_id': {'$in': dois_update}}, diff --git a/mpworks/processors/submit_canonical.py b/mpworks/processors/submit_canonical.py index c6b1549f..9c1049d7 100644 --- a/mpworks/processors/submit_canonical.py +++ b/mpworks/processors/submit_canonical.py @@ -64,7 +64,7 @@ def submit_tests(names=None, params=None): mpr = MPRester() - for name, sid in compounds.iteritems(): + for name, sid in compounds.items(): if not names or name in names: sid = mpr.get_materials_id_from_task_id("mp-{}".format(sid)) s = mpr.get_structure_by_material_id(sid, final=False) diff --git a/mpworks/snl_utils/mpsnl.py b/mpworks/snl_utils/mpsnl.py index e0665c33..5e5fe01a 100644 --- a/mpworks/snl_utils/mpsnl.py +++ b/mpworks/snl_utils/mpsnl.py @@ -142,7 +142,7 @@ def as_dict(self): d['all_snl_ids'] = self.all_snl_ids d['num_snl'] = len(self.all_snl_ids) d['species_snl'] = [s.as_dict() for s in self.species_snl] - d['species_groups'] = dict([(str(k), v) for k, v in self.species_groups.iteritems()]) + d['species_groups'] = dict([(str(k), v) for k, v in self.species_groups.items()]) d['snlgroup_key'] = self.canonical_snl.snlgroup_key return d @@ -150,7 +150,7 @@ def as_dict(self): def from_dict(cls, d): sp_snl = [MPStructureNL.from_dict(s) for s in d['species_snl']] if 'species_snl' in d else None # to account for no int keys in Mongo dicts - species_groups = dict([(int(k), v) for k, v in d['species_groups'].iteritems()]) if 'species_groups' in d else None + species_groups = dict([(int(k), v) for k, v in d['species_groups'].items()]) if 'species_groups' in d else None return SNLGroup(d['snlgroup_id'], MPStructureNL.from_dict(d['canonical_snl']), d['all_snl_ids'], sp_snl, species_groups) From a4d8f34e73127416bb61e48e665e6b9ed38bdc61 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 26 Jul 2016 22:17:43 -0600 Subject: [PATCH 085/204] explicity using text mode for zopen to be compatible with Python3 for the error 
of "TypeError: the JSON object must be str, not 'bytes'" --- mpworks/drones/mp_vaspdrone.py | 2 +- mpworks/drones/signals.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mpworks/drones/mp_vaspdrone.py b/mpworks/drones/mp_vaspdrone.py index 8bcc63fb..7e2101c3 100644 --- a/mpworks/drones/mp_vaspdrone.py +++ b/mpworks/drones/mp_vaspdrone.py @@ -238,7 +238,7 @@ def process_fw(self, dir_name, d): break # custom Materials Project post-processing for FireWorks - with zopen(zpath(os.path.join(dir_name, 'FW.json'))) as f: + with zopen(zpath(os.path.join(dir_name, 'FW.json')), 'rt') as f: fw_dict = json.load(f) d['fw_id'] = fw_dict['fw_id'] d['snl'] = fw_dict['spec']['mpsnl'] diff --git a/mpworks/drones/signals.py b/mpworks/drones/signals.py index 28ce3c65..ccc39c38 100644 --- a/mpworks/drones/signals.py +++ b/mpworks/drones/signals.py @@ -30,7 +30,7 @@ def string_list_in_file(s_list, filename, ignore_case=True): """ matches = set() - with zopen(filename, 'r') as f: + with zopen(filename, 'rt') as f: for line in f: for s in s_list: if (ignore_case and s.lower() in line.lower()) or s in line: @@ -184,7 +184,7 @@ def detect(self, dir_name): file_names = glob.glob("%s/*.error" % dir_name) rx = re.compile(r'segmentation', re.IGNORECASE) for file_name in file_names: - with zopen(file_name, 'r') as f: + with zopen(file_name, 'rt') as f: lines = f.readlines() for line in lines: if rx.search(line) is not None: From 3b4d777663d324fd6805ec3157df718a4be067c9 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 26 Aug 2016 13:07:54 -0700 Subject: [PATCH 086/204] add NMR dir to RUN_LOCS --- mpworks/workflows/wf_settings.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mpworks/workflows/wf_settings.py b/mpworks/workflows/wf_settings.py index 57ff8c2a..12d0d78b 100644 --- a/mpworks/workflows/wf_settings.py +++ b/mpworks/workflows/wf_settings.py @@ -36,5 +36,6 @@ def RUN_LOCS(self): '/global/scratch/sd/matcomp/wc_tests/', 
'/global/scratch/sd/matcomp/aj_prod/', '/global/scratch2/sd/matcomp/mp_prod/', - '/global/scratch2/sd/matcomp/mp_prod_hopper/'] + '/global/scratch2/sd/matcomp/mp_prod_hopper/', + '/project/projectdirs/matgen/garden/nmr'] From a7f37b532986893284a1c8a9833a8105298ce45c Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 26 Aug 2016 13:11:54 -0700 Subject: [PATCH 087/204] change six package version requirement to 1.10.0 to be consistent with fireworks --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 78067974..f7d512ed 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,6 +3,6 @@ monty==0.6.4 pybtex==0.18 PyYAML==3.11 requests==2.6.0 -six==1.9.0 +six==1.10.0 xmltodict==0.9.2 pymatgen>=4.0.0 From 6b232e5b205b9103116933c885844794683b0a6c Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 20 Jan 2017 17:01:40 -0800 Subject: [PATCH 088/204] use NCORE instead of NPAR/KPAR since it works for both geometry optimization and NMR --- mpworks/firetasks/nmr_tasks.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 129c6d5d..f77756d5 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -111,10 +111,7 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru with open(config_file) as f: parent_config_dict = yaml.load(stream=f) config_dict = parent_config_dict[config_key] - if config_name == "NMR CS": - incar_enforce = {'KPAR': 4} - else: - incar_enforce = {'NPAR': 4} + incar_enforce = {'NCORE': 4} spec['run_tags'] = spec.get('run_tags', []) spec['run_tags'].extend(additional_run_tags) From 1f98b27654a5eb8e08199526a4c8acd21ba7fe4d Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 22 Jan 2017 21:44:46 -0800 Subject: [PATCH 089/204] revert NPAR/KPAR settings --- mpworks/firetasks/nmr_tasks.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) 
diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index f77756d5..129c6d5d 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -111,7 +111,10 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru with open(config_file) as f: parent_config_dict = yaml.load(stream=f) config_dict = parent_config_dict[config_key] - incar_enforce = {'NCORE': 4} + if config_name == "NMR CS": + incar_enforce = {'KPAR': 4} + else: + incar_enforce = {'NPAR': 4} spec['run_tags'] = spec.get('run_tags', []) spec['run_tags'].extend(additional_run_tags) From 490c4d467674ecc33f93787bbdc9ae1bc8ab0c1c Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 23 Jan 2017 13:08:12 -0800 Subject: [PATCH 090/204] convert all indents in snl_mongo.py to spaces --- mpworks/snl_utils/snl_mongo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/snl_utils/snl_mongo.py b/mpworks/snl_utils/snl_mongo.py index 3f2e2eb5..11c3d122 100644 --- a/mpworks/snl_utils/snl_mongo.py +++ b/mpworks/snl_utils/snl_mongo.py @@ -124,7 +124,7 @@ def _add_if_belongs(self, snlgroup, mpsnl, testing_mode): if match_found: print('MATCH FOUND, grouping (snl_id, snlgroup): {}'.format((mpsnl.snl_id, snlgroup.snlgroup_id))) if not testing_mode: - self.snlgroups.update_one({'snlgroup_id': snlgroup.snlgroup_id}, {'$set': snlgroup.as_dict()}) + self.snlgroups.update_one({'snlgroup_id': snlgroup.snlgroup_id}, {'$set': snlgroup.as_dict()}) return match_found, spec_group From 4eb1863727d9863c506e42e1c6cd476bcb852b05 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 23 Jan 2017 13:10:10 -0800 Subject: [PATCH 091/204] fixed indentation --- mpworks/snl_utils/snl_mongo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/snl_utils/snl_mongo.py b/mpworks/snl_utils/snl_mongo.py index 11c3d122..35517878 100644 --- a/mpworks/snl_utils/snl_mongo.py +++ b/mpworks/snl_utils/snl_mongo.py @@ -124,7 +124,7 @@ def 
_add_if_belongs(self, snlgroup, mpsnl, testing_mode): if match_found: print('MATCH FOUND, grouping (snl_id, snlgroup): {}'.format((mpsnl.snl_id, snlgroup.snlgroup_id))) if not testing_mode: - self.snlgroups.update_one({'snlgroup_id': snlgroup.snlgroup_id}, {'$set': snlgroup.as_dict()}) + self.snlgroups.update_one({'snlgroup_id': snlgroup.snlgroup_id}, {'$set': snlgroup.as_dict()}) return match_found, spec_group From 81f29e4e7f07344d2160d32ce1700383bf47952e Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 23 Jan 2017 13:14:50 -0800 Subject: [PATCH 092/204] fix indentatioin in process_submission --- mpworks/processors/process_submissions.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mpworks/processors/process_submissions.py b/mpworks/processors/process_submissions.py index 1c14d8d1..a1833b6b 100644 --- a/mpworks/processors/process_submissions.py +++ b/mpworks/processors/process_submissions.py @@ -117,10 +117,10 @@ def update_wf_state(self, submission_id): wf = self.launchpad.workflows.find_one({'metadata.submission_id': submission_id}, sort=[('updated_on', -1)]) - if not wf: - # submission_id from jobs collection doesn't exist in workflows collection - # workflow has probably been removed manually by user via `lpad delete_wflows` - return + if not wf: + # submission_id from jobs collection doesn't exist in workflows collection + # workflow has probably been removed manually by user via `lpad delete_wflows` + return details = '(none)' for e in self.launchpad.fireworks.find({'fw_id': {'$in' : wf['nodes']}}, From d4fef550c7284c44ebe0530a7e2e2d521861158d Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 23 Jan 2017 13:21:07 -0800 Subject: [PATCH 093/204] fix indentation in mp_vaspdrone --- mpworks/drones/mp_vaspdrone.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mpworks/drones/mp_vaspdrone.py b/mpworks/drones/mp_vaspdrone.py index 59d62798..a57d8880 100644 --- a/mpworks/drones/mp_vaspdrone.py +++ 
b/mpworks/drones/mp_vaspdrone.py @@ -101,8 +101,8 @@ def assimilate(self, path, launches_coll=None): if ("task_id" not in d) or (not d["task_id"]): d["task_id"] = "mp-{}".format( db.counter.find_one_and_update( - {"_id": "taskid"}, {"$inc": {"c": 1}} - )["c"]) + {"_id": "taskid"}, {"$inc": {"c": 1}}) + ["c"]) logger.info("Inserting {} with taskid = {}" .format(d["dir_name"], d["task_id"])) elif self.update_duplicates: @@ -215,7 +215,7 @@ def string_to_numlist(stringlist): d['analysis'].update(update_doc) d['calculations'][0]['output'].update(update_doc) - coll.update_one({"dir_name": d["dir_name"]}, {'$set': d}, upsert=True) + coll.update_one({"dir_name": d["dir_name"]}, {'$set': d}, upsert=True) return d["task_id"], d else: From c3cb81375a5ef59cc8c8aff9f09bd36b0b692e29 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 23 Jan 2017 13:25:13 -0800 Subject: [PATCH 094/204] fix indentation in check_snl/builders/base.py --- mpworks/check_snl/builders/base.py | 43 +++++++++++++++++------------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/mpworks/check_snl/builders/base.py b/mpworks/check_snl/builders/base.py index 3ae79b56..58776afe 100644 --- a/mpworks/check_snl/builders/base.py +++ b/mpworks/check_snl/builders/base.py @@ -1,10 +1,15 @@ -import sys, multiprocessing, time -from mpworks.snl_utils.mpsnl import SNLGroup +import multiprocessing +import sys +import time + +from init_plotly import py, stream_ids, categories from matgendb.builders.core import Builder from matgendb.builders.util import get_builder_log -from mpworks.check_snl.utils import div_plus_mod from pymatgen.analysis.structure_matcher import StructureMatcher, ElementComparator -from init_plotly import py, stream_ids, categories + +from mpworks.check_snl.utils import div_plus_mod +from mpworks.snl_utils.mpsnl import SNLGroup + if py is not None: from plotly.graph_objs import * @@ -83,19 +88,19 @@ def _push_to_plotly(self): heatmap_z = self._counter._getvalue() if not self._seq else 
self._counter bar_x = self._mismatch_counter._getvalue() if not self._seq else self._mismatch_counter md = self._mismatch_dict._getvalue() if not self._seq else self._mismatch_dict - try: - self._streams[0].write(Heatmap(z=heatmap_z)) - except: - exc_type, exc_value, exc_traceback = sys.exc_info() - _log.info('%r %r', exc_type, exc_value) - _log.info('_push_to_plotly ERROR: heatmap=%r', heatmap_z) - try: - self._streams[1].write(Bar(x=bar_x)) - except: - exc_type, exc_value, exc_traceback = sys.exc_info() - _log.info('%r %r', exc_type, exc_value) - _log.info('_push_to_plotly ERROR: bar=%r', bar_x) - for k,v in md.items(): + try: + self._streams[0].write(Heatmap(z=heatmap_z)) + except: + exc_type, exc_value, exc_traceback = sys.exc_info() + _log.info('%r %r', exc_type, exc_value) + _log.info('_push_to_plotly ERROR: heatmap=%r', heatmap_z) + try: + self._streams[1].write(Bar(x=bar_x)) + except: + exc_type, exc_value, exc_traceback = sys.exc_info() + _log.info('%r %r', exc_type, exc_value) + _log.info('_push_to_plotly ERROR: bar=%r', bar_x) + for k, v in md.items(): if len(v) < 1: continue try: self._streams[2].write(Scatter( @@ -103,7 +108,7 @@ def _push_to_plotly(self): y=k, text='
'.join(v) )) _log.info('_push_to_plotly: mismatch_dict[%r]=%r', k, v) - self._mismatch_dict.update({k:[]}) # clean + self._mismatch_dict.update({k: []}) # clean time.sleep(0.052) except: exc_type, exc_value, exc_traceback = sys.exc_info() @@ -136,7 +141,7 @@ def _increase_counter(self, nrow, ncol, mismatch_dict): if self._lock is not None: self._lock.release() def finalize(self, errors): - if py is not None: self._push_to_plotly() + if py is not None: self._push_to_plotly() _log.info("%d items processed.", self._counter_total.value) return True From 1d89a67daf0494d2ce82ed254d49496d85360258 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 23 Jan 2017 13:28:00 -0800 Subject: [PATCH 095/204] fix_bs_controller_tasks --- .../fix_scripts/fix_bs_controller_tasks.py | 42 +++++++++---------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/mpworks/fix_scripts/fix_bs_controller_tasks.py b/mpworks/fix_scripts/fix_bs_controller_tasks.py index b5ea168e..96b20750 100644 --- a/mpworks/fix_scripts/fix_bs_controller_tasks.py +++ b/mpworks/fix_scripts/fix_bs_controller_tasks.py @@ -1,14 +1,12 @@ -import time, yaml, sys, os -from fireworks.core.launchpad import LaunchPad +import os +import yaml from fireworks.core.firework import Firework, Workflow -from mpworks.firetasks.controller_tasks import AddEStructureTask +from fireworks.core.launchpad import LaunchPad from fireworks.utilities.fw_utilities import get_slug -from mpworks.snl_utils.snl_mongo import SNLMongoAdapter from pymongo import MongoClient -from collections import Counter -from datetime import datetime -from fnmatch import fnmatch -from custodian.vasp.handlers import VaspErrorHandler + +from mpworks.firetasks.controller_tasks import AddEStructureTask +from mpworks.snl_utils.snl_mongo import SNLMongoAdapter cwd = os.getcwd() @@ -45,12 +43,12 @@ def append_wf(fw_id, parent_fw_id=None): elif child_fw['state'] == 'COMPLETED': print('AddEStructureTask v2 already successfully run for', fw_id) sec_child_fw_id = 
wf['links'][str(child_fw_id)][0] - sec_child_fw = lpdb.fireworks.find_one({'fw_id': sec_child_fw_id}, {'spec.task_type':1, 'state':1}) - if sec_child_fw['state'] == 'FIZZLED': - lpdb.rerun_fw(sec_child_fw_id) - print('FIZZLED -> marked for rerun:', sec_child_fw_id, sec_child_fw['spec']['task_type']) - else: - print('AddEStructureTask v2 added but neither DEFUSED, FIZZLED, or COMPLETED for', fw_id) + sec_child_fw = lpdb.fireworks.find_one({'fw_id': sec_child_fw_id}, {'spec.task_type':1, 'state':1}) + if sec_child_fw['state'] == 'FIZZLED': + lpdb.rerun_fw(sec_child_fw_id) + print('FIZZLED -> marked for rerun:', sec_child_fw_id, sec_child_fw['spec']['task_type']) + else: + print('AddEStructureTask v2 added but neither DEFUSED, FIZZLED, or COMPLETED for', fw_id) return f = lpdb.get_wf_summary_dict(fw_id)['name'].replace(' ', '_') name = get_slug(f + '--' + spec['task_type']) @@ -91,14 +89,14 @@ def append_wf(fw_id, parent_fw_id=None): #print 'nfws =', nfws mp_ids = [ - 'mp-2123', 'mp-10886', 'mp-582799', 'mp-21477', 'mp-535', 'mp-21293', 'mp-8700', - 'mp-9568', 'mp-973', 'mp-505622', 'mp-20839', 'mp-1940', 'mp-16521', 'mp-30354', - 'mp-568953', 'mp-454', 'mp-1010', 'mp-1416', 'mp-21385', 'mp-27659', 'mp-22481', - 'mp-569529', 'mp-1057', 'mp-1834', 'mp-2336', 'mp-12857', 'mp-21109', 'mp-30387', - 'mp-30599', 'mp-21884', 'mp-11397', 'mp-11814', 'mp-510437', 'mp-12565', 'mp-33032', + 'mp-2123', 'mp-10886', 'mp-582799', 'mp-21477', 'mp-535', 'mp-21293', 'mp-8700', + 'mp-9568', 'mp-973', 'mp-505622', 'mp-20839', 'mp-1940', 'mp-16521', 'mp-30354', + 'mp-568953', 'mp-454', 'mp-1010', 'mp-1416', 'mp-21385', 'mp-27659', 'mp-22481', + 'mp-569529', 'mp-1057', 'mp-1834', 'mp-2336', 'mp-12857', 'mp-21109', 'mp-30387', + 'mp-30599', 'mp-21884', 'mp-11397', 'mp-11814', 'mp-510437', 'mp-12565', 'mp-33032', 'mp-20885', 'mp-1891', - "mp-987", "mp-1542", "mp-2252", "mp-966", "mp-6945", "mp-1598", - "mp-7547", "mp-554340", "mp-384", "mp-2437", "mp-1167", "mp-571266", + "mp-987", 
"mp-1542", "mp-2252", "mp-966", "mp-6945", "mp-1598", + "mp-7547", "mp-554340", "mp-384", "mp-2437", "mp-1167", "mp-571266", "mp-560338", "mp-27253", "mp-1705", "mp-2131", "mp-676", "mp-2402", "mp-9588", "mp-2452", "mp-690", "mp-30033", "mp-10155", "mp-9921", "mp-9548", "mp-569857", "mp-29487", "mp-909", "mp-1536", "mp-28391", "mp-558811", "mp-1033", "mp-1220", @@ -146,7 +144,7 @@ def append_wf(fw_id, parent_fw_id=None): materials_wBS = [] for matidx, material in enumerate(materials.find({'task_id': {'$in': mp_ids}}, {'task_id': 1, '_id': 0, 'snlgroup_id_final': 1, 'has_bandstructure': 1, 'pretty_formula': 1})): mp_id, snlgroup_id = material['task_id'], material['snlgroup_id_final'] - url = 'https://materialsproject.org/materials/' + mp_id + url = 'https://materialsproject.org/materials/' + mp_id if material['has_bandstructure']: materials_wBS.append((mp_id, material['pretty_formula'])) counter['has_bandstructure'] += 1 From 4978b95979fc3eb02266a0ee46648956f9d5a144 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 23 Jan 2017 13:28:40 -0800 Subject: [PATCH 096/204] fix indentation in fix_mpcomplete --- mpworks/fix_scripts/fix_mpcomplete.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/fix_scripts/fix_mpcomplete.py b/mpworks/fix_scripts/fix_mpcomplete.py index a58513cb..d7954771 100644 --- a/mpworks/fix_scripts/fix_mpcomplete.py +++ b/mpworks/fix_scripts/fix_mpcomplete.py @@ -56,6 +56,6 @@ launch_dir = '/'.join('/oasis/projects/nsf/csd436/phuck/garden'.split('/') + block_dir) if not os.path.exists(launch_dir): print(doc['fw_id'], '---->', '/'.join(block_dir), 'does not exists!') - continue + continue fw_ids.append(doc['fw_id']) print('fixed', fw_ids) From 84a40eab7e040703a32116ccce46ca3252ab894b Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 23 Jan 2017 15:34:44 -0800 Subject: [PATCH 097/204] fix bug in determining handlers choice --- mpworks/firetasks/custodian_task.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff 
--git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index fdf2b000..0acc66e0 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -210,7 +210,8 @@ def get_custodian_task(spec): if 'optimize structure (2x)' in task_type: jobs = VaspJob.double_relaxation_run(v_exe) - elif {'static', 'deformed', 'NMR', 'Triple Jump Relax'} & set(task_type): + elif {'static', 'deformed', 'NMR EFG', 'NMR CS', 'Triple Jump Relax S1', + 'Triple Jump Relax S2', 'Triple Jump Relax S3'} & set(task_type): jobs = [VaspJob(v_exe)] else: # non-SCF runs From a250ab12295dafcf8c8b22e676956783f6c822df Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 23 Jan 2017 15:45:53 -0800 Subject: [PATCH 098/204] fix type bug in task_type comparison --- mpworks/firetasks/custodian_task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 0acc66e0..88df2e79 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -211,7 +211,7 @@ def get_custodian_task(spec): if 'optimize structure (2x)' in task_type: jobs = VaspJob.double_relaxation_run(v_exe) elif {'static', 'deformed', 'NMR EFG', 'NMR CS', 'Triple Jump Relax S1', - 'Triple Jump Relax S2', 'Triple Jump Relax S3'} & set(task_type): + 'Triple Jump Relax S2', 'Triple Jump Relax S3'} & {task_type}: jobs = [VaspJob(v_exe)] else: # non-SCF runs From 91c3d31a154c7287c36a2f42b73c223a63de6370 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 23 Jan 2017 17:15:43 -0800 Subject: [PATCH 099/204] convert to list to force the in-place decoding of the Handlers objects --- mpworks/firetasks/custodian_task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 88df2e79..0c1a08bd 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -71,7 
+71,7 @@ def __init__(self, parameters): self.update(parameters) self.jobs = self['jobs'] dec = MontyDecoder() - self.handlers = map(dec.process_decoded, self['handlers']) + self.handlers = list(map(dec.process_decoded, self['handlers'])) self.max_errors = self.get('max_errors', 1) self.gzip_output = self.get('gzip_output', True) From d6d0ec297459977ef650b4be2c7d2e14bb8e6ad8 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 23 Jan 2017 17:17:29 -0800 Subject: [PATCH 100/204] fix all the calls to map --- mpworks/check_snl/check_snl.py | 2 +- mpworks/check_snl/icsd.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mpworks/check_snl/check_snl.py b/mpworks/check_snl/check_snl.py index d12b9f28..af7f597f 100644 --- a/mpworks/check_snl/check_snl.py +++ b/mpworks/check_snl/check_snl.py @@ -314,7 +314,7 @@ def analyze(args): if args.t: if args.fig_id == 42: label_entries = filter(None, '
'.join(fig['data'][2]['text']).split('
')) - pairs = map(make_tuple, label_entries) + pairs = list(map(make_tuple, label_entries)) grps = set(chain.from_iterable(pairs)) snlgrp_cursor = sma.snlgroups.aggregate([ { '$match': { diff --git a/mpworks/check_snl/icsd.py b/mpworks/check_snl/icsd.py index 4dadd5f8..9ecf76ea 100644 --- a/mpworks/check_snl/icsd.py +++ b/mpworks/check_snl/icsd.py @@ -10,8 +10,8 @@ for category, text in zip(fig['data'][2]['y'], fig['data'][2]['text']): for line in text.split('
'): before_colon, after_colon = line.split(':') - snlgroup1, snlgroup2 = map(int, before_colon[1:-1].split(',')) + snlgroup1, snlgroup2 = list(map(int, before_colon[1:-1].split(','))) snls, icsd_matches = after_colon.split('->') - snl1, snl2 = map(int, snls[2:-2].split(',')) + snl1, snl2 = list(map(int, snls[2:-2].split(','))) icsd, matches = icsd_matches.strip().split(' ') writer.writerow([snlgroup1, snlgroup2, snl1, snl2, int(icsd), matches[1:-1]]) From eecfb36ddc163ed513cf38e7869ae82192c364f4 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 23 Jan 2017 22:55:55 -0800 Subject: [PATCH 101/204] add support for multiple VASP binary attempt --- mpworks/firetasks/custodian_task.py | 54 +++++++++++++++++++++++------ 1 file changed, 44 insertions(+), 10 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 0c1a08bd..3fc78cc1 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -1,3 +1,4 @@ +import tarfile from gzip import GzipFile import logging import socket @@ -130,10 +131,16 @@ def run_task(self, fw_spec): logging.basicConfig(level=logging.DEBUG) - c = Custodian(self.handlers, self.jobs, max_errors=self.max_errors, gzipped_output=False, - validators=[VasprunXMLValidator()], - terminate_func=terminate_func) # manual gzip - custodian_out = c.run() + error_list = [] + all_errors = self._run_custodian(terminate_func) + error_list.extend(all_errors) + if "alt_cmds" in fw_env and fw_spec['task_type'] in fw_env["alt_cmds"]: + logging.info("Initiate VASP calculations using alternate binaries") + all_errors = self._run_alt_vasp_cmd(terminate_func, v_exe, gv_exe, + fw_env.get("vasp_cmd", "vasp"), + fw_env.get("gvasp_cmd", "gvasp"), + fw_env["alt_cmds"][fw_spec['task_type']]) + error_list.extend(all_errors) if self.gzip_output: for f in os.listdir(os.getcwd()): @@ -143,12 +150,7 @@ def run_task(self, fw_spec): f_out.writelines(f_in) os.remove(f) - all_errors = set() - for run in 
custodian_out: - for correction in run['corrections']: - all_errors.update(correction['errors']) - - stored_data = {'error_list': list(all_errors)} + stored_data = {'error_list': error_list} update_spec = {'prev_vasp_dir': os.getcwd(), 'prev_task_type': fw_spec['task_type'], 'mpsnl': fw_spec['mpsnl'], @@ -158,6 +160,38 @@ def run_task(self, fw_spec): return FWAction(stored_data=stored_data, update_spec=update_spec) + def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, gvasp_cmd, alt_cmds): + error_list = [] + for new_vasp_path in alt_cmds: + new_vasp_cmd = new_vasp_path["vasp_cmd"] + new_gvasp_cmd = new_vasp_path["gvasp_cmd"] + new_v_exe = shlex.split(" ".join(v_exe).replace(vasp_cmd, new_vasp_cmd)) + new_gv_exe = shlex.split(" ".join(gv_exe).replace(gvasp_cmd, new_gvasp_cmd)) + logging.info("Run VASP with binary from {}".format(os.path.dirname(new_vasp_cmd))) + for job in self.jobs: + # set the vasp command to the alternative binaries + job.vasp_cmd = new_v_exe + job.gamma_vasp_cmd = new_gv_exe + if os.path.exists("error.1.tar.gz") and os.path.isfile("error.1.tar.gz"): + # restore to initial input set + with tarfile.open("error.1.tar.gz", "r") as tf: + for filename in ["INCAR", "KPOINTS", "POSCAR"]: + tf.extract(filename) + all_errors = self._run_custodian(terminate_func) + error_list.extend(all_errors) + return error_list + + def _run_custodian(self, terminate_func): + c = Custodian(self.handlers, self.jobs, max_errors=self.max_errors, gzipped_output=False, + validators=[VasprunXMLValidator()], + terminate_func=terminate_func) # manual gzip + custodian_out = c.run() + all_errors = set() + for run in custodian_out: + for correction in run['corrections']: + all_errors.update(correction['errors']) + return all_errors + @staticmethod def _get_vasp_cmd_in_job_packing(fw_data, fw_env, mpi_cmd): tasks_per_node_flag = {"srun": "--ntasks-per-node", From 2333ec06585d0d8074ea9cd32f895a5df3d14ce3 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 24 Jan 
2017 11:57:06 -0800 Subject: [PATCH 102/204] add options to choose whether start the attempts from initial input file --- mpworks/firetasks/custodian_task.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 3fc78cc1..c8200321 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -139,7 +139,8 @@ def run_task(self, fw_spec): all_errors = self._run_alt_vasp_cmd(terminate_func, v_exe, gv_exe, fw_env.get("vasp_cmd", "vasp"), fw_env.get("gvasp_cmd", "gvasp"), - fw_env["alt_cmds"][fw_spec['task_type']]) + fw_env["alt_cmds"][fw_spec['task_type']], + fw_env.get("input_rewind", True)) error_list.extend(all_errors) if self.gzip_output: @@ -160,7 +161,8 @@ def run_task(self, fw_spec): return FWAction(stored_data=stored_data, update_spec=update_spec) - def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, gvasp_cmd, alt_cmds): + def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, + gvasp_cmd, alt_cmds, input_rewind): error_list = [] for new_vasp_path in alt_cmds: new_vasp_cmd = new_vasp_path["vasp_cmd"] @@ -172,11 +174,12 @@ def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, gvasp_cmd, # set the vasp command to the alternative binaries job.vasp_cmd = new_v_exe job.gamma_vasp_cmd = new_gv_exe - if os.path.exists("error.1.tar.gz") and os.path.isfile("error.1.tar.gz"): - # restore to initial input set - with tarfile.open("error.1.tar.gz", "r") as tf: - for filename in ["INCAR", "KPOINTS", "POSCAR"]: - tf.extract(filename) + if not input_rewind: + if os.path.exists("error.1.tar.gz") and os.path.isfile("error.1.tar.gz"): + # restore to initial input set + with tarfile.open("error.1.tar.gz", "r") as tf: + for filename in ["INCAR", "KPOINTS", "POSCAR"]: + tf.extract(filename) all_errors = self._run_custodian(terminate_func) error_list.extend(all_errors) return error_list From 
4d4200f4eea2d3d23c8db5b517e28cdfb44c4c3a Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 25 Jan 2017 14:05:23 -0800 Subject: [PATCH 103/204] use explicit integer division operator to be py3 compatible --- mpworks/check_snl/builders/base.py | 2 +- mpworks/check_snl/check_snl.py | 10 +++++----- mpworks/firetasks/custodian_task.py | 2 +- mpworks/snl_utils/snl_mongo.py | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/mpworks/check_snl/builders/base.py b/mpworks/check_snl/builders/base.py index 58776afe..2ff34f48 100644 --- a/mpworks/check_snl/builders/base.py +++ b/mpworks/check_snl/builders/base.py @@ -71,7 +71,7 @@ def get_items(self, snls=None, snlgroups=None, ncols=None): return self._snls.query(distinct_key='snl_id') def process_item(self, item, index): - nrow, ncol = index/self._ncols, index%self._ncols + nrow, ncol = index//self._ncols, index%self._ncols snlgroups = {} # keep {snlgroup_id: SNLGroup} to avoid dupe queries if isinstance(item, dict) and 'snlgroup_ids' in item: for gid in item['snlgroup_ids']: diff --git a/mpworks/check_snl/check_snl.py b/mpworks/check_snl/check_snl.py index af7f597f..703ba187 100644 --- a/mpworks/check_snl/check_snl.py +++ b/mpworks/check_snl/check_snl.py @@ -36,11 +36,11 @@ ) num_ids_per_stream = 20000 -num_ids_per_stream_k = num_ids_per_stream/1000 +num_ids_per_stream_k = num_ids_per_stream//1000 num_snls = sma.snl.count() num_snlgroups = sma.snlgroups.count() num_pairs_per_job = 1000 * num_ids_per_stream -num_pairs_max = num_snlgroups*(num_snlgroups-1)/2 +num_pairs_max = num_snlgroups*(num_snlgroups-1)//2 num_snl_streams = div_plus_mod(num_snls, num_ids_per_stream) num_snlgroup_streams = div_plus_mod(num_snlgroups, num_ids_per_stream) @@ -113,7 +113,7 @@ def __iter__(self): def _get_initial_pair(self, job_id): N, J, M = num_snlgroups, job_id, num_pairs_per_job i = int(N+.5-sqrt(N*(N-1)+.25-2*J*M)) - j = J*M-(i-1)*(2*N-i)/2+i+1 + j = J*M-(i-1)*(2*N-i)//2+i+1 return Pair(i,j) def next(self): if 
self.num_pairs > num_pairs_per_job: @@ -202,7 +202,7 @@ def init_plotly(args): def check_snl_spacegroups(args): """check spacegroups of all available SNLs""" - range_index = args.start / num_ids_per_stream + range_index = args.start // num_ids_per_stream idxs = [range_index*2] idxs += [idxs[0]+1] s = [py.Stream(stream_ids[i]) for i in idxs] @@ -242,7 +242,7 @@ def check_snl_spacegroups(args): def check_snls_in_snlgroups(args): """check whether SNLs in each SNLGroup still match resp. canonical SNL""" - range_index = args.start / num_ids_per_stream + range_index = args.start // num_ids_per_stream idxs = [2*(num_snl_streams+range_index)] idxs += [idxs[0]+1] s = [py.Stream(stream_ids[i]) for i in idxs] diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index c8200321..54d24d65 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -222,7 +222,7 @@ def _get_vasp_cmd_in_job_packing(fw_data, fw_env, mpi_cmd): ranks_flag=ranks_num_flag[mpirun], nproc=sub_nproc, tpn_flag=tasks_per_node_flag[mpirun], - tpn=int(fw_data.SUB_NPROCS)/len(fw_data.NODE_LIST), + tpn=int(fw_data.SUB_NPROCS)//len(fw_data.NODE_LIST), nl_flag=nodelist_flag[mpirun], nl=','.join(fw_data.NODE_LIST), vasp_cmd=vasp_cmd)) diff --git a/mpworks/snl_utils/snl_mongo.py b/mpworks/snl_utils/snl_mongo.py index 35517878..2fa9b9ef 100644 --- a/mpworks/snl_utils/snl_mongo.py +++ b/mpworks/snl_utils/snl_mongo.py @@ -181,7 +181,7 @@ def lock_db(self, n_tried=0, n_max_tries=10): # DB was already locked by another process in a race condition self.lock_db(n_tried=n_tried, n_max_tries=n_max_tries) else: - raise ValueError('DB locked by another process! Could not lock even after {} minutes!'.format(n_max_tries/60)) + raise ValueError('DB locked by another process! 
Could not lock even after {} minutes!'.format(n_max_tries//60)) def release_lock(self): self.id_assigner.update_many({}, {'$set':{'lock': False}}) From 39a123727aad50163f7359980450a18e4d72bfd0 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 25 Jan 2017 22:21:42 -0800 Subject: [PATCH 104/204] ensure srun is running with -v option --- mpworks/firetasks/custodian_task.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 54d24d65..276c2907 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -210,6 +210,8 @@ def _get_vasp_cmd_in_job_packing(fw_data, fw_env, mpi_cmd): "mpirun": "", "aprun": ""} mpirun = mpi_cmd.split()[0] + if "srun" in mpi_cmd: + mpi_cmd += " -v" fw_data = FWData() # Don't honor the SLURM_NTASKS in case of job packing, Because SLURM_NTASKS is referring # to total number of processes of the parent job From 0e0010fdbe7322733a14715d781fc31a3e774845 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 25 Jan 2017 23:15:02 -0800 Subject: [PATCH 105/204] fix multiple binary logic, propogate CustodianError exception --- mpworks/firetasks/custodian_task.py | 40 ++++++++++++++++++++--------- 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 276c2907..a6a84bb7 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -10,7 +10,7 @@ from custodian.vasp.validators import VasprunXMLValidator from fireworks.core.firework import FireTaskBase, FWAction from fireworks.utilities.fw_serializers import FWSerializable -from custodian.custodian import Custodian +from custodian.custodian import Custodian, CustodianError from custodian.vasp.jobs import VaspJob import shlex import os @@ -132,16 +132,23 @@ def run_task(self, fw_spec): logging.basicConfig(level=logging.DEBUG) error_list = [] - all_errors = 
self._run_custodian(terminate_func) - error_list.extend(all_errors) - if "alt_cmds" in fw_env and fw_spec['task_type'] in fw_env["alt_cmds"]: - logging.info("Initiate VASP calculations using alternate binaries") - all_errors = self._run_alt_vasp_cmd(terminate_func, v_exe, gv_exe, - fw_env.get("vasp_cmd", "vasp"), - fw_env.get("gvasp_cmd", "gvasp"), - fw_env["alt_cmds"][fw_spec['task_type']], - fw_env.get("input_rewind", True)) + cus_ex = None + try: + all_errors = self._run_custodian(terminate_func) error_list.extend(all_errors) + except CustodianError as ex: + cus_ex = ex + if cus_ex is not None: + if "alt_cmds" in fw_env and fw_spec['task_type'] in fw_env["alt_cmds"]: + logging.info("Initiate VASP calculations using alternate binaries") + all_errors = self._run_alt_vasp_cmd(terminate_func, v_exe, gv_exe, + fw_env.get("vasp_cmd", "vasp"), + fw_env.get("gvasp_cmd", "gvasp"), + fw_env["alt_cmds"][fw_spec['task_type']], + fw_env.get("input_rewind", True)) + error_list.extend(all_errors) + else: + raise cus_ex if self.gzip_output: for f in os.listdir(os.getcwd()): @@ -164,6 +171,7 @@ def run_task(self, fw_spec): def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, gvasp_cmd, alt_cmds, input_rewind): error_list = [] + cus_ex = None for new_vasp_path in alt_cmds: new_vasp_cmd = new_vasp_path["vasp_cmd"] new_gvasp_cmd = new_vasp_path["gvasp_cmd"] @@ -180,8 +188,16 @@ def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, with tarfile.open("error.1.tar.gz", "r") as tf: for filename in ["INCAR", "KPOINTS", "POSCAR"]: tf.extract(filename) - all_errors = self._run_custodian(terminate_func) - error_list.extend(all_errors) + cus_ex = None + try: + all_errors = self._run_custodian(terminate_func) + error_list.extend(all_errors) + except CustodianError as ex: + cus_ex = ex + if cus_ex is None: + break + if cus_ex is not None: + raise cus_ex return error_list def _run_custodian(self, terminate_func): From 2d70cb78da20781d7d75d7b1d420953a2049b8e1 Mon 
Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 26 Jan 2017 19:07:29 -0800 Subject: [PATCH 106/204] set exception in case that attempts continue with other binaries --- mpworks/firetasks/custodian_task.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index a6a84bb7..a8f98e36 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -140,6 +140,7 @@ def run_task(self, fw_spec): cus_ex = ex if cus_ex is not None: if "alt_cmds" in fw_env and fw_spec['task_type'] in fw_env["alt_cmds"]: + cus_ex = None logging.info("Initiate VASP calculations using alternate binaries") all_errors = self._run_alt_vasp_cmd(terminate_func, v_exe, gv_exe, fw_env.get("vasp_cmd", "vasp"), From 67aebceb8b51c73475d3766049fa850b6d70d9b4 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 26 Jan 2017 20:47:29 -0800 Subject: [PATCH 107/204] catch all general Exception from custodian in case that the attempts haven't been concluded. 
--- mpworks/firetasks/custodian_task.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index a8f98e36..b5dcf75f 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -136,7 +136,7 @@ def run_task(self, fw_spec): try: all_errors = self._run_custodian(terminate_func) error_list.extend(all_errors) - except CustodianError as ex: + except Exception as ex: cus_ex = ex if cus_ex is not None: if "alt_cmds" in fw_env and fw_spec['task_type'] in fw_env["alt_cmds"]: @@ -193,7 +193,7 @@ def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, try: all_errors = self._run_custodian(terminate_func) error_list.extend(all_errors) - except CustodianError as ex: + except Exception as ex: cus_ex = ex if cus_ex is None: break From e518f6ba7059887ea519ce76f8310b8600001340 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 3 Feb 2017 15:40:26 -0800 Subject: [PATCH 108/204] use lazy connection for MongoClient --- mpworks/drones/mp_vaspdrone.py | 2 +- mpworks/processors/submit_canonical.py | 2 +- mpworks/snl_utils/snl_mongo.py | 2 +- mpworks/submission/submission_mongo.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mpworks/drones/mp_vaspdrone.py b/mpworks/drones/mp_vaspdrone.py index a57d8880..d32ac33f 100644 --- a/mpworks/drones/mp_vaspdrone.py +++ b/mpworks/drones/mp_vaspdrone.py @@ -75,7 +75,7 @@ def assimilate(self, path, launches_coll=None): # Perform actual insertion into db. Because db connections cannot # be pickled, every insertion needs to create a new connection # to the db. 
- conn = MongoClient(self.host, self.port) + conn = MongoClient(self.host, self.port, connect=False) db = conn[self.database] if self.user: db.authenticate(self.user, self.password) diff --git a/mpworks/processors/submit_canonical.py b/mpworks/processors/submit_canonical.py index 9c1049d7..6f07d5dc 100644 --- a/mpworks/processors/submit_canonical.py +++ b/mpworks/processors/submit_canonical.py @@ -34,7 +34,7 @@ def clear_env(): lp.reset('', require_password=False) snl._reset() - conn = MongoClient(db_creds['host'], db_creds['port']) + conn = MongoClient(db_creds['host'], db_creds['port'], connect=False) db = conn[db_creds['database']] if db_creds['admin_user'] is not None: db.authenticate(db_creds['admin_user'], db_creds['admin_password']) diff --git a/mpworks/snl_utils/snl_mongo.py b/mpworks/snl_utils/snl_mongo.py index 2fa9b9ef..bdc45a5a 100644 --- a/mpworks/snl_utils/snl_mongo.py +++ b/mpworks/snl_utils/snl_mongo.py @@ -28,7 +28,7 @@ def __init__(self, host='localhost', port=27017, db='snl', username=None, self.username = username self.password = password - self.connection = MongoClient(host, port, j=False) + self.connection = MongoClient(host, port, j=False, connect=False) self.database = self.connection[db] if self.username: self.database.authenticate(username, password) diff --git a/mpworks/submission/submission_mongo.py b/mpworks/submission/submission_mongo.py index 99ebc32c..9b87a57b 100644 --- a/mpworks/submission/submission_mongo.py +++ b/mpworks/submission/submission_mongo.py @@ -78,7 +78,7 @@ def __init__(self, host='localhost', port=27017, db='snl', username=None, self.username = username self.password = password - self.connection = MongoClient(host, port, j=False) + self.connection = MongoClient(host, port, j=False, connect=False) self.database = self.connection[db] if self.username: self.database.authenticate(username, password) From f170824d950674163bfc1aa6d54faa1404d385ae Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 3 Feb 2017 15:56:47 
-0800 Subject: [PATCH 109/204] use the share LaunchPad in case of job packing --- mpworks/firetasks/vasp_io_tasks.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/vasp_io_tasks.py b/mpworks/firetasks/vasp_io_tasks.py index 26f44491..4c878cbb 100644 --- a/mpworks/firetasks/vasp_io_tasks.py +++ b/mpworks/firetasks/vasp_io_tasks.py @@ -9,6 +9,8 @@ import os import shutil import sys + +from fireworks.fw_config import FWData from monty.os.path import zpath from custodian.vasp.handlers import UnconvergedErrorHandler from fireworks.core.launchpad import LaunchPad @@ -164,6 +166,12 @@ def run_task(self, fw_spec): sh = logging.StreamHandler(stream=sys.stdout) sh.setLevel(getattr(logging, 'INFO')) logger.addHandler(sh) + fw_data = FWData() + if not fw_data.MULTIPROCESSING: + launch_coll = LaunchPad.auto_load().launches + else: + lp = fw_data.lp + launch_coll = lp.launches with open(db_path) as f: db_creds = json.load(f) drone = MPVaspDrone(host=db_creds['host'], port=db_creds['port'], @@ -172,7 +180,7 @@ def run_task(self, fw_spec): collection=db_creds['collection'], parse_dos=parse_dos, additional_fields=self.additional_fields, update_duplicates=self.update_duplicates) - t_id, d = drone.assimilate(prev_dir, launches_coll=LaunchPad.auto_load().launches) + t_id, d = drone.assimilate(prev_dir, launches_coll=launch_coll) mpsnl = d['snl_final'] if 'snl_final' in d else d['snl'] snlgroup_id = d['snlgroup_id_final'] if 'snlgroup_id_final' in d else d['snlgroup_id'] From a59f51ed0ca5647c530fe5784c1e18c91db830cb Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 24 Mar 2017 12:07:09 -0700 Subject: [PATCH 110/204] handle run location in the period of CSCRATCH transition, in which the scratch file path has ".new" in it. 
--- mpworks/workflows/wf_utils.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mpworks/workflows/wf_utils.py b/mpworks/workflows/wf_utils.py index 7d67d71c..237677d9 100644 --- a/mpworks/workflows/wf_utils.py +++ b/mpworks/workflows/wf_utils.py @@ -68,6 +68,10 @@ def get_block_part(m_dir): def get_loc(m_dir): if os.path.exists(m_dir): return m_dir + if re.match("/global/cscratch1/sd/\w+[.]new/.+", m_dir): + new_scr = m_dir.replace(".new", "") + if os.path.exists(new_scr): + return new_scr block_part = get_block_part(m_dir) for preamble in WFSettings().RUN_LOCS: From c26af055339526a61c67255c64c0c57d0c486920 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sat, 25 Mar 2017 13:22:19 -0700 Subject: [PATCH 111/204] change K-points in NMR workflow to full automatic --- mpworks/firetasks/nmr_tensor_set.yaml | 4 ++-- mpworks/firetasks/triple_jump_relax_set.yaml | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index 49bd1b6a..ee3c7019 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -17,7 +17,7 @@ CS: PREC: ACCURATE SIGMA: 0.01 KPOINTS: - grid_density: 6000 + length: 60 POTCAR: C: C_h H: H_h @@ -130,7 +130,7 @@ EFG: Ra-223: 1210.3 SIGMA: 0.05 KPOINTS: - grid_density: 3000 + length: 30 POTCAR: C: C H: H diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 56a792e8..de97a04b 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -17,7 +17,7 @@ STEP1: PREC: ACCURATE SIGMA: 0.1 KPOINTS: - grid_density: 1000 + length: 10 POTCAR: C: C H: H @@ -55,7 +55,7 @@ STEP2: PREC: ACCURATE SIGMA: 0.03 KPOINTS: - grid_density: 3000 + length: 30 POTCAR: C: C_h H: H_h @@ -96,7 +96,7 @@ STEP3: SIGMA: 0.01 TIMESTEP: 0.05 KPOINTS: - grid_density: 6000 + length: 60 POTCAR: C: C_h H: H_h From 
b1b1f4aa19cfa9288270a15af30d377900e6667c Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sat, 25 Mar 2017 13:49:06 -0700 Subject: [PATCH 112/204] reduce number of K-points --- mpworks/firetasks/nmr_tensor_set.yaml | 4 ++-- mpworks/firetasks/triple_jump_relax_set.yaml | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index ee3c7019..0ff6a70d 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -17,7 +17,7 @@ CS: PREC: ACCURATE SIGMA: 0.01 KPOINTS: - length: 60 + length: 32 POTCAR: C: C_h H: H_h @@ -130,7 +130,7 @@ EFG: Ra-223: 1210.3 SIGMA: 0.05 KPOINTS: - length: 30 + length: 24 POTCAR: C: C H: H diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index de97a04b..c6c764d2 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -17,7 +17,7 @@ STEP1: PREC: ACCURATE SIGMA: 0.1 KPOINTS: - length: 10 + length: 16 POTCAR: C: C H: H @@ -55,7 +55,7 @@ STEP2: PREC: ACCURATE SIGMA: 0.03 KPOINTS: - length: 30 + length: 24 POTCAR: C: C_h H: H_h @@ -96,7 +96,7 @@ STEP3: SIGMA: 0.01 TIMESTEP: 0.05 KPOINTS: - length: 60 + length: 32 POTCAR: C: C_h H: H_h From 1e5edc8009419d4d2b62804eedb5677ecee8f501 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 26 Mar 2017 12:56:00 -0700 Subject: [PATCH 113/204] tighten structure compare by 10 times for NMR calculation --- mpworks/snl_utils/mpsnl.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mpworks/snl_utils/mpsnl.py b/mpworks/snl_utils/mpsnl.py index 5e5fe01a..1fbcfeed 100644 --- a/mpworks/snl_utils/mpsnl.py +++ b/mpworks/snl_utils/mpsnl.py @@ -188,9 +188,9 @@ def add_if_belongs(self, cand_snl): stol = 0.3 angle_tol = 5.0 else: - ltol = 0.02 - stol = 0.03 - angle_tol = 0.5 + ltol = 0.002 + stol = 0.003 + angle_tol = 0.05 sm = StructureMatcher(ltol=ltol, 
stol=stol, angle_tol=angle_tol, primitive_cell=True, scale=True, attempt_supercell=False, comparator=ElementComparator()) From ed4002d8ebb04d5de599356d6c40297246e8b538 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 26 Mar 2017 15:22:37 -0700 Subject: [PATCH 114/204] add mechanism to avoid infinite loop in spawning dynamic FW --- mpworks/firetasks/vasp_io_tasks.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/vasp_io_tasks.py b/mpworks/firetasks/vasp_io_tasks.py index 4c878cbb..1b3660b8 100644 --- a/mpworks/firetasks/vasp_io_tasks.py +++ b/mpworks/firetasks/vasp_io_tasks.py @@ -11,6 +11,7 @@ import sys from fireworks.fw_config import FWData +from monty.io import zopen from monty.os.path import zpath from custodian.vasp.handlers import UnconvergedErrorHandler from fireworks.core.launchpad import LaunchPad @@ -132,6 +133,20 @@ def __init__(self, parameters=None): self.additional_fields = self.get('additional_fields', {}) self.update_duplicates = self.get('update_duplicates', False) # off so DOS/BS doesn't get entered twice + def same_fw_occurrences_in_cur_wflow(self, prev_dir, lp): + with zopen(zpath(os.path.join(prev_dir, 'FW.json')), 'rt') as f: + fw_dict = json.load(f) + prev_vasp_fw_id = fw_dict["launches"][0]["fw_id"] + prev_vasp_launch_id = fw_dict["launches"][0]["launch_id"] + prev_vasp_launch_assoc_fw_ids = [doc["fw_id"] for doc in + lp.fireworks.find({"launches": prev_vasp_launch_id}, projection=["fw_id"])] + + cur_wf_fw_ids = lp.workflows.find({"nodes": prev_vasp_fw_id})[0]["nodes"] + # preferably current fw_id, however, there no mechanism to get it. 
+ + num_occurrences = len(set(cur_wf_fw_ids) & set(prev_vasp_launch_assoc_fw_ids)) + return num_occurrences + def run_task(self, fw_spec): if '_fizzled_parents' in fw_spec and not 'prev_vasp_dir' in fw_spec: prev_dir = get_loc(fw_spec['_fizzled_parents'][0]['launches'][0]['launch_dir']) @@ -168,7 +183,8 @@ def run_task(self, fw_spec): logger.addHandler(sh) fw_data = FWData() if not fw_data.MULTIPROCESSING: - launch_coll = LaunchPad.auto_load().launches + lp = LaunchPad.auto_load() + launch_coll = lp.launches else: lp = fw_data.lp launch_coll = lp.launches @@ -200,6 +216,10 @@ def run_task(self, fw_spec): # not successful - first test to see if UnconvergedHandler is needed if not fizzled_parent: unconverged_tag = 'unconverged_handler--{}'.format(fw_spec['prev_task_type']) + if self.same_fw_occurrences_in_cur_wflow(prev_dir, lp) > 1: + raise ValueError("Same Dynamics FW launch has been already existed in" + "the database for more than twice, stop spawn new FW" + "to avoid infinite loop") output_dir = last_relax(os.path.join(prev_dir, 'vasprun.xml')) ueh = UnconvergedErrorHandler(output_filename=output_dir) # TODO: make this a little more flexible From fd04620fb3f45b9626b006af5fea145dd5f64286 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 26 Mar 2017 15:55:45 -0700 Subject: [PATCH 115/204] if the structure is relaxed for more than 3 step while StructureMatcher categories to the same snlgroup, issue a warning --- mpworks/drones/mp_vaspdrone.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/mpworks/drones/mp_vaspdrone.py b/mpworks/drones/mp_vaspdrone.py index d32ac33f..5e4146ba 100644 --- a/mpworks/drones/mp_vaspdrone.py +++ b/mpworks/drones/mp_vaspdrone.py @@ -5,6 +5,8 @@ import pprint import re import traceback +import warnings + from monty.io import zopen from monty.os.path import zpath from pymongo import MongoClient @@ -277,6 +279,14 @@ def process_fw(self, dir_name, d): d['snlgroup_id_final'] = snlgroup_id d['snlgroup_changed'] = 
(d['snlgroup_id'] != d['snlgroup_id_final']) + if len(d["calculations"][-1]["output"]["ionic_steps"]) >= 3 and not d['snlgroup_changed']: + message = "The structure has been relaxed for >=3 step, however, final structure" \ + "ends in the snlgroup with the initial group, please change either structure" \ + "relax criteria or StructureMatcher tolerance" + if "NMR" not in mpsnl.projects: + warnings.warn(message) + else: + raise ValueError(message) else: d['snl_final'] = d['snl'] d['snlgroup_id_final'] = d['snlgroup_id'] From d086b13824d73a735f11c6a5c48300ba4c438568 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 26 Mar 2017 15:59:57 -0700 Subject: [PATCH 116/204] fix input rewind logic --- mpworks/firetasks/custodian_task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index b5dcf75f..756e34f7 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -183,7 +183,7 @@ def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, # set the vasp command to the alternative binaries job.vasp_cmd = new_v_exe job.gamma_vasp_cmd = new_gv_exe - if not input_rewind: + if input_rewind: if os.path.exists("error.1.tar.gz") and os.path.isfile("error.1.tar.gz"): # restore to initial input set with tarfile.open("error.1.tar.gz", "r") as tf: From 21f292816c0cb191cb15b40586d90ffdc534bf27 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 26 Mar 2017 16:07:28 -0700 Subject: [PATCH 117/204] always update geometry --- mpworks/firetasks/custodian_task.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 756e34f7..f73733cb 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -3,6 +3,7 @@ import logging import socket +import shutil from fireworks.fw_config import FWData from monty.os.path import which from 
custodian.vasp.handlers import VaspErrorHandler, NonConvergingErrorHandler, \ @@ -189,6 +190,10 @@ def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, with tarfile.open("error.1.tar.gz", "r") as tf: for filename in ["INCAR", "KPOINTS", "POSCAR"]: tf.extract(filename) + if os.path.exists("CONTCAR"): + if os.path.exists("POSCAR"): + os.remove("POSCAR") + shutil.move("CONTCAR") cus_ex = None try: all_errors = self._run_custodian(terminate_func) From ac5470129bb1d9a10b7c57727e5d6fcd2943d05b Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 26 Mar 2017 16:07:54 -0700 Subject: [PATCH 118/204] add missing target file --- mpworks/firetasks/custodian_task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index f73733cb..6f8f45e5 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -193,7 +193,7 @@ def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, if os.path.exists("CONTCAR"): if os.path.exists("POSCAR"): os.remove("POSCAR") - shutil.move("CONTCAR") + shutil.move("CONTCAR", "POSCAR") cus_ex = None try: all_errors = self._run_custodian(terminate_func) From f0e1f3ccec4a0a6e8d754ac7b1cb9a80ab64c8bd Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 27 Mar 2017 09:09:55 -0700 Subject: [PATCH 119/204] tighten the structure matcher criteria even futher by 10 times. 
--- mpworks/snl_utils/mpsnl.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mpworks/snl_utils/mpsnl.py b/mpworks/snl_utils/mpsnl.py index 1fbcfeed..a8e00437 100644 --- a/mpworks/snl_utils/mpsnl.py +++ b/mpworks/snl_utils/mpsnl.py @@ -188,9 +188,9 @@ def add_if_belongs(self, cand_snl): stol = 0.3 angle_tol = 5.0 else: - ltol = 0.002 - stol = 0.003 - angle_tol = 0.05 + ltol = 0.0002 + stol = 0.0003 + angle_tol = 0.005 sm = StructureMatcher(ltol=ltol, stol=stol, angle_tol=angle_tol, primitive_cell=True, scale=True, attempt_supercell=False, comparator=ElementComparator()) From 46bc93b01bfdc745eb584b804edd3f3d4352f2ca Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 27 Mar 2017 15:06:58 -0700 Subject: [PATCH 120/204] check the validation of CONTCAR before replacing POSCAR --- mpworks/firetasks/custodian_task.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 6f8f45e5..de19b8ca 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -147,7 +147,8 @@ def run_task(self, fw_spec): fw_env.get("vasp_cmd", "vasp"), fw_env.get("gvasp_cmd", "gvasp"), fw_env["alt_cmds"][fw_spec['task_type']], - fw_env.get("input_rewind", True)) + fw_env.get("input_rewind", True), + fw_spec['mpsnl'].structure) error_list.extend(all_errors) else: raise cus_ex @@ -171,7 +172,7 @@ def run_task(self, fw_spec): return FWAction(stored_data=stored_data, update_spec=update_spec) def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, - gvasp_cmd, alt_cmds, input_rewind): + gvasp_cmd, alt_cmds, input_rewind, structure): error_list = [] cus_ex = None for new_vasp_path in alt_cmds: @@ -191,9 +192,15 @@ def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, for filename in ["INCAR", "KPOINTS", "POSCAR"]: tf.extract(filename) if os.path.exists("CONTCAR"): - if os.path.exists("POSCAR"): - 
os.remove("POSCAR") - shutil.move("CONTCAR", "POSCAR") + natoms = len(structure) + with open("CONTCAR") as f: + contcar_lines = f.readlines() + n_contcar_lines = len(contcar_lines) + if n_contcar_lines > natoms + 5: + # valid CONTCAR file + if os.path.exists("POSCAR"): + os.remove("POSCAR") + shutil.move("CONTCAR", "POSCAR") cus_ex = None try: all_errors = self._run_custodian(terminate_func) From b208e1d02bec7e03bd8ca3a06542eb24714b4a32 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 27 Mar 2017 15:53:28 -0700 Subject: [PATCH 121/204] back custodian.json for every VASP binary run --- mpworks/firetasks/custodian_task.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index de19b8ca..a10465cf 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -1,4 +1,5 @@ import tarfile +from glob import glob from gzip import GzipFile import logging import socket @@ -185,12 +186,29 @@ def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, # set the vasp command to the alternative binaries job.vasp_cmd = new_v_exe job.gamma_vasp_cmd = new_gv_exe + + # backup the files for the last VASP binary run + error_file_prefix = "error" + error_num = max([0] + [int(f.split(".")[1]) + for f in glob("{}.*.tar.gz".format(error_file_prefix))]) + error_filename = "{}.{}.tar.gz".format(error_file_prefix, error_num) + binary_file_prefix = "binary" + binary_num = max([0] + [int(f.split(".")[1]) + for f in glob("{}.*.tar.gz".format(binary_file_prefix))]) + binary_filename = "{}.{}.tar.gz".format(binary_file_prefix, binary_num + 1) + with tarfile.open(binary_filename, "w:gz") as tar: + for fname in ["custodian.json", error_filename, "CONTCAR"]: + for f in glob(fname): + tar.add(f) + if input_rewind: + # rewind the input to every beginning if os.path.exists("error.1.tar.gz") and os.path.isfile("error.1.tar.gz"): # restore to initial input set with 
tarfile.open("error.1.tar.gz", "r") as tf: for filename in ["INCAR", "KPOINTS", "POSCAR"]: tf.extract(filename) + if os.path.exists("CONTCAR"): natoms = len(structure) with open("CONTCAR") as f: @@ -201,6 +219,8 @@ def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, if os.path.exists("POSCAR"): os.remove("POSCAR") shutil.move("CONTCAR", "POSCAR") + + # run the calculation cus_ex = None try: all_errors = self._run_custodian(terminate_func) @@ -211,6 +231,7 @@ def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, break if cus_ex is not None: raise cus_ex + return error_list def _run_custodian(self, terminate_func): From 08c4dcc1bd21f3f10d8d33c2daec774efc74a9e7 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 28 Mar 2017 14:13:06 -0700 Subject: [PATCH 122/204] add special for dynamic workflows in case of triple jump step 3 --- mpworks/firetasks/nmr_tasks.py | 20 ++++++++++++++++++++ mpworks/firetasks/triple_jump_relax_set.yaml | 7 +++++++ mpworks/firetasks/vasp_io_tasks.py | 8 +++++++- 3 files changed, 34 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 129c6d5d..07857625 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -11,6 +11,7 @@ from mpworks.dupefinders.dupefinder_vasp import DupeFinderVasp from mpworks.firetasks.vasp_io_tasks import VaspToDBTask +from mpworks.firetasks.vasp_setup_tasks import SetupUnconvergedHandlerTask from mpworks.workflows.wf_settings import WFSettings from mpworks.workflows.wf_utils import get_loc @@ -186,6 +187,25 @@ def run_task(self, fw_spec): return super(TripleJumpRelaxVaspToDBTask, self).run_task(fw_spec) +class SetupTripleJumpRelaxS3UnconvergedHandlerTask(SetupUnconvergedHandlerTask): + _fw_name = "Unconverged Handler Task" + + def run_task(self, fw_spec): + module_dir = os.path.abspath(os.path.dirname(__file__)) + config_file = os.path.join(module_dir, "triple_jump_relax_set.yaml") + config_key = 
"STEP_DYNA3" + with open(config_file) as f: + parent_config_dict = yaml.load(stream=f) + config_dict = parent_config_dict[config_key] + incar_update = config_dict["INCAR"] + actions = [{"dict": "INCAR", + "action": {"_set": incar_update}}] + from custodian.vasp.interpreter import VaspModder + tj_action = VaspModder().apply_actions(actions) + parent_action = super(SetupTripleJumpRelaxS3UnconvergedHandlerTask).run_task(fw_spec) + return tj_action + parent_action + + class DictVaspSetupTask(FireTaskBase, FWSerializable): _fw_name = "Dict Vasp Input Setup Task" diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index c6c764d2..275c2af2 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -111,3 +111,10 @@ STEP3: Ca: Ca_pv Y: Y_sv Gd: Gd + +STEP_DYNA3: + INCAR: + FTIMEMAX: 0.01 + MAXMOVE: 0.002 + TIMESTEP: 0.001 + FNMIN: 2 diff --git a/mpworks/firetasks/vasp_io_tasks.py b/mpworks/firetasks/vasp_io_tasks.py index 1b3660b8..7e5f9611 100644 --- a/mpworks/firetasks/vasp_io_tasks.py +++ b/mpworks/firetasks/vasp_io_tasks.py @@ -248,9 +248,15 @@ def run_task(self, fw_spec): f = Composition( snl.structure.composition.reduced_formula).alphabetical_formula + if fw_spec['prev_task_type'] == "Triple Jump Relax S3": + from mpworks.firetasks.nmr_tasks import SetupTripleJumpRelaxS3UnconvergedHandlerTask + unconv_handler_task = SetupTripleJumpRelaxS3UnconvergedHandlerTask() + else: + unconv_handler_task = SetupUnconvergedHandlerTask() + fws.append(Firework( [VaspCopyTask({'files': ['INCAR', 'KPOINTS', 'POSCAR', 'POTCAR', 'CONTCAR'], - 'use_CONTCAR': False}), SetupUnconvergedHandlerTask(), + 'use_CONTCAR': False}), unconv_handler_task, get_custodian_task(spec)], spec, name=get_slug(f + '--' + spec['task_type']), fw_id=-2)) From e033d3562968ba6f6d70be21c4297cf5673f0d78 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 28 Mar 2017 15:22:18 -0700 Subject: [PATCH 123/204] 
always use LaunchPad directly rather than through the DataServer --- mpworks/firetasks/vasp_io_tasks.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/mpworks/firetasks/vasp_io_tasks.py b/mpworks/firetasks/vasp_io_tasks.py index 7e5f9611..e8e0d42f 100644 --- a/mpworks/firetasks/vasp_io_tasks.py +++ b/mpworks/firetasks/vasp_io_tasks.py @@ -181,13 +181,8 @@ def run_task(self, fw_spec): sh = logging.StreamHandler(stream=sys.stdout) sh.setLevel(getattr(logging, 'INFO')) logger.addHandler(sh) - fw_data = FWData() - if not fw_data.MULTIPROCESSING: - lp = LaunchPad.auto_load() - launch_coll = lp.launches - else: - lp = fw_data.lp - launch_coll = lp.launches + lp = LaunchPad.auto_load() + launch_coll = lp.launches with open(db_path) as f: db_creds = json.load(f) drone = MPVaspDrone(host=db_creds['host'], port=db_creds['port'], From 2355621e85d24128bb10655e3bd6313893cf2bf2 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 28 Mar 2017 15:35:52 -0700 Subject: [PATCH 124/204] change dynamic step FIRE optimizer parameters to 0.1 times of default value --- mpworks/firetasks/triple_jump_relax_set.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 275c2af2..2e3e0e04 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -114,7 +114,7 @@ STEP3: STEP_DYNA3: INCAR: - FTIMEMAX: 0.01 - MAXMOVE: 0.002 - TIMESTEP: 0.001 + FTIMEMAX: 0.1 + MAXMOVE: 0.02 + TIMESTEP: 0.01 FNMIN: 2 From 7232c689c49d96feced55d967d176c1eb307abb0 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 28 Mar 2017 17:46:49 -0700 Subject: [PATCH 125/204] fix fw_name --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 07857625..c95bcad0 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ 
b/mpworks/firetasks/nmr_tasks.py @@ -188,7 +188,7 @@ def run_task(self, fw_spec): class SetupTripleJumpRelaxS3UnconvergedHandlerTask(SetupUnconvergedHandlerTask): - _fw_name = "Unconverged Handler Task" + _fw_name = "Triple Jump Relax S3 Unconverged Handler Task" def run_task(self, fw_spec): module_dir = os.path.abspath(os.path.dirname(__file__)) From cf01498eb9befcd13b065aef27f99e8a3de85bb5 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 28 Mar 2017 21:59:21 -0700 Subject: [PATCH 126/204] remove old file before starting new calculations to avoid confuse custodian --- mpworks/firetasks/custodian_task.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index a10465cf..2afd9578 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -196,6 +196,13 @@ def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, binary_num = max([0] + [int(f.split(".")[1]) for f in glob("{}.*.tar.gz".format(binary_file_prefix))]) binary_filename = "{}.{}.tar.gz".format(binary_file_prefix, binary_num + 1) + + for fname in ["OSZICAR", "vasp.out", "std_err.txt"]: + # remove old file before starting new calculations to + # avoid confuse custodian + if os.path.exists(fname): + os.remove(fname) + with tarfile.open(binary_filename, "w:gz") as tar: for fname in ["custodian.json", error_filename, "CONTCAR"]: for f in glob(fname): From 4e80d9516deea55723e834386e1986a8bf8d161f Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 28 Mar 2017 22:10:46 -0700 Subject: [PATCH 127/204] fix "TypeError: a bytes-like object is required, not 'int'" in VaspCopyTask --- mpworks/firetasks/vasp_io_tasks.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/mpworks/firetasks/vasp_io_tasks.py b/mpworks/firetasks/vasp_io_tasks.py index e8e0d42f..d7971e6d 100644 --- a/mpworks/firetasks/vasp_io_tasks.py +++ b/mpworks/firetasks/vasp_io_tasks.py @@ -101,11 +101,9 
@@ def run_task(self, fw_spec): shutil.copy2(prev_filename, dest_file) if '.gz' in dest_file: # unzip dest file - f = gzip.open(dest_file, 'rb') - file_content = f.read() - with open(dest_file[0:-3], 'wb') as f_out: - f_out.writelines(file_content) - f.close() + with gzip.open(dest_file, 'rb') as f_in: + with open(dest_file[0:-3], 'wb') as f_out: + shutil.copyfileobj(f_in, f_out) os.remove(dest_file) if self.use_contcar and not self.keep_velocities: From 21b62d19092e693f4b62bb5d05da6d4ad3c94e21 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 30 Mar 2017 13:01:24 -0700 Subject: [PATCH 128/204] fix super class call --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index c95bcad0..ddeaf4b0 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -202,7 +202,7 @@ def run_task(self, fw_spec): "action": {"_set": incar_update}}] from custodian.vasp.interpreter import VaspModder tj_action = VaspModder().apply_actions(actions) - parent_action = super(SetupTripleJumpRelaxS3UnconvergedHandlerTask).run_task(fw_spec) + parent_action = super(SetupTripleJumpRelaxS3UnconvergedHandlerTask, self).run_task(fw_spec) return tj_action + parent_action From 62671cf301b9c90060389623a750687bcd378541 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 30 Mar 2017 13:12:37 -0700 Subject: [PATCH 129/204] just copy the parent FWAction --- mpworks/firetasks/nmr_tasks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index ddeaf4b0..952c20da 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -201,9 +201,9 @@ def run_task(self, fw_spec): actions = [{"dict": "INCAR", "action": {"_set": incar_update}}] from custodian.vasp.interpreter import VaspModder - tj_action = VaspModder().apply_actions(actions) + 
VaspModder().apply_actions(actions) parent_action = super(SetupTripleJumpRelaxS3UnconvergedHandlerTask, self).run_task(fw_spec) - return tj_action + parent_action + return parent_action class DictVaspSetupTask(FireTaskBase, FWSerializable): From 13a64adbca71bac11eec22fac5c6cff86fe83392 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 31 Mar 2017 15:33:16 -0700 Subject: [PATCH 130/204] support SCAN functional --- mpworks/firetasks/custodian_task.py | 2 ++ mpworks/firetasks/nmr_tasks.py | 40 ++++++++++++++++++++++++++++- mpworks/firetasks/vasp_io_tasks.py | 4 +++ mpworks/workflows/snl_to_wf_nmr.py | 13 ++++++++-- 4 files changed, 56 insertions(+), 3 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 2afd9578..7a994f47 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -169,6 +169,8 @@ def run_task(self, fw_spec): 'snlgroup_id': fw_spec['snlgroup_id'], 'run_tags': fw_spec['run_tags'], 'parameters': fw_spec.get('parameters')} + if 'functional' in fw_spec: + update_spec['functional'] = fw_spec['spec'] return FWAction(stored_data=stored_data, update_spec=update_spec) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 952c20da..505c9ac3 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -1,6 +1,8 @@ import copy import json import os + +import shutil import yaml from fireworks import FireTaskBase from fireworks.core.firework import FWAction @@ -89,7 +91,7 @@ def _change_garden_setting(): def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_run_tags=()): - parameters = parameters if parameters else {} + parameters = copy.deepcopy(parameters) if parameters else {} spec = {'parameters': parameters} module_dir = os.path.abspath(os.path.dirname(__file__)) @@ -222,3 +224,39 @@ def run_task(self, fw_spec): vis.potcar.write_file("POTCAR") vis.kpoints.write_file("KPOINTS") return 
FWAction(stored_data={"vasp_input_set": vis.as_dict()}) + + +class ScanFunctionalSetupTask(FireTaskBase, FWSerializable): + """ + This class is to setup the SCAN functional calculation for the + hack version of VASP. If official SCAN functional supported VASP + is release, the run_task() method body can be set to "pass". It + will make the workflow compatible with the new VASP version. + """ + _fw_name = "SCAN Functional Setup Task" + + def run_task(self, fw_spec): + functional = fw_spec.get("function", "PBE") + if functional == "SCAN": + incar_update = {"METAGGA": "Rtpss", + "LASPH": True} + actions = [{"dict": "INCAR", + "action": {"_set": incar_update}}] + from custodian.vasp.interpreter import VaspModder + VaspModder().apply_actions(actions) + + # guarantee VASP is the hacked version + fw_env = fw_spec.get("_fw_env", {}) + vasp_cmd = fw_env.get("vasp_cmd", "vasp") + if os.path.exists(vasp_cmd): + vasp_path = vasp_cmd + else: + vasp_path = shutil.which("vasp") + hacked_path = "/global/common/matgen/das/vasp/5.3.5-scan-beta/bin/vasp" + if vasp_path != hacked_path: + my_name = str(self.__class__).replace("", "") + raise ValueError("'{}' is designed to support the hack of VASP, " + "upon official release of VASP SCAN function, this class" + "should be modified".format(my_name)) + else: + pass diff --git a/mpworks/firetasks/vasp_io_tasks.py b/mpworks/firetasks/vasp_io_tasks.py index d7971e6d..476d109a 100644 --- a/mpworks/firetasks/vasp_io_tasks.py +++ b/mpworks/firetasks/vasp_io_tasks.py @@ -194,6 +194,8 @@ def run_task(self, fw_spec): mpsnl = d['snl_final'] if 'snl_final' in d else d['snl'] snlgroup_id = d['snlgroup_id_final'] if 'snlgroup_id_final' in d else d['snlgroup_id'] update_spec.update({'mpsnl': mpsnl, 'snlgroup_id': snlgroup_id}) + if 'functional' in fw_spec: + d['functional'] = fw_spec['spec'] print('ENTERED task id:', t_id) stored_data = {'task_id': t_id} @@ -234,6 +236,8 @@ def run_task(self, fw_spec): snl = StructureNL.from_dict(spec['mpsnl']) 
spec['run_tags'].append(unconverged_tag) spec['_queueadapter'] = QA_VASP + if 'functional' in fw_spec: + spec['functional'] = fw_spec['spec'] fws = [] connections = {} diff --git a/mpworks/workflows/snl_to_wf_nmr.py b/mpworks/workflows/snl_to_wf_nmr.py index c1a4659a..3aa456f3 100644 --- a/mpworks/workflows/snl_to_wf_nmr.py +++ b/mpworks/workflows/snl_to_wf_nmr.py @@ -8,7 +8,8 @@ from mpworks.dupefinders.dupefinder_vasp import DupeFinderDB from mpworks.firetasks.custodian_task import get_custodian_task -from mpworks.firetasks.nmr_tasks import snl_to_nmr_spec, NmrVaspToDBTask, DictVaspSetupTask, TripleJumpRelaxVaspToDBTask +from mpworks.firetasks.nmr_tasks import snl_to_nmr_spec, NmrVaspToDBTask, DictVaspSetupTask, \ + TripleJumpRelaxVaspToDBTask, ScanFunctionalSetupTask from mpworks.firetasks.snl_tasks import AddSNLTask from mpworks.firetasks.vasp_io_tasks import VaspWriterTask, VaspCopyTask, VaspToDBTask from mpworks.snl_utils.mpsnl import MPStructureNL, get_meta_from_structure @@ -34,7 +35,12 @@ def get_nmr_vasp_fw(fwid, copy_contcar, istep, nick_name, parameters, priority, spec['_priority'] = priority spec['_queueadapter'] = QA_VASP spec['_trackers'] = trackers - tasks = [DictVaspSetupTask(), get_custodian_task(spec)] + tasks = [DictVaspSetupTask()] + functional = parameters.get("functional", "PBE") + spec["functional"] = functional + if functional != "PBE": + tasks.append(ScanFunctionalSetupTask()) + tasks.append(get_custodian_task(spec)) vasp_fw = Firework(tasks, spec, name=get_slug(nick_name + '--' + spec['task_type']), fw_id=fwid) return vasp_fw @@ -63,6 +69,9 @@ def snl_to_wf_nmr(snl, parameters): f = Composition(snl.structure.composition.reduced_formula).alphabetical_formula nick_name = parameters.get("nick_name", f) + functional = parameters.get("functional", "PBE") + if functional != "PBE": + nick_name += "_FUNC_" + functional if 'exact_structure' in parameters and parameters['exact_structure']: structure = snl.structure From 
4cf98134924f8b4d081d9a6901936446c5f533fd Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 31 Mar 2017 15:39:24 -0700 Subject: [PATCH 131/204] fix typo --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 505c9ac3..783bb12b 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -236,7 +236,7 @@ class ScanFunctionalSetupTask(FireTaskBase, FWSerializable): _fw_name = "SCAN Functional Setup Task" def run_task(self, fw_spec): - functional = fw_spec.get("function", "PBE") + functional = fw_spec.get("functional", "PBE") if functional == "SCAN": incar_update = {"METAGGA": "Rtpss", "LASPH": True} From f44d89bca46c5fd8218ac967cf59e7c024499c0d Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 31 Mar 2017 15:46:28 -0700 Subject: [PATCH 132/204] fix potcar setting in case of SCAN functional --- mpworks/firetasks/nmr_tasks.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 783bb12b..6121695d 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -50,8 +50,12 @@ def _get_nuclear_quadrupole_moment(element, nqm_dict, parameters): def _config_dict_to_input_set(config_dict, structure, incar_enforce, parameters): + functional = parameters.get("functional", "PBE") + pot_map = {"PBE": "PBE", "SCAN": "PBE_52"} + potcar_functional = pot_map[functional] trial_set = DictSet(structure, config_dict=config_dict, - user_incar_settings=incar_enforce) + user_incar_settings=incar_enforce, + potcar_functional=potcar_functional) trial_potcar = trial_set.potcar all_enmax = [sp.enmax for sp in trial_potcar] all_eaug = [sp.eaug for sp in trial_potcar] @@ -73,7 +77,8 @@ def _config_dict_to_input_set(config_dict, structure, incar_enforce, parameters) quad_efg = [_get_nuclear_quadrupole_moment(el, nqm_map, parameters) for el in 
all_elements] processed_config_dict["INCAR"]["QUAD_EFG"] = quad_efg vis = DictSet(structure, config_dict=processed_config_dict, - user_incar_settings=incar_enforce) + user_incar_settings=incar_enforce, + potcar_functional=potcar_functional) return vis From d998871c5605b464c0beec6fe0f41e16c57ecf12 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 31 Mar 2017 16:30:53 -0700 Subject: [PATCH 133/204] use valence dependent pseudopotential --- mpworks/firetasks/nmr_tasks.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 6121695d..88962922 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -8,6 +8,7 @@ from fireworks.core.firework import FWAction from fireworks.utilities.fw_serializers import FWSerializable from monty.os.path import zpath +from pymatgen.analysis.bond_valence import BVAnalyzer from pymatgen.io.vasp import Outcar from pymatgen.io.vasp.sets import DictSet @@ -95,6 +96,23 @@ def _change_garden_setting(): WFSettings().GARDEN = os.path.join(WFSettings().GARDEN, 'nmr') +def _assign_potcar_valence(structure, potcar_dict): + tri_val_elements = {"Ce", "Dy", "Er", "Eu", "Gd", "Ho", "Lu", "Nd", "Pm", "Pr", "Sm", "Tb_3", "Tm_3"} + di_val_elements = {"Er", "Eu", "Yb"} + st_elements = set([specie.symbol for specie in structure.species]) + bva = BVAnalyzer() + valences = bva.get_valences(structure) + for val, val_elements in [[3, tri_val_elements], + [2, di_val_elements]]: + for el in sorted(val_elements & st_elements): + if "_" in potcar_dict[el]: + continue + el_indices = structure.indices_from_symbol(el) + cur_el_valences = {valences[i] for i in el_indices} + if len(cur_el_valences) == 1 and val in cur_el_valences: + potcar_dict[el] = "{el}_{val:d}".format(el=el, val=val) + + def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_run_tags=()): parameters = copy.deepcopy(parameters) if parameters else {} spec = 
{'parameters': parameters} @@ -125,6 +143,7 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru incar_enforce = {'NPAR': 4} spec['run_tags'] = spec.get('run_tags', []) spec['run_tags'].extend(additional_run_tags) + _assign_potcar_valence(structure, config_dict["POTCAR"]) mpvis = _config_dict_to_input_set(config_dict, structure, incar_enforce, parameters=parameters) From 924c03f26cacd74ce7b87336c979d3219887c001 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sat, 1 Apr 2017 13:41:00 -0700 Subject: [PATCH 134/204] Put the larger ENMAX specie first, fix the "PSMAXN for non-local potential too small" warning --- mpworks/firetasks/nmr_tasks.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 88962922..834676a7 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -235,11 +235,20 @@ def run_task(self, fw_spec): class DictVaspSetupTask(FireTaskBase, FWSerializable): _fw_name = "Dict Vasp Input Setup Task" + @staticmethod + def _sort_structure_by_encut(structure, config_dict): + # put the larger ENMAX specie first + trial_vis = DictSet(structure, config_dict=config_dict) + trial_potcar = trial_vis.potcar + enmax_dict = {p.symbol.split("_")[0]: p.keywords["ENMAX"] for p in trial_potcar} + structure = structure.get_sorted_structure(key=lambda site: enmax_dict[site.specie.symbol], reverse=True) + return structure + def run_task(self, fw_spec): config_dict = fw_spec["input_set_config_dict"] incar_enforce = fw_spec["input_set_incar_enforce"] mpsnl = fw_spec["mpsnl"] - structure = mpsnl.structure + structure = self._sort_structure_by_encut(mpsnl.structure, config_dict) vis = DictSet(structure, config_dict=config_dict, user_incar_settings=incar_enforce) From c7b618aab4612af4cec58342365b365be3465d92 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sat, 1 Apr 2017 13:42:30 -0700 Subject: [PATCH 135/204] don't sort structure 
again by DictSet --- mpworks/firetasks/nmr_tasks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 834676a7..d4275ffd 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -250,7 +250,8 @@ def run_task(self, fw_spec): mpsnl = fw_spec["mpsnl"] structure = self._sort_structure_by_encut(mpsnl.structure, config_dict) vis = DictSet(structure, config_dict=config_dict, - user_incar_settings=incar_enforce) + user_incar_settings=incar_enforce, + sort_structure=False) vis.incar.write_file("INCAR") vis.poscar.write_file("POSCAR") From f040c223252444cf0df99d03c343d07a195416d7 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sat, 1 Apr 2017 13:48:12 -0700 Subject: [PATCH 136/204] update FIRE optimizer parameters --- mpworks/firetasks/triple_jump_relax_set.yaml | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 2e3e0e04..991b3097 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -78,7 +78,7 @@ STEP3: EDIFF: -1.0e-10 EDIFFG: -0.002 ENCUT_ENHANCE_RATIO: 0.4 - FTIMEMAX: 0.5 + FTIMEMAX: 0.1 IBRION: 3 IOPT: 7 ISIF: 3 @@ -88,13 +88,14 @@ STEP3: LCHARG: false LREAL: AUTO LWAVE: false - MAXMOVE: 0.05 + MAXMOVE: 0.02 NELMIN: 10 NSW: 100 POTIM: 0 PREC: ACCURATE SIGMA: 0.01 - TIMESTEP: 0.05 + TIMESTEP: 0.01 + FNMIN: 3 KPOINTS: length: 32 POTCAR: @@ -114,7 +115,7 @@ STEP3: STEP_DYNA3: INCAR: - FTIMEMAX: 0.1 - MAXMOVE: 0.02 - TIMESTEP: 0.01 + FTIMEMAX: 0.05 + MAXMOVE: 0.01 + TIMESTEP: 0.005 FNMIN: 2 From 055a9e4d805a63749d2df14a431aa470fecf69fe Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 3 Apr 2017 10:48:21 -0700 Subject: [PATCH 137/204] use the same settings for EFG and Chemical Shift --- mpworks/firetasks/nmr_tensor_set.yaml | 19 ++++++++++--------- 1 file changed, 10 
insertions(+), 9 deletions(-) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index 0ff6a70d..86a7b196 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -35,8 +35,9 @@ CS: EFG: INCAR: - EDIFF: -1.0e-06 - ENCUT_ENHANCE_RATIO: 0.2 + EDIFF: -1.0e-10 + ENCUT_ENHANCE_RATIO: 0.4 + ICHIBARE: 1 ISMEAR: -5 ISTART: 0 ISYM: 0 @@ -128,19 +129,19 @@ EFG: Hg-201: 387.6 Ra: Ra-223: 1210.3 - SIGMA: 0.05 + SIGMA: 0.01 KPOINTS: - length: 24 + length: 32 POTCAR: - C: C - H: H + C: C_h + H: H_h Mg: Mg_sv - O: O + O: O_h Na: Na_sv Al: Al Si: Si - P: P - Cl: Cl + P: P_h + Cl: Cl_h K: K_pv Ca: Ca_pv Y: Y_sv From 8f8d079bcee77b44127c18ede6003949e5efa338 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 3 Apr 2017 10:59:59 -0700 Subject: [PATCH 138/204] fix typo --- mpworks/firetasks/custodian_task.py | 2 +- mpworks/firetasks/vasp_io_tasks.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 7a994f47..d7d75e5a 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -170,7 +170,7 @@ def run_task(self, fw_spec): 'run_tags': fw_spec['run_tags'], 'parameters': fw_spec.get('parameters')} if 'functional' in fw_spec: - update_spec['functional'] = fw_spec['spec'] + update_spec['functional'] = fw_spec['functional'] return FWAction(stored_data=stored_data, update_spec=update_spec) diff --git a/mpworks/firetasks/vasp_io_tasks.py b/mpworks/firetasks/vasp_io_tasks.py index 476d109a..14adb2d4 100644 --- a/mpworks/firetasks/vasp_io_tasks.py +++ b/mpworks/firetasks/vasp_io_tasks.py @@ -195,7 +195,7 @@ def run_task(self, fw_spec): snlgroup_id = d['snlgroup_id_final'] if 'snlgroup_id_final' in d else d['snlgroup_id'] update_spec.update({'mpsnl': mpsnl, 'snlgroup_id': snlgroup_id}) if 'functional' in fw_spec: - d['functional'] = fw_spec['spec'] + d['functional'] = 
fw_spec['functional'] print('ENTERED task id:', t_id) stored_data = {'task_id': t_id} @@ -237,7 +237,7 @@ def run_task(self, fw_spec): spec['run_tags'].append(unconverged_tag) spec['_queueadapter'] = QA_VASP if 'functional' in fw_spec: - spec['functional'] = fw_spec['spec'] + spec['functional'] = fw_spec['functional'] fws = [] connections = {} From fa1373cff1a740125428c85efc3034525796027c Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 3 Apr 2017 20:19:45 -0700 Subject: [PATCH 139/204] remove unnecessary keywords for EFG --- mpworks/firetasks/nmr_tensor_set.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index 86a7b196..2f293a49 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -37,7 +37,6 @@ EFG: INCAR: EDIFF: -1.0e-10 ENCUT_ENHANCE_RATIO: 0.4 - ICHIBARE: 1 ISMEAR: -5 ISTART: 0 ISYM: 0 From 17f3d12c24c70edb3f0a62c10187427e91a9d48c Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 4 Apr 2017 11:29:19 -0700 Subject: [PATCH 140/204] also consider ENMIN --- mpworks/firetasks/nmr_tasks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index d4275ffd..2b238eac 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -240,7 +240,8 @@ def _sort_structure_by_encut(structure, config_dict): # put the larger ENMAX specie first trial_vis = DictSet(structure, config_dict=config_dict) trial_potcar = trial_vis.potcar - enmax_dict = {p.symbol.split("_")[0]: p.keywords["ENMAX"] for p in trial_potcar} + enmax_dict = {p.symbol.split("_")[0]: p.keywords["ENMAX"] + p.keywords["ENMIN"] * 1.0E-3 + for p in trial_potcar} structure = structure.get_sorted_structure(key=lambda site: enmax_dict[site.specie.symbol], reverse=True) return structure From 6a619ac8f3685f684440a808092eaa1b98c9d1c8 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 5 
Apr 2017 11:08:21 -0700 Subject: [PATCH 141/204] further tighten NMR structure matcher threshold --- mpworks/snl_utils/mpsnl.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mpworks/snl_utils/mpsnl.py b/mpworks/snl_utils/mpsnl.py index a8e00437..efd831b2 100644 --- a/mpworks/snl_utils/mpsnl.py +++ b/mpworks/snl_utils/mpsnl.py @@ -188,9 +188,9 @@ def add_if_belongs(self, cand_snl): stol = 0.3 angle_tol = 5.0 else: - ltol = 0.0002 - stol = 0.0003 - angle_tol = 0.005 + ltol = 0.00002 + stol = 0.00003 + angle_tol = 0.0005 sm = StructureMatcher(ltol=ltol, stol=stol, angle_tol=angle_tol, primitive_cell=True, scale=True, attempt_supercell=False, comparator=ElementComparator()) From e7ac40117db53a412d001225df549ec248680083 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 10 Apr 2017 16:56:44 -0700 Subject: [PATCH 142/204] update NMR calculation parameters --- mpworks/firetasks/nmr_tasks.py | 2 +- mpworks/firetasks/nmr_tensor_set.yaml | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 2b238eac..c08ce24e 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -97,7 +97,7 @@ def _change_garden_setting(): def _assign_potcar_valence(structure, potcar_dict): - tri_val_elements = {"Ce", "Dy", "Er", "Eu", "Gd", "Ho", "Lu", "Nd", "Pm", "Pr", "Sm", "Tb_3", "Tm_3"} + tri_val_elements = {"Ce", "Dy", "Er", "Eu", "Gd", "Ho", "Lu", "Nd", "Pm", "Pr", "Sm", "Tb", "Tm"} di_val_elements = {"Er", "Eu", "Yb"} st_elements = set([specie.symbol for specie in structure.species]) bva = BVAnalyzer() diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index 86a7b196..c0744a44 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -1,5 +1,7 @@ CS: INCAR: + ADDGRID: true + ALGO: FAST DQ: 0.001 EDIFF: -1.0e-10 ENCUT_ENHANCE_RATIO: 0.4 @@ -12,7 +14,7 @@ CS: LNMR_SYM_RED: 
true LREAL: AUTO LWAVE: false - NELMIN: 5 + NELMIN: 10 NSLPLINE: true PREC: ACCURATE SIGMA: 0.01 @@ -35,6 +37,8 @@ CS: EFG: INCAR: + ADDGRID: true + ALGO: FAST EDIFF: -1.0e-10 ENCUT_ENHANCE_RATIO: 0.4 ICHIBARE: 1 @@ -45,7 +49,7 @@ EFG: LEFG: true LREAL: AUTO LWAVE: false - NELMIN: 5 + NELMIN: 10 PREC: ACCURATE QUAD_EFG_MAP: H: From 2c9056e8b51439aa3da0fcaf30d819e9f92ae8a8 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 11 Apr 2017 09:57:08 -0700 Subject: [PATCH 143/204] revert NMR parameter changes --- mpworks/firetasks/nmr_tensor_set.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index c0744a44..bd4cf9ee 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -1,7 +1,5 @@ CS: INCAR: - ADDGRID: true - ALGO: FAST DQ: 0.001 EDIFF: -1.0e-10 ENCUT_ENHANCE_RATIO: 0.4 From 93773f338663c2eb7d6dd93ff0011af048784a86 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 11 Apr 2017 09:57:52 -0700 Subject: [PATCH 144/204] also don't use ADDGRID for EFG --- mpworks/firetasks/nmr_tensor_set.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index bd4cf9ee..ca827cc6 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -35,7 +35,6 @@ CS: EFG: INCAR: - ADDGRID: true ALGO: FAST EDIFF: -1.0e-10 ENCUT_ENHANCE_RATIO: 0.4 From bae353fe000b29a14276108e3552bdec828f9652 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 11 Apr 2017 15:29:00 -0700 Subject: [PATCH 145/204] reduce max ENCUT to 30% more than ENMAX --- mpworks/firetasks/nmr_tensor_set.yaml | 4 ++-- mpworks/firetasks/triple_jump_relax_set.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index ca827cc6..52233d05 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ 
b/mpworks/firetasks/nmr_tensor_set.yaml @@ -2,7 +2,7 @@ CS: INCAR: DQ: 0.001 EDIFF: -1.0e-10 - ENCUT_ENHANCE_RATIO: 0.4 + ENCUT_ENHANCE_RATIO: 0.3 ICHIBARE: 1 ISMEAR: -5 ISTART: 0 @@ -37,7 +37,7 @@ EFG: INCAR: ALGO: FAST EDIFF: -1.0e-10 - ENCUT_ENHANCE_RATIO: 0.4 + ENCUT_ENHANCE_RATIO: 0.3 ICHIBARE: 1 ISMEAR: -5 ISTART: 0 diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 991b3097..781adb08 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -77,7 +77,7 @@ STEP3: ALGO: FAST EDIFF: -1.0e-10 EDIFFG: -0.002 - ENCUT_ENHANCE_RATIO: 0.4 + ENCUT_ENHANCE_RATIO: 0.3 FTIMEMAX: 0.1 IBRION: 3 IOPT: 7 From a8413965c8b487301b4e88cc9b0a695c1055378b Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 11 Apr 2017 15:32:01 -0700 Subject: [PATCH 146/204] reduce ENCUT to 5% more than ENMAX in the first step of triple jump --- mpworks/firetasks/triple_jump_relax_set.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 781adb08..6e1e7ef7 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -3,7 +3,7 @@ STEP1: ALGO: FAST EDIFF: -1.0e-06 EDIFFG: -0.1 - ENCUT_ENHANCE_RATIO: 0.1 + ENCUT_ENHANCE_RATIO: 0.05 IBRION: 1 ISIF: 3 ISMEAR: -5 From ee4ff43a3051222da5caab8546beca403debab9a Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 11 Apr 2017 16:24:58 -0700 Subject: [PATCH 147/204] update POTCAR choice --- mpworks/firetasks/nmr_tensor_set.yaml | 8 ++++---- mpworks/firetasks/triple_jump_relax_set.yaml | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index 52233d05..aeda4942 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -25,10 +25,10 @@ CS: O: O_h 
Na: Na_sv Al: Al - Si: Si + Si: Si_h P: P_h Cl: Cl_h - K: K_pv + K: K_sv Ca: Ca_pv Y: Y_sv Gd: Gd @@ -140,10 +140,10 @@ EFG: O: O_h Na: Na_sv Al: Al - Si: Si + Si: Si_h P: P_h Cl: Cl_h - K: K_pv + K: K_sv Ca: Ca_pv Y: Y_sv Gd: Gd diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 6e1e7ef7..e50c02a0 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -63,10 +63,10 @@ STEP2: O: O_h Na: Na_sv Al: Al - Si: Si + Si: Si_h P: P_h Cl: Cl_h - K: K_pv + K: K_sv Ca: Ca_pv Y: Y_sv Gd: Gd @@ -105,10 +105,10 @@ STEP3: O: O_h Na: Na_sv Al: Al - Si: Si + Si: Si_h P: P_h Cl: Cl_h - K: K_pv + K: K_sv Ca: Ca_pv Y: Y_sv Gd: Gd From 626c84e330046cc13fea90d2e6134ac146e4d612 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 11 Apr 2017 16:26:32 -0700 Subject: [PATCH 148/204] update POTCAR choice for Ca --- mpworks/firetasks/nmr_tensor_set.yaml | 4 ++-- mpworks/firetasks/triple_jump_relax_set.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index aeda4942..5b709b59 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -29,7 +29,7 @@ CS: P: P_h Cl: Cl_h K: K_sv - Ca: Ca_pv + Ca: Ca_sv Y: Y_sv Gd: Gd @@ -144,6 +144,6 @@ EFG: P: P_h Cl: Cl_h K: K_sv - Ca: Ca_pv + Ca: Ca_sv Y: Y_sv Gd: Gd diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index e50c02a0..8aa7ade0 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -67,7 +67,7 @@ STEP2: P: P_h Cl: Cl_h K: K_sv - Ca: Ca_pv + Ca: Ca_sv Y: Y_sv Gd: Gd @@ -109,7 +109,7 @@ STEP3: P: P_h Cl: Cl_h K: K_sv - Ca: Ca_pv + Ca: Ca_sv Y: Y_sv Gd: Gd From 5ba2efd8e1cdcbf070a5f48e4eda82d29f2be132 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 12 Apr 2017 10:44:57 -0700 
Subject: [PATCH 149/204] remove uneccessary keywords in EFG INCAR --- mpworks/firetasks/nmr_tensor_set.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index 5b709b59..acacf62f 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -38,7 +38,6 @@ EFG: ALGO: FAST EDIFF: -1.0e-10 ENCUT_ENHANCE_RATIO: 0.3 - ICHIBARE: 1 ISMEAR: -5 ISTART: 0 ISYM: 0 From a7eefb6e9d429af4a0f8be4ac0974e0b00cac159 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 16 Apr 2017 15:19:43 -0700 Subject: [PATCH 150/204] also delete custodian.json --- mpworks/firetasks/custodian_task.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index d7d75e5a..0c118cc8 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -199,17 +199,17 @@ def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, for f in glob("{}.*.tar.gz".format(binary_file_prefix))]) binary_filename = "{}.{}.tar.gz".format(binary_file_prefix, binary_num + 1) - for fname in ["OSZICAR", "vasp.out", "std_err.txt"]: - # remove old file before starting new calculations to - # avoid confuse custodian - if os.path.exists(fname): - os.remove(fname) - with tarfile.open(binary_filename, "w:gz") as tar: for fname in ["custodian.json", error_filename, "CONTCAR"]: for f in glob(fname): tar.add(f) + for fname in ["OSZICAR", "vasp.out", "std_err.txt", "custodian.json"]: + # remove old file before starting new calculations to + # avoid confuse custodian + if os.path.exists(fname): + os.remove(fname) + if input_rewind: # rewind the input to every beginning if os.path.exists("error.1.tar.gz") and os.path.isfile("error.1.tar.gz"): From fe881cf33485a44a1d286a556e2143b3fa78abf7 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 16 Apr 2017 15:46:56 -0700 Subject: [PATCH 
151/204] use 50 atoms as threshold to define as large cell in NMR workflow --- mpworks/firetasks/custodian_task.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 0c118cc8..ef4a62ec 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -303,7 +303,12 @@ def _write_formula_file(fw_spec): def get_custodian_task(spec): task_type = spec['task_type'] v_exe = 'VASP_EXE' # will be transformed to vasp executable on the node - handlers = [VaspErrorHandler(), FrozenJobErrorHandler(), + if {'NMR EFG', 'NMR CS', 'Triple Jump Relax S1', + 'Triple Jump Relax S2', 'Triple Jump Relax S3'} & {task_type}: + handlers = [VaspErrorHandler(natoms_large_cell=50)] + else: + handlers = [VaspErrorHandler()] + handlers += [FrozenJobErrorHandler(), MeshSymmetryErrorHandler(), NonConvergingErrorHandler(), PositiveEnergyErrorHandler()] if 'optimize structure (2x)' in task_type: From 6931948c22f0431e9497fb209873983049195049 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 23 Apr 2017 15:28:11 -0700 Subject: [PATCH 152/204] use fresh error handlers to reset error_count for every custodian run --- mpworks/firetasks/custodian_task.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index ef4a62ec..3657e820 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -73,8 +73,6 @@ class VaspCustodianTask(FireTaskBase, FWSerializable): def __init__(self, parameters): self.update(parameters) self.jobs = self['jobs'] - dec = MontyDecoder() - self.handlers = list(map(dec.process_decoded, self['handlers'])) self.max_errors = self.get('max_errors', 1) self.gzip_output = self.get('gzip_output', True) @@ -244,7 +242,9 @@ def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, return error_list def _run_custodian(self, 
terminate_func): - c = Custodian(self.handlers, self.jobs, max_errors=self.max_errors, gzipped_output=False, + dec = MontyDecoder() + handlers = list(map(dec.process_decoded, self['handlers'])) + c = Custodian(handlers, self.jobs, max_errors=self.max_errors, gzipped_output=False, validators=[VasprunXMLValidator()], terminate_func=terminate_func) # manual gzip custodian_out = c.run() From 34a56c79978da8aab1e861b96e6477dc5a8506ef Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sun, 23 Apr 2017 15:32:19 -0700 Subject: [PATCH 153/204] make a deep copy to make sure the new instance is independent --- mpworks/firetasks/custodian_task.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 3657e820..e157f995 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -16,6 +16,7 @@ from custodian.vasp.jobs import VaspJob import shlex import os +import copy from fireworks.utilities.fw_utilities import get_slug from mpworks.workflows.wf_utils import j_decorate, ScancelJobStepTerminator from pymatgen.io.vasp.inputs import Incar @@ -243,7 +244,8 @@ def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, def _run_custodian(self, terminate_func): dec = MontyDecoder() - handlers = list(map(dec.process_decoded, self['handlers'])) + h_dict = copy.deepcopy(self['handlers']) + handlers = list(map(dec.process_decoded, h_dict)) c = Custodian(handlers, self.jobs, max_errors=self.max_errors, gzipped_output=False, validators=[VasprunXMLValidator()], terminate_func=terminate_func) # manual gzip From 409c3178f8ee177cfa4a9a5a939f8ccf09bd8e34 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 24 Apr 2017 09:59:38 -0700 Subject: [PATCH 154/204] use StdErrHandler to deal with out of memory error --- mpworks/firetasks/custodian_task.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/custodian_task.py 
b/mpworks/firetasks/custodian_task.py index ef4a62ec..5eaa422b 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -8,7 +8,7 @@ from fireworks.fw_config import FWData from monty.os.path import which from custodian.vasp.handlers import VaspErrorHandler, NonConvergingErrorHandler, \ - FrozenJobErrorHandler, MeshSymmetryErrorHandler, PositiveEnergyErrorHandler + FrozenJobErrorHandler, MeshSymmetryErrorHandler, PositiveEnergyErrorHandler, StdErrHandler from custodian.vasp.validators import VasprunXMLValidator from fireworks.core.firework import FireTaskBase, FWAction from fireworks.utilities.fw_serializers import FWSerializable @@ -321,6 +321,9 @@ def get_custodian_task(spec): jobs = [VaspJob(v_exe)] handlers = [] + if task_type == 'NMR CS': + handlers += [StdErrHandler(output_filename="std_err.txt")] + params = {'jobs': [j_decorate(j.as_dict()) for j in jobs], 'handlers': [h.as_dict() for h in handlers], 'max_errors': 5} From f14b643f097bc7303f3da7c04eef8eb5a7e14c10 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 25 Apr 2017 14:23:49 -0700 Subject: [PATCH 155/204] large molecules use large NPAR since it will be run with more CPU cores --- mpworks/firetasks/nmr_tasks.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index c08ce24e..268137e0 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -140,7 +140,10 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru if config_name == "NMR CS": incar_enforce = {'KPAR': 4} else: - incar_enforce = {'NPAR': 4} + if len(structure) < 64: + incar_enforce = {'NPAR': 4} + else: + incar_enforce = {'NPAR': 8} spec['run_tags'] = spec.get('run_tags', []) spec['run_tags'].extend(additional_run_tags) _assign_potcar_valence(structure, config_dict["POTCAR"]) From 8677630b64b5f3054f15b718aeab1d569c2091b9 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: 
Thu, 27 Apr 2017 10:40:53 -0700 Subject: [PATCH 156/204] update NPAR/KPAR setting --- mpworks/firetasks/nmr_tasks.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 268137e0..5b82f540 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -137,13 +137,14 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru with open(config_file) as f: parent_config_dict = yaml.load(stream=f) config_dict = parent_config_dict[config_key] + if len(structure) < 64: + par_num = 4 + else: + par_num = 8 if config_name == "NMR CS": - incar_enforce = {'KPAR': 4} + incar_enforce = {'KPAR': par_num} else: - if len(structure) < 64: - incar_enforce = {'NPAR': 4} - else: - incar_enforce = {'NPAR': 8} + incar_enforce = {'NPAR': par_num} spec['run_tags'] = spec.get('run_tags', []) spec['run_tags'].extend(additional_run_tags) _assign_potcar_valence(structure, config_dict["POTCAR"]) From 98546f7d54bca1dd681d9d5f5ec5ff5e69493c71 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 28 Apr 2017 14:13:47 -0700 Subject: [PATCH 157/204] also backup OUTCAR and vasp.out to binary run file --- mpworks/firetasks/custodian_task.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 5ff01876..21060f6d 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -199,11 +199,13 @@ def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, binary_filename = "{}.{}.tar.gz".format(binary_file_prefix, binary_num + 1) with tarfile.open(binary_filename, "w:gz") as tar: - for fname in ["custodian.json", error_filename, "CONTCAR"]: + for fname in ["custodian.json", error_filename, "CONTCAR", "OUTCAR", + "vasp.out", "std_err.txt"]: for f in glob(fname): tar.add(f) - for fname in ["OSZICAR", "vasp.out", "std_err.txt", 
"custodian.json"]: + for fname in ["OSZICAR", "vasp.out", "std_err.txt", "custodian.json", + "OUTCAR"]: # remove old file before starting new calculations to # avoid confuse custodian if os.path.exists(fname): From 09aec0282365f73dfd967e27cf4d0ea5ce8860ea Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Sat, 29 Apr 2017 14:00:52 -0700 Subject: [PATCH 158/204] also back up INCAR POSCAR and KPOINTS to binary.#.tar.gz --- mpworks/firetasks/custodian_task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 21060f6d..3828311e 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -200,7 +200,7 @@ def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, with tarfile.open(binary_filename, "w:gz") as tar: for fname in ["custodian.json", error_filename, "CONTCAR", "OUTCAR", - "vasp.out", "std_err.txt"]: + "vasp.out", "std_err.txt", "INCAR", "POSCAR", "KPOINTS"]: for f in glob(fname): tar.add(f) From 03e3d64400dc72b7344a3a31278b0a95a734f901 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 3 May 2017 15:59:28 -0700 Subject: [PATCH 159/204] always use ISMEAR=0 for chemical shift calculation --- mpworks/firetasks/nmr_tensor_set.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index acacf62f..d4579342 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -4,7 +4,7 @@ CS: EDIFF: -1.0e-10 ENCUT_ENHANCE_RATIO: 0.3 ICHIBARE: 1 - ISMEAR: -5 + ISMEAR: 0 ISTART: 0 ISYM: 0 LCHARG: false From a2db1af2710266649b65593ed3fc67a46e18932e Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 3 May 2017 21:05:38 -0700 Subject: [PATCH 160/204] add stub for chemical shift k-point average dynamic workflow --- mpworks/firetasks/custodian_task.py | 40 +++++++++++++++++++++-------- mpworks/firetasks/nmr_tasks.py 
| 2 ++ 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 3828311e..fec3b92c 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -142,14 +142,22 @@ def run_task(self, fw_spec): if cus_ex is not None: if "alt_cmds" in fw_env and fw_spec['task_type'] in fw_env["alt_cmds"]: cus_ex = None - logging.info("Initiate VASP calculations using alternate binaries") - all_errors = self._run_alt_vasp_cmd(terminate_func, v_exe, gv_exe, - fw_env.get("vasp_cmd", "vasp"), - fw_env.get("gvasp_cmd", "gvasp"), - fw_env["alt_cmds"][fw_spec['task_type']], - fw_env.get("input_rewind", True), - fw_spec['mpsnl'].structure) - error_list.extend(all_errors) + try: + logging.info("Initiate VASP calculations using alternate binaries") + all_errors = self._run_alt_vasp_cmd(terminate_func, v_exe, gv_exe, + fw_env.get("vasp_cmd", "vasp"), + fw_env.get("gvasp_cmd", "gvasp"), + fw_env["alt_cmds"][fw_spec['task_type']], + fw_env.get("input_rewind", True), + fw_spec['mpsnl'].structure) + error_list.extend(all_errors) + except Exception as ex: + cus_ex = ex + dynamic_wfs = None + if cus_ex is not None: + if self._is_kpts_parallel_chemical_shift_eligible(fw_spec): + from mpworks.firetasks.nmr_tasks import chemical_shift_spec_to_dynamic_kpt_average_wfs + dynamic_wfs = chemical_shift_spec_to_dynamic_kpt_average_wfs(fw_spec) else: raise cus_ex @@ -170,8 +178,20 @@ def run_task(self, fw_spec): 'parameters': fw_spec.get('parameters')} if 'functional' in fw_spec: update_spec['functional'] = fw_spec['functional'] + if dynamic_wfs is None: + return FWAction(stored_data=stored_data, update_spec=update_spec) + else: + return FWAction(stored_data=stored_data, update_spec=update_spec, + detours=dynamic_wfs) + + def _is_kpts_parallel_chemical_shift_eligible(self, fw_spec): + if fw_spec['task_type'] == "NMR CS": + eh = StdErrHandler(output_filename="std_err.txt") + errors = 
eh.check()["errors"] + if set(errors) & {'out_of_memory', 'seg_fault'}: + return True + return False - return FWAction(stored_data=stored_data, update_spec=update_spec) def _run_alt_vasp_cmd(self, terminate_func, v_exe, gv_exe, vasp_cmd, gvasp_cmd, alt_cmds, input_rewind, structure): @@ -313,7 +333,7 @@ def get_custodian_task(spec): else: handlers = [VaspErrorHandler()] handlers += [FrozenJobErrorHandler(), - MeshSymmetryErrorHandler(), NonConvergingErrorHandler(), PositiveEnergyErrorHandler()] + MeshSymmetryErrorHandler(), NonConvergingErrorHandler(), PositiveEnergyErrorHandler()] if 'optimize structure (2x)' in task_type: jobs = VaspJob.double_relaxation_run(v_exe) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 5b82f540..1fb6e13a 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -179,6 +179,8 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru return spec +def chemical_shift_spec_to_dynamic_kpt_average_wfs(fw_spec): + pass class NmrVaspToDBTask(VaspToDBTask): _fw_name = "NMR Tensor to Database Task" From ca4da53b7a18d540cb66defae7a4d54fab1439b4 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 3 May 2017 22:44:12 -0700 Subject: [PATCH 161/204] Manual chemical shift k-points average SCF part --- mpworks/firetasks/nmr_tasks.py | 55 ++++++++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 2 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 1fb6e13a..46d93e4a 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -5,8 +5,9 @@ import shutil import yaml from fireworks import FireTaskBase -from fireworks.core.firework import FWAction +from fireworks.core.firework import FWAction, Firework from fireworks.utilities.fw_serializers import FWSerializable +from fireworks.utilities.fw_utilities import get_slug from monty.os.path import zpath from pymatgen.analysis.bond_valence import 
BVAnalyzer from pymatgen.io.vasp import Outcar @@ -180,7 +181,57 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru return spec def chemical_shift_spec_to_dynamic_kpt_average_wfs(fw_spec): - pass + no_jobs_spec = copy.deepcopy(fw_spec) + no_jobs_spec.pop('jobs', None) + no_jobs_spec.pop('handlers', None) + no_jobs_spec.pop('max_errors', None) + no_jobs_spec.pop('_tasks', None) + no_jobs_spec.pop('custodian_default_input_set', None) + no_jobs_spec.pop('prev_task_type', None) + no_jobs_spec.pop('task_type', None) + no_jobs_spec.pop('vaspinputset_name', None) + nick_name = no_jobs_spec['parameters']['nick_name'] + priority = no_jobs_spec['_priority'] + + + cur_fwid = -1 + fws = [] + + # Pre Single Kpt CS SCF Task + scf_spec = copy.deepcopy(no_jobs_spec) + for k in ["DQ", "ICHIBARE", "LCHIMAG", "LNMR_SYM_RED", "NSLPLINE"]: + scf_spec['input_set_config_dict']['INCAR'].pop(k, None) + scf_spec['input_set_config_dict']['INCAR']['ISMEAR'] = 0 + scf_spec['input_set_config_dict']['INCAR']['LCHARG'] = True + scf_spec['input_set_incar_enforce'] = {"NPAR": fw_spec['input_set_incar_enforce']["KPAR"]} + scf_spec['task_type'] = 'Pre Kpt CS SCF' + scf_spec['vaspinputset_name'] = scf_spec['task_type'] + " DictSet" + tasks = [DictVaspSetupTask()] + functional = scf_spec["functional"] + if functional != "PBE": + tasks.append(ScanFunctionalSetupTask()) + from mpworks.firetasks.custodian_task import get_custodian_task + tasks.append(get_custodian_task(no_jobs_spec)) + scf_vasp_fwid = cur_fwid # Links + cur_fwid -= 1 + vasp_fw = Firework(tasks, scf_spec, name=get_slug(nick_name + '--' + scf_spec['task_type']), + fw_id=scf_vasp_fwid) + fws.append(vasp_fw) + + scf_db_fwid = cur_fwid # Links + cur_fwid -= 1 + scf_db_type_class = VaspToDBTask + from mpworks.workflows.snl_to_wf_nmr import get_nmr_db_fw + scf_db_fw = get_nmr_db_fw(nick_name=nick_name, fwid=scf_db_fwid, prev_task_type=scf_spec['task_type'], + priority=priority, task_class=scf_db_type_class) + 
fws.append(scf_db_fw) + + # Single Kpt CS + kpt_cs_base_spec = copy.deepcopy(no_jobs_spec) + kpt_cs_base_spec['input_set_config_dict']['INCAR']['ISMEAR'] = 0 + kpt_cs_base_spec['task_type'] = 'Single Kpt CS' + kpt_cs_base_spec['vaspinputset_name'] = kpt_cs_base_spec['task_type'] + " DictSet" + class NmrVaspToDBTask(VaspToDBTask): _fw_name = "NMR Tensor to Database Task" From c9c175181b9a746f47864cc67340c2a5b1d0c1ae Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 10:47:29 -0700 Subject: [PATCH 162/204] The chemical shift from ISMEAR=0 and ISMEAR=-5 are consistent, no need to always use ISMEAR=0 --- mpworks/firetasks/nmr_tensor_set.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index d4579342..acacf62f 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -4,7 +4,7 @@ CS: EDIFF: -1.0e-10 ENCUT_ENHANCE_RATIO: 0.3 ICHIBARE: 1 - ISMEAR: 0 + ISMEAR: -5 ISTART: 0 ISYM: 0 LCHARG: false From a9221f8eae80f8455840348d548a5376f5d43500 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 11:53:13 -0700 Subject: [PATCH 163/204] add FireTask classes for manual K-points averaging --- mpworks/firetasks/nmr_tasks.py | 67 +++++++++++++++++++++++++++++++++- 1 file changed, 66 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 46d93e4a..a13b8dc4 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -1,4 +1,5 @@ import copy +import hashlib import json import os @@ -10,7 +11,7 @@ from fireworks.utilities.fw_utilities import get_slug from monty.os.path import zpath from pymatgen.analysis.bond_valence import BVAnalyzer -from pymatgen.io.vasp import Outcar +from pymatgen.io.vasp import Outcar, zopen from pymatgen.io.vasp.sets import DictSet from mpworks.dupefinders.dupefinder_vasp import DupeFinderVasp @@ -352,3 +353,67 @@ def 
run_task(self, fw_spec): "should be modified".format(my_name)) else: pass + + +class ChemicalShiftKptsAverageGenerationTask(FireTaskBase, FWSerializable): + """ + This class is to spawn the dynamical fws to calculate NMR chemical shfit on each + individual and then do K-points weighted average manually. + """ + _fw_name = "Chemical Shift K-Points Average Generation Task" + + def run_task(self, fw_spec): + pass + +class ChemicalShiftKptsAverageCollectTask(FireTaskBase, FWSerializable): + """ + This class do K-points weighted chemical shift average from the previous K-points + specific calculations. + """ + _fw_name = "Chemical Shift K-Points Average Collect Task" + + def run_task(self, fw_spec): + pass + + +class TagFileChecksumTask(FireTaskBase, FWSerializable): + + _fw_name = "Chemical Shift K-Points Average Generation Task" + + def __init__(self, files=None): + if files is None: + files = ["POSCAR", "CHGCAR", "POTCAR"] + self.files = files + + def run_task(self, fw_spec): + file_checksums = dict() + blocksize = 10 * 2 ** 20 # 10 MB + for fn in self.files: + with zopen(zpath('FW.json'), 'rb') as f: + hash = hashlib.sha224 + for block in iter(lambda: f.read(blocksize), b""): + hash.update(block) + checksum = hash.hexdigest() + file_checksums[fn] = {"type": "sha224", + "value": checksum} + stored_data = {"file_chechsum": file_checksums} + return FWAction(stored_data=stored_data) + + +class DeleteFileTask(FireTaskBase, FWSerializable): + + _fw_name = "Delete File Task" + + def __init__(self, files=None): + if files is None: + files = ["CHGCAR", "WAVCAR"] + self.files = files + + def run_task(self, fw_spec): + for fn in self.files: + gzfn = fn + ".gz" + if os.path.exists(fn): + os.remove(fn) + if os.path.exists(gzfn): + os.remove(gzfn) + From 8ef207c0d26ac00913785d9e6269d19ea2d69102 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 12:11:59 -0700 Subject: [PATCH 164/204] also print the checksum as file name --- mpworks/firetasks/nmr_tasks.py | 2 ++ 1 
file changed, 2 insertions(+) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index a13b8dc4..4cc43130 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -396,6 +396,8 @@ def run_task(self, fw_spec): checksum = hash.hexdigest() file_checksums[fn] = {"type": "sha224", "value": checksum} + with open("checksum.{}.{}".format(fn, checksum[:10]), "w") as f: + f.write("sha224") stored_data = {"file_chechsum": file_checksums} return FWAction(stored_data=stored_data) From 77bf9b7140c6c97ef9e2c71ae6790f13f6b1e759 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 12:30:06 -0700 Subject: [PATCH 165/204] fix file name --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 4cc43130..18ec0c4a 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -389,7 +389,7 @@ def run_task(self, fw_spec): file_checksums = dict() blocksize = 10 * 2 ** 20 # 10 MB for fn in self.files: - with zopen(zpath('FW.json'), 'rb') as f: + with zopen(zpath(fn), 'rb') as f: hash = hashlib.sha224 for block in iter(lambda: f.read(blocksize), b""): hash.update(block) From 76ea1f196ce9803fb3b95a184ca96900a5ad57cb Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 12:31:54 -0700 Subject: [PATCH 166/204] update function call --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 18ec0c4a..a8ba0f5c 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -390,7 +390,7 @@ def run_task(self, fw_spec): blocksize = 10 * 2 ** 20 # 10 MB for fn in self.files: with zopen(zpath(fn), 'rb') as f: - hash = hashlib.sha224 + hash = hashlib.sha224() for block in iter(lambda: f.read(blocksize), b""): hash.update(block) checksum = hash.hexdigest() From 
c105b98fd705bf1d5f4d9bf98cf7917f24235810 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 13:01:08 -0700 Subject: [PATCH 167/204] finish SCF dynamic workflow --- mpworks/firetasks/custodian_task.py | 8 +++---- mpworks/firetasks/nmr_tasks.py | 33 ++++++++++++++++++++--------- 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index fec3b92c..82471719 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -153,11 +153,11 @@ def run_task(self, fw_spec): error_list.extend(all_errors) except Exception as ex: cus_ex = ex - dynamic_wfs = None + dynamic_wf = None if cus_ex is not None: if self._is_kpts_parallel_chemical_shift_eligible(fw_spec): from mpworks.firetasks.nmr_tasks import chemical_shift_spec_to_dynamic_kpt_average_wfs - dynamic_wfs = chemical_shift_spec_to_dynamic_kpt_average_wfs(fw_spec) + dynamic_wf = chemical_shift_spec_to_dynamic_kpt_average_wfs(fw_spec) else: raise cus_ex @@ -178,11 +178,11 @@ def run_task(self, fw_spec): 'parameters': fw_spec.get('parameters')} if 'functional' in fw_spec: update_spec['functional'] = fw_spec['functional'] - if dynamic_wfs is None: + if dynamic_wf is None: return FWAction(stored_data=stored_data, update_spec=update_spec) else: return FWAction(stored_data=stored_data, update_spec=update_spec, - detours=dynamic_wfs) + detours=dynamic_wf) def _is_kpts_parallel_chemical_shift_eligible(self, fw_spec): if fw_spec['task_type'] == "NMR CS": diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index a8ba0f5c..4f862607 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -6,7 +6,7 @@ import shutil import yaml from fireworks import FireTaskBase -from fireworks.core.firework import FWAction, Firework +from fireworks.core.firework import FWAction, Firework, Workflow from fireworks.utilities.fw_serializers import FWSerializable from 
fireworks.utilities.fw_utilities import get_slug from monty.os.path import zpath @@ -207,15 +207,16 @@ def chemical_shift_spec_to_dynamic_kpt_average_wfs(fw_spec): scf_spec['input_set_incar_enforce'] = {"NPAR": fw_spec['input_set_incar_enforce']["KPAR"]} scf_spec['task_type'] = 'Pre Kpt CS SCF' scf_spec['vaspinputset_name'] = scf_spec['task_type'] + " DictSet" - tasks = [DictVaspSetupTask()] + scf_tasks = [DictVaspSetupTask()] functional = scf_spec["functional"] if functional != "PBE": - tasks.append(ScanFunctionalSetupTask()) + scf_tasks.append(ScanFunctionalSetupTask()) from mpworks.firetasks.custodian_task import get_custodian_task - tasks.append(get_custodian_task(no_jobs_spec)) + scf_tasks.append(get_custodian_task(no_jobs_spec)) + scf_tasks.append(TagFileChecksumTask(["CHGCAR"])) scf_vasp_fwid = cur_fwid # Links cur_fwid -= 1 - vasp_fw = Firework(tasks, scf_spec, name=get_slug(nick_name + '--' + scf_spec['task_type']), + vasp_fw = Firework(scf_tasks, scf_spec, name=get_slug(nick_name + '--' + scf_spec['task_type']), fw_id=scf_vasp_fwid) fws.append(vasp_fw) @@ -227,11 +228,23 @@ def chemical_shift_spec_to_dynamic_kpt_average_wfs(fw_spec): priority=priority, task_class=scf_db_type_class) fws.append(scf_db_fw) - # Single Kpt CS - kpt_cs_base_spec = copy.deepcopy(no_jobs_spec) - kpt_cs_base_spec['input_set_config_dict']['INCAR']['ISMEAR'] = 0 - kpt_cs_base_spec['task_type'] = 'Single Kpt CS' - kpt_cs_base_spec['vaspinputset_name'] = kpt_cs_base_spec['task_type'] + " DictSet" + # Single Kpt CS Generation + gen_spec = copy.deepcopy(no_jobs_spec) + gen_spec['input_set_config_dict']['INCAR']['ISMEAR'] = 0 + gen_spec['input_set_config_dict']['INCAR']['ICHARG'] = 11 + gen_spec['task_type'] = 'Single Kpt CS Generation' + gen_spec['vaspinputset_name'] = gen_spec['task_type'] + " DictSet" + gen_tasks = [ChemicalShiftKptsAverageGenerationTask()] + gen_fwid = cur_fwid # Links + cur_fwid -= 1 + gen_fw = Firework(gen_tasks, gen_spec, + name=get_slug(nick_name + '--' + 
gen_spec['task_type']), + fw_id=gen_fwid) + fws.append(gen_fw) + connections = {scf_vasp_fwid: scf_db_fwid, + scf_db_fwid: gen_fwid} + wf = Workflow(fws, connections) + return wf class NmrVaspToDBTask(VaspToDBTask): From 834daa30ae0248387f3ba2319c18764db14aa8e3 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 13:14:04 -0700 Subject: [PATCH 168/204] fix spec typo --- mpworks/firetasks/nmr_tasks.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 4f862607..6c02dc31 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -194,7 +194,6 @@ def chemical_shift_spec_to_dynamic_kpt_average_wfs(fw_spec): nick_name = no_jobs_spec['parameters']['nick_name'] priority = no_jobs_spec['_priority'] - cur_fwid = -1 fws = [] @@ -212,15 +211,15 @@ def chemical_shift_spec_to_dynamic_kpt_average_wfs(fw_spec): if functional != "PBE": scf_tasks.append(ScanFunctionalSetupTask()) from mpworks.firetasks.custodian_task import get_custodian_task - scf_tasks.append(get_custodian_task(no_jobs_spec)) + scf_tasks.append(get_custodian_task(scf_spec)) scf_tasks.append(TagFileChecksumTask(["CHGCAR"])) - scf_vasp_fwid = cur_fwid # Links + scf_vasp_fwid = cur_fwid cur_fwid -= 1 vasp_fw = Firework(scf_tasks, scf_spec, name=get_slug(nick_name + '--' + scf_spec['task_type']), fw_id=scf_vasp_fwid) fws.append(vasp_fw) - scf_db_fwid = cur_fwid # Links + scf_db_fwid = cur_fwid cur_fwid -= 1 scf_db_type_class = VaspToDBTask from mpworks.workflows.snl_to_wf_nmr import get_nmr_db_fw @@ -235,7 +234,7 @@ def chemical_shift_spec_to_dynamic_kpt_average_wfs(fw_spec): gen_spec['task_type'] = 'Single Kpt CS Generation' gen_spec['vaspinputset_name'] = gen_spec['task_type'] + " DictSet" gen_tasks = [ChemicalShiftKptsAverageGenerationTask()] - gen_fwid = cur_fwid # Links + gen_fwid = cur_fwid cur_fwid -= 1 gen_fw = Firework(gen_tasks, gen_spec, name=get_slug(nick_name + '--' + 
gen_spec['task_type']), From 3bf33173a4f623eceb40a26cd3b2b7540e4d9cdc Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 13:22:44 -0700 Subject: [PATCH 169/204] fix fw_name --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 6c02dc31..f32c1031 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -390,7 +390,7 @@ def run_task(self, fw_spec): class TagFileChecksumTask(FireTaskBase, FWSerializable): - _fw_name = "Chemical Shift K-Points Average Generation Task" + _fw_name = "Tag File Checksum Task" def __init__(self, files=None): if files is None: From 3eae287228b723085a71aa7f272589f4d8752d6f Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 13:27:52 -0700 Subject: [PATCH 170/204] delete prev_vasp_dir from new job spec --- mpworks/firetasks/nmr_tasks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index f32c1031..a4a25f0d 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -189,6 +189,7 @@ def chemical_shift_spec_to_dynamic_kpt_average_wfs(fw_spec): no_jobs_spec.pop('_tasks', None) no_jobs_spec.pop('custodian_default_input_set', None) no_jobs_spec.pop('prev_task_type', None) + no_jobs_spec.pop('prev_vasp_dir', None) no_jobs_spec.pop('task_type', None) no_jobs_spec.pop('vaspinputset_name', None) nick_name = no_jobs_spec['parameters']['nick_name'] From 731ff03319b81f7f7b4e9421af2ad7ef1c9e47a4 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 13:34:43 -0700 Subject: [PATCH 171/204] set prev_task_type and prev_vasp_dir --- mpworks/firetasks/nmr_tasks.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index a4a25f0d..2f2e5f17 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -207,6 
+207,8 @@ def chemical_shift_spec_to_dynamic_kpt_average_wfs(fw_spec): scf_spec['input_set_incar_enforce'] = {"NPAR": fw_spec['input_set_incar_enforce']["KPAR"]} scf_spec['task_type'] = 'Pre Kpt CS SCF' scf_spec['vaspinputset_name'] = scf_spec['task_type'] + " DictSet" + scf_spec['prev_task_type'] = fw_spec['task_type'] + scf_spec['prev_vasp_dir'] = os.getcwd() scf_tasks = [DictVaspSetupTask()] functional = scf_spec["functional"] if functional != "PBE": From 637012b0ca2a26ea81c3caf9cb1dfca3bf12eec9 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 15:52:53 -0700 Subject: [PATCH 172/204] finished K points average generation --- mpworks/firetasks/custodian_task.py | 8 +- mpworks/firetasks/nmr_tasks.py | 157 ++++++++++++++++++++++++---- 2 files changed, 144 insertions(+), 21 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 82471719..94229d6e 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -176,8 +176,9 @@ def run_task(self, fw_spec): 'snlgroup_id': fw_spec['snlgroup_id'], 'run_tags': fw_spec['run_tags'], 'parameters': fw_spec.get('parameters')} - if 'functional' in fw_spec: - update_spec['functional'] = fw_spec['functional'] + for k in ['kpoint_tag', 'scf_vasp_dir', 'functional', 'total_kpts']: + update_spec[k] = fw_spec[k] + if dynamic_wf is None: return FWAction(stored_data=stored_data, update_spec=update_spec) else: @@ -338,7 +339,8 @@ def get_custodian_task(spec): if 'optimize structure (2x)' in task_type: jobs = VaspJob.double_relaxation_run(v_exe) elif {'static', 'deformed', 'NMR EFG', 'NMR CS', 'Triple Jump Relax S1', - 'Triple Jump Relax S2', 'Triple Jump Relax S3'} & {task_type}: + 'Triple Jump Relax S2', 'Triple Jump Relax S3', 'Pre Kpt CS SCF', + 'Single Kpt CS'} & {task_type}: jobs = [VaspJob(v_exe)] else: # non-SCF runs diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 2f2e5f17..0d46efeb 100644 --- 
a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -11,11 +11,11 @@ from fireworks.utilities.fw_utilities import get_slug from monty.os.path import zpath from pymatgen.analysis.bond_valence import BVAnalyzer -from pymatgen.io.vasp import Outcar, zopen +from pymatgen.io.vasp import Outcar, zopen, Kpoints from pymatgen.io.vasp.sets import DictSet from mpworks.dupefinders.dupefinder_vasp import DupeFinderVasp -from mpworks.firetasks.vasp_io_tasks import VaspToDBTask +from mpworks.firetasks.vasp_io_tasks import VaspToDBTask, VaspCopyTask from mpworks.firetasks.vasp_setup_tasks import SetupUnconvergedHandlerTask from mpworks.workflows.wf_settings import WFSettings from mpworks.workflows.wf_utils import get_loc @@ -215,7 +215,7 @@ def chemical_shift_spec_to_dynamic_kpt_average_wfs(fw_spec): scf_tasks.append(ScanFunctionalSetupTask()) from mpworks.firetasks.custodian_task import get_custodian_task scf_tasks.append(get_custodian_task(scf_spec)) - scf_tasks.append(TagFileChecksumTask(["CHGCAR"])) + scf_tasks.append(TagFileChecksumTask({"files": ["CHGCAR"]})) scf_vasp_fwid = cur_fwid cur_fwid -= 1 vasp_fw = Firework(scf_tasks, scf_spec, name=get_slug(nick_name + '--' + scf_spec['task_type']), @@ -261,10 +261,20 @@ def run_task(self, fw_spec): outcar = Outcar(zpath(os.path.join(prev_dir, "OUTCAR"))) prev_task_type = fw_spec['prev_task_type'] nmr_fields = dict() + update_spec = None if prev_task_type == "NMR CS": outcar.read_chemical_shifts() cs_fiels = {"chemical_shifts": [x.as_dict() for x in outcar.data["chemical_shifts"]["valence_only"]]} nmr_fields.update(cs_fiels) + elif prev_task_type == "Single Kpt CS": + update_spec = dict() + update_spec["total_kpts"] = fw_spec['total_kpts'] + update_spec['scf_vasp_dir'] = fw_spec['scf_vasp_dir'] + cs_kpt_name = fw_spec['kpoint_tag'] + update_spec[cs_kpt_name] = dict() + update_spec[cs_kpt_name]["kpt_vasp_dir"] = prev_dir + outcar.read_chemical_shifts() + update_spec[cs_kpt_name]["chemical_shifts"] = 
outcar.data["chemical_shifts"] elif prev_task_type == "NMR EFG": outcar.read_nmr_efg() efg_fields = {"efg": outcar.data["efg"]} @@ -272,7 +282,11 @@ def run_task(self, fw_spec): else: raise ValueError("Unsupported Task Type: \"{}\"".format(prev_task_type)) self.additional_fields.update(nmr_fields) - return super(NmrVaspToDBTask, self).run_task(fw_spec) + m_action = super(NmrVaspToDBTask, self).run_task(fw_spec) + if update_spec is not None: + update_spec.update(m_action.update_spec) + m_action.update_spec = update_spec + return m_action class TripleJumpRelaxVaspToDBTask(VaspToDBTask): @@ -308,6 +322,12 @@ def run_task(self, fw_spec): class DictVaspSetupTask(FireTaskBase, FWSerializable): _fw_name = "Dict Vasp Input Setup Task" + def __init__(self, parameters=None): + parameters = parameters if parameters else {} + default_files = ["INCAR", "POSCAR", "POTCAR", "KPOINTS"] + self.update(parameters) + self.files = parameters.get("files", default_files) + @staticmethod def _sort_structure_by_encut(structure, config_dict): # put the larger ENMAX specie first @@ -326,11 +346,17 @@ def run_task(self, fw_spec): vis = DictSet(structure, config_dict=config_dict, user_incar_settings=incar_enforce, sort_structure=False) - - vis.incar.write_file("INCAR") - vis.poscar.write_file("POSCAR") - vis.potcar.write_file("POTCAR") - vis.kpoints.write_file("KPOINTS") + if "INCAR" in self.files: + vis.incar.write_file("INCAR") + if "POSCAR" in self.files: + vis.poscar.write_file("POSCAR") + if "POTCAR" in self.files: + vis.potcar.write_file("POTCAR") + if "KOINTS" in self.files: + if "kpoints_enforce" not in fw_spec: + vis.kpoints.write_file("KPOINTS") + else: + fw_spec["kpoints_enforce"].write_file("KPOINTS") return FWAction(stored_data={"vasp_input_set": vis.as_dict()}) @@ -378,7 +404,100 @@ class ChemicalShiftKptsAverageGenerationTask(FireTaskBase, FWSerializable): _fw_name = "Chemical Shift K-Points Average Generation Task" def run_task(self, fw_spec): - pass + no_jobs_spec = 
copy.deepcopy(fw_spec) + no_jobs_spec.pop('jobs', None) + no_jobs_spec.pop('handlers', None) + no_jobs_spec.pop('max_errors', None) + no_jobs_spec.pop('_tasks', None) + no_jobs_spec.pop('custodian_default_input_set', None) + no_jobs_spec.pop('task_type', None) + no_jobs_spec.pop('vaspinputset_name', None) + nick_name = no_jobs_spec['parameters']['nick_name'] + priority = no_jobs_spec['_priority'] + no_jobs_spec['input_set_config_dict']['INCAR']['ISMEAR'] = 0 + no_jobs_spec['input_set_config_dict']['INCAR']['LCHARG'] = True + no_jobs_spec['task_type'] = 'Single Kpt CS' + no_jobs_spec['vaspinputset_name'] = no_jobs_spec['task_type'] + " DictSet" + no_jobs_spec["scf_vasp_dir"] = fw_spec['prev_vasp_dir'] + + fws = [] + connections = dict() + db_fwids = [] + cur_fwid = -1 + prev_dir = fw_spec['prev_vasp_dir'] + scf_kpoint_filename = zpath(os.path.join(prev_dir, 'IBZKPT')) + whole_kpts = Kpoints.from_file(scf_kpoint_filename) + no_jobs_spec['total_kpts'] = len(whole_kpts.kpts) + + for (i, kpt) in enumerate(whole_kpts.kpts): + kweight = int(whole_kpts.kpts_weights[i]) + task_tag = "kpt_#{:d}_weight_{:d}".format(i + 1, kweight) + comment = "Individual {}th Kpoint for CS Calculation".format(i + 1) + cur_kpoints = Kpoints(comment=comment, num_kpts=1, + style=Kpoints.supported_modes.Reciprocal, + kpts=[kpt], kpts_weights=[1], + tet_number=0) + + # Individual K-Point Chemical Shift VASP + kpt_cs_spec = copy.deepcopy(no_jobs_spec) + kpt_cs_spec['run_tags'].append(task_tag) + kpt_cs_spec['kpoints_enforce'] = cur_kpoints + kpt_cs_spec["kpoint_tag"] = task_tag + + kpt_cs_tasks = [DictVaspSetupTask({'files': ['INCAR', "KPOINTS"]}), + VaspCopyTask({'files': ['CHGCAR', "POTCAR"], + 'use_CONTCAR': True, + 'keep_velocities': False})] + functional = kpt_cs_spec["functional"] + if functional != "PBE": + kpt_cs_tasks.append(ScanFunctionalSetupTask()) + kpt_cs_tasks.append(TagFileChecksumTask({"files": ["CHGCAR"]})) + from mpworks.firetasks.custodian_task import get_custodian_task + 
kpt_cs_tasks.append(get_custodian_task(kpt_cs_spec)) + kpt_cs_tasks.append(DeleteFileTask({"files": ["CHGCAR"]})) + kpt_cs_task_name = get_slug(nick_name + '--' + kpt_cs_spec['task_type'] + "--#{}".format(i)) + kpt_cs_vasp_fwid = cur_fwid # Links + cur_fwid -= 1 + vasp_fw = Firework(kpt_cs_tasks, kpt_cs_spec, name=kpt_cs_task_name, + fw_id=kpt_cs_vasp_fwid) + fws.append(vasp_fw) + + # Individual K-Point Chemical Shift VASP DB Insertion + kpt_cs_db_fwid = cur_fwid # Links + cur_fwid -= 1 + kpt_cs_db_type_class = NmrVaspToDBTask + from mpworks.workflows.snl_to_wf_nmr import get_nmr_db_fw + kpt_cs_db_fw = get_nmr_db_fw(nick_name=nick_name, fwid=kpt_cs_db_fwid, + prev_task_type=kpt_cs_spec['task_type'], + priority=priority, task_class=kpt_cs_db_type_class) + fws.append(kpt_cs_db_fw) + connections[kpt_cs_vasp_fwid] = kpt_cs_db_fwid + db_fwids.append(kpt_cs_db_fwid) + + collect_fwid = cur_fwid # Links + cur_fwid -= 1 + # K-Points Average Collect + collect_spec = copy.deepcopy(no_jobs_spec) + collect_spec['task_type'] = 'Single Kpt CS Collect' + collect_spec['vaspinputset_name'] = collect_spec['task_type'] + " DictSet" + collect_tasks = [ChemicalShiftKptsAverageCollectTask()] + collect_fwid = cur_fwid + cur_fwid -= 1 + collect_fw = Firework(collect_tasks, collect_spec, + name=get_slug(nick_name + '--' + collect_spec['task_type']), + fw_id=collect_fwid) + fws.append(collect_fw) + for dbid in db_fwids: + connections[dbid] = collect_fwid + wf = Workflow(fws, connections) + update_spec = {'total_kpts': no_jobs_spec['total_kpts'], + "scf_vasp_dir": fw_spec['prev_vasp_dir'], + 'prev_vasp_dir': fw_spec['prev_vasp_dir'], + 'prev_task_type': fw_spec['task_type']} + stored_data = {'total_kpts': no_jobs_spec['total_kpts'], + "scf_vasp_dir": fw_spec['prev_vasp_dir']} + return FWAction(update_spec=update_spec, stored_data=stored_data, + detours=wf) class ChemicalShiftKptsAverageCollectTask(FireTaskBase, FWSerializable): """ @@ -395,10 +514,11 @@ class TagFileChecksumTask(FireTaskBase, 
FWSerializable): _fw_name = "Tag File Checksum Task" - def __init__(self, files=None): - if files is None: - files = ["POSCAR", "CHGCAR", "POTCAR"] - self.files = files + def __init__(self, parameters=None): + parameters = parameters if parameters else {} + default_files = ["CHGCAR", "WAVCAR"] + self.update(parameters) + self.files = parameters.get("files", default_files) def run_task(self, fw_spec): file_checksums = dict() @@ -421,10 +541,11 @@ class DeleteFileTask(FireTaskBase, FWSerializable): _fw_name = "Delete File Task" - def __init__(self, files=None): - if files is None: - files = ["CHGCAR", "WAVCAR"] - self.files = files + def __init__(self, parameters=None): + parameters = parameters if parameters else {} + default_files = ["CHGCAR", "WAVCAR"] + self.update(parameters) + self.files = parameters.get("files", default_files) def run_task(self, fw_spec): for fn in self.files: From e5cdba6173c76501e582ac43bb6c5661c5a285dc Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 16:13:25 -0700 Subject: [PATCH 173/204] fix handler specification --- mpworks/firetasks/custodian_task.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 94229d6e..5da74079 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -329,8 +329,10 @@ def get_custodian_task(spec): task_type = spec['task_type'] v_exe = 'VASP_EXE' # will be transformed to vasp executable on the node if {'NMR EFG', 'NMR CS', 'Triple Jump Relax S1', - 'Triple Jump Relax S2', 'Triple Jump Relax S3'} & {task_type}: - handlers = [VaspErrorHandler(natoms_large_cell=50)] + 'Triple Jump Relax S2', 'Triple Jump Relax S3', + 'Pre Kpt CS SCF', 'Single Kpt CS'} & {task_type}: + handlers = [VaspErrorHandler(natoms_large_cell=50), + StdErrHandler(output_filename="std_err.txt")] else: handlers = [VaspErrorHandler()] handlers += [FrozenJobErrorHandler(), @@ -347,9 +349,6 @@ 
def get_custodian_task(spec): jobs = [VaspJob(v_exe)] handlers = [] - if task_type == 'NMR CS': - handlers += [StdErrHandler(output_filename="std_err.txt")] - params = {'jobs': [j_decorate(j.as_dict()) for j in jobs], 'handlers': [h.as_dict() for h in handlers], 'max_errors': 5} From 4ea7d194c2cf910fdd2df86abc8d6df5b3994a27 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 16:16:48 -0700 Subject: [PATCH 174/204] fix INCAR setting --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 0d46efeb..5acd06e3 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -415,7 +415,7 @@ def run_task(self, fw_spec): nick_name = no_jobs_spec['parameters']['nick_name'] priority = no_jobs_spec['_priority'] no_jobs_spec['input_set_config_dict']['INCAR']['ISMEAR'] = 0 - no_jobs_spec['input_set_config_dict']['INCAR']['LCHARG'] = True + no_jobs_spec['input_set_config_dict']['INCAR']['ICHARG'] = 11 no_jobs_spec['task_type'] = 'Single Kpt CS' no_jobs_spec['vaspinputset_name'] = no_jobs_spec['task_type'] + " DictSet" no_jobs_spec["scf_vasp_dir"] = fw_spec['prev_vasp_dir'] From 66234c6d31fbc96ce25b6eccc1907b28f0c2a629 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 16:26:56 -0700 Subject: [PATCH 175/204] fix fwid --- mpworks/firetasks/nmr_tasks.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 5acd06e3..058a6476 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -474,8 +474,6 @@ def run_task(self, fw_spec): connections[kpt_cs_vasp_fwid] = kpt_cs_db_fwid db_fwids.append(kpt_cs_db_fwid) - collect_fwid = cur_fwid # Links - cur_fwid -= 1 # K-Points Average Collect collect_spec = copy.deepcopy(no_jobs_spec) collect_spec['task_type'] = 'Single Kpt CS Collect' From 5031a25b948d93d8ad13af9b08cbb167e33d0f65 Mon Sep 17 
00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 16:57:39 -0700 Subject: [PATCH 176/204] finish collect k-points average task --- mpworks/firetasks/custodian_task.py | 3 +- mpworks/firetasks/nmr_tasks.py | 77 +++++++++++++++++++++++++++-- 2 files changed, 76 insertions(+), 4 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 5da74079..e9dfed34 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -177,7 +177,8 @@ def run_task(self, fw_spec): 'run_tags': fw_spec['run_tags'], 'parameters': fw_spec.get('parameters')} for k in ['kpoint_tag', 'scf_vasp_dir', 'functional', 'total_kpts']: - update_spec[k] = fw_spec[k] + if k in fw_spec: + update_spec[k] = fw_spec[k] if dynamic_wf is None: return FWAction(stored_data=stored_data, update_spec=update_spec) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 058a6476..de3e7178 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -11,7 +11,11 @@ from fireworks.utilities.fw_utilities import get_slug from monty.os.path import zpath from pymatgen.analysis.bond_valence import BVAnalyzer -from pymatgen.io.vasp import Outcar, zopen, Kpoints +from pymatgen.io.vasp import Outcar, Kpoints +from monty.io import zopen +import re +import math +from pymatgen.analysis.nmr import NMRChemicalShiftNotation from pymatgen.io.vasp.sets import DictSet from mpworks.dupefinders.dupefinder_vasp import DupeFinderVasp @@ -505,8 +509,75 @@ class ChemicalShiftKptsAverageCollectTask(FireTaskBase, FWSerializable): _fw_name = "Chemical Shift K-Points Average Collect Task" def run_task(self, fw_spec): - pass - + kpt_name_pattern = re.compile(r'kpt_#(?P\d+)_weight_(?P\d+)') + kpt_name_weigths = [] + for kpt_name in fw_spec.keys(): + m = kpt_name_pattern.match(kpt_name) + if m: + kpt_weight = m.group("weight") + kpt_name_weigths.append([kpt_name, kpt_weight]) + num_kpts = fw_spec["total_kpts"] + assert 
len(kpt_name_weigths) == num_kpts + num_atoms = len(fw_spec[kpt_name_weigths[0][0]]['chemical_shifts']['valence_only']) + num_ave_components = 7 + atom_cs_weight_vo_vc = [[list() for _ in range(num_ave_components)] + for _ in range(num_atoms)] + for i_kpt, (kpt_name, weight) in enumerate(kpt_name_weigths): + kpt_cs = fw_spec[kpt_name]['chemical_shifts'] + for i_atom in range(num_atoms): + val_only_tensor_pas = kpt_cs['valence_only'][i_atom].mehring_values[1:4] + val_core_tensor_pas = kpt_cs['valence_and_core'][i_atom].mehring_values[1:4] + components = (float(weight),) + val_only_tensor_pas + val_core_tensor_pas + for i_comp in range(num_ave_components): + atom_cs_weight_vo_vc[i_atom][i_comp].append(components[i_comp]) + for i_atom in range(num_atoms): + for i_comp in range(num_ave_components): + assert len(atom_cs_weight_vo_vc[i_atom][i_comp]) == num_kpts + ave_pas_tensors = [] + tensor_rmsd = [] + for i_atom in range(num_atoms): + atom_ave_tensor = [] + atom_tensor_rmsd = [] + for i_comp in range(1, num_ave_components): + sum_value = sum([weight * tensor for weight, tensor + in zip(atom_cs_weight_vo_vc[i_atom][0], + atom_cs_weight_vo_vc[i_atom][i_comp])]) + sum_weights = sum(atom_cs_weight_vo_vc[i_atom][0]) + ave_value = sum_value / sum_weights + atom_ave_tensor.append(ave_value) + sum_square_dev = sum([weight * ((tensor - ave_value) ** 2) for weight, tensor + in zip(atom_cs_weight_vo_vc[i_atom][0], + atom_cs_weight_vo_vc[i_atom][i_comp])]) + rmsd_value = math.sqrt(sum_square_dev / sum_weights) + atom_tensor_rmsd.append(rmsd_value) + ave_pas_tensors.append(atom_ave_tensor) + tensor_rmsd.append(atom_tensor_rmsd) + ave_tensor_notations = {"valence_only": [], 'valence_and_core': []} + for pas in ave_pas_tensors: + assert len(pas) == 6 + for comp_indices, comp_key in [[range(0, 3), "valence_only"], + [range(3, 6), 'valence_and_core']]: + sigmas = [pas[i] for i in comp_indices] + notation = NMRChemicalShiftNotation(*sigmas) + 
ave_tensor_notations[comp_key].append(notation) + single_kpt_vasp_calcs = {kpt_name: fw_spec[kpt_name] for kpt_name, weight + in kpt_name_weigths} + cs_fields = {"chemical_shifts": ave_tensor_notations, + "manual_kpt_average": fw_spec, + "rmsd": tensor_rmsd, + "rmsd_header": ["valence_only_11", "valence_only_22", "valence_only_33", + "valence_and_core_11", "valence_and_core_22", "valence_and_core_33"], + "manual_kpt_data": { + "total_kpts": fw_spec["total_kpts"], + "single_kpt_vasp_calcs": single_kpt_vasp_calcs + }} + stored_data = copy.deepcopy(cs_fields) + update_spec = copy.deepcopy(cs_fields) + update_spec['prev_task_type'] = fw_spec['task_type'] + update_spec['prev_vasp_dir'] = fw_spec['scf_vasp_dir'] + for k in ['scf_vasp_dir', 'functional']: + update_spec[k] = fw_spec[k] + return FWAction(stored_data=stored_data, update_spec=update_spec) class TagFileChecksumTask(FireTaskBase, FWSerializable): From 4825d00fef77a8e459e44930ad26de33d7d6959c Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 17:04:35 -0700 Subject: [PATCH 177/204] finish collect k-points average final db insertion --- mpworks/firetasks/nmr_tasks.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index de3e7178..809fee87 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -279,6 +279,10 @@ def run_task(self, fw_spec): update_spec[cs_kpt_name]["kpt_vasp_dir"] = prev_dir outcar.read_chemical_shifts() update_spec[cs_kpt_name]["chemical_shifts"] = outcar.data["chemical_shifts"] + elif prev_task_type == 'Single Kpt CS Collect': + for k in ['chemical_shifts', 'manual_kpt_average', 'rmsd', + 'rmsd_header', 'manual_kpt_data']: + nmr_fields = fw_spec[k] elif prev_task_type == "NMR EFG": outcar.read_nmr_efg() efg_fields = {"efg": outcar.data["efg"]} From e2e0729234ba8138edfc4f623be73bd4734b863c Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 22:52:16 -0700 Subject: [PATCH 
178/204] file KPOINTS file name --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 809fee87..5ab2856d 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -360,7 +360,7 @@ def run_task(self, fw_spec): vis.poscar.write_file("POSCAR") if "POTCAR" in self.files: vis.potcar.write_file("POTCAR") - if "KOINTS" in self.files: + if "KPOINTS" in self.files: if "kpoints_enforce" not in fw_spec: vis.kpoints.write_file("KPOINTS") else: From 6efe6ed7f171309da792705a5f479e32bca6412d Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 4 May 2017 23:17:06 -0700 Subject: [PATCH 179/204] first check the existence of std_err.txt --- mpworks/firetasks/custodian_task.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index e9dfed34..488fc9d4 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -155,7 +155,8 @@ def run_task(self, fw_spec): cus_ex = ex dynamic_wf = None if cus_ex is not None: - if self._is_kpts_parallel_chemical_shift_eligible(fw_spec): + if os.path.exists("std_err.txt") and \ + self._is_kpts_parallel_chemical_shift_eligible(fw_spec): from mpworks.firetasks.nmr_tasks import chemical_shift_spec_to_dynamic_kpt_average_wfs dynamic_wf = chemical_shift_spec_to_dynamic_kpt_average_wfs(fw_spec) else: From ff1fc79c0388846d08d0b93b3a8cffd2cd981cc2 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 5 May 2017 10:35:23 -0700 Subject: [PATCH 180/204] fix error message detect --- mpworks/firetasks/custodian_task.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index 488fc9d4..e7fa7d3f 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -190,8 +190,8 @@ def 
run_task(self, fw_spec): def _is_kpts_parallel_chemical_shift_eligible(self, fw_spec): if fw_spec['task_type'] == "NMR CS": eh = StdErrHandler(output_filename="std_err.txt") - errors = eh.check()["errors"] - if set(errors) & {'out_of_memory', 'seg_fault'}: + eh.check() + if set(eh.errors) & {'out_of_memory', 'seg_fault'}: return True return False From 977962077ee7455172bc328a26fd5646e85cfbdc Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 5 May 2017 10:58:24 -0700 Subject: [PATCH 181/204] use KPAR=1 for single k-point calculations --- mpworks/firetasks/nmr_tasks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 5ab2856d..e54d20ea 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -424,6 +424,7 @@ def run_task(self, fw_spec): priority = no_jobs_spec['_priority'] no_jobs_spec['input_set_config_dict']['INCAR']['ISMEAR'] = 0 no_jobs_spec['input_set_config_dict']['INCAR']['ICHARG'] = 11 + no_jobs_spec['input_set_incar_enforce'] = {"KPAR": 1} no_jobs_spec['task_type'] = 'Single Kpt CS' no_jobs_spec['vaspinputset_name'] = no_jobs_spec['task_type'] + " DictSet" no_jobs_spec["scf_vasp_dir"] = fw_spec['prev_vasp_dir'] From ec7b4198fab09463511fa4a4894079b68e0c54c1 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 18 May 2017 11:18:20 -0700 Subject: [PATCH 182/204] file NMR SCF job file path --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index e54d20ea..4b217a87 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -433,7 +433,7 @@ def run_task(self, fw_spec): connections = dict() db_fwids = [] cur_fwid = -1 - prev_dir = fw_spec['prev_vasp_dir'] + prev_dir = get_loc(fw_spec['prev_vasp_dir']) scf_kpoint_filename = zpath(os.path.join(prev_dir, 'IBZKPT')) whole_kpts = Kpoints.from_file(scf_kpoint_filename) 
no_jobs_spec['total_kpts'] = len(whole_kpts.kpts) From 44f2ddc33acf3a612c472248ce84911df3a50d18 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 18 May 2017 11:22:57 -0700 Subject: [PATCH 183/204] fix typo --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 4b217a87..e9caf80b 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -607,7 +607,7 @@ def run_task(self, fw_spec): "value": checksum} with open("checksum.{}.{}".format(fn, checksum[:10]), "w") as f: f.write("sha224") - stored_data = {"file_chechsum": file_checksums} + stored_data = {"file_checksum": file_checksums} return FWAction(stored_data=stored_data) From 4ed202858a7c04722c2bd2bb2d1c013543cba05c Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Fri, 19 May 2017 11:10:47 -0700 Subject: [PATCH 184/204] add option to use environment variable GARDEN_LOC to customize the location of GARDEN --- mpworks/workflows/wf_settings.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/mpworks/workflows/wf_settings.py b/mpworks/workflows/wf_settings.py index 12d0d78b..242f45e8 100644 --- a/mpworks/workflows/wf_settings.py +++ b/mpworks/workflows/wf_settings.py @@ -1,3 +1,5 @@ +import os + from monty.design_patterns import singleton __author__ = 'Anubhav Jain' @@ -23,7 +25,10 @@ class WFSettings(object): def __init__(self): self.MOVE_TO_GARDEN_DEV = False self.MOVE_TO_GARDEN_PROD = False - self.GARDEN = '/project/projectdirs/matgen/garden' + if "GARDEN_LOC" in os.environ: + self.GARDEN = os.environ["GARDEN_LOC"] + else: + self.GARDEN = '/project/projectdirs/matgen/garden' @property def RUN_LOCS(self): @@ -36,6 +41,5 @@ def RUN_LOCS(self): '/global/scratch/sd/matcomp/wc_tests/', '/global/scratch/sd/matcomp/aj_prod/', '/global/scratch2/sd/matcomp/mp_prod/', - '/global/scratch2/sd/matcomp/mp_prod_hopper/', - 
'/project/projectdirs/matgen/garden/nmr'] + '/global/scratch2/sd/matcomp/mp_prod_hopper/'] From 4f6624cf603f466bc150f4b21b2b0bbfea805a46 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 22 May 2017 12:05:11 -0700 Subject: [PATCH 185/204] merge run_tags to query in DupeFinder to be consistent with Kiran's change to FireWorks --- mpworks/dupefinders/dupefinder_vasp.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/mpworks/dupefinders/dupefinder_vasp.py b/mpworks/dupefinders/dupefinder_vasp.py index c27ed7a7..66ff2335 100644 --- a/mpworks/dupefinders/dupefinder_vasp.py +++ b/mpworks/dupefinders/dupefinder_vasp.py @@ -15,13 +15,10 @@ class DupeFinderVasp(DupeFinderBase): _fw_name = 'Dupe Finder Vasp' - def verify(self, spec1, spec2): - # assert: task_type and snlgroup_id have already been checked through query - return set(spec1.get('run_tags', [])) == set(spec2.get('run_tags', [])) - def query(self, spec): return {'spec.task_type': spec['task_type'], - 'spec.snlgroup_id': spec['snlgroup_id']} + 'spec.snlgroup_id': spec['snlgroup_id'], + 'spec.run_tags': spec['run_tags']} class DupeFinderDB(DupeFinderBase): From 2d335e4f7f503594cb3701f9e8f2fb8682b1da40 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 29 May 2017 14:05:35 -0700 Subject: [PATCH 186/204] fix type error --- mpworks/firetasks/nmr_tasks.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index e9caf80b..2f02d4a0 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -530,8 +530,10 @@ def run_task(self, fw_spec): for i_kpt, (kpt_name, weight) in enumerate(kpt_name_weigths): kpt_cs = fw_spec[kpt_name]['chemical_shifts'] for i_atom in range(num_atoms): - val_only_tensor_pas = kpt_cs['valence_only'][i_atom].mehring_values[1:4] - val_core_tensor_pas = kpt_cs['valence_and_core'][i_atom].mehring_values[1:4] + vo_tensor_notation = 
NMRChemicalShiftNotation.from_dict(kpt_cs['valence_only'][i_atom]) + vc_tensor_notation = NMRChemicalShiftNotation.from_dict(kpt_cs['valence_and_core'][i_atom]) + val_only_tensor_pas = vo_tensor_notation.mehring_values[1:4] + val_core_tensor_pas = vc_tensor_notation.mehring_values[1:4] components = (float(weight),) + val_only_tensor_pas + val_core_tensor_pas for i_comp in range(num_ave_components): atom_cs_weight_vo_vc[i_atom][i_comp].append(components[i_comp]) @@ -564,7 +566,7 @@ def run_task(self, fw_spec): [range(3, 6), 'valence_and_core']]: sigmas = [pas[i] for i in comp_indices] notation = NMRChemicalShiftNotation(*sigmas) - ave_tensor_notations[comp_key].append(notation) + ave_tensor_notations[comp_key].append(notation.as_dict()) single_kpt_vasp_calcs = {kpt_name: fw_spec[kpt_name] for kpt_name, weight in kpt_name_weigths} cs_fields = {"chemical_shifts": ave_tensor_notations, From d1e5f1491cfe389531313265777b3baf35e5c8ec Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 29 May 2017 14:40:16 -0700 Subject: [PATCH 187/204] fix DB insertion --- mpworks/firetasks/nmr_tasks.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 2f02d4a0..9d9fc54b 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -283,6 +283,14 @@ def run_task(self, fw_spec): for k in ['chemical_shifts', 'manual_kpt_average', 'rmsd', 'rmsd_header', 'manual_kpt_data']: nmr_fields = fw_spec[k] + shutil.copytree(prev_dir, "fake_nmr_vasp_files") + fake_prev_dir = os.path.abspath("fake_nmr_vasp_files") + fw_spec['prev_vasp_dir'] = fake_prev_dir + with zopen(zpath(os.path.join(fake_prev_dir, 'FW.json')), 'rt') as f: + fw_dict = json.load(f) + fw_dict["prev_task_type"] = "NMR CS" + with zopen(zpath(os.path.join(fake_prev_dir, 'FW.json')), 'wt') as f: + json.dump(fw_dict, f, sort_keys=True, indent=4) elif prev_task_type == "NMR EFG": outcar.read_nmr_efg() efg_fields = {"efg": outcar.data["efg"]} 
From 3aed2f88f4a910833b8d77d5813d423ba8748dfb Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 29 May 2017 14:51:27 -0700 Subject: [PATCH 188/204] fix file name --- mpworks/firetasks/nmr_tasks.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 9d9fc54b..be60a475 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -283,12 +283,17 @@ def run_task(self, fw_spec): for k in ['chemical_shifts', 'manual_kpt_average', 'rmsd', 'rmsd_header', 'manual_kpt_data']: nmr_fields = fw_spec[k] - shutil.copytree(prev_dir, "fake_nmr_vasp_files") - fake_prev_dir = os.path.abspath("fake_nmr_vasp_files") + sub_dir_name = "fake_nmr_vasp_files" + shutil.copytree(prev_dir, sub_dir_name) + for fn in ["CHGCAR", "CHGCAR.gz"]: + ffn = os.path.join(sub_dir_name, fn) + if os.path.exists(ffn): + os.remove(ffn) + fake_prev_dir = os.path.abspath(sub_dir_name) fw_spec['prev_vasp_dir'] = fake_prev_dir with zopen(zpath(os.path.join(fake_prev_dir, 'FW.json')), 'rt') as f: fw_dict = json.load(f) - fw_dict["prev_task_type"] = "NMR CS" + fw_dict["task_type"] = "NMR CS" with zopen(zpath(os.path.join(fake_prev_dir, 'FW.json')), 'wt') as f: json.dump(fw_dict, f, sort_keys=True, indent=4) elif prev_task_type == "NMR EFG": From ad6115926358b80991e829d44c8ebd94ad0f93c3 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 29 May 2017 14:56:14 -0700 Subject: [PATCH 189/204] fix tweak of fw_spec --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index be60a475..8411d16f 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -293,7 +293,7 @@ def run_task(self, fw_spec): fw_spec['prev_vasp_dir'] = fake_prev_dir with zopen(zpath(os.path.join(fake_prev_dir, 'FW.json')), 'rt') as f: fw_dict = json.load(f) - fw_dict["task_type"] = "NMR CS" + 
fw_dict["spec"]["task_type"] = "NMR CS" with zopen(zpath(os.path.join(fake_prev_dir, 'FW.json')), 'wt') as f: json.dump(fw_dict, f, sort_keys=True, indent=4) elif prev_task_type == "NMR EFG": From 104fb0f8e603757dde7cefcfa19b4ee185b2a246 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 29 May 2017 15:06:59 -0700 Subject: [PATCH 190/204] fix data formating --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 8411d16f..848ba63c 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -282,7 +282,7 @@ def run_task(self, fw_spec): elif prev_task_type == 'Single Kpt CS Collect': for k in ['chemical_shifts', 'manual_kpt_average', 'rmsd', 'rmsd_header', 'manual_kpt_data']: - nmr_fields = fw_spec[k] + nmr_fields[k] = fw_spec[k] sub_dir_name = "fake_nmr_vasp_files" shutil.copytree(prev_dir, sub_dir_name) for fn in ["CHGCAR", "CHGCAR.gz"]: From 5079aa63816298d5ec3da5510b1f0a11a6dda2b4 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Mon, 29 May 2017 15:18:51 -0700 Subject: [PATCH 191/204] fix data type --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 848ba63c..33fbbf72 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -583,7 +583,7 @@ def run_task(self, fw_spec): single_kpt_vasp_calcs = {kpt_name: fw_spec[kpt_name] for kpt_name, weight in kpt_name_weigths} cs_fields = {"chemical_shifts": ave_tensor_notations, - "manual_kpt_average": fw_spec, + "manual_kpt_average": True, "rmsd": tensor_rmsd, "rmsd_header": ["valence_only_11", "valence_only_22", "valence_only_33", "valence_and_core_11", "valence_and_core_22", "valence_and_core_33"], From a7c2767c6538af4206c526455cf2630c5c6d17c5 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 6 Jun 2017 14:04:25 -0700 Subject: [PATCH 
192/204] add F pseudopotential choice --- mpworks/firetasks/nmr_tensor_set.yaml | 2 ++ mpworks/firetasks/triple_jump_relax_set.yaml | 3 +++ 2 files changed, 5 insertions(+) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index acacf62f..837bc468 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -23,6 +23,7 @@ CS: H: H_h Mg: Mg_sv O: O_h + F: F_h Na: Na_sv Al: Al Si: Si_h @@ -137,6 +138,7 @@ EFG: H: H_h Mg: Mg_sv O: O_h + F: F_h Na: Na_sv Al: Al Si: Si_h diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 8aa7ade0..10f00a8e 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -23,6 +23,7 @@ STEP1: H: H Mg: Mg O: O + F: F Na: Na Al: Al Si: Si @@ -61,6 +62,7 @@ STEP2: H: H_h Mg: Mg_sv O: O_h + F: F_h Na: Na_sv Al: Al Si: Si_h @@ -103,6 +105,7 @@ STEP3: H: H_h Mg: Mg_sv O: O_h + F: F_h Na: Na_sv Al: Al Si: Si_h From 65c8d283273b9cc56fdffebf99ddbae9957983ee Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 6 Jun 2017 15:39:21 -0700 Subject: [PATCH 193/204] only assign valence for materials having valence specific pseudopotentials --- mpworks/firetasks/nmr_tasks.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index e9caf80b..d6b33597 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -36,6 +36,8 @@ This is modified from Wei Chen & Joseph Montoya's elastic_tasks. 
""" +tri_val_elements = {"Ce", "Dy", "Er", "Eu", "Gd", "Ho", "Lu", "Nd", "Pm", "Pr", "Sm", "Tb", "Tm"} +di_val_elements = {"Er", "Eu", "Yb"} def _get_nuclear_quadrupole_moment(element, nqm_dict, parameters): if element not in nqm_dict: @@ -103,8 +105,6 @@ def _change_garden_setting(): def _assign_potcar_valence(structure, potcar_dict): - tri_val_elements = {"Ce", "Dy", "Er", "Eu", "Gd", "Ho", "Lu", "Nd", "Pm", "Pr", "Sm", "Tb", "Tm"} - di_val_elements = {"Er", "Eu", "Yb"} st_elements = set([specie.symbol for specie in structure.species]) bva = BVAnalyzer() valences = bva.get_valences(structure) @@ -153,7 +153,9 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru incar_enforce = {'NPAR': par_num} spec['run_tags'] = spec.get('run_tags', []) spec['run_tags'].extend(additional_run_tags) - _assign_potcar_valence(structure, config_dict["POTCAR"]) + if set([sp.symbol for sp in structure.species]) & \ + (tri_val_elements | di_val_elements): + _assign_potcar_valence(structure, config_dict["POTCAR"]) mpvis = _config_dict_to_input_set(config_dict, structure, incar_enforce, parameters=parameters) From e844efed7a822c9472b7adb876d16aa974275c95 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 6 Jun 2017 16:01:00 -0700 Subject: [PATCH 194/204] let alternative command bypass the default command --- mpworks/firetasks/custodian_task.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/mpworks/firetasks/custodian_task.py b/mpworks/firetasks/custodian_task.py index e7fa7d3f..fe156fd2 100644 --- a/mpworks/firetasks/custodian_task.py +++ b/mpworks/firetasks/custodian_task.py @@ -134,25 +134,25 @@ def run_task(self, fw_spec): error_list = [] cus_ex = None + + if "alt_cmds" in fw_env and fw_spec['task_type'] in fw_env["alt_cmds"]: + try: + logging.info("Initiate VASP calculations using alternate binaries") + all_errors = self._run_alt_vasp_cmd(terminate_func, v_exe, gv_exe, + fw_env.get("vasp_cmd", "vasp"), 
+ fw_env.get("gvasp_cmd", "gvasp"), + fw_env["alt_cmds"][fw_spec['task_type']], + fw_env.get("input_rewind", True), + fw_spec['mpsnl'].structure) + error_list.extend(all_errors) + except Exception as ex: + cus_ex = ex try: all_errors = self._run_custodian(terminate_func) error_list.extend(all_errors) except Exception as ex: cus_ex = ex - if cus_ex is not None: - if "alt_cmds" in fw_env and fw_spec['task_type'] in fw_env["alt_cmds"]: - cus_ex = None - try: - logging.info("Initiate VASP calculations using alternate binaries") - all_errors = self._run_alt_vasp_cmd(terminate_func, v_exe, gv_exe, - fw_env.get("vasp_cmd", "vasp"), - fw_env.get("gvasp_cmd", "gvasp"), - fw_env["alt_cmds"][fw_spec['task_type']], - fw_env.get("input_rewind", True), - fw_spec['mpsnl'].structure) - error_list.extend(all_errors) - except Exception as ex: - cus_ex = ex + dynamic_wf = None if cus_ex is not None: if os.path.exists("std_err.txt") and \ From c6e40ad4aaf6864d30434dd0bae708db584319b4 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 7 Jun 2017 10:44:04 -0700 Subject: [PATCH 195/204] garantee the elements has been assigned pawpot explicitly --- mpworks/firetasks/nmr_tasks.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 3b97fa32..b0e99a1e 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -153,9 +153,12 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru incar_enforce = {'NPAR': par_num} spec['run_tags'] = spec.get('run_tags', []) spec['run_tags'].extend(additional_run_tags) - if set([sp.symbol for sp in structure.species]) & \ + elements_in_structure = set([sp.symbol for sp in structure.species]) + if elements_in_structure & \ (tri_val_elements | di_val_elements): _assign_potcar_valence(structure, config_dict["POTCAR"]) + print(elements_in_structure, set(config_dict["POTCAR"].keys())) + assert elements_in_structure <= 
set(config_dict["POTCAR"].keys()) mpvis = _config_dict_to_input_set(config_dict, structure, incar_enforce, parameters=parameters) From bf3051926a70cee1edc62203c65880e1d647113e Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 7 Jun 2017 10:45:20 -0700 Subject: [PATCH 196/204] remove redundant print --- mpworks/firetasks/nmr_tasks.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index b0e99a1e..04a069cd 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -157,7 +157,6 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru if elements_in_structure & \ (tri_val_elements | di_val_elements): _assign_potcar_valence(structure, config_dict["POTCAR"]) - print(elements_in_structure, set(config_dict["POTCAR"].keys())) assert elements_in_structure <= set(config_dict["POTCAR"].keys()) mpvis = _config_dict_to_input_set(config_dict, structure, From a73c50ef8d3a59ca9f9e2c19a65d70baf6e275ab Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 7 Jun 2017 10:48:56 -0700 Subject: [PATCH 197/204] change assert to exception to provide the element name --- mpworks/firetasks/nmr_tasks.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 04a069cd..b3249441 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -157,7 +157,9 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru if elements_in_structure & \ (tri_val_elements | di_val_elements): _assign_potcar_valence(structure, config_dict["POTCAR"]) - assert elements_in_structure <= set(config_dict["POTCAR"].keys()) + if not elements_in_structure <= set(config_dict["POTCAR"].keys()): + missing_elements = set(config_dict["POTCAR"].keys()) - elements_in_structure + raise ValueError("Element {} is not available in config dict".format(missing_elements)) mpvis = 
_config_dict_to_input_set(config_dict, structure, incar_enforce, parameters=parameters) From b5aada32f7feaca8bf5d4e5f59faf9d214d7f5e1 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 7 Jun 2017 10:49:55 -0700 Subject: [PATCH 198/204] fix typo --- mpworks/firetasks/nmr_tasks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index b3249441..1074dd66 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -158,7 +158,7 @@ def snl_to_nmr_spec(structure, istep_triple_jump, parameters=None, additional_ru (tri_val_elements | di_val_elements): _assign_potcar_valence(structure, config_dict["POTCAR"]) if not elements_in_structure <= set(config_dict["POTCAR"].keys()): - missing_elements = set(config_dict["POTCAR"].keys()) - elements_in_structure + missing_elements = elements_in_structure - set(config_dict["POTCAR"].keys()) raise ValueError("Element {} is not available in config dict".format(missing_elements)) mpvis = _config_dict_to_input_set(config_dict, structure, From 6287714dcdefabff229d3833862dfd126284af40 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 7 Jun 2017 11:07:37 -0700 Subject: [PATCH 199/204] add more elements to pawpot --- mpworks/firetasks/nmr_tensor_set.yaml | 12 ++++++++++++ mpworks/firetasks/triple_jump_relax_set.yaml | 18 ++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index 837bc468..d7af392f 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -33,6 +33,12 @@ CS: Ca: Ca_sv Y: Y_sv Gd: Gd + Be: Be_sv + Fe: Fe_sv_h + Ge: Ge_h + Li: Li_sv + S: S_h + Zn: Zn_pv EFG: INCAR: @@ -148,3 +154,9 @@ EFG: Ca: Ca_sv Y: Y_sv Gd: Gd + Be: Be_sv + Fe: Fe_sv_h + Ge: Ge_h + Li: Li_sv + S: S_h + Zn: Zn_pv diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 
10f00a8e..5eceec84 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -33,6 +33,12 @@ STEP1: Ca: Ca_pv Y: Y_sv Gd: Gd + Be: Be_sv + Fe: Fe_sv + Ge: Ge_d + Li: Li_sv + S: S + Zn: Zn_pv STEP2: INCAR: @@ -72,6 +78,12 @@ STEP2: Ca: Ca_sv Y: Y_sv Gd: Gd + Be: Be_sv + Fe: Fe_sv_h + Ge: Ge_h + Li: Li_sv + S: S_h + Zn: Zn_pv STEP3: INCAR: @@ -115,6 +127,12 @@ STEP3: Ca: Ca_sv Y: Y_sv Gd: Gd + Be: Be_sv + Fe: Fe_sv_h + Ge: Ge_h + Li: Li_sv + S: S_h + Zn: Zn_pv STEP_DYNA3: INCAR: From 0fe81ab1b6b6a8262b353db01f648a6242a8df2a Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 4 Jul 2017 14:17:37 -0700 Subject: [PATCH 200/204] reder nuclear quadruplar moments after the atoms were reordered --- mpworks/firetasks/nmr_tasks.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 1074dd66..32cbc37c 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -365,6 +365,19 @@ def _sort_structure_by_encut(structure, config_dict): structure = structure.get_sorted_structure(key=lambda site: enmax_dict[site.specie.symbol], reverse=True) return structure + @staticmethod + def reorder_nqm(vis, parameters): + old_quad_efg_set = set(vis.incar["QUAD_EFG"]) + module_dir = os.path.abspath(os.path.dirname(__file__)) + config_file = os.path.join(module_dir, "nmr_tensor_set.yaml") + with open(config_file) as f: + parent_config_dict = yaml.load(stream=f) + nqm_map = parent_config_dict["EFG"]["INCAR"]["QUAD_EFG_MAP"] + all_elements = [sp.element for sp in vis.potcar] + quad_efg = [_get_nuclear_quadrupole_moment(el, nqm_map, parameters) for el in all_elements] + vis.incar["QUAD_EFG"] = quad_efg + assert old_quad_efg_set == set(vis.incar["QUAD_EFG"]) + def run_task(self, fw_spec): config_dict = fw_spec["input_set_config_dict"] incar_enforce = fw_spec["input_set_incar_enforce"] @@ -374,6 +387,8 @@ def run_task(self, fw_spec): 
user_incar_settings=incar_enforce, sort_structure=False) if "INCAR" in self.files: + if fw_spec["task_type"] == "NMR EFG": + self.reorder_nqm(vis, fw_spec["parameters"]) vis.incar.write_file("INCAR") if "POSCAR" in self.files: vis.poscar.write_file("POSCAR") From 1d5f777922b25d7fdf2bf8ee8bde3f8e05df13f3 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 4 Jul 2017 14:22:02 -0700 Subject: [PATCH 201/204] make a private method --- mpworks/firetasks/nmr_tasks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 32cbc37c..779a1a35 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -366,7 +366,7 @@ def _sort_structure_by_encut(structure, config_dict): return structure @staticmethod - def reorder_nqm(vis, parameters): + def _reorder_nqm(vis, parameters): old_quad_efg_set = set(vis.incar["QUAD_EFG"]) module_dir = os.path.abspath(os.path.dirname(__file__)) config_file = os.path.join(module_dir, "nmr_tensor_set.yaml") @@ -388,7 +388,7 @@ def run_task(self, fw_spec): sort_structure=False) if "INCAR" in self.files: if fw_spec["task_type"] == "NMR EFG": - self.reorder_nqm(vis, fw_spec["parameters"]) + self._reorder_nqm(vis, fw_spec["parameters"]) vis.incar.write_file("INCAR") if "POSCAR" in self.files: vis.poscar.write_file("POSCAR") From fbf32f8076af643adfaf1d7ca624df2ab2f38895 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Tue, 4 Jul 2017 14:37:59 -0700 Subject: [PATCH 202/204] fix a bug of copied instance --- mpworks/firetasks/nmr_tasks.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/mpworks/firetasks/nmr_tasks.py b/mpworks/firetasks/nmr_tasks.py index 779a1a35..9ab34e39 100644 --- a/mpworks/firetasks/nmr_tasks.py +++ b/mpworks/firetasks/nmr_tasks.py @@ -367,7 +367,8 @@ def _sort_structure_by_encut(structure, config_dict): @staticmethod def _reorder_nqm(vis, parameters): - old_quad_efg_set = 
set(vis.incar["QUAD_EFG"]) + incar = vis.incar + old_quad_efg_set = set(incar["QUAD_EFG"]) module_dir = os.path.abspath(os.path.dirname(__file__)) config_file = os.path.join(module_dir, "nmr_tensor_set.yaml") with open(config_file) as f: @@ -375,8 +376,9 @@ def _reorder_nqm(vis, parameters): nqm_map = parent_config_dict["EFG"]["INCAR"]["QUAD_EFG_MAP"] all_elements = [sp.element for sp in vis.potcar] quad_efg = [_get_nuclear_quadrupole_moment(el, nqm_map, parameters) for el in all_elements] - vis.incar["QUAD_EFG"] = quad_efg - assert old_quad_efg_set == set(vis.incar["QUAD_EFG"]) + incar["QUAD_EFG"] = quad_efg + assert old_quad_efg_set == set(incar["QUAD_EFG"]) + return incar def run_task(self, fw_spec): config_dict = fw_spec["input_set_config_dict"] @@ -387,9 +389,10 @@ def run_task(self, fw_spec): user_incar_settings=incar_enforce, sort_structure=False) if "INCAR" in self.files: + incar = vis.incar if fw_spec["task_type"] == "NMR EFG": - self._reorder_nqm(vis, fw_spec["parameters"]) - vis.incar.write_file("INCAR") + incar = self._reorder_nqm(vis, fw_spec["parameters"]) + incar.write_file("INCAR") if "POSCAR" in self.files: vis.poscar.write_file("POSCAR") if "POTCAR" in self.files: From 6dcee6b6a8997b5aa814a08565615a50eacaa2e2 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Wed, 30 Aug 2017 11:34:20 -0700 Subject: [PATCH 203/204] update pawpot choice --- mpworks/firetasks/nmr_tensor_set.yaml | 12 ++++++++++++ mpworks/firetasks/triple_jump_relax_set.yaml | 18 ++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index d7af392f..c650de4e 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -23,6 +23,7 @@ CS: H: H_h Mg: Mg_sv O: O_h + N: N_h F: F_h Na: Na_sv Al: Al @@ -39,6 +40,11 @@ CS: Li: Li_sv S: S_h Zn: Zn_pv + Ti: Ti_sv_h + Zr: Zr_sv_new + Sr: Sr_sv + Ba: Ba_sv + La: La EFG: INCAR: @@ -144,6 +150,7 @@ EFG: H: H_h Mg: Mg_sv O: O_h 
+ N: N_h F: F_h Na: Na_sv Al: Al @@ -160,3 +167,8 @@ EFG: Li: Li_sv S: S_h Zn: Zn_pv + Ti: Ti_sv_h + Zr: Zr_sv_new + Sr: Sr_sv + Ba: Ba_sv + La: La diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 5eceec84..86c0a188 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -23,6 +23,7 @@ STEP1: H: H Mg: Mg O: O + N: N F: F Na: Na Al: Al @@ -39,6 +40,11 @@ STEP1: Li: Li_sv S: S Zn: Zn_pv + Ti: Ti_sv_new + Zr: Zr_sv_new + Sr: Sr_sv + Ba: Ba_sv + La: La STEP2: INCAR: @@ -68,6 +74,7 @@ STEP2: H: H_h Mg: Mg_sv O: O_h + N: N_h F: F_h Na: Na_sv Al: Al @@ -84,6 +91,11 @@ STEP2: Li: Li_sv S: S_h Zn: Zn_pv + Ti: Ti_sv_h + Zr: Zr_sv_new + Sr: Sr_sv + Ba: Ba_sv + La: La STEP3: INCAR: @@ -117,6 +129,7 @@ STEP3: H: H_h Mg: Mg_sv O: O_h + N: N_h F: F_h Na: Na_sv Al: Al @@ -133,6 +146,11 @@ STEP3: Li: Li_sv S: S_h Zn: Zn_pv + Ti: Ti_sv_h + Zr: Zr_sv_new + Sr: Sr_sv + Ba: Ba_sv + La: La STEP_DYNA3: INCAR: From b6be7cb63e65e89488a0fa01372145d6e9090151 Mon Sep 17 00:00:00 2001 From: Xiaohui Qu Date: Thu, 19 Oct 2017 21:24:58 -0700 Subject: [PATCH 204/204] add pawpot for B, In, Pb --- mpworks/firetasks/nmr_tensor_set.yaml | 6 ++++++ mpworks/firetasks/triple_jump_relax_set.yaml | 9 +++++++++ 2 files changed, 15 insertions(+) diff --git a/mpworks/firetasks/nmr_tensor_set.yaml b/mpworks/firetasks/nmr_tensor_set.yaml index c650de4e..0cc412a3 100644 --- a/mpworks/firetasks/nmr_tensor_set.yaml +++ b/mpworks/firetasks/nmr_tensor_set.yaml @@ -45,6 +45,9 @@ CS: Sr: Sr_sv Ba: Ba_sv La: La + In: In_d + B: B_h + Pb: Pb_d EFG: INCAR: @@ -172,3 +175,6 @@ EFG: Sr: Sr_sv Ba: Ba_sv La: La + In: In_d + B: B_h + Pb: Pb_d diff --git a/mpworks/firetasks/triple_jump_relax_set.yaml b/mpworks/firetasks/triple_jump_relax_set.yaml index 86c0a188..304e66e7 100644 --- a/mpworks/firetasks/triple_jump_relax_set.yaml +++ b/mpworks/firetasks/triple_jump_relax_set.yaml @@ -45,6 +45,9 @@ STEP1: Sr: Sr_sv 
Ba: Ba_sv La: La + In: In_d + B: B + Pb: Pb_d STEP2: INCAR: @@ -96,6 +99,9 @@ STEP2: Sr: Sr_sv Ba: Ba_sv La: La + In: In_d + B: B_h + Pb: Pb_d STEP3: INCAR: @@ -151,6 +157,9 @@ STEP3: Sr: Sr_sv Ba: Ba_sv La: La + In: In_d + B: B_h + Pb: Pb_d STEP_DYNA3: INCAR: