diff --git a/.gitignore b/.gitignore index 474370e..0e3c134 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,5 @@ _site/ .vscode/ **/.ipynb_checkpoints/ +/Gemfile +/Gemfile.lock diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..dfb25e4 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 Joao Fonseca + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/_config.yml b/_config.yml index 0af6d51..5ae4cc3 100644 --- a/_config.yml +++ b/_config.yml @@ -12,7 +12,7 @@ kramdown: input: GFM # Want to include files in `_includes/code` (expected way doesn't work). -include: +include: - _includes exclude: - _includes/head.html @@ -45,7 +45,7 @@ collections: nav_list_pages: false index_list_pages: true permalink: /:collection/:name - add_new_name: page + add_new_name: page experiments: output: true nav_order: 2 @@ -57,21 +57,37 @@ collections: tutorials: output: true nav_order: 3 - nav_name: Tutorials + nav_name: Tutorials + index_list_pages: true + permalink: /:collection/:name + add_new_name: tutorial + data_analysis: + output: true + nav_order: 5 + nav_name: Data and Analysis + nav_list_pages: false index_list_pages: true permalink: /:collection/:name - add_new_name: tutorial + add_new_name: page software_and_simulation: output: true - nav_order: 4 + nav_order: 6 nav_name: Software & Simulation nav_list_pages: false index_list_pages: true permalink: /:collection/:name - add_new_name: page - miscellaneous: + add_new_name: page + blog: output: true - nav_order: 5 + nav_order: 7 + nav_name: Blog + nav_list_pages: false + index_list_pages: true + permalink: /:collection/:name + add_new_name: page + miscellaneous: + output: true + nav_order: 8 nav_name: Miscellaneous nav_list_pages: true index_list_pages: true @@ -91,7 +107,7 @@ defaults: values: show_edit_button: true show_history_button: true - show_delete_button: false + show_delete_button: false - scope: path: "tags.*" values: @@ -103,13 +119,13 @@ defaults: values: show_edit_button: false show_history_button: false - show_delete_button: false + show_delete_button: false - scope: path: "add_metadata_file.*" values: show_edit_button: false show_history_button: false - show_delete_button: false + show_delete_button: false - scope: path: "collections/*/index.*" values: @@ -126,7 +142,8 @@ defaults: published: true show_breadcrumbs: true show_meta: true - title: '' + title: "" + subcollection: root - scope: path: "" type: "tutorials" @@ -134,7 +151,8 @@ defaults: layout: post show_breadcrumbs: true show_meta: true - title: '' 
+ title: "" + subcollection: root - scope: path: "" type: "software_and_simulation" @@ -142,7 +160,17 @@ defaults: layout: post show_breadcrumbs: true show_meta: true - title: '' + title: "" + subcollection: root + - scope: + path: "" + type: "blog" + values: + layout: post + show_breadcrumbs: true + show_meta: true + title: "" + subcollection: root - scope: path: "" type: "handbook" @@ -150,7 +178,8 @@ defaults: layout: post show_breadcrumbs: true show_meta: true - title: '' + title: "" + subcollection: root - scope: path: "" type: "code_of_conduct" @@ -158,7 +187,8 @@ defaults: layout: post show_breadcrumbs: true show_meta: true - title: '' + title: "" + subcollection: root - scope: path: "" type: "miscellaneous" @@ -166,4 +196,13 @@ defaults: layout: post show_breadcrumbs: true show_meta: true - title: '' + title: "" + subcollection: root + - scope: + path: "" + type: "data_analysis" + values: + layout: post + show_breadcrumbs: true + show_meta: true + title: "" diff --git a/_data/people.yml b/_data/people.yml index f341b24..ce6a22f 100644 --- a/_data/people.yml +++ b/_data/people.yml @@ -3,36 +3,16 @@ url: https://lightform.org.uk/people/dr-adam-plowman initials: AP -- name: Natalie Shannon - handle: NatalieShannon - url: https://lightform.org.uk/people/natalie-shannon - initials: NS - - name: Christopher Daniel handle: ChristopherDaniel url: https://lightform.org.uk/people/dr-christopher-stuart-daniel initials: CD -- name: Alex Cassell - handle: AlexCassell - url: https://lightform.org.uk/people/dr-alex-cassell - initials: AC - -- name: Paloma Hidalgo-Manrique - handle: PalomaHidalgoManrique - url: https://lightform.org.uk/people/dr-paloma-hidalgo-manrique - initials: PHM - - name: Peter Crowther handle: PeterCrowther url: https://lightform.org.uk/people/peter-crowther initials: PC -- name: Sumeet Mishra - handle: SumeetMishra - url: https://lightform.org.uk/people/dr-sumeet-mishra - initials: SM - - name: João Fonseca handle: JoaoFonseca url: https://lightform.org.uk/people/joao-fonseca @@ -154,3 +134,13 @@ handle: KevinTanswell initials: KT url: https://lightform.org.uk/people/kevin-tanswell + +- name: Sakina Rehman + handle: SakinaRehman + initials: SR + url: https://lightform.org.uk/people/sakina-rehman + +- name: Muzamul Nawaz + handle: MuzamulNawaz + initials: MN + url: https://lightform.org.uk/people/muzamul-nawaz diff --git a/_includes/checklist_controls.html b/_includes/checklist_controls.html deleted file mode 100644 index 191b248..0000000 --- a/_includes/checklist_controls.html +++ /dev/null @@ -1,16 +0,0 @@ -{% include get_checklists.html %} -{% include checklist_urls.html %} -{% assign exp_title_clean = include.exp_title_clean %} -{% assign cl_add_redirect_url = "add_metadata_file#" | append: exp_title_clean | prepend: "/" | prepend: site.baseurl | prepend: site.url -%} -{% assign cl_edit_url = github_cl_edit_url | append: exp_title_clean | append: ".yml" -%} -{% assign cl_history_url = github_cl_history_url | append: exp_title_clean | append: ".yml" -%} -{% assign cl_delete_url = github_cl_del_url | append: exp_title_clean | append: ".yml" -%} -{% assign checklist_col = 'add' -%} -{% for cl in checklists -%} - {% if cl == exp_title_clean -%} - {% assign checklist_col = 'edit' -%} - {% assign checklist_col = checklist_col | append: ' | history' -%} - {% assign checklist_col = checklist_col | append: ' | delete' -%} - {% endif -%} -{% endfor -%} -{{ checklist_col }} diff --git a/_includes/checklist_urls.html b/_includes/checklist_urls.html deleted file mode 100644 index 
48def5a..0000000 --- a/_includes/checklist_urls.html +++ /dev/null @@ -1,12 +0,0 @@ -{% assign site_url_split = site.url | split: "https://" %} -{% assign github_url = "https://github.com/" | append: site.github_user_or_organisation | append: site.baseurl %} -{% assign github_cl_end_url = "/_includes/checklists/" %} - - -{% assign github_cl_new_url = github_url | append: "/new/master" | append: github_cl_end_url %} -{% assign github_cl_edit_url = github_url | append: "/edit/master" | append: github_cl_end_url %} -{% assign github_cl_dir_url = github_url | append: "/tree/master" | append: github_cl_end_url %} -{% assign github_cl_del_url = github_url | append: "/delete/master" | append: github_cl_end_url %} -{% assign github_cl_history_url = github_url | append: "/commits/master" | append: github_cl_end_url %} - -{% assign github_exp_dir_url = github_url | append: "/tree/master/collections/_experiments" %} diff --git a/_includes/checklists/dilatometer-hot-compression-tests.yml b/_includes/checklists/dilatometer-hot-compression-tests.yml deleted file mode 100644 index a42cc7d..0000000 --- a/_includes/checklists/dilatometer-hot-compression-tests.yml +++ /dev/null @@ -1,11 +0,0 @@ -Dilatometer Hot Compression: - widget: GroupBox - title: Hot Compression Metadata - layout: FormLayout - children: - introduction: - widget: Label - text: This is where metadata for the dilatometer hot compression test is added. - temperatures: - widget: LineEdit - label: Temperature (°C) \ No newline at end of file diff --git a/_includes/checklists/ebsd.yml b/_includes/checklists/ebsd.yml deleted file mode 100644 index 8b4f0f3..0000000 --- a/_includes/checklists/ebsd.yml +++ /dev/null @@ -1,220 +0,0 @@ -EBSD general: - widget: GroupBox - title: General EBSD metadata - layout: FormLayout - children: - file_warning: - widget: Label - text: All EBSD data should be uploaded as ctf files as this has the correct metadata. - alloy_elements: - widget: ComboBox - editable: True - label: Alloy Elements - values: - - - - Ti - - Al - - V - tooltip: The elements present in the alloy being analysed. - orientation_1: - widget: ComboBox - editable: False - label: Sample Orientation (Z1) - values: - - - - ND (Normal) - - RD (Rolling) - - TD (Transverse) - - CD (Compression) - - R1 (Radial 1) - - R2 (Radial 2) - - FD (Forging) - - DD (Draw) - - ED (Extrusion) - - RD (Radial) - - AD (Axial) - tooltip: The orientation of the sample in the out of plane (Z1) direction. - orientation_2: - widget: ComboBox - editable: False - label: Sample Orientation (Y1) - values: - - - - ND (Normal) - - RD (Rolling) - - TD (Transverse) - - CD (Compression) - - R1 (Radial 1) - - R2 (Radial 2) - - FD (Forging) - - DD (Draw) - - ED (Extrusion) - - RD (Radial) - - AD (Axial) - tooltip: The orientation of the sample in the Up, North (Y1) direction. - orientation_3: - widget: ComboBox - editable: False - label: Sample Orientation (X1) - values: - - - - ND (Normal) - - RD (Rolling) - - TD (Transverse) - - CD (Compression) - - R1 (Radial 1) - - R2 (Radial 2) - - FD (Forging) - - DD (Draw) - - ED (Extrusion) - - RD (Radial) - - AD (Axial) - tooltip: The orientation of the sample in the Side, East (X1) direction. - phase_name: - widget: ComboBox - editable: True - label: Phase Name - values: - - - - Ti Hex - - Ti Beta - - Ti Cubic - - Al Cubic - tooltip: The phases present in the EBSD map. 
- crystal_system: - widget: ComboBox - values: - - - - Hexagonal - - Cubic - editable: False - label: Crystal System - tooltip: The type of crystal system present in the sample. - space_group: - widget: LineEdit - label: Crystal Space Group - tooltip: The crystal space group being analysed. - laue_group: - widget: LineEdit - label: Crystal Laue Group - tooltip: The crystal laue group being analysed. - reflectors: - widget: LineEdit - label: Reflectors - default: 40 - tooltip: The number of reflectors selected. - material: - widget: GroupBox - title: Material Description - layout: FormLayout - tooltip: A description of the material analysed. Consider adding information about the type of material and the forming process. - children: - material_description: - widget: PlainTextEdit - sample_processing: - widget: LineEdit - label: Sample Processing Route - tooltip: Sample processing method e.g. Rolling, Cast, Uniaxial Compression, Plane Strain, Tension, Forging - tilt_angle: - widget: LineEdit - label: Tilt angle (°) - default: 70 - tooltip: The tilt angle of the sample. - working_distance: - widget: LineEdit - label: Working Distance (mm) - tooltip: The working distance between the beam source and the sample. - magnification: - widget: LineEdit - label: Magnification - tooltip: The magnification of the imaging lens. - beam_energy: - widget: LineEdit - label: Beam Energy (kV) - tooltip: The energy of the probing electron beam. - step_size: - widget: LineEdit - label: Step Size (µm) - tooltip: The step size between analysis points. - exposure_time: - widget: LineEdit - label: Exposure Time (ms) - tooltip: The length of time the electron beam fires at each point. - overlap: - widget: LineEdit - label: Overlap of stitched maps (%) - tooltip: If multiple maps are stitched together to form a larger map, what is the stitching overlap. 
- sub_exepriments: - TESCAN: - children: - camera: - widget: ComboBox - label: Camera - values: - - Oxford Symmetry - insertion_distance: - widget: ComboBox - label: Camera insertion distance (mm) - values: - - 178 - beam_intensity: - widget: LineEdit - label: Beam intensity - default: 20 - spot_size: - widget: LineEdit - label: Spot size (mm) - default: 0.1 - scan_mode: - widget: ComboBox - label: Scan Mode - values: - - Resolution - - Depth - - Field - - Wide Field - - Channeling - camera_mode: - widget: ComboBox - label: Camera Mode - values: - - Resolution - - Sensitivity - - Speed 1 - - Speed 2 - FEI Sirion: - children: - camera: - widget: ComboBox - label: Camera - values: - - Oxford NordlysNano - insertion_distance: - widget: ComboBox - label: Camera insertion distance (mm) - values: - - 168.8 - spot_size: - widget: Slider - label: Spot size (mm) - default: 4 - min: 1 - max: 7 - step_size: 1 - binning_mode: - widget: ComboBox - label: Binning Mode - values: - - 1x1 - - 2x2 - - 4x4 - - 8x8 - - 8x16 - default: 4x4 - gain: - widget: ComboBox - label: Gain - values: - - Low - - High - default: Low \ No newline at end of file diff --git a/_includes/checklists/tem.yml b/_includes/checklists/tem.yml deleted file mode 100644 index 1adab98..0000000 --- a/_includes/checklists/tem.yml +++ /dev/null @@ -1,87 +0,0 @@ -tem: - widget: GroupBox - title: TEM metadata - layout: FormLayout - _name: tem - children: - accelerating_voltage: - widget: LineEdit - label: voltage (kV) - default: 200 - tooltip: The beam acceleration voltage - probe_current: - widget: LineEdit - label: Probe Current (nA) - default: 1 - magnification: - widget: LineEdit - label: Magnification - default: 5.5 - microscope: - widget: ComboBox - label: Microscope - values: - - FEI Titan G2 80-200 - orientation_1: - widget: ComboBox - editable: False - label: Sample Orientation (Z1) - values: - - - - ND (Normal) - - RD (Rolling) - - TD (Transverse) - - CD (Compression) - - R1 (Radial 1) - - R2 (Radial 2) - - FD (Forging) - - DD (Draw) - - ED (Extrusion) - - RD (Radial) - - AD (Axial) - tooltip: The orientation of the sample in the out of plane (Z1) direction. - orientation_2: - widget: ComboBox - editable: False - label: Sample Orientation (Y1) - values: - - - - ND (Normal) - - RD (Rolling) - - TD (Transverse) - - CD (Compression) - - R1 (Radial 1) - - R2 (Radial 2) - - FD (Forging) - - DD (Draw) - - ED (Extrusion) - - RD (Radial) - - AD (Axial) - tooltip: The orientation of the sample in the Up, North (Y1) direction. - orientation_3: - widget: ComboBox - editable: False - label: Sample Orientation (X1) - values: - - - - ND (Normal) - - RD (Rolling) - - TD (Transverse) - - CD (Compression) - - R1 (Radial 1) - - R2 (Radial 2) - - FD (Forging) - - DD (Draw) - - ED (Extrusion) - - RD (Radial) - - AD (Axial) - tooltip: The orientation of the sample in the Side, East (X1) direction. - - material: - widget: GroupBox - title: Material Description - layout: FormLayout - tooltip: A description of the material analysed. Consider adding information about the type of material and the forming process. 
- children: - material_description: - widget: PlainTextEdit \ No newline at end of file diff --git a/_includes/get_checklists.html b/_includes/get_checklists.html deleted file mode 100644 index 14de0a9..0000000 --- a/_includes/get_checklists.html +++ /dev/null @@ -1,13 +0,0 @@ -{% assign checklists = "" %} -{% for i in site.static_files %} - {% assign inc_url_split = i.path | split: "/_includes/" %} - {% assign inc_url = inc_url_split[1] %} - {% assign inc_dir_split = inc_url | split: "/" %} - {% assign inc_dir = inc_dir_split[0] -%} - {% if inc_dir == "checklists" %} - {% assign cl_name = i.name | split: ".yml" %} - {% assign checklists = checklists | append: cl_name[0] | append: "|" %} - {% endif %} -{% endfor %} - -{% assign checklists = checklists | split: "|" %} diff --git a/_includes/get_collection_pages.html b/_includes/get_collection_pages.html index f79142d..bb5070a 100644 --- a/_includes/get_collection_pages.html +++ b/_includes/get_collection_pages.html @@ -1,62 +1,78 @@ {% assign page = include.page %} {% for collec in site.collections %} - {% if collec.label == page.collection %} + {% if collec.label == page.collection %} {% if collec.index_list_pages %} - {% endif %} {% endif %} {% endfor %} diff --git a/_includes/head.html b/_includes/head.html index 5132544..b17563a 100644 --- a/_includes/head.html +++ b/_includes/head.html @@ -7,14 +7,14 @@ - - - + + +
+ + \ No newline at end of file diff --git a/_includes/plotly_figures/resource_use.html b/_includes/plotly_figures/resource_use.html new file mode 100644 index 0000000..f0ab244 --- /dev/null +++ b/_includes/plotly_figures/resource_use.html @@ -0,0 +1,31 @@ + + + +
+ + + +
+ +
+ + \ No newline at end of file diff --git a/_includes/ppt_templates/2021.03.05_matflow_intro.pptx b/_includes/ppt_templates/2021.03.05_matflow_intro.pptx new file mode 100644 index 0000000..ef9466f Binary files /dev/null and b/_includes/ppt_templates/2021.03.05_matflow_intro.pptx differ diff --git a/_includes/ppt_templates/2021.12.01_some_matflow_problems.pptx b/_includes/ppt_templates/2021.12.01_some_matflow_problems.pptx new file mode 100644 index 0000000..6c1202e Binary files /dev/null and b/_includes/ppt_templates/2021.12.01_some_matflow_problems.pptx differ diff --git a/_includes/ppt_templates/DAMASK_v3a3_workflow_changes.pdf b/_includes/ppt_templates/DAMASK_v3a3_workflow_changes.pdf new file mode 100644 index 0000000..4687587 Binary files /dev/null and b/_includes/ppt_templates/DAMASK_v3a3_workflow_changes.pdf differ diff --git a/_includes/ppt_templates/LF_research_showcase_Oct_2020.pptx b/_includes/ppt_templates/LF_research_showcase_Oct_2020.pptx new file mode 100644 index 0000000..a71efb5 Binary files /dev/null and b/_includes/ppt_templates/LF_research_showcase_Oct_2020.pptx differ diff --git a/_includes/ppt_templates/MTEX_crystal_plasticity_March_2021.pptx b/_includes/ppt_templates/MTEX_crystal_plasticity_March_2021.pptx new file mode 100644 index 0000000..2c223e8 Binary files /dev/null and b/_includes/ppt_templates/MTEX_crystal_plasticity_March_2021.pptx differ diff --git a/_includes/ppt_templates/TiFUN_2021.09.09_adam_plowman.pptx b/_includes/ppt_templates/TiFUN_2021.09.09_adam_plowman.pptx new file mode 100644 index 0000000..6da2a8e Binary files /dev/null and b/_includes/ppt_templates/TiFUN_2021.09.09_adam_plowman.pptx differ diff --git a/_includes/ppt_templates/TiFUN_2021.12.07_adam_plowman.pptx b/_includes/ppt_templates/TiFUN_2021.12.07_adam_plowman.pptx new file mode 100644 index 0000000..1410a0e Binary files /dev/null and b/_includes/ppt_templates/TiFUN_2021.12.07_adam_plowman.pptx differ diff --git a/_includes/ppt_templates/TiFUN_2022.04.07_adam_plowman.pptx b/_includes/ppt_templates/TiFUN_2022.04.07_adam_plowman.pptx new file mode 100644 index 0000000..6e21aa7 Binary files /dev/null and b/_includes/ppt_templates/TiFUN_2022.04.07_adam_plowman.pptx differ diff --git a/_layouts/experiment.md b/_layouts/experiment.md index 2f8635a..f01786d 100644 --- a/_layouts/experiment.md +++ b/_layouts/experiment.md @@ -19,34 +19,45 @@ layout: post {% else %} - - {% endif %} - - - Tutorials - - {% if page.tutorials %} - - {% else %} - - {% endif %} - + + + Tutorials + + {% if page.tutorials %} + + {% else %} + - + {% endif %} + + + + Metadata templates + + {% if page.metadata_templates %} + + {% else %} + - + {% endif %} +
{{ content }} -{{ "## Metadata template" | markdownify }} -{% assign exp_title_clean = page.title | replace: " ", "-" | downcase | replace: "(", "_" | replace: ")", "_" -%} -{% include checklist_controls.html exp_title_clean=exp_title_clean %} -{% include get_checklist_formatted.html exp_title_clean=exp_title_clean %} diff --git a/assets/images/Crystal_cleaning_tutorial_1.png b/assets/images/Crystal_cleaning_tutorial_1.png new file mode 100644 index 0000000..3af323d Binary files /dev/null and b/assets/images/Crystal_cleaning_tutorial_1.png differ diff --git a/assets/images/Crystal_cleaning_tutorial_2.png b/assets/images/Crystal_cleaning_tutorial_2.png new file mode 100644 index 0000000..1edfddf Binary files /dev/null and b/assets/images/Crystal_cleaning_tutorial_2.png differ diff --git a/assets/images/Crystal_cleaning_tutorial_3.png b/assets/images/Crystal_cleaning_tutorial_3.png new file mode 100644 index 0000000..90c3ab3 Binary files /dev/null and b/assets/images/Crystal_cleaning_tutorial_3.png differ diff --git a/assets/images/Crystal_cleaning_tutorial_4.png b/assets/images/Crystal_cleaning_tutorial_4.png new file mode 100644 index 0000000..c908727 Binary files /dev/null and b/assets/images/Crystal_cleaning_tutorial_4.png differ diff --git a/assets/images/Crystal_cleaning_tutorial_5.png b/assets/images/Crystal_cleaning_tutorial_5.png new file mode 100644 index 0000000..9229b70 Binary files /dev/null and b/assets/images/Crystal_cleaning_tutorial_5.png differ diff --git a/assets/images/Crystal_cleaning_tutorial_6.png b/assets/images/Crystal_cleaning_tutorial_6.png new file mode 100644 index 0000000..332f17a Binary files /dev/null and b/assets/images/Crystal_cleaning_tutorial_6.png differ diff --git a/assets/images/Crystal_cleaning_tutorial_7.png b/assets/images/Crystal_cleaning_tutorial_7.png new file mode 100644 index 0000000..c3a4c98 Binary files /dev/null and b/assets/images/Crystal_cleaning_tutorial_7.png differ diff --git a/assets/images/Dioptas_calibration_1.png b/assets/images/Dioptas_calibration_1.png new file mode 100644 index 0000000..55f604f Binary files /dev/null and b/assets/images/Dioptas_calibration_1.png differ diff --git a/assets/images/Dioptas_calibration_2.png b/assets/images/Dioptas_calibration_2.png new file mode 100644 index 0000000..633ad61 Binary files /dev/null and b/assets/images/Dioptas_calibration_2.png differ diff --git a/assets/images/EBSD_misindexing_1.png b/assets/images/EBSD_misindexing_1.png new file mode 100644 index 0000000..ca073fa Binary files /dev/null and b/assets/images/EBSD_misindexing_1.png differ diff --git a/assets/images/EBSD_misindexing_2.png b/assets/images/EBSD_misindexing_2.png new file mode 100644 index 0000000..da4c13f Binary files /dev/null and b/assets/images/EBSD_misindexing_2.png differ diff --git a/assets/images/EBSD_misindexing_3.png b/assets/images/EBSD_misindexing_3.png new file mode 100644 index 0000000..b7619ba Binary files /dev/null and b/assets/images/EBSD_misindexing_3.png differ diff --git a/assets/images/Slip System IPF.png b/assets/images/Slip System IPF.png new file mode 100644 index 0000000..8924f97 Binary files /dev/null and b/assets/images/Slip System IPF.png differ diff --git a/assets/images/posts/20210607_155223.jpg b/assets/images/posts/20210607_155223.jpg new file mode 100644 index 0000000..8ebccbf Binary files /dev/null and b/assets/images/posts/20210607_155223.jpg differ diff --git a/assets/images/posts/B13coater.jpeg b/assets/images/posts/B13coater.jpeg new file mode 100644 index 0000000..620fab9 Binary files 
/dev/null and b/assets/images/posts/B13coater.jpeg differ diff --git a/assets/images/posts/EMCcoater.jpeg b/assets/images/posts/EMCcoater.jpeg new file mode 100644 index 0000000..6773f05 Binary files /dev/null and b/assets/images/posts/EMCcoater.jpeg differ diff --git a/assets/images/posts/Insert sample into gleeble hydrawedge.jpg b/assets/images/posts/Insert sample into gleeble hydrawedge.jpg new file mode 100644 index 0000000..d9926d9 Binary files /dev/null and b/assets/images/posts/Insert sample into gleeble hydrawedge.jpg differ diff --git a/assets/images/posts/Measurement of thickness.jpg b/assets/images/posts/Measurement of thickness.jpg new file mode 100644 index 0000000..5ab163a Binary files /dev/null and b/assets/images/posts/Measurement of thickness.jpg differ diff --git a/assets/images/posts/Measurement of width.jpg b/assets/images/posts/Measurement of width.jpg new file mode 100644 index 0000000..d466c3b Binary files /dev/null and b/assets/images/posts/Measurement of width.jpg differ diff --git a/assets/images/posts/Picture1.jpg b/assets/images/posts/Picture1.jpg new file mode 100644 index 0000000..4110c1e Binary files /dev/null and b/assets/images/posts/Picture1.jpg differ diff --git a/assets/images/posts/Picture10.png b/assets/images/posts/Picture10.png new file mode 100644 index 0000000..36e88b1 Binary files /dev/null and b/assets/images/posts/Picture10.png differ diff --git a/assets/images/posts/Picture11.png b/assets/images/posts/Picture11.png new file mode 100644 index 0000000..d8845f1 Binary files /dev/null and b/assets/images/posts/Picture11.png differ diff --git a/assets/images/posts/Picture12.png b/assets/images/posts/Picture12.png new file mode 100644 index 0000000..dea033d Binary files /dev/null and b/assets/images/posts/Picture12.png differ diff --git a/assets/images/posts/Picture13.png b/assets/images/posts/Picture13.png new file mode 100644 index 0000000..45e71c4 Binary files /dev/null and b/assets/images/posts/Picture13.png differ diff --git a/assets/images/posts/Picture14.png b/assets/images/posts/Picture14.png new file mode 100644 index 0000000..77437ec Binary files /dev/null and b/assets/images/posts/Picture14.png differ diff --git a/assets/images/posts/Picture15.png b/assets/images/posts/Picture15.png new file mode 100644 index 0000000..07fe0b3 Binary files /dev/null and b/assets/images/posts/Picture15.png differ diff --git a/assets/images/posts/Picture16.png b/assets/images/posts/Picture16.png new file mode 100644 index 0000000..c45962d Binary files /dev/null and b/assets/images/posts/Picture16.png differ diff --git a/assets/images/posts/Picture17.png b/assets/images/posts/Picture17.png new file mode 100644 index 0000000..cc5316b Binary files /dev/null and b/assets/images/posts/Picture17.png differ diff --git a/assets/images/posts/Picture2.jpg b/assets/images/posts/Picture2.jpg new file mode 100644 index 0000000..5024cfc Binary files /dev/null and b/assets/images/posts/Picture2.jpg differ diff --git a/assets/images/posts/Picture2.png b/assets/images/posts/Picture2.png new file mode 100644 index 0000000..1b85ecc Binary files /dev/null and b/assets/images/posts/Picture2.png differ diff --git a/assets/images/posts/Picture3.jpg b/assets/images/posts/Picture3.jpg new file mode 100644 index 0000000..d771812 Binary files /dev/null and b/assets/images/posts/Picture3.jpg differ diff --git a/assets/images/posts/Picture4.jpg b/assets/images/posts/Picture4.jpg new file mode 100644 index 0000000..6cd315a Binary files /dev/null and b/assets/images/posts/Picture4.jpg 
differ diff --git a/assets/images/posts/Picture4.png b/assets/images/posts/Picture4.png new file mode 100644 index 0000000..a68fe37 Binary files /dev/null and b/assets/images/posts/Picture4.png differ diff --git a/assets/images/posts/Picture5.jpg b/assets/images/posts/Picture5.jpg new file mode 100644 index 0000000..ef6caac Binary files /dev/null and b/assets/images/posts/Picture5.jpg differ diff --git a/assets/images/posts/Picture5.png b/assets/images/posts/Picture5.png new file mode 100644 index 0000000..16f3778 Binary files /dev/null and b/assets/images/posts/Picture5.png differ diff --git a/assets/images/posts/Picture6.png b/assets/images/posts/Picture6.png new file mode 100644 index 0000000..3265092 Binary files /dev/null and b/assets/images/posts/Picture6.png differ diff --git a/assets/images/posts/Picture7.png b/assets/images/posts/Picture7.png new file mode 100644 index 0000000..f4b2a14 Binary files /dev/null and b/assets/images/posts/Picture7.png differ diff --git a/assets/images/posts/Picture8.png b/assets/images/posts/Picture8.png new file mode 100644 index 0000000..fe277b0 Binary files /dev/null and b/assets/images/posts/Picture8.png differ diff --git a/assets/images/posts/Picture9.png b/assets/images/posts/Picture9.png new file mode 100644 index 0000000..f99a5c9 Binary files /dev/null and b/assets/images/posts/Picture9.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig1.png b/assets/images/posts/Quenching_Dil_Fig1.png new file mode 100644 index 0000000..cd4cdd2 Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig1.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig10.png b/assets/images/posts/Quenching_Dil_Fig10.png new file mode 100644 index 0000000..33bd03b Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig10.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig11.png b/assets/images/posts/Quenching_Dil_Fig11.png new file mode 100644 index 0000000..7b4d6e2 Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig11.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig12.png b/assets/images/posts/Quenching_Dil_Fig12.png new file mode 100644 index 0000000..7d2cb0c Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig12.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig13.png b/assets/images/posts/Quenching_Dil_Fig13.png new file mode 100644 index 0000000..e22e62d Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig13.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig14.png b/assets/images/posts/Quenching_Dil_Fig14.png new file mode 100644 index 0000000..6b2223e Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig14.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig2.png b/assets/images/posts/Quenching_Dil_Fig2.png new file mode 100644 index 0000000..af9f041 Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig2.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig3.png b/assets/images/posts/Quenching_Dil_Fig3.png new file mode 100644 index 0000000..2765aa0 Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig3.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig4.png b/assets/images/posts/Quenching_Dil_Fig4.png new file mode 100644 index 0000000..a44a09d Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig4.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig5.png b/assets/images/posts/Quenching_Dil_Fig5.png new file mode 100644 index 0000000..607906c Binary files 
/dev/null and b/assets/images/posts/Quenching_Dil_Fig5.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig6.png b/assets/images/posts/Quenching_Dil_Fig6.png new file mode 100644 index 0000000..61c31d9 Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig6.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig7.png b/assets/images/posts/Quenching_Dil_Fig7.png new file mode 100644 index 0000000..0614ec4 Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig7.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig8.png b/assets/images/posts/Quenching_Dil_Fig8.png new file mode 100644 index 0000000..781186f Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig8.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig9.png b/assets/images/posts/Quenching_Dil_Fig9.png new file mode 100644 index 0000000..8d88a40 Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig9.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig_A1.png b/assets/images/posts/Quenching_Dil_Fig_A1.png new file mode 100644 index 0000000..6f65b6b Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig_A1.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig_A2.png b/assets/images/posts/Quenching_Dil_Fig_A2.png new file mode 100644 index 0000000..71389a2 Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig_A2.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig_A3.png b/assets/images/posts/Quenching_Dil_Fig_A3.png new file mode 100644 index 0000000..1b80b68 Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig_A3.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig_A4.png b/assets/images/posts/Quenching_Dil_Fig_A4.png new file mode 100644 index 0000000..4d159db Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig_A4.png differ diff --git a/assets/images/posts/Quenching_Dil_Fig_A5.png b/assets/images/posts/Quenching_Dil_Fig_A5.png new file mode 100644 index 0000000..4501894 Binary files /dev/null and b/assets/images/posts/Quenching_Dil_Fig_A5.png differ diff --git a/assets/images/posts/Slip System IPF.png b/assets/images/posts/Slip System IPF.png new file mode 100644 index 0000000..8924f97 Binary files /dev/null and b/assets/images/posts/Slip System IPF.png differ diff --git a/assets/images/posts/Zenn_Picture1.jpg b/assets/images/posts/Zenn_Picture1.jpg new file mode 100644 index 0000000..4110c1e Binary files /dev/null and b/assets/images/posts/Zenn_Picture1.jpg differ diff --git a/assets/images/posts/Zenn_Picture10.png b/assets/images/posts/Zenn_Picture10.png new file mode 100644 index 0000000..36e88b1 Binary files /dev/null and b/assets/images/posts/Zenn_Picture10.png differ diff --git a/assets/images/posts/Zenn_Picture11.png b/assets/images/posts/Zenn_Picture11.png new file mode 100644 index 0000000..d8845f1 Binary files /dev/null and b/assets/images/posts/Zenn_Picture11.png differ diff --git a/assets/images/posts/Zenn_Picture12.png b/assets/images/posts/Zenn_Picture12.png new file mode 100644 index 0000000..dea033d Binary files /dev/null and b/assets/images/posts/Zenn_Picture12.png differ diff --git a/assets/images/posts/Zenn_Picture13.png b/assets/images/posts/Zenn_Picture13.png new file mode 100644 index 0000000..45e71c4 Binary files /dev/null and b/assets/images/posts/Zenn_Picture13.png differ diff --git a/assets/images/posts/Zenn_Picture14.png b/assets/images/posts/Zenn_Picture14.png new file mode 100644 index 0000000..77437ec Binary files /dev/null and 
b/assets/images/posts/Zenn_Picture14.png differ diff --git a/assets/images/posts/Zenn_Picture15.png b/assets/images/posts/Zenn_Picture15.png new file mode 100644 index 0000000..07fe0b3 Binary files /dev/null and b/assets/images/posts/Zenn_Picture15.png differ diff --git a/assets/images/posts/Zenn_Picture16.png b/assets/images/posts/Zenn_Picture16.png new file mode 100644 index 0000000..c45962d Binary files /dev/null and b/assets/images/posts/Zenn_Picture16.png differ diff --git a/assets/images/posts/Zenn_Picture17.png b/assets/images/posts/Zenn_Picture17.png new file mode 100644 index 0000000..cc5316b Binary files /dev/null and b/assets/images/posts/Zenn_Picture17.png differ diff --git a/assets/images/posts/Zenn_Picture2.png b/assets/images/posts/Zenn_Picture2.png new file mode 100644 index 0000000..1b85ecc Binary files /dev/null and b/assets/images/posts/Zenn_Picture2.png differ diff --git a/assets/images/posts/Zenn_Picture3.jpg b/assets/images/posts/Zenn_Picture3.jpg new file mode 100644 index 0000000..d771812 Binary files /dev/null and b/assets/images/posts/Zenn_Picture3.jpg differ diff --git a/assets/images/posts/Zenn_Picture4.png b/assets/images/posts/Zenn_Picture4.png new file mode 100644 index 0000000..a68fe37 Binary files /dev/null and b/assets/images/posts/Zenn_Picture4.png differ diff --git a/assets/images/posts/Zenn_Picture5.png b/assets/images/posts/Zenn_Picture5.png new file mode 100644 index 0000000..16f3778 Binary files /dev/null and b/assets/images/posts/Zenn_Picture5.png differ diff --git a/assets/images/posts/Zenn_Picture6.png b/assets/images/posts/Zenn_Picture6.png new file mode 100644 index 0000000..3265092 Binary files /dev/null and b/assets/images/posts/Zenn_Picture6.png differ diff --git a/assets/images/posts/Zenn_Picture7.png b/assets/images/posts/Zenn_Picture7.png new file mode 100644 index 0000000..f4b2a14 Binary files /dev/null and b/assets/images/posts/Zenn_Picture7.png differ diff --git a/assets/images/posts/Zenn_Picture8.png b/assets/images/posts/Zenn_Picture8.png new file mode 100644 index 0000000..fe277b0 Binary files /dev/null and b/assets/images/posts/Zenn_Picture8.png differ diff --git a/assets/images/posts/Zenn_Picture9.png b/assets/images/posts/Zenn_Picture9.png new file mode 100644 index 0000000..f99a5c9 Binary files /dev/null and b/assets/images/posts/Zenn_Picture9.png differ diff --git a/assets/images/posts/blog/ti_cp_pf/C_Maha_ani.gif b/assets/images/posts/blog/ti_cp_pf/C_Maha_ani.gif new file mode 100644 index 0000000..e5343c2 Binary files /dev/null and b/assets/images/posts/blog/ti_cp_pf/C_Maha_ani.gif differ diff --git a/assets/images/posts/blog/ti_cp_pf/MTEX_seg_incorrect_align.png b/assets/images/posts/blog/ti_cp_pf/MTEX_seg_incorrect_align.png new file mode 100644 index 0000000..ca938c8 Binary files /dev/null and b/assets/images/posts/blog/ti_cp_pf/MTEX_seg_incorrect_align.png differ diff --git a/assets/images/posts/blog/ti_cp_pf/dual_phase_Ti_RVE_128_grid_deformed_hard.png b/assets/images/posts/blog/ti_cp_pf/dual_phase_Ti_RVE_128_grid_deformed_hard.png new file mode 100644 index 0000000..65a9abb Binary files /dev/null and b/assets/images/posts/blog/ti_cp_pf/dual_phase_Ti_RVE_128_grid_deformed_hard.png differ diff --git a/assets/images/posts/blog/ti_cp_pf/dual_phase_Ti_RVE_128_grid_dream3D_clustered_3_degs_hard.png b/assets/images/posts/blog/ti_cp_pf/dual_phase_Ti_RVE_128_grid_dream3D_clustered_3_degs_hard.png new file mode 100644 index 0000000..9dcceae Binary files /dev/null and 
b/assets/images/posts/blog/ti_cp_pf/dual_phase_Ti_RVE_128_grid_dream3D_clustered_3_degs_hard.png differ diff --git a/assets/images/posts/blog/ti_cp_pf/dual_phase_Ti_RVE_32.png b/assets/images/posts/blog/ti_cp_pf/dual_phase_Ti_RVE_32.png new file mode 100644 index 0000000..fed00ef Binary files /dev/null and b/assets/images/posts/blog/ti_cp_pf/dual_phase_Ti_RVE_32.png differ diff --git a/assets/images/posts/blog/ti_cp_pf/periodic_vs_non_periodic.png b/assets/images/posts/blog/ti_cp_pf/periodic_vs_non_periodic.png new file mode 100644 index 0000000..29431de Binary files /dev/null and b/assets/images/posts/blog/ti_cp_pf/periodic_vs_non_periodic.png differ diff --git a/assets/images/posts/blog/ti_cp_pf/resource_req_RVEs.png b/assets/images/posts/blog/ti_cp_pf/resource_req_RVEs.png new file mode 100644 index 0000000..3286537 Binary files /dev/null and b/assets/images/posts/blog/ti_cp_pf/resource_req_RVEs.png differ diff --git a/assets/images/posts/blog/ti_cp_pf/smooth_ani.gif b/assets/images/posts/blog/ti_cp_pf/smooth_ani.gif new file mode 100644 index 0000000..ba52d37 Binary files /dev/null and b/assets/images/posts/blog/ti_cp_pf/smooth_ani.gif differ diff --git a/assets/images/posts/github_clone.png b/assets/images/posts/github_clone.png new file mode 100644 index 0000000..0bf2397 Binary files /dev/null and b/assets/images/posts/github_clone.png differ diff --git a/assets/images/posts/hard_vs_soft_plastic_strain_vM_32.png b/assets/images/posts/hard_vs_soft_plastic_strain_vM_32.png new file mode 100644 index 0000000..dc132e0 Binary files /dev/null and b/assets/images/posts/hard_vs_soft_plastic_strain_vM_32.png differ diff --git a/assets/images/posts/hrdicpattern.png b/assets/images/posts/hrdicpattern.png new file mode 100644 index 0000000..2621021 Binary files /dev/null and b/assets/images/posts/hrdicpattern.png differ diff --git a/assets/images/posts/particleRVE_camera_ani.gif b/assets/images/posts/particleRVE_camera_ani.gif new file mode 100644 index 0000000..5a6741d Binary files /dev/null and b/assets/images/posts/particleRVE_camera_ani.gif differ diff --git a/assets/images/posts/prep_Picture1.jpg b/assets/images/posts/prep_Picture1.jpg new file mode 100644 index 0000000..b8d445d Binary files /dev/null and b/assets/images/posts/prep_Picture1.jpg differ diff --git a/assets/images/posts/prep_Picture2.jpg b/assets/images/posts/prep_Picture2.jpg new file mode 100644 index 0000000..5024cfc Binary files /dev/null and b/assets/images/posts/prep_Picture2.jpg differ diff --git a/assets/images/posts/prep_Picture3.jpg b/assets/images/posts/prep_Picture3.jpg new file mode 100644 index 0000000..d26d016 Binary files /dev/null and b/assets/images/posts/prep_Picture3.jpg differ diff --git a/assets/images/posts/prep_Picture4.jpg b/assets/images/posts/prep_Picture4.jpg new file mode 100644 index 0000000..6cd315a Binary files /dev/null and b/assets/images/posts/prep_Picture4.jpg differ diff --git a/assets/images/posts/prep_Picture5.jpg b/assets/images/posts/prep_Picture5.jpg new file mode 100644 index 0000000..ef6caac Binary files /dev/null and b/assets/images/posts/prep_Picture5.jpg differ diff --git a/assets/images/posts/prep_Picture6.jpg b/assets/images/posts/prep_Picture6.jpg new file mode 100644 index 0000000..8ebccbf Binary files /dev/null and b/assets/images/posts/prep_Picture6.jpg differ diff --git a/assets/images/posts/schematicofstyreneremodelling.png b/assets/images/posts/schematicofstyreneremodelling.png new file mode 100644 index 0000000..1f81514 Binary files /dev/null and 
b/assets/images/posts/schematicofstyreneremodelling.png differ diff --git a/assets/images/posts/schematicofwaterremodelling.png b/assets/images/posts/schematicofwaterremodelling.png new file mode 100644 index 0000000..4bd2922 Binary files /dev/null and b/assets/images/posts/schematicofwaterremodelling.png differ diff --git a/assets/images/posts/styreneremodelling.png b/assets/images/posts/styreneremodelling.png new file mode 100644 index 0000000..9a8275a Binary files /dev/null and b/assets/images/posts/styreneremodelling.png differ diff --git a/assets/images/posts/tc_example.png b/assets/images/posts/tc_example.png new file mode 100644 index 0000000..edc5582 Binary files /dev/null and b/assets/images/posts/tc_example.png differ diff --git a/assets/images/posts/waterremodelling.png b/assets/images/posts/waterremodelling.png new file mode 100644 index 0000000..e008db3 Binary files /dev/null and b/assets/images/posts/waterremodelling.png differ diff --git a/assets/images/site/CLARI_logo.png b/assets/images/site/CLARI_logo.png new file mode 100644 index 0000000..33524cc Binary files /dev/null and b/assets/images/site/CLARI_logo.png differ diff --git a/assets/images/texture_variation_FE_results.png b/assets/images/texture_variation_FE_results.png new file mode 100644 index 0000000..d97161f Binary files /dev/null and b/assets/images/texture_variation_FE_results.png differ diff --git a/collections/_blog/index.md b/collections/_blog/index.md new file mode 100644 index 0000000..0705cec --- /dev/null +++ b/collections/_blog/index.md @@ -0,0 +1,9 @@ +--- +layout: collection_home +title: Blog +show_breadcrumbs: false +show_meta: false +published: true +--- + +The Blog is for writing about work-in-progress, or anything else that has some natural chronology to it! Topics are grouped into sub-categories below. diff --git a/collections/_blog/ti_CP_PF_2021.07.md b/collections/_blog/ti_CP_PF_2021.07.md new file mode 100644 index 0000000..ae3eeab --- /dev/null +++ b/collections/_blog/ti_CP_PF_2021.07.md @@ -0,0 +1,133 @@ +--- +title: "Titanium CP/PF modelling - 2021 July" +author: Adam Plowman +tags: + - titanium + - crystal-plasticity + - phase-field + - dual-phase +published: true +subcollection: Modelling of dual-phase titanium under hot rolling conditions +order: 2 +toc: true +--- + +*See the [project overview](/wiki/blog/ti-cp-pf-overview) for an outline. This page was last updated: July 2021* + +| [Next month's update (August)](/wiki/blog/ti-cp-pf-2021-08) 🡺 | + +Our initial work focussed on building a dual-phase titanium representative volume element that could be deformed in both "hard" and "soft" loading directions using a crystal plasticity package, DAMASK[^4]. We will later use the results from these types of simulations to build a new phase map over a subset of the deformed RVE, which can then be fed into a phase-field simulation code to examine, for example, grain growth. + +## Methodology + +### RVE construction + +We constructed a representative volume element (RVE) of dual-phase titanium composed of a small $\alpha$-phase colony within a $\beta$-phase matrix. Morphologically, the $\alpha$ colony was comprised of three stretched ellipsoids (axis ratio of 2:1:0.4) of the same size positioned semi-randomly (but fixed for all simulations unless otherwise noted) within the $\beta$ matrix, such that individual $\alpha$ laths do not overlap. All $\alpha$ laths were modelled to have the same crystal orientation. 
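As a rough illustration of this kind of voxelised RVE construction, the sketch below places axis-aligned ellipsoidal laths into a cubic grid using plain NumPy. This is a minimal sketch only: the grid size and the 2:1:0.4 axis ratio follow the description above, but the helper name `ellipsoid_mask`, the lath scale and the lath centres are illustrative, and the actual generation code additionally enforces the non-overlap condition.

```python
import numpy as np

def ellipsoid_mask(grid, centre, semi_axes):
    """Boolean mask of the voxels lying inside an axis-aligned ellipsoid.

    `centre` and `semi_axes` are given in fractional (0-1) coordinates of the RVE.
    """
    x, y, z = np.meshgrid(*(np.arange(n) / n for n in grid), indexing="ij")
    return (((x - centre[0]) / semi_axes[0]) ** 2
            + ((y - centre[1]) / semi_axes[1]) ** 2
            + ((z - centre[2]) / semi_axes[2]) ** 2) <= 1.0

grid = (32, 32, 32)                          # material points per direction
semi_axes = np.array([2.0, 1.0, 0.4]) * 0.1  # 2:1:0.4 axis ratio, long axis along x
phase = np.zeros(grid, dtype=int)            # 0 = beta matrix, 1 = alpha lath

# Illustrative (not the actual) lath centres, chosen here so the laths do not overlap:
for centre in [(0.25, 0.30, 0.30), (0.50, 0.60, 0.55), (0.75, 0.35, 0.75)]:
    phase[ellipsoid_mask(grid, centre, semi_axes)] = 1
```

The resulting voxel phase map, together with a per-phase orientation assignment, is then the kind of input a grid-based CP solver consumes.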
The specific orientations of the $\alpha$ and $\beta$ phases were chosen according to the following considerations:
+
+1. The Burgers orientation relationship[^3] specifies an empirically known relationship between the orientations of the HCP $\alpha$ lattice and the BCC $\beta$ lattice, and can be expressed in Miller indices as:
+
+   $$
+   \begin{equation}
+   \begin{split}
+   \{ 0001 \}_\alpha & \parallel \{ 110 \}_\beta \\
+   \langle 11\bar{2}0\rangle_\alpha & \parallel \langle 111 \rangle_\beta
+   \end{split}
+   \end{equation}
+   $$
+
+2. The planes of the ellipsoidal $\alpha$ laths in dual-phase $\alpha$/$\beta$ titanium are experimentally observed to preferentially lie on a habit plane given by[^1]:
+
+   $$
+   \begin{equation}
+   ( \bar{1}100 )_\alpha\quad\text{or}\quad( \bar{1}12 )_\beta
+   \end{equation}
+   $$
+
+3. For convenience, it is desirable for the flat plane of the ellipsoidal $\alpha$ laths to be parallel to one of the model Cartesian planes. This simplifies RVE construction, and reduces the cognitive load when interpreting results. We chose to align the long axes of the ellipsoids with the model $x$-direction.
+
+Given these constraints, we started by identifying a rotation that takes a "reference" hexagonal unit cell, aligned with its $a$ and $c$ axes along the model $x$ and $z$ axes respectively (the alignment convention used by DAMASK), to the frame in which the $( \bar{1}100 )_\alpha$ plane lies within the model $xy$ plane. From inspection, we note that such a rotation can be expressed as the following (pre-multiplying) matrix:
+
+$$
+\begin{pmatrix}
+0 & 0 & 1 \\
+\sin{30^\circ} & \cos{30^\circ} & 0 \\
+-\cos{30^\circ} & \sin{30^\circ} & 0
+\end{pmatrix}\text{.}
+$$
+
+The action of this rotation matrix on the column vectors that form the edges of the hexagonal unit cell is shown in the figure below (blue is the reference unit cell and red is the rotated unit cell):
+
+{% include plotly_figures/hexagonal_axes_fig.html %}
+
+We then converted the rotation matrix above into a quaternion by first converting to an axis-angle representation, which is 93.84$^\circ$ about the Cartesian axis (0.25056281, 0.93511313, 0.25056281), and in turn converting this to a quaternion, which is (0.6830127, 0.1830127, 0.6830127, 0.1830127). Formulae for these conversions can be found in Rowenhorst et al.[^5]. This quaternion was then the starting orientation for all three $\alpha$ laths in the RVE. To find the orientation of the $\beta$ matrix in which the $\alpha$ colony is embedded, we employed the DefDAP Python package[^2]; this quaternion is (0.4103, 0.0964, -0.7325, -0.5347).
+
+In our initial simulations, we discretised the model geometry into a relatively small number of material points; the grid dimensions were 32 $\times$ 32 $\times$ 32 (32,768 material points). This RVE is shown below. In this way, we benefited from faster throughput in the initial stages of the work. However, we will later use larger grid discretisations, which will be more capable of accurately resolving the stress and strain fields of the deformed RVEs.
+
+![dual_phase_Ti_RVE_32](/wiki/assets/images/posts/blog/ti_cp_pf/dual_phase_Ti_RVE_32.png)
+*The RVE used in our initial simulations of dual-phase Ti. Visualisation using ParaView.*
+
+### Crystal plasticity model for Ti64
+
+To model plasticity, we employed the phenomenological power law as implemented in DAMASK[^4].
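For reference, the flow rule of this phenomenological model (see Roters et al.[^4] for the full description, including the hardening terms we do not use here) relates the shear rate $\dot{\gamma}^s$ on each slip system $s$ (indexed by $s$ to avoid a clash with the phase labels) to its resolved shear stress $\tau^s$, via a reference shear rate $\dot{\gamma}_0$, a stress exponent $n$, and the slip resistance $\tau_c^s$, which in our case stays fixed at the CRSS values listed in the next subsection:

$$
\begin{equation}
\dot{\gamma}^s = \dot{\gamma}_0 \left| \frac{\tau^s}{\tau_c^s} \right|^{n} \operatorname{sgn}( \tau^s )
\end{equation}
$$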
+
+#### Critical resolved shear stresses
+
+We considered two slip systems for the BCC $\beta$ phase and three slip systems for the HCP $\alpha$ phase. The selected critical resolved shear stresses (CRSSs) are listed below. We did not include any hardening.
+
+| Phase    | Slip plane                     | Slip direction                    | Number | CRSS Ref.[^1] / MPa | CRSS chosen / MPa |
+| -------- | ------------------------------ | --------------------------------- | ------ | ------------------- | ----------------- |
+| $\beta$  | $\\{110\\}$                    | $\langle 111\rangle$              | 12     | 390                 | 390               |
+| $\beta$  | $\\{112\\}$                    | $\langle 111\rangle$              | 12     | -                   | 390               |
+| $\alpha$ | $\\{0001\\}$ "basal"           | $\langle 11\bar{2}0\rangle$ "a"   | 3      | 390                 | 390               |
+| $\alpha$ | $\\{10\bar{1}0\\}$ "prismatic" | $\langle 11\bar{2}0\rangle$ "a"   | 3      | 390                 | 390               |
+| $\alpha$ | $\\{10\bar{1}1\\}$ "pyramidal" | $\langle 11\bar{2}3\rangle$ "c+a" | 12     | 663                 | 663               |
+
+### Loading direction: soft versus hard
+
+We used simple shear along two directions to probe "soft" and "hard" loading directions, with respect to the $\alpha$ phase. Given the chosen CRSS values listed above, a "hard" loading direction is one that favours activation of pyramidal slip over the more easily activated basal and prismatic systems. Considering the aforementioned alignment of the $\alpha$-phase unit cell with respect to our model axes, a "hard" loading direction is thus along the $x$-axis. Conversely, a "soft" loading direction would be along the $y$-axis. Therefore, we performed two simple shear simulations: one along $xz$ (hard) and one along $yz$ (soft). The load cases were specified in terms of the deformation gradient rate tensor, $\bold{\dot{F}}$, given below for the hard and soft loading directions, respectively:
+
+$$
+\begin{align}
+\bold{\dot{F}}_\textrm{hard} &= \begin{pmatrix}
+    0 & 0 & 1.0\mathrm{e}{-3}\\
+    0 & 0 & 0\\
+    0 & 0 & 0
+\end{pmatrix}
+&
+\bold{\dot{F}}_\textrm{soft} &= \begin{pmatrix}
+    0 & 0 & 0\\
+    0 & 0 & 1.0\mathrm{e}{-3}\\
+    0 & 0 & 0
+\end{pmatrix}
+\end{align}
+$$
+
+The simulations were run for 200 seconds (resulting in an effective total strain of 0.2 in both cases).
+
+### Use of MatFlow
+
+We initially developed the RVE generation, the CP simulation input files and the simulation output processing using Python scripts. To improve reproducibility, we will soon be integrating the methodology into a [MatFlow](https://github.com/LightForm-group/matflow) workflow.
+
+## Results
+
+### Qualitative analysis: soft versus hard
+
+The two simulations were performed using DAMASK v3-alpha3. The results are shown qualitatively in the figure below. For the hard loading direction (on the left), there is very little strain visible in the $\alpha$ laths, relative to the surrounding $\beta$ matrix. On the other hand, for the soft loading direction (on the right), the $\alpha$ laths exhibit much more plastic strain than the $\beta$ matrix.
+
+![hard_vs_soft_plastic_strain_vM_32](/wiki/assets/images/posts/hard_vs_soft_plastic_strain_vM_32.png)
+*The deformed RVEs, showing the von Mises equivalent plastic strain fields; "hard" loading direction on the left and "soft" loading direction on the right. Note that the distinct response of the $\alpha$ laths is visible in both cases. Visualisation using ParaView.*
+
+## Next steps
+
+1. Investigate methods of generating a new phase map over a region of interest (perhaps a single $\alpha$ lath), according to the formation of deformation-induced sub-grains.
This smaller-scale volume element can then be used in a phase-field model to study, for example, grain growth. Two approaches can be pursued:
+   a. Partition the final orientation data (over all voxels) into new "grains" using a clustering algorithm.
+   b. Estimate dislocation density
+2. Perform higher-resolution simulations
+
+## References and notes
+
+[^1]: Kasemer et al. (2017), "The Influence of Mechanical Constraints Introduced by β Annealed Microstructures on the Yield Strength and Ductility of Ti-6Al-4V"
+[^2]: Atkinson, Michael D., Thomas, Rhys, Harte, Allan, Crowther, Peter, & Quinta da Fonseca, João. (2021). DefDAP: Deformation Data Analysis in Python - v0.93.2 (v0.93.2). Zenodo. [https://doi.org/10.5281/zenodo.4697260](https://doi.org/10.5281/zenodo.4697260)
+[^3]: Bhattacharyya, D., G. B. Viswanathan, Robb Denkenberger, D. Furrer, and Hamish L. Fraser. ‘The Role of Crystallographic and Geometrical Relationships between α and β Phases in an α/β Titanium Alloy’. Acta Materialia 51, no. 16 (September 2003): 4679–91. [https://doi.org/10.1016/S1359-6454(03)00179-4](https://doi.org/10.1016/S1359-6454(03)00179-4).
+[^4]: Roters, F., M. Diehl, P. Shanthraj, P. Eisenlohr, C. Reuber, S. L. Wong, T. Maiti, et al. ‘DAMASK – The Düsseldorf Advanced Material Simulation Kit for Modeling Multi-Physics Crystal Plasticity, Thermal, and Damage Phenomena from the Single Crystal up to the Component Scale’. Computational Materials Science 158 (15 February 2019): 420–78. [https://doi.org/10.1016/j.commatsci.2018.04.030](https://doi.org/10.1016/j.commatsci.2018.04.030).
+[^5]: Rowenhorst, D., A. D. Rollett, G. S. Rohrer, M. Groeber, M. Jackson, P. J. Konijnenberg, and M. De Graef. ‘Consistent Representations of and Conversions between 3D Rotations’. Modelling and Simulation in Materials Science and Engineering 23, no. 8 (1 December 2015): 083501. [https://doi.org/10.1088/0965-0393/23/8/083501](https://doi.org/10.1088/0965-0393/23/8/083501).
diff --git a/collections/_blog/ti_CP_PF_2021.08.md b/collections/_blog/ti_CP_PF_2021.08.md
new file mode 100644
index 0000000..0ed3105
--- /dev/null
+++ b/collections/_blog/ti_CP_PF_2021.08.md
@@ -0,0 +1,118 @@
+---
+title: "Titanium CP/PF modelling - 2021 August"
+author: Adam Plowman
+tags:
+  - titanium
+  - crystal-plasticity
+  - phase-field
+  - dual-phase
+published: true
+subcollection: Modelling of dual-phase titanium under hot rolling conditions
+order: 3
+toc: true
+---
+
+*See the [project overview](/wiki/blog/ti-cp-pf-overview) for an outline. This page was last updated: September 2021*
+
+| 🡸 [Previous month's update (July)](/wiki/blog/ti-cp-pf-2021-07) | [Next month's update (September)](/wiki/blog/ti-cp-pf-2021-09) 🡺 |
+
+Following on from our preliminary low-resolution crystal plasticity simulations, we then investigated ways to define sub-grains within the deformed RVE.
+
+## Investigating methods of orientation-field clustering
+
+### A naive orientation-clustering approach
+
+Conventional k-means clustering proceeds as follows:
+
+1. Choose some trial cluster centroids
+2. Assign each sample to its nearest cluster (according to, for example, a Euclidean distance metric)
+3. For each centroid, compute the mean position of all the associated samples
+4. Move the trial centroids to coincide with these sample-mean positions
+5. Repeat steps 2 to 4 until the trial centroids no longer move (within some tolerance)
+
+There are some issues with applying this method to the problem of sub-grain segmentation within an RVE.
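For concreteness, the orientation-domain "distance" referred to below (the rotation angle between two unit quaternions) can be sketched in a few lines of NumPy. This is a minimal illustration under stated assumptions (scalar-first quaternion components, crystal symmetry ignored), not the implementation used in this work:

```python
import numpy as np

def misorientation_angle(q1, q2):
    """Rotation angle (radians) between two unit quaternions (scalar-first).

    The abs() accounts for the fact that q and -q represent the same
    orientation; crystal symmetry, which would reduce the angle further,
    is deliberately ignored in this sketch.
    """
    d = abs(np.dot(q1, q2))  # |cos(angle / 2)|
    return 2.0 * np.arccos(np.clip(d, 0.0, 1.0))
```

A quaternion-aware k-means would substitute a metric like this into step 2 and a proper quaternion average into step 3; the remaining difficulties are discussed below.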
+Firstly, we must be careful about choosing a suitable "distance" metric, since our samples are quaternions. Although these quaternions are represented as four-vectors, the Euclidean distance between two quaternion four-vectors does not seem particularly meaningful. Instead we should choose something that has meaning within the orientation domain, such as the angle between quaternions. Likewise, we must be careful about averaging orientation samples; a meaningful quaternion average can be calculated, which is distinct from an arithmetic mean of the quaternion vector components. However, even if we are careful to employ quaternion-specific maths in our implementation of k-means clustering, we have not yet encoded any spatial information into the algorithm. Thus, such an approach would result in disconnected (sub-)grains, composed of voxels that have been clustered together due to the similarity of their orientations, without regard for the location of the voxels within the RVE.
+
+We could perhaps implement a k-means orientation-clustering algorithm where the angle between quaternions is additionally weighted by their spatial separation. However, instead of this, we worked initially on a cost-function approach, as discussed below.
+
+### Optimise centroid positions to minimise the variance in orientations
+
+Our initial approach was to:
+
+1. Choose a set of centroid positions within the RVE
+2. Assign each RVE voxel to its nearest centroid (thereby performing a discrete Voronoi tessellation)
+3. Calculate an "average" quaternion for each Voronoi region
+4. Calculate the quaternion "variance" for each Voronoi region (using the rotation angle between quaternions as the "distance" metric)
+5. Wrap up steps 2 to 4 in a cost function that returns the sum of the region "variances"
+6. Use the Nelder-Mead optimisation algorithm (as implemented in SciPy[^8]) with the above cost function to iterate on centroid positions
+
+Unfortunately, using our current implementation, Step 2 (tessellation) is slow and memory intensive, which makes the optimisation very slow. Additionally, we must specify in advance how many clusters (i.e. centroid positions) we wish to use. Ideally, we would instead specify the "distance" (i.e. rotation angle) between the clusters.
+
+We chose the Nelder-Mead algorithm since it is "gradient-free", meaning the optimisation proceeds without the use of a gradient (i.e. a Jacobian or Hessian matrix). Other gradient-free optimisation algorithms also exist [^9], which we could try. These include the Powell algorithm [^10] and the basin-hopping method [^11].
+
+We might investigate whether using QHull, as exposed in SciPy, would be faster at performing the discrete Voronoi tessellation than our implementation. If so, this would speed up the optimisation considerably.
+
+### Existing methods in use to segment EBSD data
+
+We should be able to use existing methods for grain segmentation within EBSD data. Surveying the literature, we found that the MATLAB toolbox MTEX, in its `calcGrains`[^7] function, uses one of two methods [^3] [^4] to segment (sub-)grains in EBSD data:
+
+1. Markovian clustering [^1] [^2]
+2. Fast multiscale clustering
+   - Implementation details in MTEX discussed in Ref. [^5]
+   - Original algorithm discussed in Ref. [^6]
+
+The pipeline-based package Dream.3D also has a filter for segmenting grains, which we will investigate.
+
+#### MTEX
+
+The MTEX documentation gives some examples of loading in data from arbitrary text files[^12].
+We have encountered some problems when trying to get MTEX to successfully load 3D data for grain segmentation; see the GitHub issue via Ref. [^13].
+
+#### Dream.3D
+
+- The pipeline-based Dream.3D package can perform grain segmentation using the `ScalarSegmentFeatures` filter[^14].
+- There is a [previous discussion](https://github.com/BlueQuartzSoftware/DREAM3D/issues/321) on the Dream.3D GitHub repository regarding importing results from crystal plasticity simulations into Dream.3D.
+- Performing segmentation in Dream.3D requires an `ImageGeometry`, which is a uniform rectilinear grid. This means we cannot use the displacement information of each deformed voxel when doing this segmentation. However, I don't think this matters.
+- We have written a pipeline for performing the segmentation on a deformed RVE using a 128-cubed grid size. The deformed RVE and the resulting feature segmentation from the Dream3D pipeline are shown below. In this case we used a threshold misorientation angle of three degrees. I have yet to find a comprehensive description of the clustering algorithm that Dream3D uses for the segmentation, but it is reasonable to assume that our selected threshold means that neighbouring voxels that are misoriented by more than three degrees are considered to belong to distinct features (i.e. sub-grains).
+
+![dual_phase_Ti_RVE_128_grid_deformed_hard](/wiki/assets/images/posts/blog/ti_cp_pf/dual_phase_Ti_RVE_128_grid_deformed_hard.png)
+
+![dual_phase_Ti_RVE_128_grid_dream3D_clustered_3_degs_hard](/wiki/assets/images/posts/blog/ti_cp_pf/dual_phase_Ti_RVE_128_grid_dream3D_clustered_3_degs_hard.png)
+
+These preliminary results do not show significant formation of sub-grains, since the additional features are in very close proximity to the boundary of the $\alpha$ precipitates. We will continue to investigate this.
+
+## MatFlow integration
+
+We have developed a MatFlow workflow (and associated task schemas/extension functionality) to perform the DAMASK simulations and grain segmentation using Dream.3D. More details will be provided as and when we concretise our analysis approach. However, an example task for generating the dual-phase RVE can be found [here](https://github.com/LightForm-group/UoM-CSF-matflow/blob/524fe20f2453df42e0ec3c59fc9265997de4c0e2/task_examples/generate_volume_element.yml#L110).
+
+## Quaternion component ordering conventions
+
+During our development of the associated MatFlow workflow, we found that different software can employ different orderings of the quaternion components; vector-scalar or, more commonly, scalar-vector:
+
+- DAMASK: scalar-vector[^16]
+- MTEX: scalar-vector (I could not find an explicit statement, but from doing an Euler-to-quaternion conversion in MTEX, it seems so.)
+- DefDAP: scalar-vector
+- Dream3D: vector-scalar[^15]
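+
+Converting between the two orderings is just a cyclic shift of the components; a minimal sketch (assuming quaternions stored row-wise in a NumPy array; these helper functions are illustrative, not part of any of the above packages):
+
+```python
+import numpy as np
+
+def scalar_vector_to_vector_scalar(q):
+    """Reorder quaternion components from (w, x, y, z) to (x, y, z, w)."""
+    return np.roll(np.asarray(q), shift=-1, axis=-1)
+
+def vector_scalar_to_scalar_vector(q):
+    """Reorder quaternion components from (x, y, z, w) to (w, x, y, z)."""
+    return np.roll(np.asarray(q), shift=1, axis=-1)
+
+q_damask = np.array([1.0, 0.0, 0.0, 0.0])  # identity, scalar-vector order
+q_dream3d = scalar_vector_to_vector_scalar(q_damask)  # -> [0., 0., 0., 1.]
+```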
+
+## References
+
+[^1]: [https://sites.cs.ucsb.edu/~xyan/classes/CS595D-2009winter/MCL_Presentation2.pdf](https://sites.cs.ucsb.edu/~xyan/classes/CS595D-2009winter/MCL_Presentation2.pdf)
+[^2]: [https://micans.org/mcl/ani/mcl-animation.html](https://micans.org/mcl/ani/mcl-animation.html)
+[^3]: [https://mtex-toolbox.github.io/GrainReconstructionAdvanced.html](https://mtex-toolbox.github.io/GrainReconstructionAdvanced.html)
+[^4]: Bachmann, Florian, Ralf Hielscher, and Helmut Schaeben. ‘Grain Detection from 2d and 3d EBSD Data—Specification of the MTEX Algorithm’. Ultramicroscopy 111, no. 12 (1 December 2011): 1720–33. [https://doi.org/10.1016/j.ultramic.2011.08.002](https://doi.org/10.1016/j.ultramic.2011.08.002).
+[^5]: Loeb, Andrew, Michael Ferry, and Lori Bassman. ‘Segmentation of 3D EBSD Data for Subgrain Boundary Identification and Feature Characterization’. Ultramicroscopy 161 (1 February 2016): 83–89. [https://doi.org/10.1016/j.ultramic.2015.11.003](https://doi.org/10.1016/j.ultramic.2015.11.003).
+[^6]: Kushnir, Dan, Meirav Galun, and Achi Brandt. ‘Fast Multiscale Clustering and Manifold Identification’. Pattern Recognition 39, no. 10 (October 2006): 1876–91. [https://doi.org/10.1016/j.patcog.2006.04.007](https://doi.org/10.1016/j.patcog.2006.04.007).
+[^7]: [https://mtex-toolbox.github.io/EBSD.calcGrains.html](https://mtex-toolbox.github.io/EBSD.calcGrains.html)
+[^8]: [https://docs.scipy.org/doc/scipy/reference/optimize.minimize-neldermead.html](https://docs.scipy.org/doc/scipy/reference/optimize.minimize-neldermead.html)
+[^9]: [http://scipy-lectures.org/advanced/mathematical_optimization/#gradient-less-methods](http://scipy-lectures.org/advanced/mathematical_optimization/#gradient-less-methods)
+[^10]: [https://docs.scipy.org/doc/scipy/reference/optimize.minimize-powell.html](https://docs.scipy.org/doc/scipy/reference/optimize.minimize-powell.html)
+[^11]: [https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.basinhopping.html](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.basinhopping.html)
+[^12]: [https://mtex-toolbox.github.io/EBSD.load.html](https://mtex-toolbox.github.io/EBSD.load.html)
+[^13]: [https://github.com/mtex-toolbox/mtex/issues/1170](https://github.com/mtex-toolbox/mtex/issues/1170)
+[^14]: [http://www.dream3d.io/Filters/ReconstructionFilters/ScalarSegmentFeatures/](http://www.dream3d.io/Filters/ReconstructionFilters/ScalarSegmentFeatures/)
+[^15]: [http://www.dream3d.io/Filters/OrientationAnalysisFilters/ConvertQuaternion/](http://www.dream3d.io/Filters/OrientationAnalysisFilters/ConvertQuaternion/)
+[^16]: [https://git.damask.mpie.de/damask/DAMASK/-/blob/development/src/rotations.f90](https://git.damask.mpie.de/damask/DAMASK/-/blob/development/src/rotations.f90) indicates that DAMASK adopts the conventions listed in: Rowenhorst, D, A D Rollett, G S Rohrer, M Groeber, M Jackson, P J Konijnenberg, and M De Graef. ‘Consistent Representations of and Conversions between 3D Rotations’. Modelling and Simulation in Materials Science and Engineering 23, no. 8 (1 December 2015): 083501. [https://doi.org/10.1088/0965-0393/23/8/083501](https://doi.org/10.1088/0965-0393/23/8/083501), which writes quaternions in the scalar-vector format.
diff --git a/collections/_blog/ti_CP_PF_2021.09.md b/collections/_blog/ti_CP_PF_2021.09.md
new file mode 100644
index 0000000..1151656
--- /dev/null
+++ b/collections/_blog/ti_CP_PF_2021.09.md
@@ -0,0 +1,57 @@
+---
+title: "Titanium CP/PF modelling - 2021 September"
+author: Adam Plowman
+tags:
+  - titanium
+  - crystal-plasticity
+  - phase-field
+  - dual-phase
+published: true
+subcollection: Modelling of dual-phase titanium under hot rolling conditions
+order: 4
+toc: true
+---
+
+*See the [project overview](/wiki/blog/ti-cp-pf-overview) for an outline. This page was last updated: October 2021*
+
+| 🡸 [Previous month's update (August)](/wiki/blog/ti-cp-pf-2021-08) | [Next month's update (October)](/wiki/blog/ti-cp-pf-2021-10) 🡺 |
+
+## Resource requirements
+
+The crystal plasticity simulations must be performed with a sufficiently dense grid resolution. In particular, we must ensure:
+
+1. smooth spatial variations of final stress/strain fields
+2. sufficient modelling of the curved surfaces of the $\alpha$-phase particles
+3. valid results when we later investigate the effects of the surface roughness of the $\alpha$-phase particles
+
+To this end, we performed the same simple shear deformation on a set of volume elements with grid sizes ranging from (32 $\times$ 32 $\times$ 32) to (256 $\times$ 256 $\times$ 256).
+
+![resource_req_RVEs](/wiki/assets/images/posts/blog/ti_cp_pf/resource_req_RVEs.png)
+*Lower- and higher-resolution RVEs*
+
+The simulation durations and memory consumption are plotted below, where grid sizes 32 and 64 were run using eight CPU cores, grid sizes 128 and 192 were run using 16 cores, and grid size 256 was run using 32 cores.
+
+{% include plotly_figures/resource_use.html %}
+
+## Alignment issues
+
+We ran the fast multiscale clustering (FMC) routine in MTEX on the $y=0$ slice of the deformed RVE, using $C_\text{Maha} = 2.5$. The figure below shows how the clustering process splits up the image into "grains". However, given that we would not expect much rotation of the hexagonal $\alpha$-phase, the inverse-pole-figure (IPF) colouring in the below figure is odd, since it indicates that the ellipsoidal $\alpha$-phase particle has the $\[\bar{1} 1 0 0\]$ (blue) direction along the $y$-direction (out of the page). Instead, we would expect to see an $a$-direction $\[\bar{1}2\bar{1}0\]$ (see [July's blog post](/wiki/blog/ti-cp-pf-2021-07#rve-construction) for details of the RVE construction).
+
+![MTEX_seg_incorrect_align](/wiki/assets/images/posts/blog/ti_cp_pf/MTEX_seg_incorrect_align.png)
+*2D FMC grain segmentation of the $y=0$ slice, with **incorrect** alignment of the hexagonal unit cell*
+
+In fact, this colouring is an artefact, arising from the different hexagonal unit cell alignments used by DAMASK and MTEX. To fix this issue, we must specify in MTEX the same alignment system that DAMASK uses, which is to align the $x$-axis with the $a$-direction of the hexagonal unit cell. This is done when defining the `crystalSymmetry` object: `crystalSymmetry('hexagonal', 'mineral', 'Ti (alpha)', 'X||a')` --- note the `X||a`.
+
+## Comparison of clustering parameter $C_\text{Maha}$
+
+After correcting the alignment issue above, we ran the clustering for a range of different values of $C_\text{Maha}$, which controls how large the clusters will be. This is shown in the figure below.
+
+![C_Maha_ani](/wiki/assets/images/posts/blog/ti_cp_pf/C_Maha_ani.gif)
+*Clustering for values of $C_\text{Maha} = \\{0.5, 1.0, 1.5, 2.0, 2.5, 3.0\\}$ (with correct hexagonal unit cell alignment!)*
+
+## Accounting for periodicity
+
+Since the RVE that was simulated was periodic, we need to consider this when performing the clustering. To do this, we tiled the 2D slice into a $3 \times 3$ grid of 2D slices, and performed the clustering on this new, larger slice instead. Cropping the resulting image of the clustered grains back to the size of the original slice, we can see some subtle differences in the result of the clustering due to the periodicity, as portrayed in the figure below.
+
+![periodic_vs_non_periodic](/wiki/assets/images/posts/blog/ti_cp_pf/periodic_vs_non_periodic.png)
+*We must consider periodicity in the grain clustering. Doing so has an effect on the morphology of the clustered grains. (Example using $C_\text{Maha}=2.5$ and no smoothing.)*
diff --git a/collections/_blog/ti_CP_PF_2021.10.md b/collections/_blog/ti_CP_PF_2021.10.md
new file mode 100644
index 0000000..e052c5a
--- /dev/null
+++ b/collections/_blog/ti_CP_PF_2021.10.md
@@ -0,0 +1,81 @@
+---
+title: "Titanium CP/PF modelling - 2021 October"
+author: Adam Plowman
+tags:
+  - titanium
+  - crystal-plasticity
+  - phase-field
+  - dual-phase
+published: true
+subcollection: Modelling of dual-phase titanium under hot rolling conditions
+order: 5
+toc: true
+---
+
+*See the [project overview](/wiki/blog/ti-cp-pf-overview) for an outline. This page was last updated: November 2021*
+
+| 🡸 [Previous month's update (September)](/wiki/blog/ti-cp-pf-2021-09) | [Next month's update (May 2022)](/wiki/blog/ti-cp-pf-2022-05) 🡺 |
+
+## Effect of MTEX FMC smoothing parameter
+
+![smooth_ani](/wiki/assets/images/posts/blog/ti_cp_pf/smooth_ani.gif)
+*Effect of the MTEX smoothing parameter on the boundaries of grains clustered using FMC*
+
+## Getting MTEX-segmentation back into Python
+- Our initial approach was to save a PNG image of the grain ID map and load that into Python. Some notes on this approach are listed below:
+    - Initially had problems with anti-aliasing producing an erroneous grain ID map when importing into Python
+    - Tried some different methods for saving arrays directly:
+        - Matlab's `getframe` - not easy to control resolution
+        - Saving `grains` to a `.mat` file - could not open `grains` in Python with `scipy.io.loadmat`, and not sure if sufficient information is contained in `grains`.
+        - Investigated saving the array directly from the Matlab figure - the figure is comprised of patches and would need to be rasterised/discretised.
+    - Eventually found how to switch off anti-aliasing when saving figures:
+        - `set(gcf,'graphicssmoothing','off')`
+        - results much better now
+        - but when rescaling in Python to the size of the simulation slice, some pixels at the edges seem to change grain ID.
+        - the fix is to turn off interpolation in `zoom` with the `order` parameter: `grain_IDs_resampled = zoom(grain_IDs, zoom_factor, order=0)` (see the sketch below)
+- A better solution was found as follows:
+    - Use `ebsdsq = gridify(ebsd);` to project the (segmented) EBSD map onto a grid (it should already be in an effective grid, but we need to use grid-specific methods)
+    - Loop over the coordinates of the EBSD map and use `findByLocation` to get the grain ID at each grid coordinate
+    - Return the grain IDs matrix from the MTEX script and then use the output directly in the invoking MatLab engine call.
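+
+A minimal sketch of this nearest-neighbour resampling is given below (using `scipy.ndimage.zoom`; the grain-ID array here is an illustrative placeholder for the map exported from MTEX):
+
+```python
+import numpy as np
+from scipy.ndimage import zoom
+
+# Placeholder grain-ID map; in practice this is the array returned by MTEX
+grain_IDs = np.repeat(np.repeat(np.arange(4).reshape(2, 2), 4, axis=0), 4, axis=1)
+
+# order=0 selects nearest-neighbour interpolation, so no new (spurious)
+# grain IDs are interpolated into existence at grain boundaries
+zoom_factor = 16  # e.g. rescale to the simulation slice size
+grain_IDs_resampled = zoom(grain_IDs, zoom_factor, order=0)
+```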
+
+## FMC in MTEX
+
+- Unfortunately, it turns out that MTEX no longer supports 3D grain segmentation using FMC. So if we want to perform this analysis in 3D (rather than on 2D slices), we will need to look elsewhere, or implement our own version.
+
+## Estimating dislocation density from the crystal plasticity simulation results
+
+We can use the Taylor hardening law to relate the flow stress (or, in our case, the von Mises "equivalent" stress), $\sigma$, with the dislocation density, $\rho$ [^1]:
+
+$$
+\begin{equation}
+\rho = \left(\frac{\sigma}{\alpha G b}\right)^2,
+\end{equation}
+$$
+
+where $\alpha$ is a material parameter, $G$ is the shear modulus, and $b$ is the Burgers vector.
+
+## Voronoi tessellation of seed points according to dislocation density
+
+With a dislocation density field, $\rho$, defined over the model slice, we can generate seed points on the slice that have a commensurate density. To do this, we can use the "random choice" function within NumPy, which allows selecting `N` random indices, where each index can be assigned a selection probability. In our case, we set these probabilities according to the dislocation density field:
+
+```python
+import numpy as np
+
+# rho: dislocation density grid (a random placeholder here; in practice this
+# is computed from the simulated stress field via the Taylor hardening law)
+rho = np.random.default_rng(0).random((128, 128))
+
+number = 500  # number of seed points to generate
+
+rho_flat = rho.flatten()    # create a flat copy of the array
+rho_flat /= rho_flat.sum()  # probabilities array should sum to one
+
+rng = np.random.default_rng()
+sample_index = rng.choice(a=rho_flat.size, p=rho_flat, size=(number,))
+
+# Convert back to an array of 2D row vectors, where each row represents integer grid coordinates:
+seeds = np.vstack(np.unravel_index(sample_index, rho.shape)).T
+```
+
+To perform the Voronoi tessellation (for the set of seed points that fall within each sub-grain), we then need to consider again the periodicity.
+
+## References
+
+[^1]: [Humphreys, F. J., and M. Hatherly. ‘Chapter 2 - The Deformed State’. In Recrystallization and Related Annealing Phenomena (Second Edition), edited by F. J. Humphreys and M. Hatherly, 11–II. Oxford: Elsevier, 2004.](https://doi.org/10.1016/B978-008044164-1/50006-2)
diff --git a/collections/_blog/ti_CP_PF_overview.md b/collections/_blog/ti_CP_PF_overview.md
new file mode 100644
index 0000000..5c6381f
--- /dev/null
+++ b/collections/_blog/ti_CP_PF_overview.md
@@ -0,0 +1,18 @@
+---
+title: "Titanium CP/PF modelling - Project overview"
+author: Adam Plowman
+tags:
+  - titanium
+  - crystal-plasticity
+  - phase-field
+  - dual-phase
+published: true
+subcollection: Modelling of dual-phase titanium under hot rolling conditions
+order: 1
+---
+
+*Last updated: July 2021*
+
+The aim of the work is to investigate the microstructural evolution of dual-phase titanium (e.g. Ti64) under hot rolling. Ultimately, we aim to develop a coupled crystal plasticity (CP) and phase field (PF) model that can track the kinetics of microstructural development during hot rolling. We will use insight from the model to understand experimental observations, and to provide guidance on the mechanisms at play during hot rolling of these alloys.
+
+The coupled model will be developed as part of the DAMASK crystal plasticity package. However, since this approach will require substantial time for code development and testing, we will, initially, also investigate "static" microstructural transformation. In this case, we will firstly perform CP simulations of a suitable dual-phase representative volume element (RVE).
We will then use various methods to identify a region of interest on the deformed RVE, which can then be fed into a phase-field model in order to investigate the kinetics of the microstructure development. For example, we will run a grain-growth model on the region of interest.
diff --git a/collections/_blog/ti_CP_PF_presentations.md b/collections/_blog/ti_CP_PF_presentations.md
new file mode 100644
index 0000000..7a28283
--- /dev/null
+++ b/collections/_blog/ti_CP_PF_presentations.md
@@ -0,0 +1,22 @@
+---
+title: "Titanium CP/PF modelling - Presentations"
+author: Adam Plowman
+tags:
+  - titanium
+  - crystal-plasticity
+  - phase-field
+  - dual-phase
+published: true
+subcollection: Modelling of dual-phase titanium under hot rolling conditions
+order: 10
+toc: true
+---
+
+- [TiFUN progress meeting 2021.09.09 - Adam Plowman][1]
+- [TiFUN progress meeting 2021.12.07 - Adam Plowman][2]
+- [TiFUN progress meeting 2022.04.07 - Adam Plowman (presented by JF)][3]
+
+[1]:{{ site.url | append: site.baseurl }}/_includes/ppt_templates/TiFUN_2021.09.09_adam_plowman.pptx
+[2]:{{ site.url | append: site.baseurl }}/_includes/ppt_templates/TiFUN_2021.12.07_adam_plowman.pptx
+[3]:{{ site.url | append: site.baseurl }}/_includes/ppt_templates/TiFUN_2022.04.07_adam_plowman.pptx
+
diff --git a/collections/_code_of_conduct/index.md b/collections/_code_of_conduct/index.md
index d3e67c8..bb153b9 100644
--- a/collections/_code_of_conduct/index.md
+++ b/collections/_code_of_conduct/index.md
@@ -31,7 +31,7 @@ If you have concerns regarding colleague conduct, you can choose to discuss thes
Contact details
-**LightForm Project Manager**
+**João Fonseca** (LightForm Project Manager)
We are currently without a project manager. During this period, João Fonseca will be trying to keep up with project management duties. This is not ideal, and we hope to have a new project manager very soon.
@@ -68,10 +68,13 @@ We are currently without a project manager. During this period, João Fonseca wi
### Equality, Diversity and Inclusion Policy
-The University of Manchester is committed to promoting equality and providing an environment where all members of its community are treated with respect and dignity. We are committed to seeking to employ a workforce and educate a student body that reflects the diverse community we serve. The Equality Act (2010) helps by providing a legal framework to protect people from discrimination, harassment and victimisation in the workplace and wider society. http://documents.manchester.ac.uk/display.aspx?DocID=8361
+The University of Manchester is committed to promoting equality and providing an environment where all members of its community are treated with respect and dignity. We are committed to seeking to employ a workforce and educate a student body that reflects the diverse community we serve. The Equality Act (2010) helps by providing a legal framework to protect people from discrimination, harassment and victimisation in the workplace and wider society.
+
+- [UoM Equality Diversity and Inclusion Policy](http://documents.manchester.ac.uk/display.aspx?DocID=8361)
Any staff member or student who believes that they may have been the victim of discrimination, harassment, bullying or victimisation shall have protection under the University’s Dignity at Work and Study Policy and Procedure.
-https://www.staffnet.manchester.ac.uk/equality-and-diversity/policies-and-guidance/dignity-at-work-and-study/
+
+- [UoM Dignity at Work and Study Policy](https://www.staffnet.manchester.ac.uk/equality-and-diversity/policies-and-guidance/dignity-at-work-and-study/)
### How we communicate and support one another
diff --git a/collections/_data_analysis/data_management.pdf b/collections/_data_analysis/data_management.pdf
new file mode 100644
index 0000000..c4f62cc
Binary files /dev/null and b/collections/_data_analysis/data_management.pdf differ
diff --git a/collections/_data_analysis/experimental-metadata.md b/collections/_data_analysis/experimental-metadata.md
new file mode 100644
index 0000000..fddaa51
--- /dev/null
+++ b/collections/_data_analysis/experimental-metadata.md
@@ -0,0 +1,14 @@
+---
+title: Experimental Metadata
+author: Peter Crowther
+tags:
+  - Open-data
+  - metadata
+published: true
+---
+
+# Experimental Metadata
+
+### Record metadata
+
+Metadata tells people how the data was collected. Data without context is meaningless, as it cannot be properly analysed. Make sure that you know what metadata is important for each experiment, and make sure that it is being recorded for each measurement you do. Sometimes the metadata is collected along with the result in the result file, but sometimes you will have to record some of it manually. This is what a lab book is for, but it is also a good idea to make a digital record of the metadata, so that if the files are passed on but not the lab book, the data does not lose its value.
diff --git a/collections/_data_analysis/index.md b/collections/_data_analysis/index.md
new file mode 100644
index 0000000..07af681
--- /dev/null
+++ b/collections/_data_analysis/index.md
@@ -0,0 +1,54 @@
+---
+layout: post
+title: Data and Analysis
+show_breadcrumbs: false
+show_meta: false
+published: true
+author: Peter Crowther
+toc: true
+---
+
+The experimental and simulation methods we use in modern research produce huge amounts of data. This data is valuable, since it takes time and resources to produce, but if it is not stored and analysed correctly its value can be greatly decreased. In these pages, data management and producing reliable and reproducible analysis workflows are broken down into key sections, which are discussed in detail. Guidelines are given in each section to set out what is expected of you, to make sure that we maintain a high standard in these areas.
+
+A series of presentations was given to group members in July 2020. The slides for these can be found here: [Data management slides](./data_management.pdf)
+
+## Introduction to open and reproducible science
+Open research is the principle that, as well as sharing results, the methods, data and analysis used to create the results are also shared. The principles of open research are what drive all of the considerations discussed below.
+
+[Open Research](./open-research)
+
+
+## Data Management
+Data management is about ensuring that the data we collect is stored, collated and annotated in such a way that it can be used in the future as easily as when it is first collected.
+
+### Organisation of Research data
+Starting out at the beginning of your PhD with just a few files, it might seem a lot of bother to set up an organisation system. The sooner you get into the habit of organising your research data, the easier it will be.
Digital data also has to be stored carefully, since a single unpredictable event, such as the failure of a storage device, can result in permanent data loss. In this section we set out guidelines for how you should be storing your data.
+
+[Organisation of research data](./organisation-of-research-data)
+
+
+### Metadata
+Raw experimental or simulation data means nothing without information about the context of the simulation or experiment. If this metadata is not captured and stored along with the data, the data is likely to lose meaning over time. In this section we discuss metadata and guidelines for how to capture it.
+
+[Experimental Metadata](./experimental-metadata)
+
+
+### Uploading data to a repository
+When we publish data, we need to put it somewhere it will be accessible well into the future. For this we use a data repository. This section has details about how to upload datasets to Zenodo.
+
+[Uploading data to Zenodo](./zenodo)
+
+## Reproducible scientific workflows for data analysis
+Our analyses are often complex, which reflects the complexity of the physical properties and processes that we are analysing. It is important that we are able to produce clear and reproducible analyses so that other people can verify the work that we do. Just as importantly, it allows others to more easily build on the work we do.
+
+### Writing good code
+Because our datasets are getting larger, automating our analyses has become essential rather than just nice to have. A lot of modern data analysis means programming. You don't need to be a programming expert, but there are some simple things we can do to ensure that the code we write is maintainable, reproducible and shareable.
+
+[Writing good code](./writing-good-code)
+
+### Examples of good analysis workflows
+
+[Analysis workflows](./analysis-workflows)
+
+## Saving images
+Some advice on choosing a suitable image [file format and resolution](./saving-figures)
diff --git a/collections/_data_analysis/open-research.md b/collections/_data_analysis/open-research.md
new file mode 100644
index 0000000..09e20b6
--- /dev/null
+++ b/collections/_data_analysis/open-research.md
@@ -0,0 +1,39 @@
+---
+title: Open and reproducible Research
+author: Peter Crowther
+tags:
+  - open-research
+---
+# Open and reproducible Research
+
+## Why share?
+
+Sharing research is good for everyone. It allows verification of the research that has been done, decreases duplication (which increases the amount of productive research that gets done) and allows groups with fewer resources to participate in productive research.
+
+Sharing can also be good for the group that publishes the work. It increases the impact of the work by allowing more people to access it; it allows the development of collaborations with new researchers and groups from around the world; and it ensures that all researchers get credit for the work they do, not just PIs or grant holders.
+
+As well as the inherently positive aspects of open research, we are starting to see changes in policy in areas like research funding and publishing that will further increase the importance of open research. In LightForm we want to be ahead of the curve in adopting these practices.
+
+## Barriers to sharing
+
+Some people are concerned that if they share incomplete ideas or datasets then other people will steal them. While this is theoretically possible, it rarely happens. If we ensure that there is an easy way for people to cite the work which is released, then people will likely cite it.
As for people 'stealing' incomplete ideas and publishing them as their own: if the work is of any value, it would take others a long time to reproduce the expertise of the publishing group in order to bring that work to publication.
+
+Another barrier to sharing data is lack of knowledge and lack of time. These barriers are now reducing: there are a greater number of resources available to educate people about open research, and funders and PIs are putting greater value and emphasis on sharing, which means that we can afford to spend the time working on it.
+
+## Five star open data
+
+Tim Berners-Lee, the inventor of the Web, set out a [5 star scale for Open Data](https://5stardata.info/en/). It sets out guidelines for sharing data, specifying some criteria that progressively increase the data's value and interoperability.
+
+We are required by our funders to share the data used in our publications. The minimum requirement for open data is that the data is shared under an open licence; this would be one star data. Once you have published one star data, it takes little extra effort to increase the quality to 5 star data, but it greatly increases its value by making it more shareable and more interoperable.
+
+★ : Make your data available on the Web under an open license
+★★ : Make it available as structured data (e.g., Excel instead of image scan of a table)
+★★★ : Make it available in a non-proprietary open format (e.g., CSV instead of Excel)
+★★★★ : Use URIs to denote things, so that people can point at your stuff (e.g. use the DOI feature of Zenodo)
+★★★★★ : Link your data to other data to provide context (e.g. include metadata, link paper and data, link data in the Zenodo LightForm community)
+
+We discuss each of the things required to reach 5 star quality datasets in separate pages on the wiki.
+
+## Further reading
+
+[A manifesto for reproducible science](https://www.nature.com/articles/s41562-016-0021) is an excellent piece which covers some of the current issues in scientific research and highlights some ways in which we can move towards a more open and reproducible workflow in research.
diff --git a/collections/_handbook/Dropbox.md b/collections/_data_analysis/organisation-of-research-data.md
similarity index 67%
rename from collections/_handbook/Dropbox.md
rename to collections/_data_analysis/organisation-of-research-data.md
index a78db66..74cbd17 100644
--- a/collections/_handbook/Dropbox.md
+++ b/collections/_data_analysis/organisation-of-research-data.md
@@ -1,9 +1,28 @@
---
-title: 14. Data management
-author: Natalie Shannon
-order: 14
+title: Organisation of research data
+author: Peter Crowther
---
+All modern research means collecting and processing data. As equipment has increased in complexity and computing power has grown, the amount of data collected, and its complexity, have also increased. In order to do good, reproducible research, it is important that the data is treated correctly. If it is not, inappropriate conclusions may be drawn, which does not make for good science.
+
+## Good practices for data organisation
+
+### Keep raw data
+The raw data from a measurement should always be kept. While it is possible to reproduce the analysis of a raw dataset, it may not be possible to reproduce the original raw data. Keeping the original raw data is important so that others can reproduce the analysis that you have done.
+
+### Ensure data are backed up
+Data should be backed up, preferably in more than one location.
Where possible, use university networked research data storage/Dropbox, as this is much more robust than USB hard disks.
+
+### Use version control
+For any text-based documents, such as papers or code, use version control tools to keep a single versioned copy. This reduces the chances of losing vital work and allows easy collaboration with other people.
+
+### Further reading:
+This is a summary of the points covered in the paper [Good enough practices in scientific computing](https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005510)
+
+# Use of Dropbox to manage your research data
+All LightForm students have Dropbox folders set up which are shared with their supervisor. This should be the primary location used for storage of research data and analysis.
+We have set up folders with specific purposes.
+
 ![](/wiki/assets/images/posts/dropbox_folder_overview.png)
## Writing Folder
@@ -52,7 +71,7 @@ Example subfolder title: "PowerPoint Industrial sponsor update 05.02.19"
**How literature should be saved:**
-You are required to use a bibliography reference manager tool for example Zotero, Endnote, Mendeley, in order to reference articles relevant to your project.
+It is a good idea to use a bibliography reference manager tool, for example Zotero, EndNote or Mendeley, to reference articles relevant to your project.
You can either point your reference manager to this folder for saving the reference database or export your database as a Bibtex file to here.
@@ -82,7 +101,7 @@ Then within each individual experiment folder, you will have a further 2 sub-sub
All raw data relating to the individual experiment – there should be no data analysis presented or stored in the Raw Data subfolder.
-This is the data that will be used for updating to the repository on publication/thesis completion. When completed, the experimental data can be uploaded to ZENODO using DataLight.
+This is the data that will be used for uploading to the repository on publication/thesis completion. When completed, the experimental data can be uploaded to ZENODO.
You experimental data files should also be named sensibly. See guidelines on the template for your experiment.
@@ -104,18 +123,3 @@ Subfolders within Analysis should be given an appropriate descriptive title. Inc
The Progress Summary Spreadsheet enables the Project Manager to see at a glance, progress to date and will be discussed at your supervision meetings. You will have only one Progress Summary sheet which you update on a month by month basis.
**Please note: You will also use the Progress Summary sheet to update the team on progress at monthly theme meetings.**
-
-
-### Keeping our information safe
-
-The University not only holds personal data about you, it also holds a vast amount of sensitive material, for example in relation to our research.
-
-The University must protect this information and ensure that it is not shared internally, or externally, with those who have no right to it. We all, therefore, have an important responsibility to keep information safe and secure.
-
-Publicising and sharing of LightForm outputs, research findings, IP, collaborations and other sensitive information is strictly prohibited.
-
-You must seek written authorisation from your supervisor in advance, regarding any communications for social media, press or broadcast media.
-
-Where appropriate we aim to make all research reproducible and accessible, subject to the constraints of the specific project, through quality science data collection and archiving procedures.
-
-
diff --git a/collections/_data_analysis/saving-figures.md b/collections/_data_analysis/saving-figures.md
new file mode 100644
index 0000000..a611ed9
--- /dev/null
+++ b/collections/_data_analysis/saving-figures.md
@@ -0,0 +1,20 @@
+---
+title: Figures and file formats
+author: Gerard Capes
+---
+## Which file format to use?
+When processing data (e.g. with Python and matplotlib) to create figures, there are multiple options for saving them.
+
+[This blog](https://btjanaka.net/blog/matplotlib-figures/) gives some great tips and discussion on this topic,
+but the key points are summarised below:
+
+- Prefer vector-based formats (SVG and PDF)
+    - These represent images with points, lines, and curves, so they have 'infinite resolution'.
+    - Raster-based formats like PNG represent images with pixels, so they will get blurry when zoomed in.
+- Use PNG when you need a raster-based format
+    - PNG files are saved with lossless compression, whereas JPG uses lossy compression, which can introduce artifacts or distortions in the data when saving.
+    - While raster images are not as sharp as vector-based ones, you can mitigate this by setting a sufficient resolution:
+    ```
+    import matplotlib.pyplot as plt
+    plt.savefig("plot.png", dpi=300)
+    ```
diff --git a/collections/_software_and_simulation/using_git.md b/collections/_data_analysis/using-git.md
similarity index 78%
rename from collections/_software_and_simulation/using_git.md
rename to collections/_data_analysis/using-git.md
index b228869..ed87b0b 100644
--- a/collections/_software_and_simulation/using_git.md
+++ b/collections/_data_analysis/using-git.md
@@ -7,13 +7,11 @@ tags:
published: true
---
-# Using Git
-
Git is a free and open source distributed version control system useful for versioning textual data. This could be code or data for making a paper. Git is not so good at storing very large files or binary files as these can make the repository very large.
## Getting Git
-Get git here: https://git-scm.com/
+Get git here: <https://git-scm.com/>
## Git glossary
@@ -40,10 +38,10 @@ These commands work when you have set up a remote:
While you will occasionally need to use the command line to access advanced features, for the most part you can use a graphical interface.
-* I would recommend GitHub Desktop: https://desktop.github.com/ for beginners who have not used git before and only want to use basic features. It is multiplatform, free and open source.
-* For a more complete interface with nice branch visualisation I would recommend GitExtensions: https://github.com/gitextensions/gitextensions. Git extensions V3 is Windows only but V2 supports all platforms and is not that much older. It is free and open source.
-* As an alternative GitKraken is really nice though it is proprietary and some features are subscriber only: https://www.gitkraken.com
+* I would recommend GitHub Desktop (<https://desktop.github.com/>) for beginners who have not used git before and only want to use basic features. It is multiplatform, free and open source.
+* For a more complete interface with nice branch visualisation I would recommend GitExtensions: <https://github.com/gitextensions/gitextensions>. Git extensions V3 is Windows only but V2 supports all platforms and is not that much older. It is free and open source.
+* As an alternative, GitKraken (<https://www.gitkraken.com>) is really nice, though it is proprietary and some advanced features are subscriber only.
## GitHub
-GitHub is an online service for storing remote copies of repositories. While you can use git entirely as a local version control system, the advantage of having a remote repository is that you can synchronise your work over multiple computers and collaborate with others. We use a GitHub community to keep our groups code which you can access here: https://github.com/LightForm-group
+GitHub is an online service for storing remote copies of repositories. While you can use git entirely as a local version control system, the advantage of having a remote repository is that you can synchronise your work over multiple computers and collaborate with others. We use a GitHub community to keep our group's code, which you can access here: <https://github.com/LightForm-group>
diff --git a/collections/_data_analysis/writing-good-code.md b/collections/_data_analysis/writing-good-code.md
new file mode 100644
index 0000000..dd0f5a2
--- /dev/null
+++ b/collections/_data_analysis/writing-good-code.md
@@ -0,0 +1,42 @@
+---
+title: Writing good code
+author: Peter Crowther
+tags:
+  - code
+  - git
+  - version control
+published: true
+---
+
+Here we set out some simple steps that can make a big difference to the quality of your code. It is important that code is readable as well as functional. Well-designed, readable code is much easier for you or others to understand, maintain and verify. These points are summarised from: [Good enough practices in scientific computing](https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005510)
+
+### Use code comments
+Place a brief explanatory comment at the start of every program to say what the script does. Give a comment at the start of each function to say what it does (it only takes a line). If your code does anything complex, then add some comments next to the complex bit to say what is going on.
+
+### Give functions and variables meaningful names
+If your variables are called i, j and k then it will be really hard to understand what your code is doing. Using really short variable names was a convention from many years ago, when screens could only show 80 characters. These days it is not necessary. Use names that describe what you are doing, e.g.:
+
+```
+# This code calculates the number of flocks of sheep
+# given the number of sheep and the flock size.
+num_sheep = 100
+flock_size = 20
+num_flocks = num_sheep / flock_size
+print(num_flocks)
+```
+
+### Use functions to break up code / don't repeat yourself
+If your code is longer than 20 lines, it is time to start breaking it up into logical subsections. Using functions allows breaking down the problem being solved into smaller chunks, so it can be more easily understood. It also reduces repetition, which makes code easier to maintain. If you find yourself copying and pasting sections of code and renaming variables, then you should be using a function.
+
+### Don't reinvent the wheel
+Before you start writing code, find out if someone else has written it first. Use functions from libraries.
+
+### Provide a simple example and test data
+If someone else wants to use your code, the easiest way to find out how it works is to run an example with test data. It also verifies that the code is working as expected.
+
+### Use version control
+Use version control to manage code.
[Instructions on how to use git](./using-git)
+
+### Submit code to a reputable DOI-issuing repository
+Once you are done with your analysis, the code you have written forms part of the analysis pipeline. All parts of this need to be made public so people can reproduce the work.
diff --git a/collections/_experiments/dilatometer_hot.md b/collections/_experiments/dilatometer_hot.md
index e3beacb..cbad2d4 100644
--- a/collections/_experiments/dilatometer_hot.md
+++ b/collections/_experiments/dilatometer_hot.md
@@ -6,8 +6,6 @@ analysis_codes:
    link: https://github.com/LightForm-group/Ti_dilatometer_analysis
  - name: ZrNb dilatometer analysis
    link: https://github.com/LightForm-group/compression-dilatometer-analysis-ZrNb
-tutorials:
-  - Tutorial for dilatometer hot-compression tests
published: true
---
diff --git a/collections/_experiments/ebsd.md b/collections/_experiments/ebsd.md
index 02ce173..c6feed0 100644
--- a/collections/_experiments/ebsd.md
+++ b/collections/_experiments/ebsd.md
@@ -2,6 +2,17 @@
published: true
author: Christopher Daniel
title: EBSD
+tutorials:
+  - Beta reconstruction
+  - Cleaning EBSD data
+  - EBSD Sample preparation
+metadata_templates:
+  - ebsd.yml
+analysis_codes:
+  - name: Example MTEX Scripts
+    link: https://github.com/LightForm-group/Lightform_Mtex
+  - name: MTEX texture slice analysis
+    link: https://github.com/LightForm-group/MTEX-texture-slice-analysis
---
## General EBSD
This describes the metadata which applies to EBSD experiments. EBSD is a technique where electrons are fired at a sample and the scattering of the electrons is analysed.
diff --git a/collections/_experiments/gleeble_hot_compression.md b/collections/_experiments/gleeble_hot_compression.md
new file mode 100644
index 0000000..3b42441
--- /dev/null
+++ b/collections/_experiments/gleeble_hot_compression.md
@@ -0,0 +1,25 @@
+---
+title: Gleeble and Hydrawedge hot-compression tests
+author: Muzamul Nawaz, Christopher Daniel
+tutorials:
+  - Gleeble uniaxial compression tests
+  - Hydrawedge plane strain compression tests
+published: true
+---
+
+This is a thermomechanical testing machine. This page describes the compression test.
+
+### Adjustable parameters
+
+- Sample geometry - (Cylinder, Square, Prism)
+- Platen material - (Alumina, Silicon Nitride)
+- Temperature - (Up to 1300 °C)
+- Target deformation extent - (Percentage of original size)
+- Control mode - (Strain, Position, Load, Multi-step)
+- Deformation rate - (Percentage per second)
+- Heating protocol - (Text)
+- Cooling protocol - (Text)
+- Shielding gas - (Argon, Nitrogen, Other)
+
+## Other notes
+- Other notes
diff --git a/collections/_experiments/hr-dic_sample_prep.md b/collections/_experiments/hr-dic_sample_prep.md
new file mode 100644
index 0000000..8be01fb
--- /dev/null
+++ b/collections/_experiments/hr-dic_sample_prep.md
@@ -0,0 +1,116 @@
+---
+title: HR-DIC Sample Prep
+author: Fan Gao
+tags:
+  - hrdic
+  - gold-remodelling
+  - speckle-pattern
+toc: true
+published: true
+---
+
+HR-DIC (High Resolution Digital Image Correlation) is used to investigate the deformation behaviour of materials at the microstructural scale. Like macro-DIC, HR-DIC requires a specific pattern distributed on the sample surface to record the movement of material during plastic deformation, and thus acquire a displacement map from which a strain map can be plotted.
Instead of spraying white and black paints, HR-DIC requires a gold speckle pattern homogeneously distributed on the sample surface; this pattern can be achieved via gold remodelling and observed in a high-resolution SEM.
+
+There are two gold remodelling methods: one uses water, the other uses a chemical reagent such as styrene. Water remodelling is suitable for materials which can withstand high temperatures and have good corrosion resistance (e.g. Ti alloys, Ni-based superalloys, stainless steels), while chemical remodelling would be considered if materials cannot tolerate high temperatures or are easily attacked by water (e.g. Mg alloys). Both methods are introduced individually as follows.
+
+## WATER REMODELLING
+
+### 1. Gold Deposition
+
+Use the *Edwards S150B Sputter Coater* (Fig.1) in B13 Morton to deposit gold onto your polished sample. Coating for **5 to 6 minutes** will give roughly a **20 to 30 nm** gold layer on your sample surface. There is a handbook right beside the coater, so you can control this equipment by following the guidance step by step.
+
+![](/wiki/assets/images/posts/B13coater.jpeg)
+
+Fig.1 Edwards S150B Sputter Coater
+
+
+
+You should wear nitrile gloves and use tweezers when taking your sample out of the coater after gold sputtering; this is to avoid the gold layer being contaminated or scratched by fingers.
+
+### 2. Gold Remodelling with Water Vapour
+
+After gold deposition, you can put your sample into the water remodelling device. Fig.2 and Fig.3 show what the water remodelling system looks like and how it works:
+- The small beaker (b) inside is the water reservoir. Fill it with about **1/3 to 2/3 of a cup of water** prior to heating up (do not fill it completely, in case of overflow when boiling).
+- The hot plate is at the bottom of the remodelling system; it is used to evaporate the water and heat your sample up to the remodelling temperature. For materials like **Ti alloys, Ni-based superalloys and stainless steels**, the remodelling temperature is usually between **250 and 280 °C**.
+- The large beaker (a) outside is the remodelling chamber; it is used to maintain the density of water vapour at a certain level and ensure the samples are always in contact with fresh, hot vapour. Your sample is put on the hot plate between the large and small beakers (see Fig.2 or Fig.3 top view).
+- You might need to refill the small beaker (b) with water a few times during your remodelling. Wear heatproof gloves when you are doing so and be careful not to get hurt by steam. There is a kettle near the remodelling device; try to use it to provide hot water when refilling, as cold water could crack the beaker once it has been heated up to 250 °C.
+- Remodelling time is around **3.5 to 4 hours**; this finally provides a homogeneous distribution of the gold pattern with an average speckle size of around **50 to 100 nm** (Fig.7a, b and c).
+- Turn off the hot plate after remodelling. Wear heatproof gloves and use tweezers to take your sample out from the remodelling chamber and put it aside to cool down. Do not rinse hot beakers with cold water immediately; let them cool down for a bit and then dry them.
+- For safety reasons, leave a note to keep other people away from the hot plate while it is cooling down.
+
+![](/wiki/assets/images/posts/waterremodelling.png)
+
+Fig.2 Water remodelling system: (a) remodelling chamber, (b) water reservoir, (c) hot plate, (d) cup holder
+
+![](/wiki/assets/images/posts/schematicofwaterremodelling.png)
+
+Fig.3 Schematic of water remodelling system: a) remodelling chamber, b) water reservoir, c) hot plate [1]
+
+
+
+For more details about how to use the water remodelling device safely, please refer to the risk assessment form placed on [Dropbox](https://www.dropbox.com/sh/gn7m0qwdlw5m50f/AABP154p8ZCppeDzHL8o_hnDa/Equipment%20RAs?dl=0&preview=MR1000_Remodelling+Equipment+B01+%26+B013_CRA.docx&subfolder_nav_tracking=1.)
+
+## CHEMICAL/STYRENE REMODELLING
+
+### 1. Gold Deposition
+
+Use the *Quorum Q150T Coater* (Fig.4) in the sample prep room of the EM centre to deposit a gold film onto your polished sample. This equipment allows you to control the thickness of the gold film. To the best of our knowledge, **5 nm** is the ideal thickness for samples to get fully remodelled by styrene vapour within 30 hours. Again, there is a manual near the coater which can guide you step by step on how to use this machine.
+
+![](/wiki/assets/images/posts/EMCcoater.jpeg)
+
+Fig.4 Quorum Q150T Coater
+
+
+
+Note that the default target material mounted in the coater is Au/Pd [80:20], so you need to change it to a gold target before coating. You can borrow the gold target from Anjali, and remember to return it after your coating work. If you don’t know how to switch to the gold target, Anjali or Michael might be able to help (**please note your request might not be accepted until face-to-face assistance is available**).
+
+Again, wear nitrile gloves and use tweezers when taking your sample out of the coater after gold deposition.
+
+### 2. Gold Remodelling with Styrene Vapour
+
+Your gold-coated sample is now ready to be chemically remodelled by styrene vapour. As styrene can cause harm and irritation if it contacts skin or eyes, or is inhaled accidentally, this remodelling work may only be carried out in a fume cupboard. Safety spectacles, a lab coat, a mask and nitrile gloves must be worn to prevent contamination. Fig.5 and Fig.6 illustrate what the styrene remodelling system looks like and how it works:
+- First, switch on the ventilator and raise the sash (do not raise it above the height limit).
+- Open the lid of the exhaust chamber (d), remove the remodelling chamber (e) and then put your sample onto the hot plate (f).
+- Put the remodelling chamber back and close the lid of the exhaust chamber.
+- Turn on the argon flow: turn on the gas controller (a) first by twisting the black knob counterclockwise; this is in case argon piles up in the plastic tube, and also avoids a sudden change of gas pressure. Then turn on the black valve and white knob slowly (shown in Fig.5h) to provide argon. Argon will then pass through the styrene reservoir (b) and carry styrene vapour to remodel your sample. Make sure the gas pressure is not higher than 10 psi; this can be monitored via the pressure gauge on the wall. The styrene rate is best kept between **1.5 and 2 bubbles per second**; this can be manipulated by adjusting the gas controller (a).
+- Lower the sash and let the argon flow run for about **40 minutes** to exhaust the air in the chamber.
+- Set up the remodelling temperature: first press the large green button on the heater (c) to turn it on, then hold the other two green buttons ★ and ▲ (under the panel) simultaneously to set your remodelling temperature.
Remodelling temperature is related to your material and the gold film thickness deposited on your sample surface – a higher temperature usually means a faster remodelling speed, but it might also lead to unwanted grain growth. For a magnesium alloy like **AZ31** or **ZEK100** with a 5 nm gold film on it, **210 °C** gives a good remodelling result. Once you’ve set your remodelling temperature, turn on the silver switch in the middle of the heater; you should now see a red light, which indicates the hot plate is heating up to your set value.
+- Remodelling time is, again, related to your material and the thickness of the gold film coated on your sample; it is also affected by the remodelling temperature. In the case of magnesium alloy AZ31 or ZEK100, the best remodelling time is around **25 hours**; this creates an evenly distributed gold pattern with an average speckle size of approximately **20 to 40 nm** (Fig.7d). If you have other materials, for example Al or Zr alloys, then you will have to find your own remodelling parameters.
+- Once your remodelling work has been done, set the temperature down to RT by holding the ★ and ▼ buttons at the same time. It will then take one or two hours for your sample to cool down to RT. The real-time temperature will be displayed on the panel.
+- When the temperature drops down to RT, raise the sash, open the lid of the exhaust chamber (d), remove the remodelling chamber (e), take your sample out, put the remodelling chamber back and close the lid.
+- Turn off the silver switch on the heater (c) and then press the large green button again to turn off the heater.
+- Switch off the black valve on the wall to cut off the argon flow. Twist the black knob on the gas controller (a) counterclockwise a little to let the residual gas out. When there are no bubbles running in the styrene reservoir, switch off the white knob on the wall.
+- Lower the sash.
+- Let the ventilator work for another 5 to 10 minutes and then turn it off.
+
+![](/wiki/assets/images/posts/styreneremodelling.png)
+
+Fig.5 Chemical/Styrene remodelling system: (a & h) argon flow controller, (b) styrene reservoir, (c) heater, (d) exhaust chamber, (e) remodelling chamber, (f) hot plate, (g) ventilator switch
+
+![](/wiki/assets/images/posts/schematicofstyreneremodelling.png)
+
+Fig.6 Schematic of chemical/styrene remodelling system: a) argon flow controller, b) styrene reservoir, c) heater, d) remodelling vessel [2]
+
+
+
+For more details about how to use the styrene remodelling device safely, please refer to the risk assessment form placed on [Dropbox](https://www.dropbox.com/sh/gn7m0qwdlw5m50f/AABP154p8ZCppeDzHL8o_hnDa/Equipment%20RAs?dl=0&preview=MR1000_Remodelling+Equipment+B01+%26+B013_CRA.docx&subfolder_nav_tracking=1.)
+
+
+
+Once your sample has been gold remodelled, you can check the speckle pattern on a high-resolution SEM such as the Q650, Tescan Mira3 or Magellan, using BSE mode, as gold, with its high atomic number, provides good contrast under BSE. 20 kV with 10 mm WD, or 10 kV with 5 mm WD, are both okay for taking images; you can also try other parameters. Magnification is related to the speckle size you have obtained – basically, a small gold speckle requires a higher magnification while a large speckle means a lower magnification, because you need to make sure that there are no fewer than 3 pixels in each gold speckle, which should give a better correlation result afterwards. We also suggest each sub-image (tile) be overlapped by 20% to enable easy stitching in ImageJ.
Fig.7a, b, c and d present BSE images of the gold speckle patterns of pure Ti, a Ni-based superalloy, a stainless steel and a Mg alloy respectively.
+
+![](/wiki/assets/images/posts/hrdicpattern.png)
+
+Fig.7 Gold speckle pattern BSE images of (a) pure Ti after water remodelling [3], (b) Inconel 718 (Ni-based superalloy) after water remodelling [4], (c) 304L stainless steel after water remodelling [1] and (d) ZEK100 (Mg alloy) after styrene remodelling. Images were acquired on the high-resolution SEMs Magellan, Tescan S8000, Sirion and Q650 respectively, with average speckle sizes of around 83 nm (pure Ti), 70 nm (Inconel 718), 56 nm (304L stainless steel) and 25 nm (ZEK100)
+
+
+
+## References
+
+[1] F. Di Gioacchino and J. Quinta da Fonseca, “Plastic Strain Mapping with Sub-micron Resolution Using Digital Image Correlation,” Exp. Mech., vol. 53, no. 5, pp. 743–754, 2013, doi: 10.1007/s11340-012-9685-2.
+
+[2] A. Orozco-Caballero, D. Lunt, J. D. Robson, and J. Quinta da Fonseca, “How magnesium accommodates local deformation incompatibility: A high-resolution digital image correlation study,” Acta Mater., vol. 133, pp. 367–379, 2017, doi: 10.1016/j.actamat.2017.05.040.
+
+[3] SEM image kindly provided by Yukun Xu.
+
+[4] SEM image kindly provided by Dongchen Hu.
diff --git a/collections/_experiments/image-analysis.md b/collections/_experiments/image-analysis.md
new file mode 100644
index 0000000..cfd1474
--- /dev/null
+++ b/collections/_experiments/image-analysis.md
@@ -0,0 +1,16 @@
+---
+published: true
+author: Peter Crowther
+title: Image Analysis
+analysis_codes:
+  - name: Measuring rods in an image
+    link: https://github.com/LightForm-group/rod_measurement
+  - name: Separation of primary and secondary alpha
+    link: https://github.com/LightForm-group/alpha_separation
+---
+
+Sometimes analysis of images can be useful in order to quantify aspects of an image, perhaps measuring a certain structure, or identifying the position of a certain structure to include or exclude it in another analysis.
+
+The skimage library in Python contains many useful functions for image analysis.
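+
+A minimal sketch of the kind of measurement skimage makes easy is given below (the input image is a random placeholder; in practice you would load a micrograph instead):
+
+```python
+import numpy as np
+from skimage import filters, measure
+
+# Placeholder "micrograph": a 2D greyscale image as a float array
+image = np.random.default_rng(0).random((256, 256))
+
+# Threshold the image (Otsu's method) and label the connected regions
+mask = image > filters.threshold_otsu(image)
+labels = measure.label(mask)
+
+# Measure the area and centroid of each identified structure
+for region in measure.regionprops(labels):
+    print(region.label, region.area, region.centroid)
+```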
+
+Example analysis scripts are linked on the main experiments wiki page.
\ No newline at end of file
diff --git a/collections/_experiments/index.html b/collections/_experiments/index.html
index ba94be2..ebdab22 100644
--- a/collections/_experiments/index.html
+++ b/collections/_experiments/index.html
@@ -6,16 +6,13 @@
show_meta: false
---
-{% include checklist_urls.html %}
{% assign exp_sorted = site.experiments | sort: "title" %}
- + - @@ -25,8 +22,8 @@ {% assign filename = exp.url | split: "/" -%} {% if filename[-1] != "index" -%} {% assign exp_name = filename[-1] | split: ".html" -%} - {% assign an_code_link = "-" -%} - {% assign tut_link = "-" -%} + {% assign an_code_link = "" -%} + {% assign tut_link = "" -%} {% for tut in site.tutorials -%} {% assign tut_filename = tut.url | split: "/" -%} @@ -35,7 +32,6 @@ {% assign tut_link = 'Link' -%} {% endif -%} {% endfor %} - {% assign exp_title_clean = exp.title | replace: " ", "-" | downcase | replace: "(", "_" | replace: ")", "_" -%} {% assign all_authors = exp.author | split : ',' %} @@ -46,7 +42,6 @@ {% endfor %} - @@ -72,8 +65,6 @@ {% endfor %} {% endfor %} - {% else %} - - {% endif %}
diff --git a/collections/_experiments/macro_dic.md b/collections/_experiments/macro_dic.md
index 6643c8c..e82cdf8 100644
--- a/collections/_experiments/macro_dic.md
+++ b/collections/_experiments/macro_dic.md
@@ -2,6 +2,7 @@
published: true
author: Wayne Heatman, Elliot Cooksey-Nash
title: Macro DIC
+
---
This document oulines the procedure for carrying out macro DIC measurements. This procedure should be followed for achieving best quality data and make it ready for sharing and re-use.
@@ -39,7 +40,7 @@ The images that are to be obtained need to be as sharp as possible. Therefore, w
### Error determination and calibration
This section explains how the subset size and step size affect noise/error in DIC (Digital Image Correlation) data. This section should help you to decide what subset size and step size you should choose for analysing your DIC data. The analysis was done using the DIC python code for the .csv output format from the LaVision DaVis DIC software. The following analysis was carried out on the first 10 images of one set of DIC data. The subset size and step size where changed and the strain was recorded. The images were captured before any deformation had begun, so there should be no strain detected. Any strain detected by the DIC software is therefore noise in the recording. The area in which the data was gathered from can be seen in the image below.
-![](/wiki/assets/images/posts/AnalysisLocation.jpg =152x224)
+![](/wiki/assets/images/posts/AnalysisLocation.jpg){: width="152" height="224"}
### Example Error determination and calibration
The first set of tests were performed to look at the variation in strain with changing Subset Size and constant step size. The Subset size variation can be seen below:
diff --git a/collections/_experiments/optical_microscopy-general.md b/collections/_experiments/optical_microscopy-general.md
index a8a5506..65c027d 100644
--- a/collections/_experiments/optical_microscopy-general.md
+++ b/collections/_experiments/optical_microscopy-general.md
@@ -2,6 +2,8 @@
published: true
title: Optical microscopy
author: Peter Crowther
+tutorials:
+  - Automated Optical Microscopy with ZEN 2
---
This describes two optical microscopes for looking at microstructures.
diff --git a/collections/_experiments/quenching_dilatometry.md b/collections/_experiments/quenching_dilatometry.md new file mode 100644 index 0000000..8c4b5cd --- /dev/null +++ b/collections/_experiments/quenching_dilatometry.md @@ -0,0 +1,411 @@ +--- +title: Dilatometer quenching tests +author: Ed Pickering, Josh Collins, Mark Taylor +analysis_codes: + - name: Transformation Start (Ts) Analysis + link: https://github.com/JoshUoM/steel_dilatometer_analysis +--- + +## Executive Summary + +***What is this technique?*** + +Quenching dilatometry is a technique that measures the expansion or contraction of a sample (i.e., its dilatation) when its temperature is changed. A small specimen of material is placed into the dilatometer and is heat treated according to an inputted programme. The temperature is controlled very accurately, typically using induction to heat the sample and pressurised inert gas to cool it. Concurrently, the change in length of the sample is measured. This is typically recorded in one direction only, using pushrods. The change in length not only provides information regarding thermal expansion, but also solid-state phase transformations that involve significant changes in volume (e.g., the austenite to ferrite transition). + +***What information does it provide?*** + +The principal output of an experiment is a graph of change in length vs. temperature. From this, the following can be extracted using appropriate data analyses: +* Transformation start and finish temperatures (e.g., martensite start). This can be used to create continuous cooling transformation (CCT) diagrams, continuous heating transformation (CHT) diagrams, and time-temperature-transformation (TTT) diagrams. +* The evolution of the volume fractions of microstructural constituents (e.g., martensite) with temperature. +* Thermal expansion coefficients. + +***Why/when use this technique?*** + +The technique is very useful when attempting to characterise and predict the microstructures that form in steels during industrial heat treatment processes, in particular austenitisation + quenching. Different cooling rates can be experienced in different areas of large components when quenching, which may lead to the formation of different microstructures. Dilatometry allows the heat treatments at specific locations (e.g., at different thicknesses) to be simulated, and hence the microstructures to be characterised and predicted more readily. The construction of CCT and TTT diagrams is a particularly useful output in this regard. + +***What are the sample requirements?*** + +Samples need to be cylinders measuring 4 mm in diameter and 10 mm in length. They may be made hollow for very fast heating and cooling rates. The flat ends of the cylinder should be as parallel as possible and ground or milled to a fine finish. The surface of the sample should be free from oxide. + +***How do we assess the data quality?*** + +To assess the origins of artefacts that may be apparent within datasets, a number of approaches can be used: +* Different sample geometries and multiple thermocouple placements can be used to test whether non-uniform temperatures (or other geometry-related effects) are the cause of a phenomenon or not. +* Alternative sample materials, such as platinum, are also useful when looking for the cause of artefacts. +* Machine-derived artefacts can be assessed by examining the metadata gathered from results files, such as the data associated with HF power.
+* Repeat measurements, either on the same sample or on samples of the same material, are always useful to determine whether artefacts have originated from one-time sample or machine oddities. + +In addition, to ensure good data quality and that our interpretations are sound in general, the following may be used: +* Repeat measurements on different samples of the same material. +* Comparisons of results to those obtained through other experimental methods, in particular optical and scanning electron microscopy (SEM), and hardness. +* Comparisons to empirical relationships for start temperatures can provide reassurances with respect to the identity of a transformation start temperature. +* Comparisons to results from more advanced models for phase transformation kinetics can also be a useful way to validate the interpretation of transformation curves. + +***What are the common limitations/pitfalls?*** + +* Only phase transformations that involve significant changes in sample length can be measured using dilatometry. Hence, it is useful for steels that display solid-state allotropic phase transitions on heating/cooling, but is less useful for examining precipitation in nickel alloys or aluminium alloys. +* Sample sizes are typically no more than 4 mm diameter x 10 mm long. The technique measures the length change of the entire specimen only (it samples the whole volume at once). +* Heating and cooling rates of up to 100˚C s-1 are usually achievable in a well-controlled manner in most alloys. Heating and cooling rates of 1000˚C s-1 are achievable, but samples usually need to be hollow, the material has to have high thermal conductivity, and rates may not be so controllable. +* Typical maximum temperature of 1600˚C, although measurements and/or control of temperature may be less reliable at very high temperatures. +* It is difficult to assess volume fractions and start temperatures in mixed microstructures where there is overlap of transformations (e.g., bainite into martensite). +* It is also difficult to calculate accurate volume fractions if there is a significant amount of retained austenite (with unknown volume fraction) at the end of the test, or under certain circumstances in which there is a significant amount of carbon partitioning to the austenite during transformation(s), since the procedure does not account for these occurrences. +* The orientation of the samples should be considered where transformations may be non-isotropic in terms of their strain, for example when a sample is textured and the transformation has a strain that varies with crystallographic direction. + +## 1. Introduction + +**Dilatometry is the measurement of the expansion or contraction of a sample (i.e., its dilatation) when its temperature is changed.** This typically involves measurement in one direction (e.g., change in sample length), but measurements can also be multi-dimensional. + +The most common reason for conducting dilatometry measurements is to measure the thermal expansion coefficients of a material. However, for steels and other metals that display allotropic transformations (i.e., a change in crystal structure with temperature), dilatometry can be used to examine their phase transformation behaviour. In steels, the change from body-centred cubic (BCC) ferrite to face-centred cubic (FCC) austenite (and the reverse) is accompanied by a change in volume of a few percent – precise values depend on the temperature of transformation and the composition of the steel. See Fig.
1 for an example of how the dilatation changes when a low-alloy steel is heated from room temperature to above its Ae3 temperature, where austenite is stable, and is then cooled back to room temperature. + +![Fig1](/wiki/assets/images/posts/Quenching_Dil_Fig1.png) +**Figure 1.** Dilatation curve for SA508 Grade 3 steel heated to 1000˚C at 20˚C s-1 and cooled to room temperature at 20˚C s-1. The transformation on cooling around 400˚C is that to martensite. + +In the following document, we will consider the procedure for measuring the dilatation of low alloy steel samples (e.g., SA508 Grade 3) during heat treatments designed to replicate industrial heat treatments (e.g., austenitisation and quenching). This is referred to as quenching dilatometry to distinguish it from more standard dilatometry that is only concerned with the measurement of thermal expansion coefficients. The typical aims of carrying out quenching dilatometry are: +* To determine **transformation start and finish temperatures** (e.g., for martensite or bainite) for a particular steel during a non-isothermal heat treatment – typically austenitisation and quenching. This information can be used to construct a **continuous cooling transformation (CCT)** diagram or a **continuous heating transformation (CHT)** diagram. +* To determine **transformation start and finish times** (e.g., for martensite or bainite) for a particular steel during an isothermal heat treatment, where the temperature is held constant. This information can be used to construct a **time-temperature-transformation (TTT)** diagram. +* To estimate the **volume fractions of microstructural constituents** present in the material after heat treatment (or examine their evolution over time). Such information can be used to interpret or predict the microstructures formed following heat treatment, and can also be used in the modelling of residual stress evolution during welding. **However, it is important to note that there are some key limitations to such estimations. See Section 1.1 below.** + +For the low-alloy steels Rolls-Royce uses for plant construction, the principal interest is in CCT behaviour rather than TTT behaviour, since most steels are austenitised and quenched. The overall goal of such work is usually to be able to interpret (or predict) the microstructures that form during production heat treatments (e.g., the percentage bainite formed). Note that achieving this is not always straightforward through dilatometry alone, and the results of dilatometry should always be combined with techniques such as optical microscopy, scanning electron microscopy and microhardness to corroborate any conclusions drawn. + +The remainder of this document will set out the standard procedures for carrying out quenching dilatometry measurements on low-alloy steels. Special focus will be given to austenitisation and quenching (CCT) investigations. The type of quenching dilatometer used is a so-called **pushrod dilatometer**, which uses ceramic pushrods to contact the sample in order to measure length change, as will be described in the following section. **This is by far the most common type of quenching dilatometer**, and hence this document is written in reference to it. + +The document begins by describing the typical experimental apparatus used for quenching dilatometry, including highlighting common choices for options such as quenching gas and pushrod material (Section 2).
It then proceeds to talk about sample preparation considerations (Section 3), before detailing the technicalities of data acquisition (Section 4) and analysis (Section 5). Common artefacts and misinterpretations are highlighted in Section 6, ensuring data quality is addressed in Section 7, and complementary and alternative techniques are described in Section 8. Finally, a ‘How To’ guide for common experiments is given in Section 9. + +### 1.1. Technique Limitations + +Before proceeding, it is worth highlighting some key limitations to the technique: + +* **Only phase transformations that involve significant changes in sample length can be measured using dilatometry.** Hence, it is useful for steels that display solid-state allotropic phase transitions on heating/cooling, but is less useful for examining precipitation in nickel alloys or aluminium alloys, unless there is a significant change in length or change in expansion coefficient associated with the event. In theory, assessing Ti alloys should be possible, since like steels they exhibit an allotropic transition (alpha to beta), but the author has often found that the strains associated with this can be difficult to interpret and are often not as strong as for steels. +* **Sample sizes** are typically no more than 4 mm diameter x 10 mm long. The technique measures the length change of the entire specimen only (it samples the whole volume at once). This may be significant where the steel exhibits compositional microsegregation (banding). +* **Heating and cooling rates** of up to 100˚C s-1 are usually achievable in a well-controlled manner in most alloys. Heating and cooling rates of 1000˚C s-1 are achievable, but samples usually need to be hollow, the material has to have high thermal conductivity, and rates may not be so controllable. +* **Typical maximum temperature of 1600˚C**, although measurements and/or control of temperature may be less reliable at very high temperatures. +* **It is difficult to assess volume fractions and start temperatures in mixed microstructures where there is overlap of transformations** (e.g., bainite into martensite). Results should always be compared to those from microscopy and microhardness for a sanity check. +* **Accurate measurement of volume fractions, following the procedure in Section 5.3 below, is difficult if there is a significant amount of retained austenite** (with unknown volume fraction) at the end of the test, or under certain circumstances in which there is a significant amount of carbon partitioning to the austenite during transformation(s), since the procedure does not account for these occurrences. + +## 2. Apparatus and Function + +Fig. 2 shows some of the principal components of a pushrod dilatometer. + +![Fig2](/wiki/assets/images/posts/Quenching_Dil_Fig2.png) +**Figure 2.** Photographs of the inside of a quenching dilatometer, with major components labelled. When operating, the sample would sit inside the heating coil (the pushrod assembly slides to translate the sample into the coil once loaded). +* The pushrods are used to transmit the sample’s change in length to the measuring system (see below). Two of them hold the sample in place during the heat treatment (there is a very small compressive force applied to the sample) and are moved when the sample changes length. The third acts as a reference. +* The pushrods are usually either silica quartz or alumina.
**It is recommended to use silica quartz for all heat treatments that do not involve an excursion at or above 1200˚C for over 30 seconds.** For heat treatments that do go to such high temperatures for prolonged periods, alumina pushrods should be used. +* A linear variable differential transformer (LVDT) module measures the dilatation. +* **Heating** is usually provided by an **induction coil**, which itself is water cooled (by internal water circulation). The water cooling of the coil also helps to cool the sample, although this is principally achieved (particularly at high cooling rates) using a quenching gas (water does not flow onto or through the sample itself, but the water-cooled coil acts to cool the environment). +* An **inert gas** is typically used to control the rate of **cooling** during quenching processes. This gas is delivered, under pressure, into the chamber to directly impinge on the sample. This is usually achieved by the gas blowing onto the sample from numerous holes in the induction coil, which also includes a gas channel as well as a water cooling channel. When using a hollow sample the gas can be blown through the centre of the sample to further increase the cooling rate. The gas can also be used as an inert environment (most machines can operate under vacuum or inert gas atmosphere). However, care must be taken as using an inert gas atmosphere at high temperatures can lead to decarburisation in some cases, as discussed later in Section 4.1. +* Ar and He are the usual choices for inert gas. He is more expensive, but has a higher thermal conductivity and can be used to cool the sample to sub-zero temperatures by being cooled in liquid nitrogen. +* Thermocouples are used to monitor the temperature of the sample and provide the feedback for control of heating/quenching gas. Type S is most common for typical steel heat treatments, whilst Type K might be used for sub-zero work. + +## 3. Sample Geometries and Preparation + +The sample geometries for most quenching dilatometry are cylindrical specimens that match the diameters of the pushrods and sit with their full lengths inside the induction coil. Typically, samples measure 4 mm in diameter by 10 mm long. Larger diameters and non-cylindrical geometries are possible, but they must fit within the heating coil. + +### 3.1. Quenching Speed Considerations + +For the fastest heating and cooling rates, the sample mass may need to be reduced. This is achieved, typically, by using hollow samples. Examples of technical drawings of various hollow samples are provided in Appendix 1. + +### 3.2. Surface Finish + +The surface finish of the flat ends of the specimens needs to be very good and the ends need to be as close to parallel as possible. Concentric marks, such as those left by machining, should be removed. This can be achieved quickly and effectively using SiC grinding paper and an appropriate jig to keep the ends flat and parallel. The surface finish of the circumferential surface of the cylindrical samples is less important, but does need to be amenable to thermocouple attachment. + +### 3.3. Homogeneity and Orientation + +A dilatometry measurement will sample the behaviour of the whole sample volume and provide an averaged signal for all the material contained within it. Hence, any inhomogeneity within the sample volume (in terms of chemistry, levels of deformation, crystallographic texture, etc.) will influence the overall dilatation measured. This needs to be accounted for when preparing samples and interpreting results.
+ +## 4. Data Acquisition + +The data acquisition stage of a dilatometry experiment is a relatively standard procedure, which involves programming in the temperature steps of interest (either holding, cooling or heating steps) and requesting the number of data points to be acquired. Typically, only linear profiles can be used for each step, but programmes can contain many steps (usually 50+), so curves can be recreated by inputting many small linear steps. Example heat treatment profiles are shown in Fig. 3. + +![Fig3](/wiki/assets/images/posts/Quenching_Dil_Fig3.png) +**Figure 3.** Example heat treatment profiles. (a) Austenitisation at 1000˚C for 5 minutes with cooling and heating at 20˚C s-1. (b) Cooling from austenitisation in which the rate is changed every 100˚C. + +The number of data points acquired per programme is usually limited, so more points should be allocated to the steps of interest. Measurement frequencies of over 1000 Hz are possible, but may not always be practical. Thermocouples are typically sensitive to ± 0.1˚C, so gathering data with much greater frequency than every 0.1˚C of cooling or heating is unnecessary. + +### 4.1. Heating and Austenitisation + +Programmes used to construct CCT curves will need to start with heating and austenitisation steps to set the prior austenite grain size. The speed of heating, hold temperature and hold time can all have a large influence on the grain size. + +For heat treatments involving prolonged high-temperature holds (>1000˚C), it has previously been recommended that a partial pressure of inert gas be used as the environment, rather than vacuum. This is to prevent oxide from subliming from the sample surface and coating the chamber. However, prolonged holding under inert gas atmosphere has recently been found to lead to decarburisation of samples in some cases. In such cases, it is recommended that austenitisation be performed under vacuum where possible, and that the surfaces of samples are cleaned of any oxide (e.g., by quick hand grinding using SiC paper) before being run. Tests should be performed in each case to assess whether holding under vacuum or under inert gas is preferable. + +Regarding heating rates – for the fastest heating rates a vacuum will be required, although relatively fast heating rates may still be attained using an inert atmosphere (a few hundred °C s-1). + +### 4.2. Quenching + +The speed of quenching will determine which microstructural constituents form (ferrite/pearlite/bainite/martensite). Quenching with inert gas is usually necessary for all but the slowest cooling rates (around 0.1˚C s-1). For the fastest cooling rates, He gas will be required. The gas can be introduced (switched on) during the first cooling step after an experiment has started under a vacuum (so it can run under vacuum until the first cool requiring gas), although this may lead to ‘blips’ in the data recorded (see Section 6). + +### 4.3. PID Controls + +Accurate control of sample heating and cooling is not a trivial task when the rates of doing so are high (>50˚C s-1), particularly when samples are being heated to high temperatures. Proportional–integral–derivative (PID) controls should be altered to tune both the heating power and the quenching gas (there are usually separate controls for each). + +For example, by tuning the PID settings, significant overshoots in temperature on fast heating can be suppressed, see Fig. 4. Different PID settings are usually required for the particular geometry and thermal conductivity of the sample.
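For intuition only, a minimal discrete PID loop is sketched below. This is purely illustrative – the one-line plant model, gains and time step are invented for the sketch and bear no relation to the dilatometer's actual control parameters:

```python
# Toy discrete PID temperature loop (illustrative only; the gains and
# the simple plant model below are invented, not dilatometer settings).
kp, ki, kd = 8.0, 0.5, 1.0        # proportional, integral, derivative gains
dt = 0.01                          # control interval (s)
ramp_rate = 100.0                  # target heating rate (degC/s)

temp, setpoint, integral, prev_err = 25.0, 25.0, 0.0, 0.0
for _ in range(1000):              # simulate 10 s of heating
    setpoint += ramp_rate * dt
    err = setpoint - temp
    integral += err * dt
    derivative = (err - prev_err) / dt
    power = max(0.0, kp * err + ki * integral + kd * derivative)
    # toy plant: temperature rises with power, loses heat to surroundings
    temp += (power - 0.05 * (temp - 25.0)) * dt
    prev_err = err

print(f"setpoint {setpoint:.0f} degC, sample {temp:.0f} degC")
```

Increasing the proportional gain speeds up the response but promotes the overshoot and oscillation illustrated in Fig. 4; the integral term removes steady-state lag, and the derivative term damps the oscillations.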
+ +![Fig4](/wiki/assets/images/posts/Quenching_Dil_Fig4.png) +**Figure 4.** Examples of the effect of changing the PID settings on the temperature control (taken from the TA instruction manual for the DIL805A/D/T). + +### 4.4. Temperature Uniformity + +Temperature uniformity is often a concern for dilatometry experimentalists – ideally, the temperature should be constant all through the sample volume during the test. This is never achievable in reality, but at heating and cooling rates <10˚C s-1 the differences in heat treatment experienced in different areas of the sample (assuming standard 4 mm diameter x 10 mm long dimensions) should be negligible for steels. + +The action of the induction heating will mean that the sample is always hottest at its centre. This can have a big effect when heating samples at high heating rates to high temperatures – the centre of the sample can overshoot the set temperature considerably, leading to the outside of the sample also overshooting. This may be mitigated to a great extent using a thin-walled hollow sample. Simple FEM models of quenching from austenitisation have suggested that, for a non-hollow sample, the temperature difference between the very centre and edge of the sample at mid length is not more than 1˚C at cooling rates of 5˚C s-1 and below. However, it may reach around 10˚C at 50˚C s-1. + +Temperature uniformity along the sample length can be monitored by placing one thermocouple at the sample centre and welding another nearer the sample’s end. For typical heat treatment cycles to produce a CCT, temperature gradients along the lengths of samples tend to be small (*illustrative data to be collected*). However, significant gradients can exist when samples are held above 1000˚C, owing to heat loss to the pushrods. The heat loss to the pushrods can also change according to the pushrod material. + +### 4.5. Sub-Zero Quenching + +It is possible on some dilatometers to quench samples to below room temperature (down to -150˚C) by passing He gas through a heat exchanger submerged in liquid nitrogen. However, such experiments are not trivial to carry out, and results seen by the author have often contained artefacts (such as non-linear dilatation curves below room temperature, when zero transformation would have been expected). + +### 4.6. Safety and Machine Preservation + +Heat treatments that involve very high temperatures (>1200˚C) should be carefully monitored to ensure that no instabilities begin to develop. Over-compensation for drops in temperature can develop into such instabilities, with large variations in the input power (and temperature) that can eventually melt the sample. Localised sample melting can also occur during high temperature holds if significant segregation is present. The authors have also observed localised melting around thermocouple wires during holds >1300˚C, although the origins of this are unclear. + +The use of a partial pressure of gas is generally advised at very high temperatures to suppress sublimation of species. However, as highlighted in Section 4.1, holding at high temperature with an inert gas atmosphere can lead to unwanted decarburisation in some cases, so this should be accounted for. + +In general, during prolonged holds at any temperature, the maximum power of the machine should be limited such that accidental overheating (and melting) of the sample is not possible. This can be achieved by specifying the maximum permissible HF power for each heat treatment step. + +## 5.
Data Analysis + +The principal data extracted from a dilatometry experiment are change in sample length and the sample temperature. The change in length can easily be converted to strain by dividing by the original sample length (around 10 mm). + +### 5.1. Transformation Start and Finish Temperatures + +The transformations from ferrite to austenite and austenite to ferrite involve deviations in dilatometry curves as shown in Fig. 1. We can associate a start temperature for these transformations, but it is not always easy to do so – just as it is not obvious how to define a yield point for an alloy with a gradual transition to plastic behaviour. Two methods are highlighted below. + +#### 5.1.1. The Offset Method + +The **offset method** is the recommended method for transformations that are preceded by periods of no transformation (which yield regions of linear dilatation). Essentially, the method involves taking the gradient of the curve pre-transformation, and offsetting this from the curve by a particular amount. The transformation temperature is defined as the temperature at which the offset curve meets the experimental data, see Fig. 5 for an example of transformation start during cooling. It is analogous to the 0.2% proof stress method of determining yield strength. + +![Fig5](/wiki/assets/images/posts/Quenching_Dil_Fig5.png) +**Figure 5.** Showing the application of the offset method to determine the martensite start temperature of an SA508 Grade 3 steel quenched at 20˚C s-1 (full cooling curve shown inset). + +The offset method discussed here is that proposed by Yang and Bhadeshia [1]. They used a **constant offset for all samples of a given steel, corresponding to the strain for transformation to 1% martensite** (i.e., the strain that would be expected if 1% martensite formed in 100% austenite at room temperature). The strain corresponding to 1% martensite can be found by computing the lattice parameters of the austenite and martensite, which in turn depend on the alloy composition. Spreadsheets for the calculation of the offset strain (computed using empirical formulae for lattice parameters) can be found here: http://www.phase-trans.msm.cam.ac.uk/2007/mart.html (both spreadsheets contain the same calculations, which are also repeated in the paper). Additionally, a Jupyter Notebook has been provided for users to quickly calculate the offset strain in an IPython environment. This can be found in the 'Analysis codes' section for this experiment, titled "Offset_Calculator.ipynb". Furthermore, a Jupyter Notebook for directly calculating transformation start temperatures has also been provided in the 'Analysis codes', titled "Transformation_Ts_Calculator.ipynb". This notebook has been set up so that users can test a variety of offset gradients (3 recommended) over a range of temperatures, so that any uncertainties can be quantified. + +The offset can be applied to assess finish temperatures in an identical fashion, with the offset being applied to the transformed curve, see Fig. 6 for an example during cooling. Note that the method to assess finish temperature on cooling is equivalent to that to assess start temperature on heating. + +![Fig6](/wiki/assets/images/posts/Quenching_Dil_Fig6.png) +**Figure 6.** Showing the application of the offset method to determine the martensite finish temperature of an SA508 Grade 3 steel quenched at 20˚C s-1 (full cooling curve shown inset).
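To make the offset construction concrete, a minimal sketch in Python follows. This is not the code from the notebooks above; the file name, fitting window and offset strain value are placeholder assumptions for illustration:

```python
import numpy as np

# Strain vs. temperature for a cooling segment, ordered from high to low T.
# The file name and column layout are hypothetical placeholders.
temperature, strain = np.loadtxt("cooling_curve.csv", delimiter=",", unpack=True)

# 1. Fit the gradient of the untransformed (austenite) region over a linear
#    window, e.g. 50 degC wide and well above the expected start temperature.
window = (temperature > 500) & (temperature < 550)   # assumed linear region
slope, intercept = np.polyfit(temperature[window], strain[window], 1)

# 2. Offset the extrapolated line by the strain corresponding to 1%
#    transformation (alloy-specific; see the spreadsheets/notebooks above).
offset_strain = 1e-4                                 # assumed value
offset_line = slope * temperature + intercept + offset_strain

# 3. The start temperature is the first point on cooling at which the
#    measured strain deviates beyond the offset line (transformation to
#    martensite causes expansion relative to the contracting austenite).
Ts = temperature[np.argmax(strain > offset_line)]
print(f"Transformation start (offset method): {Ts:.0f} degC")
```

The same construction, with the gradient fitted to the fully transformed portion of the curve instead, gives finish temperatures.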
+ +One difficulty associated with the offset method is that there is no prescribed region over which the gradient of the untransformed (or transformed) curves should be assessed (i.e., what temperature interval). **However, it is recommended that the gradient be taken over at least a 50˚C temperature range, at least 50˚C away from the approximate transformation start temperature.** + +The offset method can be used when two or more transformation events occur during heating or cooling, as long as they are separated by an interval of no transformation in which the gradient can be measured. An example of this is shown in Fig. 7, which shows a high-temperature transformation to ferrite followed by a transformation to bainite at lower temperatures. + +![Fig7](/wiki/assets/images/posts/Quenching_Dil_Fig7.png) +**Figure 7.** Schematising how the offset method can be used to determine the start temperatures of sequential reactions, so long as there is a linear portion of the curve between the transformations. + +#### 5.1.2. 2nd Derivative + +The offset method is the recommended method of determining start temperatures when the transformation is preceded by a period of no transformation activity. However, there may be cases in which there is no linear region of dilatation before a transformation occurs. This is found to be the case when one transformation immediately follows another, such as in Fig. 8 where a martensite transformation occurs directly after a bainite transformation. + +Here, the only indication of a transformation is the change in curvature of the transformation curve (as highlighted in the figure). This can be assessed using the **second derivative** of the curve, which should change sign when the curvature switches. **The transformation temperature is that at which the second derivative is equal to zero.** + +![Fig8](/wiki/assets/images/posts/Quenching_Dil_Fig8.png) +**Figure 8.** Showing the application of the second derivative method to assess the likely martensite start temperature for a sample that has already undergone a partial transformation to bainite. The change in curvature around 380˚C is schematised using the dashed curves. + +The second derivative of a curve can be calculated quite straightforwardly, e.g., using the gradient function in an Excel spreadsheet. However, there is one key complication, and that is the temperature range over which the second derivative is taken. If this is too short, the derivative picks up noise in the data and is noisy itself. If it is too long, changes in curvature may be averaged out. For Fig. 8, the second derivative was taken over a 10˚C temperature range, but several ranges should be tried to assess the best outcome and the uncertainty involved in the method. + + +### 5.2. Uncertainties in Start/Finish Temperatures + +There are several sources of uncertainty associated with the acquisition of data during a single dilatometry test, and its subsequent analysis to obtain start and finish temperatures. For the offset method, these include: +* **Noise in the change in length signal**, typically ±0.1 µm, which leads to an uncertainty in start temperature, typically around ±2˚C. +* **Uncertainty associated with finding the gradient to apply the offset to** (i.e., the temperature range over which the gradient is evaluated can change the gradient itself). The author has found that this can often introduce large uncertainties (±40˚C), particularly when the start transformation involves a dilatation that is not particularly steep.
+ +However, for a *repeat test with a consistent analysis method* (i.e., *gradient evaluation procedure*), there are two sources of uncertainty or scatter that often dominate: +* **Sample-to-sample variations**, which can be caused by small differences in alloy composition, differences in sample geometry (which can influence temperature uniformity) or differences in experimental conditions (e.g., placement of sample between pushrods). +* **Temperature non-uniformities** can exist within a sample during heating, holding and cooling stages, in particular during fast heating and cooling owing to where the heat is generated or extracted. + +Typically, one finds that, combining all these sources of uncertainty, **the scatter in start temperature is not greater than ±20˚C for a single heat treatment cycle with a consistent offset methodology**, and can be much smaller. The only cases in which greater uncertainty is often encountered are cases in which transformations are evaluated for high heating and cooling rates (50˚C s-1 and above). In these cases, there are a number of artefacts that can be problematic (see Section 6), and temperature uniformity will be poor. + +The above uncertainties only refer to one particular heat treatment and analysis method. One could, of course, be interested in how martensite start temperature varies with cooling rate. However, assessing the uncertainty introduced by changing the cooling rate is not trivial, since there are likely to be metallurgical reasons why start temperature would change with cooling rate (e.g., the austenite grain size may be larger for slower cools). + +Similar sources of uncertainty are also present for the 2nd derivative method. The temperature interval over which the derivative is taken can be varied to give an idea of the uncertainty in the method. However, sample-to-sample variations and temperature non-uniformities tend to dominate, as for the offset method. + +### 5.3. Volume Fraction Transformed + +The volume fraction transformed can be estimated from dilatometry data by assuming the evolution of strain follows a lever rule type relationship. For a general transformation, this can be written: + +fraction transformed = $$\frac{\varepsilon - \varepsilon_\textrm{untransformed}}{\varepsilon_\textrm{transformed} - \varepsilon_\textrm{untransformed}}$$ + +For transformation(s) on cooling (from austenite to ferrite), the following can be written: + +fraction transformed = $$\frac{\varepsilon - \varepsilon_\gamma}{\varepsilon_\alpha - \varepsilon_\gamma}$$ + +where \(\varepsilon\) is the strain from the dilatation curve at a particular temperature, \(\varepsilon_\alpha\) is the strain from the extrapolated transformed gradient (assumed to be ferrite, hence the α notation) at the same temperature, and \(\varepsilon_\gamma\) is the strain from the extrapolated untransformed curve (assumed to be austenite) again at the same temperature. This is schematised here: + +![Fig9](/wiki/assets/images/posts/Quenching_Dil_Fig9.png) +**Figure 9.** Schematising the calculation of fraction transformed from a dilatation curve on cooling from austenite to ferrite (martensite in this case). + +And the resulting transformation curve from Fig. 9 is: + +![Fig10](/wiki/assets/images/posts/Quenching_Dil_Fig10.png) +**Figure 10.** The evolution of the transformation on cooling depicted in Fig. 9. + +**Important note:** this assumes that the final strain measured during the test corresponds to 100% transformed.
If retained austenite is present, then this will need to be measured through a different technique (e.g., XRD) and accounted for. Also, errors may be introduced if a transformation involves significant carbon partitioning into the retained austenite (e.g., if allotriomorphic ferrite forms at higher temperatures). This is because this carbon partitioning leads to an expansion of the austenite lattice parameter, and the accompanying increased strain is misinterpreted as being due to the volume fraction of ferrite increasing (not due to austenite expansion). Further details can be found in [2]. + +For curves with more than one transformation event, the volume fraction curve can be used to estimate the volume fractions of the respective microconstituents. For example, the volume fraction curve for Fig. 8, which shows both ferrite and bainite formation, is as follows: + + +![Fig11](/wiki/assets/images/posts/Quenching_Dil_Fig11.png) +**Figure 11.** The evolution of the transformation on cooling depicted in Fig. 8. + +**There is no standard routine for reading the % of microconstituents formed in mixed samples.** There are a number of different approaches, including quoting the volume fraction at a specified temperature (that is deemed to be between the finish T of one transformation and the start of the next). Another common approach is to take the volume fraction as that at the transformation start temperature of the subsequent reaction. + +Note that **no technique can be used to calculate the fractions of microconstituents that form simultaneously during a dilatometry experiment** – their dilatations will be convoluted. + +A question that must be asked when performing the analysis is ‘what am I measuring the fraction of?’. The answer to this should be informed by comparison of the start temperatures to previous studies or empirical relationships, and should also use the results of microscopy and hardness. This is discussed further in Section 6.2. + +## 6. Common Artefacts and Misinterpretations + +Common features and artefacts seen in dilatometry curves include the following, which can either be seen in Fig. 1 or are schematised in Fig. 12: + +* **The final length of the sample does not match the original length of the sample.** This can be seen in Fig. 1. The reasons behind this are not always clear, but can be a combination of: + * **Different end microstructure to start microstructure**, which can include the formation of **retained austenite** (more retained austenite will result in a bigger difference, if the starting microstructure was 100% ferrite and cementite). + * **Transformation plasticity** could in theory contribute to this behaviour. The formation of hard martensite inside austenite can plastically deform it and will constrain any subsequent transformation, leading to macroscopic changes in length. + * **‘Drift’** in the apparatus. This is essentially referring to any change that is due to expansion or contraction of the machine, or change in the measuring system, that leads to a change in measured sample length. This seems to be especially prevalent at high heating and cooling rates when using alumina pushrods. + * Length changes during isothermal holding (see the next point). +* **Length changes during isothermal holding at high temperature.** This can also be seen in Fig. 1, in which there is a small drop in strain observed during holding at 1000˚C. The following can contribute: + * **Phase transformations** such as precipitate dissolution, which were not completed during the heating step(s).
+ * **Creep** of the sample, owing to the relaxation of residual stresses (the force applied to hold the sample in the pushrods is typically very small, and should not lead to creep). The removal of dislocations and other defects could in theory also contribute to the creep strain. + * **‘Drift’** in the apparatus, as described above. +* **‘Blips’** in the length change data in linear regions can arise from a number of sources: + * **Sudden changes in gas flow rate**, owing to a switch from low to high flow rate valves (the machine will do this automatically). As mentioned earlier, these are particularly pronounced when the gas is switched on after performing the initial part of an experiment under a vacuum. If these are problematic, it is usually best to change the experimental process to avoid gas being introduced during the middle of the cooling step. Austenitisation (and other high-temperature holds) may still best be operated under vacuum to avoid decarburisation, as discussed in Section 4.1. However, this vacuum can be removed at the very start of any cooling step (away from transformation events) and a partial pressure introduced if necessary, to avoid sudden jumps in gas flow during subsequent cooling. + * **Mechanical issues with the machine**, such as loose, broken or slipping pushrods. This can be caused by repeatedly using silica pushrods at temperatures up to or exceeding 1200 °C. + * **Vibrations from the surroundings**, although these would have to be significant (simply knocking the machine has no effect). +* **‘Spiralling’** is often observed at fast heating and cooling rates, particularly at high temperatures. It is usually caused by the machine struggling to maintain the correct (set) temperature, and rapidly changing the heating power and/or cooling gas flow rate. This can create a disparity between the sample temperature at the centre and the sample temperature at the surface thermocouple, and the dilatation reading corresponds to neither. Incorrect placement of the sample in the induction coil can also cause this. +* **Non-linear gradients** (in regions usually linear) at fast cooling rates are often observed. These can arise owing to the non-uniform temperature through the volumes of samples when they are cooled rapidly using quenching gas at the sample surface. Lag in the measurement system could also cause this change, in theory, as could the effect of fast gas impingement on the pushrods. +* **Apparent reversal of transformations can be seen in tests.** There are few metallurgical phenomena that could explain this effect (one being the release of latent heat in one area of the sample retransforming another area). Instead, the origin is likely to be an issue with the measurement system itself, or perhaps some sort of creep or transformation plasticity. Indeed, it has been shown that application of a stress during the transformation can result in similar strain profiles, see [3]. + +![Fig12](/wiki/assets/images/posts/Quenching_Dil_Fig12.png) +**Figure 12.** Schematising the artefacts that are commonly seen in dilatometry data. + +* **Decarburisation** can occur during some heat treatments, see Fig. 13. As discussed in Section 4.1., it has been found that this can happen when samples are held at high temperatures for a long duration (hours) with an inert gas atmosphere (presumably this is because oxygen contamination in the gas reacts with C at the sample surface).
Holding under vacuum does eliminate the decarburisation, although at very high temperatures it could lead to more sublimation of elements from the sample surface. Tests should be performed to find the best approach. + +![Fig13](/wiki/assets/images/posts/Quenching_Dil_Fig13.png) +**Figure 13.** The decarburised surface layer of a low-alloy steel following a 2-hour hold at 820˚C with a partial pressure of helium. + +## 7. Ensuring the Quality of Data and Interpretation + +How can we assess the origins of artefacts in data? And how can we ensure our data is of good quality, and our interpretations are correct? The following approaches are tools that can be used. + +**In order to determine the origins of artefacts (and remove them if possible):** + +* **Different sample geometries** and multiple thermocouple placements can be used to test whether non-uniform temperatures (or other geometry-related effects) are the cause of a phenomenon or not. For instance, non-linear gradients during fast cooling may disappear if hollow samples are used, since there will be more temperature uniformity. + +* **Alternative sample materials** are also useful when looking for the cause of artefacts. For example, a platinum reference (often supplied with the machine), or the use of austenitic steels (which should not transform) can be used to test for the origin of non-linear responses. Pure Fe can be used to test start temperatures. Samples that should creep less (e.g., W or intermetallics) can be used to assess the origins of ‘drift’. + +* **Other machine-derived artefacts** (such as ‘blips’ due to change in gas flow rate) can be assessed by examining the metadata gathered from the results files, such as the data associated with HF power or gas inlet flow rate. + +* **Repeat measurements**, either on the same sample or on samples of the same material, are always useful to determine whether artefacts have originated from one-time sample or machine oddities. + +In addition to the removal/recognition of artefacts, to ensure good data quality and that our interpretations are sound: + +* **Repeat measurements** on different samples of the same material should be carried out if the **uncertainty** in results is to be adequately quantified. It may also be appropriate to examine samples taken from different locations in a forging and/or at different orientations to a particular direction (e.g., the radial direction). + +* **Comparisons of results** to those obtained through other experimental methods, in particular optical and scanning electron microscopy (SEM), and hardness, should always be carried out. Such comparisons are critically important when confirming the presence of microconstituents and their volume fractions. Optical microscopy is particularly useful for checking volume fractions of ferrite, whilst the presence of martensite will usually be obvious from the material hardness. Ball-park figures for the hardnesses of single and mixed microconstituent microstructures can be predicted using the relationships provided by Ion et al. [4]. + +* **Comparisons to empirical relationships for start temperatures** can provide reassurances with respect to the identity of a start temperature (e.g., whether it corresponds to the bainite start or martensite start). Such relationships are typically functions of alloy composition. See Appendix 2 for a list of these.
+ +* **Comparisons to results from more advanced models** for phase transformation kinetics can also be a useful way to validate the interpretation of transformation curves. Good examples of such models include the MUCG83 programme developed at the University of Cambridge [5] and the transformation model developed by Li et al. [6] (note, however, that many such models use the same empirical expressions for start temperatures shown in Appendix 2). + +## 8. Complementary and Alternative Techniques + +As stated in the previous section, optical microscopy and SEM, as well as hardness, are primary experimental techniques that are complementary to quenching dilatometry, in that they are able to help confirm the microconstituents present in a sample. Transmission electron microscopy is also of use, although it is a specialist technique that only assesses a small volume of material and requires expert interpretation. Similarly, techniques like X-ray diffraction (XRD), electron back-scatter diffraction and magnetic measurements can also be useful to determine whether any retained austenite is present in a sample. + +In terms of measuring the **real-time** evolution of phase transformation behaviour, in-situ experiments can be performed. It is possible to perform in-situ heating and cooling experiments in both the SEM and TEM, but observations are often complicated by sample drift, surface effects and oxidation. A realistic alternative is the use of real-time X-ray diffraction using a synchrotron source of X-rays (so-called synchrotron XRD, SXRD). Such sources are able to provide very high fluxes of X-rays, meaning that signals can be transmitted through large volumes of material (a few mm in thickness) and be detected with high-frequency acquisition systems. By quantifying the evolution of austenite/ferrite during heating/cooling cycles, SXRD provides comparable information to quenching dilatometry (volume fraction transformed, start temperatures), see [2]. However, SXRD requires access to national experimental facilities (synchrotron sources) and analysis of results is not trivial. + +## 9. ‘How To’ Guide + +### 9.1. Continuous Cooling Transformation (CCT) Diagrams + +The production of CCT diagrams is one of the principal uses of dilatometry, and is of particular interest to metallurgists who wish to understand and/or predict microstructural evolution when forgings are quenched during standard austenitisation-quench-temper heat treatments. + +The following are notes on conducting experiments to produce CCT diagrams: + +* Heating and cooling rates, as well as the hold times and temperatures used, should be as close to the manufacturing process as possible. The heating rate and hold time and temperature are important as they will set the **prior austenite grain size**, which will determine the transformation kinetics. +* Typical cooling rates to assess are: 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100˚C s-1. +* Transformation start temperatures should be determined as outlined in Section 5. +* The CCT diagram should be plotted such that time = 0 when the Ae3 temperature is passed through. +* An alloy's Ae3 temperature can be determined using Thermo-Calc software (https://thermocalc.com/) or can be manually calculated using published empirical equations, as sketched below. + +Example equations for manually calculating steel Ae3 temperatures are provided in Appendix 2. Appendix 3 provides examples of dilatation curves obtained for a number of different cooling rates in SA540 B24 steel.
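As a concrete illustration, the Andrews Ae3 expression listed in Appendix 2 can be evaluated directly. A minimal sketch is given below (the function name is ours; the composition is the SA508 Grade 3 chemistry tabulated in Appendix 2, with unlisted elements taken as zero):

```python
import math

def ae3_andrews(wt):
    """Ae3 in degC from the Andrews expression (K.W. Andrews, JISI 203 (1965) 721-727).
    `wt` maps element symbols to wt.%; missing elements count as zero."""
    c = wt.get
    return (910 - 203 * math.sqrt(c("C", 0)) + 44.7 * c("Si", 0)
            - 15.2 * c("Ni", 0) + 31.5 * c("Mo", 0) + 104 * c("V", 0)
            + 13.1 * c("W", 0) - 30.0 * c("Mn", 0) + 11.0 * c("Cr", 0)
            + 20.0 * c("Cu", 0) - 700 * c("P", 0) - 400 * c("Al", 0)
            - 120 * c("As", 0) - 400 * c("Ti", 0))

# SA508 Grade 3 chemistry (wt.%), from the table in Appendix 2
sa508_g3 = {"C": 0.18, "Si": 0.25, "Mn": 1.32, "Ni": 0.74,
            "Cr": 0.21, "Mo": 0.49, "Cu": 0.03, "Al": 0.002}

print(f"Ae3 = {ae3_andrews(sa508_g3):.0f} degC")   # ~802 degC, as tabulated in Appendix 2
```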
+ +The prior austenite grain size should be measured and quoted alongside any CCT diagram. It can be measured using thermal etching. + + +### 9.2. Thermal Etching in the Dilatometer + +Measuring the prior-austenite grain size in modern low-alloy steels is challenging. Etches (such as picric acid) which may have been used historically to reveal boundaries are not very effective in modern low-alloy steels that contain little phosphorus. Instead, two other methods can be used: EBSD and thermal etching. Like chemical etching, both require the formation of bainitic or martensitic structures from the prior austenite, because this means that the grain boundaries are preserved after cooling. + +Using EBSD, the orientations of martensitic or bainitic plates can be mapped, and then grain reconstruction software used to reconstruct the prior austenite grain structure. The reconstruction is possible because only certain crystallographic variants of martensite or bainite should exist in a single prior austenite grain. An example of reconstruction software is the MTEX toolbox [7]. There are challenges associated with this process in that as-quenched microstructures are often difficult to analyse using EBSD, since they tend to be highly strained and full of defects. Tempering is needed to anneal defects, but this should not be so severe as to change the grain structure. + +Thermal etching measures prior austenite grain size more directly, and **can be performed in a quenching dilatometer.** It works by subjecting a polished surface to the austenitisation heat treatment in a vacuum. When this happens, the surface of the sample grooves at grain boundaries owing to surface energy considerations. These grooves then appear dark when viewed optically, Fig. 14. The process for conducting thermal etching in a quenching dilatometer is as follows: + +* A standard solid 4 mm diameter x 10 mm long sample should be ground on the curved edge to produce a flat region along the length of the sample (the result is a cylinder with a section of its curved edge sliced off uniformly along its length). +* This flat region should be polished to as good a finish as possible. +* The sample should be heat treated in the dilatometer **under vacuum** following the austenitisation process of interest. The chamber may need to be vacuum pumped, flooded with an inert gas and re-pumped a few times to ensure that most of the oxygen has been removed from the chamber. It can also help to clean the inside of the induction coil with a cloth and some ethanol before starting the experiment. +* The sample should be **cooled in vacuum** by setting the dilatometer to cool rapidly, but without turning on the quenching gas (turning on the gas can result in surface oxidation). +* Images of the surface can be taken using an optical microscope, and the linear intercept method used to calculate the grain size. +* The technique is not suitable for steels with low hardenability, which do not form martensite or bainite when cooled in vacuum. + +![Fig14](/wiki/assets/images/posts/Quenching_Dil_Fig14.png) +**Figure 14.** The results of thermal etching, as viewed using optical microscopy. Taken from [8]. + +## Acknowledgements + +This work was instructed and funded by Rolls-Royce plc. Special thanks to E. Grieveson and D. Cogswell for their guidance during the writing of this document. + +## References + +[1]. H-S. Yang and H.K.D.H. Bhadeshia, Uncertainties In Dilatometric Determination Of +Martensite Start Temperature, Materials Science and Technology 23 (2007) 556-560. +[2]. E.J.
Pickering, J. Collins, A. Stark, L.D. Connor, A.A. Kiely, H.J. Stone, In Situ Observations of Continuous Cooling Transformations in Low Alloy Steels, Materials Characterization 165 (2020) 110355. +[3]. J.-B. Leblond, G. Mottet, J. Devaux and J.-C. Devaux, Mathematical Models of Anisothermal Phase Transformations in Steels, and Predicted Plastic Behaviour, Materials Science and Technology 1 (1985) 815-822. +[4]. J.C. Ion, K.E. Easterling and M.F. Ashby, A Second Report On Diagrams Of Microstructure And Hardness For Heat-Affected Zones In Welds, Acta Metallurgica 32 (1984) 1949-1962. +[5]. M.J. Peet and H.K.D.H. Bhadeshia, MUCG83 freeware, http://www.msm.cam.ac.uk/map/steel/programs/mucg83.html +[6]. M.V. Li, D.V. Niebuhr, L.L. Meekisho and D.G. Atteridge, A Computational Model for the Prediction of Steel Hardenability, Metallurgical and Materials Transactions B, 29B (1998) 661-672. +[7]. MTEX analysis toolbox for MATLAB, https://mtex-toolbox.github.io/ +[8]. H. Pous Romero, I. Lonardelli, D. Cogswell and H.K.D.H. Bhadeshia, Austenite Grain Growth in a Nuclear Pressure Vessel Steel, Materials Science and Engineering A 567 (2013) 72-79. + + +## APPENDIX 1 – Sample Geometries + +The following are examples of sample geometries used for pushrod dilatometers (provided by TA Instruments): + +![FigA1](/wiki/assets/images/posts/Quenching_Dil_Fig_A1.png) + + +## APPENDIX 2 – Transformation Start Temperatures & Ae3 Calculations + +Empirical relationships for bainite and martensite start temperatures are listed below: + +Expression (all compositions in wt.%) | Reference | Value for SA508 Grade 3 | Value for SA508 Grade 4N | Value for SA540 B24 +------------ | ------------- | --------- | ------------- | -------- +Ms (˚C) = 561 – 474C – 33Mn – 17Cr – 17Ni – 21Mo + **10Co – 7.5Si** * | W. Steven and A.G. Haynes, JISI 183 (1956) 349–359. | 406˚C **(404˚C)** | 342˚C **(341˚C)** | 287˚C **(285˚C)** +Ms (˚C) = 539 – 423C – 30.4Mn – 12.1Cr – 17.7Ni – **7.5Mo + 10Co – 7.5Si** * | K.W. Andrews, JISI 203 (1965) 721–727. | 407˚C **(402˚C)** | 348˚C **(344˚C)** | 298˚C **(293˚C)** +Bs (˚C) = 637 – 58C – 35Mn – 15Ni – 34Cr – 41Mo | M.V. Li, D.V. Niebuhr, L.L. Meekisho and D.G. Atteridge, Metall. Mater. Trans. B, 29B (1998) 661-672. | 542˚C | 470˚C | 519˚C + +*The modification in bold was proposed by Kung and Rayment (C.Y. Kung and J.J. Rayment, Metall. Trans. A 13 (1982) 328–331). + +Examples of 3 different empirical equations for predicting Ae3 temperatures are listed below: + +Expression (all compositions in wt.%) | Reference | Value for SA508 Grade 3 | Value for SA508 Grade 4N | Value for SA540 B24 +------------ | ------------- | --------- | ------------- | -------- +Ae3 (˚C) = 910 - 203(C)^(1/2) + 44.7Si - 15.2Ni + 31.5Mo + 104V + 13.1W - 30.0Mn + 11.0Cr + 20.0Cu - 700P - 400Al - 120As - 400Ti | K.W. Andrews, JISI 203 (1965) 721–727. | 802˚C | 787˚C | 751˚C +Ae3 (˚F) = 1570 - 323C - 25Mn + 80Si - 3Cr - 32Ni
**Note: ˚C = (˚F - 32)(5/9)** | R.A. Grange, Metal Progress 73 (1961). | 801˚C | 744˚C | 748˚C +Ae3 (˚C) = 871 - 254.4(C)^(1/2) - 24.2Ni + 51.7Si | Eldis: REFERENCE NEEDED | 765˚C | 705˚C | 694˚C + +The alloy chemistries used to calculate the start temperatures and Ae3 values above are shown here (all wt.%). Only elements of interest are provided: + +Steel | C | Si | Mn | Ni | Cr | Mo | V | Cu | Al | Ti +---------- | ---------- | ---------- | ---------- | ---------- | ---------- | ---------- | ---------- | ---------- | ---------- | ---------- +SA508 Grade 3 | 0.18 | 0.25 | 1.32 | 0.74 | 0.21 | 0.49 | - | 0.03 | 0.002 | - +SA508 Grade 4N | 0.21 | 0.1 | 0.35 | 3.87 | 1.87 | 0.5 | - | 0.02 | 0.004 | - +SA540 B24 | 0.42 | 0.26 | 0.72 | 1.78 | 0.85 | 0.31 | 0.01 | 0.08 | 0.021 | 0.01 + +## APPENDIX 3 – Examples of CCT Results + +The following dilatometry curves and CCT diagram for SA-540 B24 were obtained for an austenitisation heat treatment of 870˚C for 2 hours (to simulate a typical industrial treatment). The Ae3 temperature, calculated using the equations of Andrews and Grange, was determined to be 749˚C. + +![FigA2](/wiki/assets/images/posts/Quenching_Dil_Fig_A5.png) +![FigA3](/wiki/assets/images/posts/Quenching_Dil_Fig_A4.png) + diff --git a/collections/_experiments/sxrd-caking.md b/collections/_experiments/sxrd-caking.md index 85d19de..38438be 100644 --- a/collections/_experiments/sxrd-caking.md +++ b/collections/_experiments/sxrd-caking.md @@ -2,8 +2,12 @@ published: true author: Christopher Daniel title: SXRD caking diffraction pattern images +analysis_codes: + - name: pyFAI Azimuthal Integration and Caking + link: https://github.com/LightForm-group/pyFAI-integration-caking tutorials: - - Tutorial for caking SXRD diffraction pattern images + - Caking SXRD diffraction pattern images using DAWN + - Caking SXRD diffraction pattern images using Dioptas and PyFAI --- ## Parameters diff --git a/collections/_experiments/sxrd-in-situ-phase-fraction.md b/collections/_experiments/sxrd-in-situ-phase-fraction.md index 697fac4..67645ff 100644 --- a/collections/_experiments/sxrd-in-situ-phase-fraction.md +++ b/collections/_experiments/sxrd-in-situ-phase-fraction.md @@ -5,6 +5,8 @@ title: SXRD in-situ phase fraction analysis_codes: - name: TOPAS batch analysis link: 'https://github.com/LightForm-group/TOPAS-batch-analysis' +tutorials: + - Analysing phase fraction changes using TOPAS --- ## Parameters diff --git a/collections/_experiments/sxrd-in-situ-straining.md b/collections/_experiments/sxrd-in-situ-straining.md index 1ccb525..6e36252 100644 --- a/collections/_experiments/sxrd-in-situ-straining.md +++ b/collections/_experiments/sxrd-in-situ-straining.md @@ -4,8 +4,14 @@ published: true author: Christopher Daniel title: SXRD in-situ straining analysis_codes: - name: xrdfit - link: 'https://github.com/LightForm-group/xrdfit' + link: https://github.com/LightForm-group/xrdfit + - name: Plotting intensity vs. time and angle + link: https://github.com/LightForm-group/sxrd-intensity-time-plots + - name: Single phase micromechanical deformation analysis + link: https://github.com/LightForm-group/xrdfit-Zr-hydride-deformation-analysis + - name: Dual phase micromechanical deformation analysis + link: https://github.com/LightForm-group/xrdfit-two-phase-hot-deformation-analysis --- -## Parameters +## Measuring strain with SXRD -Add experiment parameters here. +SXRD can be used to measure structural changes in real time during sample straining.
\ No newline at end of file diff --git a/collections/_experiments/sxrd-in-situ-texture.md b/collections/_experiments/sxrd-in-situ-texture.md index 8e0876d..3dabfec 100644 --- a/collections/_experiments/sxrd-in-situ-texture.md +++ b/collections/_experiments/sxrd-in-situ-texture.md @@ -4,7 +4,11 @@ author: Christopher Daniel title: SXRD in-situ texture analysis_codes: - name: MAUD batch analysis - link: 'https://github.com/LightForm-group/MAUD-batch-analysis' + link: https://github.com/LightForm-group/MAUD-batch-analysis + - name: Continuous-Peak-Fit analysis + link: https://github.com/LightForm-group/continuous-peak-fit-analysis + - name: Averaging multiple SXRD tiff images + link: https://github.com/LightForm-group/sxrd-tiff-summer --- ## Parameters diff --git a/collections/_experiments/sxrd_analysis.md b/collections/_experiments/sxrd_analysis.md new file mode 100644 index 0000000..20e59ef --- /dev/null +++ b/collections/_experiments/sxrd_analysis.md @@ -0,0 +1,12 @@ +--- +published: true +author: Christopher Daniel +title: SXRD analysis guide +analysis_codes: +tutorials: + - Synchrotron X-ray diffraction analysis guide + - Synchrotron X-ray diffraction analysis on iCSF / CSF +--- +## Parameters + +Add experiment parameters here. diff --git a/collections/_experiments/sxrd_running_experiment.md b/collections/_experiments/sxrd_running_experiment.md new file mode 100644 index 0000000..8d4a040 --- /dev/null +++ b/collections/_experiments/sxrd_running_experiment.md @@ -0,0 +1,12 @@ +--- +published: true +author: Christopher Daniel +title: SXRD running experiment +tutorials: + - Tips for setting up ETMT at Diamond + - Operating the Diamond beamline + - Transferring data from Diamond or DESY beamlines +--- +## Parameters + +Add experiment parameters here. diff --git a/collections/_experiments/tensile-testing-with-dic.md b/collections/_experiments/tensile-testing-with-dic.md deleted file mode 100644 index 6f01ad4..0000000 --- a/collections/_experiments/tensile-testing-with-dic.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -published: true -author: Sumeet Mishra -title: Tensile testing with DIC ---- -## Parameters - -Add experiment parameters here. diff --git a/collections/_experiments/tensile-testing.md b/collections/_experiments/tensile-testing.md index a820534..96b37d2 100644 --- a/collections/_experiments/tensile-testing.md +++ b/collections/_experiments/tensile-testing.md @@ -2,6 +2,9 @@ published: true author: Sumeet Mishra title: Tensile testing +analysis_codes: + - name: DIC Tensile analysis + link: https://github.com/LightForm-group/timet-interactive-analysis --- ## Parameters diff --git a/collections/_handbook/Listserve.md b/collections/_handbook/Listserve.md index a6eff8f..8e3fe14 100644 --- a/collections/_handbook/Listserve.md +++ b/collections/_handbook/Listserve.md @@ -10,19 +10,19 @@ LightForm Listserves Several LightForm Listserves have been set up. Before using one, please consider who you wish to contact or include in the correspondence, and whether a Listserve is appropriate for your message.
All current PDRA's and PhD's -lightform-pdras-phds@listserv.manchester.ac.uk + All PDRA Team, PhD's, Management Team, MSc -MASS-PHD-PDRA@LISTSERV.MANCHESTER.AC.UK + All PDRA Team -lightform-pdra@listserv.manchester.ac.uk + All PhD's -lightform-phds@listserv.manchester.ac.uk + diff --git a/collections/_handbook/OOH.md b/collections/_handbook/OOH.md index 3b298c6..392385c 100644 --- a/collections/_handbook/OOH.md +++ b/collections/_handbook/OOH.md @@ -4,10 +4,8 @@ author: Natalie Shannon order: 6 --- -***Out of Hours Working *** - -Out of hours working is defined as any time after 5.00 pm or before 8:00 -am Monday to Friday, all day Saturday, all day Sunday, public holidays +Out of hours working is defined as any time after 5.00pm or before 8:00am +Monday to Friday, all day Saturday, all day Sunday, public holidays and during the University's Christmas closure period. ### PERMITS ARE REQUIRED FOR ALL OUT-OF-HOURS WORKING @@ -45,11 +43,5 @@ supervisor, with a remote buddy system and a risk assessment in place. Flexible working hours -Core campus working hours are 10am to 4.00am Monday to Friday in order to facilitate collaborative working. Meetings and events will not be arranged outside these hours. Exceptions for these core hours can be arranged in line with University and Departmental policy. - -Avoid sending work-related email outside of 8.00am and 6.00pm, colleagues are not required, or should feel obliged to reply to emails outside of their typical work hours. - -On occasions when you need to send emails outside of working hours, you may consider adding the following: - -***While I may be sending this email outside my normal office hours, I have no expectation to receive a reply outside yours. +Guidance on working hours is provided in the [code of conduct](../code_of_conduct/#working-hours). diff --git a/collections/_handbook/Safety.md b/collections/_handbook/Safety.md index c21d084..ba48b0b 100644 --- a/collections/_handbook/Safety.md +++ b/collections/_handbook/Safety.md @@ -4,27 +4,28 @@ author: Natalie Shannon order: 2 --- -The School Safety Adviser is Chris Turnbull. +The School Safety Adviser is **Sylvester Boon**. Contact via: -Email: christopher.turnbull@manchester.ac.uk +Email: sylvester.boon@manchester.ac.uk -Telephone: 0161 306 3596 -**Must read documentation** +Teams and mobile: +441615294175 + +### Must read documentation It is important that you read the School Safety Policy. Management System and Safety Personnel documents can be found on the School intranet [here](https://www.staffnet.manchester.ac.uk/materials/health-and-safety/). In addition, you must read the University safety policy which can be found [here](http://documents.manchester.ac.uk/DocuInfo.aspx?DocID=654). -**Mandatory training** +### Mandatory training You must attend the Health and Safety Induction and any specialised local safety training specified by your supervisor or the health and safety staff. There is detailed information in the Management System document ([here](https://www.staffnet.manchester.ac.uk/materials/health-and-safety/forms/)) on how to carry out Risk and COSHH assessments for laboratory based work. You need to update/revise your Risk and COSHH assessment forms annually when your work changes. -**Please note: No work can commence until a risk assessment and or COSHH has been completed. +**Please note: No work can commence until a risk assessment and/or COSHH has been completed.**
You must familiarise yourself with your laboratories and pay attention to all the safety documents relating to those areas. Safety equipment such as safety glasses, laboratory coats, gloves and hard hats will be issued as required and must be worn. @@ -32,7 +33,7 @@ For Postgraduate Students there are also Compulsory Health & Safety assessments The School safety policy and templates for risk assessments are available on the Virtual Common Room. -If you have any questions regarding health and safety, please contact your supervisor or the School Safety Advisor, Christopher Turnbull - christopher.turnbull@manchester.ac.uk +If you have any questions regarding health and safety, please contact your supervisor or the School Safety Advisor as detailed above. ### Keeping our team safe @@ -44,9 +45,17 @@ You can do this by complying with University Health and Safety Policies and Code We all have a duty of care in the workplace, therefore if you see anything that may be a health and safety breach and is likely to cause harm, you must speak up and report it to the appropriate health and safety officials immediately. +### Near misses + +Reporting near misses is an important part of a safety culture. By reporting near misses (incidents that could have become accidents) we can learn and improve before any injury occurs. Near misses are currently under-reported. You can report near misses informally to your supervisor and/or using the near miss reporting form. Be assured that all such reports will be treated as constructive help and the person reporting the near miss will not be blamed. The outcomes of a near miss will be used to improve safety for everyone, and reports will be treated anonymously. +### Lone working +Lone working means working out of shouting distance of help. Lone working is covered by strict rules, and no high risk activity may be carried out while lone working. Avoid lone working whenever possible. Further details are available in the department safety documents. +### Out of hours work +Working outside core hours (9am–6pm) requires an out of hours form to be completed and approved. A risk assessment must be completed for out of hours working. Note that this applies to both office and laboratory work. Out of hours working is discouraged, and not permitted for any high risk laboratory tasks (e.g. using chemicals). The department health and safety pages provide links to the necessary forms for out of hours working. If you are using or leaving laboratory equipment running out of hours, then it must have an equipment overnight permit. This permit has a section that must be attached to the equipment, giving details of the shutdown procedure in the event of an accident (to be used by Estates or the fire service, for example). +### Safety is everyone's responsibility - if anything is unclear, speak to your supervisor or the safety officer diff --git a/collections/_handbook/Slack.md b/collections/_handbook/Slack.md index b18b4d6..210f37e 100644 --- a/collections/_handbook/Slack.md +++ b/collections/_handbook/Slack.md @@ -1,7 +1,7 @@ --- -title: 15. SLACK +title: 14. SLACK author: Natalie Shannon -order: 15 +order: 14 --- Slack @@ -14,6 +14,4 @@ As a SLACK member you can join and leave channels as and when you need to – SLACK enables face-to-face and face-to-screen communication: you can talk in depth over voice or video calls directly from Slack, and if you need to show your work, you can also share your screen.
-If you have not yet signed up to SLACK, please contact Natalie Shannon Natalie.A.Shannon@manchester.ac.uk for an invitation to join. - - +If you have not yet signed up to SLACK, see the [new starters documentation](new-starters) for information on how to join. \ No newline at end of file diff --git a/collections/_handbook/Travel .md b/collections/_handbook/Travel .md index 06655eb..13785c5 100644 --- a/collections/_handbook/Travel .md +++ b/collections/_handbook/Travel .md @@ -4,8 +4,6 @@ author: Natalie Shannon order: 8 --- -**Travel and accommodation - All travel relating to University business (e.g. to a conference directly related to your research) must be booked through Key Travel wherever possible. Conference registration, travel and accommodation arrangements should be made well in advance and only following consultation with your Supervisor. @@ -14,7 +12,7 @@ An iproc requisition should be completed and submitted where applicable. Please do not make independent travel and accommodation arrangements with the expectation that they will later be reimbursed from University funds. -**Payments to Conferences/Suppliers not set up on Oracle: +### Payments to Conferences/Suppliers not set up on Oracle: Where on-line conference fees or, occasionally, travel need to be paid on the School credit card, please complete the One Off Payment form and return to Raj.Tandon@manchester.ac.uk diff --git a/collections/_handbook/index.html b/collections/_handbook/index.html index a817623..27cff44 100644 --- a/collections/_handbook/index.html +++ b/collections/_handbook/index.html @@ -10,23 +10,19 @@

This handbook gives insight into the structures and practices of the LightForm project and is designed for use by all LightForm colleagues and associates. Its purpose is to provide practical guidance regarding University policies and LightForm specific information and procedures. It aims to make it easier for colleagues to understand how we work, join and engage with the LightForm community.

-

About this handbook

-

The LightForm handbook is a living document, that means that it won't ever be complete, and our aim is for it to change and develop over time, with your input. So, if you see something that is missing or could be improved, you can contribute.

-

Structure

Our handbook contains LightForm's most "static" knowledge and information; it's a quick guide on "how to" for the administration aspects of LightForm, for example how to arrange payment for a conference or hotel. Importantly, it also defines how we operate, for example best practice lab procedures, safety and risk assessment.

Contributing to this handbook

-

The LightForm handbook can only be useful if it is a living document, which is taken care of by the LightForm community and fed with regular updates. Like the rest of the LightForm Wiki, this handbook can easily be modified and maintained as a dynamic document. As a LightForm colleague, this is your handbook, so we encourage you to contribute. We look forward to your suggestions & contributions.

+

The LightForm handbook is a living document, which means it won't ever be complete; our aim is for it to change and develop over time, with your input. Like the rest of the LightForm Wiki, this handbook can easily be modified and maintained as a dynamic document. As a LightForm colleague, this is your handbook, so we encourage you to contribute. We look forward to your suggestions & contributions.

If you want to contribute to the handbook (or anywhere else on the Wiki), please give this short guide a quick read.

-

When editing the handbook in particular, please be mindful of the following:

+

When editing the handbook, please be mindful of the following:

  • If possible, respect the existing structure of the handbook when making changes
  • -
  • Co-ordinate with the LightForm Project Manager (@NatalieShannon) before making large changes to existing parts, such as restructuring or deleting sections.
  • -
  • Let Natalie know after you have created a new section!
  • +
  • Co-ordinate with the LightForm Project Manager before making large changes to existing parts, such as restructuring or deleting sections.
-

Handbook Pages

+

Handbook Pages

{% capture pages_list %}{% include get_collection_pages.html page=page %}{% endcapture %} {{ pages_list }} diff --git a/collections/_handbook/labsequip.md b/collections/_handbook/labsequip.md index bdda983..485f0ff 100644 --- a/collections/_handbook/labsequip.md +++ b/collections/_handbook/labsequip.md @@ -13,62 +13,60 @@ contact Dave Strong via: **Office location:** B016 Morton Lab -**Email:** David.Strong@manchester.ac.uk +**Email:** **Telephone:** 0161 306 3597 - - - **Machine Operation:** - - Switch off machines after use. - - Polishing machines and furnaces should be switched off, the large cutting wheel left open to dry. + +### Machine Operation - Polishing machines ***should not*** be left running if you leave the room. +Switch off machines after use. - Grinding and grinding papers : +Polishing machines and furnaces should be switched off, the large cutting wheel left open to dry. - When grinding, try to re-use any grinding papers that are available. - - Remember that you can clean them easily to remove any contamination. - - If you are putting used grinding paper back in the tray – clean it first as failure to do so can contaminate other materials +Polishing machines ***should not*** be left running if you leave the room. - **Polishing - do's and don'ts** +### Grinding and grinding papers - When polishing, if your sample is covered in a large foam of diamond spray or swimming in OPS then you are using too much. - - The diamond cloths are impregnated with the appropriate diamond paste when they are changed, the spray is simply used to ‘top-up' the paste held within the cloth. It is the substance that is held within the fibres of the cloth that does the polishing, any that is floating around on the surface is not contributing to your polishing. - - This is again a matter of patience; it won't polish any quicker if it's swimming in polishing medium. The same goes for lubricant, it's just to stop the sample sticking, so you don't need loads. - - Liquid consumables – replacing/topping up/disposing +When grinding, try to re-use any grinding papers that are available. - If you finish anything – methanol, acetone, OPS, etc. – replace it, or if it is low then top it up. - - There should never be less than half a bottle of any liquid consumables at any time. Leaving 1cm of liquid in a bottle is not an excuse to think it isn't empty, especially when the second bottle has been opened. (Remember that OPS should be diluted with 3 to 4 parts water for every part solution.) - - New bottles of chemicals, tape, tissue paper etc can be obtained from stores (collect tissue paper in packs of 6 only to save trips) People working out of hours should replace anything at the earliest possible opportunity when stores are open. - - If you finish a bottle or box, dispose of it either in the bin or in the skip at the end of the building. Don't leave it on/under the desk, or in the bottle carriers. +Remember that you can clean them easily to remove any contamination. - **Basic housekeeping and good practice :** +If you are putting used grinding paper back in the tray – clean it first as failure to do so can contaminate other materials - **Clean up after yourself**. Everyone has to use the lab, not just you. So when you've finished, clean any surfaces, sinks or machines that you've used that aren't clean. - - A dirty lab affects everybody's work. There are now sample storage cupboards available – no samples should be left out on any work surface. - - **Return communal equipment**. 
Using a piece of equipment does not make it yours. Everything in the group is for everybody's use, even if you ordered it, so share it about. - - Take something, use it, return it and don't lock it in your desk. Everybody needs access to the same equipment, so communicate with each other. - - **Do not remove beakers, bottles from the lab.** I made sure there was a bottle of methanol, acetone, water, detergent and two of OPS, all labelled. These seem to have been used, removed and rotated so much that we have 5 or 6 random empty bottles most of the time. +### Polishing - do's and don'ts - **Booking Equipment / lab space / Cancelling lab space and equipment bookings** +When polishing, if your sample is covered in a large foam of diamond spray or swimming in OPS then you are using too much. - If you have booked equipment or lab space, please arrive on time for your booked session. - - If you are unable to attend or no longer require your booked session, please cancel, where possible providing 24hrs notice. - Please contact Dave Strong to cancel David.Strong@manchester.ac.uk +The diamond cloths are impregnated with the appropriate diamond paste when they are changed; the spray is simply used to ‘top-up' the paste held within the cloth. It is the substance that is held within the fibres of the cloth that does the polishing; any that is floating around on the surface is not contributing to your polishing. - +This is again a matter of patience; it won't polish any quicker if it's swimming in polishing medium. The same goes for lubricant, it's just to stop the sample sticking, so you don't need loads. + +### Liquid consumables – replacing/topping up/disposing + +If you finish anything – methanol, acetone, OPS, etc. – replace it, or if it is low then top it up. + +There should never be less than half a bottle of any liquid consumables at any time. Leaving 1cm of liquid in a bottle is not an excuse to think it isn't empty, especially when the second bottle has been opened. (Remember that OPS should be diluted with 3 to 4 parts water for every part solution.) + +New bottles of chemicals, tape, tissue paper etc. can be obtained from stores (collect tissue paper in packs of 6 only to save trips). People working out of hours should replace anything at the earliest possible opportunity when stores are open. + +If you finish a bottle or box, dispose of it either in the bin or in the skip at the end of the building. Don't leave it on/under the desk, or in the bottle carriers. + +### Basic housekeeping and good practice + +**Clean up after yourself**. Everyone has to use the lab, not just you. So when you've finished, clean any surfaces, sinks or machines that you've used that aren't clean. + +A dirty lab affects everybody's work. There are now sample storage cupboards available – no samples should be left out on any work surface. + +**Return communal equipment**. Using a piece of equipment does not make it yours. Everything in the group is for everybody's use, even if you ordered it, so share it about. + +Take something, use it, return it and don't lock it in your desk. Everybody needs access to the same equipment, so communicate with each other. + +**Do not remove beakers or bottles from the lab.** I made sure there was a bottle of methanol, acetone, water, detergent and two of OPS, all labelled. These seem to have been used, removed and rotated so much that we have 5 or 6 random empty bottles most of the time.
+ +### Booking Equipment / lab space / Cancelling lab space and equipment bookings + +If you have booked equipment or lab space, please arrive on time for your booked session. + +If you are unable to attend or no longer require your booked session, please cancel, where possible providing 24hrs notice. +Please contact Dave Strong to cancel: \ No newline at end of file diff --git a/collections/_handbook/pettyc.md b/collections/_handbook/pettyc.md index ea4a834..e1fc1d6 100644 --- a/collections/_handbook/pettyc.md +++ b/collections/_handbook/pettyc.md @@ -3,7 +3,6 @@ title: 7. Petty Cash Author: Natalie Shannon order: 7 --- -***Petty Cash*** For information about ordering procedures, making expense claims, please contact: diff --git a/collections/_handbook/stores.md b/collections/_handbook/stores.md index f92386d..67d4bbe 100644 --- a/collections/_handbook/stores.md +++ b/collections/_handbook/stores.md @@ -3,7 +3,6 @@ title: 4. Stores author: Natalie Shannon order: 4 --- -***Stores *** Stores are open from Monday to Friday, and are sited as follows: diff --git a/collections/_handbook/theme_meetings.md b/collections/_handbook/theme_meetings.md index 8347133..93f2cfa 100644 --- a/collections/_handbook/theme_meetings.md +++ b/collections/_handbook/theme_meetings.md @@ -5,8 +5,6 @@ page_width: wide order: 13 --- -** Theme Meetings - LightForm research projects are categorised into themes: 1. Aluminium @@ -16,20 +14,19 @@ LightForm research projects are categorised into themes: Progress for each individual theme is reviewed in a weekly meeting, usually occurring on a Tuesday. With a multisite team it is imperative that progress is shared, updated and recorded with engagement from all parties, in order to avoid working in isolation; therefore attendance at theme meetings is strongly recommended. -Everyone is invited to the meeting, which will be co-delivered by a PDRA/Supervisor/PhD +Everyone is invited to the meeting, which will be chaired by a PDRA/Supervisor -** Input at theme meetings +### Input at theme meetings Everyone is invited to give a brief update on their work, including: -• Summary of activities/experiments -• Reporting of any technical difficulties (what has/hasn't worked) -• Experiment plans for the following month -• Providing summary slide (as shown in the dropbox folder or on the Slack #titanium channel) summarising progress, plans and updates to milestones/deadlines -• Summary slides should be shared in advance of theme meetings, please forward to the Lead Researcher, at least one day prior to the scheduled meeting - -** Example project progress summary slide: +- Summary of activities/experiments +- Reporting of any technical difficulties (what has/hasn't worked) +- Experiment plans for the following month +- Providing a summary slide (as shown in the dropbox folder or on the Slack #titanium channel) summarising progress, plans and updates to milestones/deadlines +- Summary slides should be shared in advance of theme meetings; please forward to the Lead Researcher at least one day prior to the scheduled meeting +### Example project progress summary slide
Experiment | manage all | Experiment Champion | Metadata template | manage all | Analysis codes | Tutorials
{{ exp.title }} {% include checklist_controls.html exp_title_clean=exp_title_clean %} {% if exp.analysis_codes %} @@ -55,8 +50,6 @@
  • {{ analysis_code.name }}
  • {% endfor -%} - {% else %} - - {% endif %}
@@ -55,14 +52,6 @@ Everyone is invited to give a brief update on their work, including: For colleagues unable to attend in person, a conference call will be set up within the theme Slack channel to join 5 minutes before the meeting. -**Schedule of meetings: - -You will receive an email meeting reminder; the full schedule of dates can be found on the shared LightForm Google Calendar, if you require access to this calendar, please contact Christopher Daniels christopher.daniel@manchester.ac.uk - - - - - - - +### Schedule of meetings +You will receive an email meeting reminder; the full schedule of dates can be found on the shared LightForm Google Calendar. If you require access to this calendar, please contact Christopher Daniel christopher.daniel@manchester.ac.uk diff --git a/collections/_handbook/travel on bus.md b/collections/_handbook/travel on bus.md index 0ae912a..ae9026b 100644 --- a/collections/_handbook/travel on bus.md +++ b/collections/_handbook/travel on bus.md @@ -4,19 +4,17 @@ author: Natalie Shannon order: 11 --- -Travelling on University business: +If you are travelling on University business, you are responsible for familiarising yourself with any potential travel risks and ensuring you have travel insurance. -If you are travelling on University business, you are responsible for familiarising yourself with any potential travel risks and ensuring you have travel insurance, please see below: - -Risk Assessment +### Risk Assessment The University have recently introduced a policy for all colleagues travelling on University business to familiarise themselves with potential travel risks and to register with the School of Materials H&S. Full details regarding a risk assessment/pre-travel self checklist and general travel advice can be found at https://www.staffnet.manchester.ac.uk/insurance/travel/ -To register University business travel, please contact christopher.turnbull@manchester.ac.uk +To register University business travel, please contact -Travel Insurance +### Travel Insurance Full details of University travel insurance can be found at https://www.staffnet.manchester.ac.uk/insurance/travel/ diff --git a/collections/_miscellaneous/contribution.md b/collections/_miscellaneous/contribution.md index 72660df..c08f6dc 100644 --- a/collections/_miscellaneous/contribution.md +++ b/collections/_miscellaneous/contribution.md @@ -1,12 +1,12 @@ --- title: How to contribute to the Wiki -author: Adam Plowman +author: Adam Plowman, Guy Bowker toc: true --- This Wiki is designed to be updated frequently; we want everyone to share any and all useful information related to LightForm and the research we do. This is a short guide for how to contribute. Don't worry about breaking the Wiki, we can always revert it to a previous state (because it uses GitHub)! -Before you start, please make sure you have a GitHub account, and that you are a member of the LightForm "wiki-creators" team. @AdamPlowman or @PeterCrowther will be happy to help with this. +Before you start, please make sure you have a GitHub account, and that you are a member of the LightForm "wiki-creators" team. @GuyBowker or @PeterCrowther will be happy to help with this. Content in this wiki is organised into *collections* (which you can see in the navigation side-bar on the left). They are currently: @@ -116,4 +116,4 @@ Once the changes have been deployed on GitHub, you should now find a link to the ## Help -Please contact @AdamPlowman if you have any problems with contributing to the Wiki.
+Please contact @GuyBowker if you have any problems with contributing to the Wiki. diff --git a/collections/_miscellaneous/open_science.md b/collections/_miscellaneous/open_science.md deleted file mode 100644 index 97d52a4..0000000 --- a/collections/_miscellaneous/open_science.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Open and reproducible Science -author: Peter Crowther -tags: - - open science ---- -# Open and reproducible Science - -## Why share? - -Sharing research is good for everyone. There are several reasons that sharing is for the good of everyone, it allows verification of research that has been done, decreases duplication which increases the amount of productive research which gets done and allows groups with fewer resources to still participate in productive research. - -Sharing can also be good for the group that publishes the work. It increases the impact of the work by allowing more people to access it, it allows the development of collaborations with new researchers and groups from around the world and it also ensures that all researchers get credit for the work they do, not just PIs or grant holders. - -## Barriers to sharing - -Some people are concerned that if they share incomplete ideas or datasets then other people will steal them. While this is theoretically possible this rarely happens. If we ensure that there is an easy way for people to cite the work which is released, then people will likely cite it. As for people 'stealing' incomplete ideas and publishing them as their own, if the work is of any value then it would take a long time for others to reproduce the expertise of the publishing group in order to bring that work to publication. - -Another barrier to publishing is lack of knowledge and lack of time. These are barriers that are now reducing as there are a greater number of resources now available to educate people about open research and also funders and PIs are putting a greater value and emphasis on sharing which means that we can afford to spend the time working on it. - -## Further reading - -**A manifesto for reproducible science** is an excellent piece which covers some of the current issues in scientific research and highlights some ways in which we can move towards a more open and reproducible workflow in research https://www.nature.com/articles/s41562-016-0021 diff --git a/collections/_miscellaneous/organisation of research data.md b/collections/_miscellaneous/organisation of research data.md deleted file mode 100644 index e59970e..0000000 --- a/collections/_miscellaneous/organisation of research data.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Organisation of research data -author: Peter Crowther ---- -# Organisation of research data - -This is a summary of the points covered in the paper **Good enough practices in scientific computing**: https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005510 - -## Why organise data? - -All modern research means collecting and processing data. As equipment has increased in complexity and computing power has increased, the amount of data collected and its complexity has also increased. In order to do good reproducible research, it is important that the data is treated correctly. If the data is not treated correctly it can result in drawing inappropriate conclusions which does not make good science. - -## Data organisation - -### Keep raw data - -The raw data from a measurement should always be kept. 
While it is possible to reproduce analysis of a raw dataset, it may not be possible to reproduce the original raw data. Keeping the original raw data is important so that others can reproduce the analysis that you have done. - -### Ensure data are backed up - -Data should be backed up, preferably in more than one location. Where possible use university networked storage as this is much more robust than USB hard disks. - -### Record metadata - -Metadata tells people about how the data was collected. Data without context is meaningless as it cannot be analysed. Make sure that you know what metadata is important for each experiment and make sure that it is being recorded for each measurement you do. It may be the case that the metadata is collected along with the result in the result file but it may be the case that you have to record some of the data manually. This is what a lab book is for but it is also a good idea to make a digital record of the metadata too so that if the files are passed on but not the lab book, the data does not lose its value. - -### Organisation - -Put each project in its own directory. If a project is big, it may be relevant to break it up into sub projects. Keep documentation in a *docs* folder, analysis code in a *src* folder, raw data in a *data* folder and analysed results in a *results* folder. If you are publishing then make a new folder for the paper and have a *data* folder for data, a *plots* folder for code that plots graphs and a *figures* folder for the completed plots. - -### Use version control - -For any text based documents such as papers or code, use version control tools to keep a single versioned copy. This reduces the chances of losing vital work and allows easy collaboration with other people. \ No newline at end of file diff --git a/collections/_sample_prep/index.md b/collections/_sample_prep/index.md new file mode 100644 index 0000000..215ed3f --- /dev/null +++ b/collections/_sample_prep/index.md @@ -0,0 +1,7 @@ +--- +layout: collection_home +title: Sample Preparation +show_breadcrumbs: false +show_meta: false +published: true +--- diff --git a/collections/_sample_prep/titanium.md b/collections/_sample_prep/titanium.md new file mode 100644 index 0000000..e2cd59d --- /dev/null +++ b/collections/_sample_prep/titanium.md @@ -0,0 +1,10 @@ +--- +title: Titanium Sample Prep +author: Emma Buckworth +tags: + - Titanium + - Grinding + - Polishing +toc: true +published: true +--- diff --git a/collections/_software_and_simulation/Aluminium_modelling_parameters.md b/collections/_software_and_simulation/Aluminium_modelling_parameters.md new file mode 100644 index 0000000..e66dba0 --- /dev/null +++ b/collections/_software_and_simulation/Aluminium_modelling_parameters.md @@ -0,0 +1,31 @@ +--- +title: Al Modelling Parameters +author: Guy Bowker, Laura González Duque +tags: + - Aluminium + - DAMASK + - Crystal Plasticity + - Modelling +toc: false +subcollection: Aluminium +published: true +--- + +## Values of crystal plasticity parameters used in different studies + +| Aluminium | $C_{11} (GPa)$ | $C_{12} (GPa)$ | $C_{44} (GPa)$ | $a$ | $n_{sl}=\frac{1}{m}$ | $\dot{\gamma}_0$ | $h_0 (MPa)$ |$h_{ij} (\alpha = \beta)$ |$h_{ij} (\alpha \neq \beta)$ | $\tau_0 (MPa)$ | $\tau_{inf} (MPa)$ | Source | -------- | -------- | -------- | -------- | -------- | -------- | -------- | --------- | -------- | -------- | --------- | -------- | --------- | 6xxx | 106.43 | 60.35 | 28.21 | 1.354 | 200 | 0.01 s^{-1} | 411.25 | 1.0 | 1.4 | 46.70 | 104.02 | (M.
Khadyko, 2014) [^1] +| - | 106.75 | 60.41 | 28.34 | - | 20 | 0.001 m s^{-1} | 75 | - | 1.4 | 31 | 63 | (M. Kasemer, 2020) [^2] +| 1xxx | 114.30 | 64.30 | 30.75 | 20 | 1.75 | 0.001 s^{-1} | 1 | - | 1.0 | 88 | 132 | (F. Roters, 2019) [^3] +| AA6111 | 106.75 | 60.41 | 28.34 | 1.2 | 12 | 0.001 s^{-1} | 400 | 1.0 | 1.4 | 62 | 152 | (M. Duancheng, 2018) [^4] + +## References + +[^1]: M. Khadyko, S. Dumoulin, T. Børvik, O. Hopperstad. An experimental–numerical method to determine the work-hardening of anisotropic ductile materials at large strains. International Journal of Mechanical Sciences 88 (2014) 25–36. https://doi.org/10.1016/j.ijmecsci.2014.07.001 + +[^2]: M. Kasemer, G. Falkinger, F. Roters. A numerical study of the influence of crystal plasticity modeling parameters on the plastic anisotropy of rolled aluminum sheet. Modelling and Simulation in Materials Science and Engineering 28 (8) (2020) 085005. https://doi.org/10.1088/1361-651X/abb8e2 + +[^3]: F. Roters, M. Diehl, P. Shanthraj, P. Eisenlohr, C. Reuber, S.L. Wong, T. Maiti, A. Ebrahimi, T. Hochrainer, H.-O. Fabritius, S. Nikolov, M. Friák, N. Fujita, N. Grilli, K.G.F. Janssens, N. Jia, P.J.J. Kok, D. Ma, F. Meier, E. Werner, M. Stricker, D. Weygand, D. Raabe. DAMASK – The Düsseldorf Advanced Material Simulation Kit for modeling multi-physics crystal plasticity, thermal, and damage phenomena from the single crystal up to the component scale. Computational Materials Science 158 (2019) 420-478. https://doi.org/10.1016/j.commatsci.2018.04.030 + +[^4]: Ma, Duancheng. "Assessment of full field crystal plasticity finite element method for forming limit diagram prediction." arXiv preprint arXiv:1810.05742 (2018). https://doi.org/10.48550/arXiv.1810.05742 diff --git a/collections/_software_and_simulation/CIPHER.md b/collections/_software_and_simulation/CIPHER.md new file mode 100644 index 0000000..355e837 --- /dev/null +++ b/collections/_software_and_simulation/CIPHER.md @@ -0,0 +1,148 @@ +--- +title: Installation +author: Sakina Rehman, Pratheek Shanthraj +tags: + - simulation + - csf + - cipher +toc: true +subcollection: CIPHER (Calphad Integrated Phase-field solvER) +published: true +--- + +CIPHER is a parallel phase-field simulation code for microstructure evolution in multi-component alloy systems. It includes advanced features such as automatic parallel adaptive mesh refinement, local truncation error estimates and adaptive time stepping. CIPHER is designed for a large number of phases (10-10000), with computational complexity independent of the number of phases, and uses an efficient grand-canonical-based phase-field implementation with direct use of the Compound-Energy-Formalism and other CALPHAD thermodynamic models for multi-component systems [1]. + +## Installation +>This software requires MPI, p4est v2.2 [2], and PETSc v3.12 [3]. + +The following installation instructions are specific to users of the University of Manchester's Computational Shared Facility (CSF). Non-CSF users will need to follow their local procedures to install p4est and PETSc. + +### Installing p4est: + +First load MPI compilers and OpenBLAS with the commands: +``` +module load mpi/intel-17.0/openmpi/3.1.3 +module load libs/gcc/openblas/0.3.6 +``` +Navigate to a software folder or create it by entering: + +``` +mkdir $HOME/software +cd $HOME/software +``` + +Download p4est with the command line utility wget: +``` +wget http://p4est.github.io/release/p4est-2.2.tar.gz +``` + +The p4est download will be compressed in a tar file.
This can be extracted using: + +``` +tar -xvf p4est-2.2.tar.gz +``` + +The p4est tar file can now be deleted: + +``` +rm p4est-2.2.tar.gz +``` +Those without the wget utility can download the latest p4est source from http://www.p4est.org and unpack. + +Navigate into the extracted p4est folder. Now configure the p4est software library, with the correct command arguments (flags): +``` +./configure --prefix=$PWD/intel-17.0-openblas --enable-mpi CC=mpicc FC=mpif90 F77=mpif77 CXX=mpic++ CFLAGS='-O2 -msse4.2 -axSSE4.2,AVX,CORE-AVX2' CXXFLAGS='-O2 -msse4.2 -axSSE4.2,AVX,CORE-AVX2' FFLAGS='-O2 -msse4.2 -axSSE4.2,AVX,CORE-AVX2' BLAS_LIBS=$OPENBLASDIR/lib/libopenblas.a +``` +This runs a script that will localize the source distribution so that it will compile and load on your local system. Next enter: +``` +make +make install +``` +Now add an environment variable telling your system where p4est is located: +``` +export P4EST_DIR=$HOME/software/p4est-2.2/intel-17.0-openblas +``` +### Installing PETSc: + +Navigate back into the software folder and download PETSc with: + +``` +cd $HOME/software +wget http://ftp.mcs.anl.gov/pub/petsc/release-snapshots/petsc-lite-3.12.1.tar.gz +``` +Extract it with the tar utility: + +``` +tar -xvf petsc-lite-3.12.1.tar.gz +``` + +Set an environment variable to tell the system where PETSc is located: +``` +export PETSC_DIR=$HOME/software/petsc-3.12.1 +export PETSC_ARCH=intel-17.0-mkl +``` +> Note that if you put PETSc in a different folder to that used in this example, you will need to alter this variable. Make a folder inside the PETSc folder and name it after the architecture that you will be using (e.g. mkdir intel-17.0-mkl). Make another folder, inside the architecture folder, for your external packages and name it 'externalpackages' (e.g. mkdir intel-17.0-mkl/externalpackages). The commands below create this folder and navigate into it: +``` +mkdir $PETSC_DIR/$PETSC_ARCH/externalpackages +cd $PETSC_DIR/$PETSC_ARCH/externalpackages +``` +Download four external packages for PETSc by navigating into the external packages folder.
The first of these is Triangle: +``` +wget http://ftp.mcs.anl.gov/pub/petsc/externalpackages/Triangle.tar.gz +``` +The next one is HDF5: +``` +wget https://support.hdfgroup.org/ftp/HDF5/prev-releases/hdf5-1.8/hdf5-1.8.18/src/hdf5-1.8.18.tar.gz +``` +The next one is YAML: +``` +wget http://pyyaml.org/download/libyaml/yaml-0.1.4.tar.gz +``` +and finally Chaco: +``` +wget http://ftp.mcs.anl.gov/pub/petsc/externalpackages/Chaco-2.2-p2.tar.gz +``` +Load cmake (required to install some external packages): +``` +module load tools/gcc/cmake/3.11.4 +``` +Navigate to the folder containing PETSc and configure it, including the necessary flags: +``` +cd $PETSC_DIR +./configure --with-pthread --download-yaml=$PWD/$PETSC_ARCH/externalpackages/yaml-0.1.4.tar.gz --download-metis --download-parmetis --download-chaco=$PWD/$PETSC_ARCH/externalpackages/Chaco-2.2-p2.tar.gz --with-mkl_pardiso-dir=$MKLROOT --with-mkl_sparse-dir=$MKLROOT --with-mkl_sparse_optimize-dir=$MKLROOT --download-hypre --download-ml --download-triangle=$PWD/$PETSC_ARCH/externalpackages/Triangle.tar.gz --download-ctetgen --download-hdf5=$PWD/$PETSC_ARCH/externalpackages/hdf5-1.8.18.tar.gz --with-zlib --with-p4est-dir=$P4EST_DIR --with-blaslapack-dir=$MKLROOT --with-cxx-dialect=C++11 --with-debugging=0 COPTFLAGS="-O2 -msse4.2 -axSSE4.2,AVX,CORE-AVX2" CXXOPTFLAGS="-O2 -msse4.2 -axSSE4.2,AVX,CORE-AVX2" FOPTFLAGS="-O2 -msse4.2 -axSSE4.2,AVX,CORE-AVX2" PETSC_ARCH=$PETSC_ARCH PETSC_DIR=$PETSC_DIR +``` +Then make PETSc: +``` +make PETSC_DIR=$PETSC_DIR PETSC_ARCH=$PETSC_ARCH +make install PETSC_DIR=$PETSC_DIR PETSC_ARCH=$PETSC_ARCH all +``` + +### Installing CIPHER: + +If not already set, add $CIPHER_DIR/bin to your PATH: +``` +export PATH=$CIPHER_DIR/bin:$PATH +``` +Navigate to the examples folder: +``` +cd $CIPHER_DIR/examples/GrainBoundaryPrecipitate +``` +And run an example: +``` +mpiexec -n 4 cipher.exe --config GrainBoundaryPrecipitate.yaml +``` + +## Contact + +This code is maintained by the Microstructure Modelling Group at the University of Manchester. For questions, comments, bug-reports or contributions please email Dr. Pratheek Shanthraj at [pratheek.shanthraj@manchester.ac.uk](mailto:pratheek.shanthraj@manchester.ac.uk) or Sakina Rehman at [sakina.rehman@postgrad.manchester.ac.uk](mailto:sakina.rehman@postgrad.manchester.ac.uk). + +## References + +[1] Grand-canonical phase-field implementation: [https://arxiv.org/abs/1906.10503](https://arxiv.org/abs/1906.10503) +[2] p4est: [http://www.p4est.org](http://www.p4est.org/) +[3] PETSc: [https://www.mcs.anl.gov/petsc/](https://www.mcs.anl.gov/petsc/)
    [4] Ursula R. Kattner and Carelyn E. Campbell. Invited review: modelling of thermodynamics and diffusion in multicomponent systems. Materials Science and Technology, 25(4):443–459, 2009.
+[5] The Paraview Guide: [https://www.paraview.org/paraview-guide/](https://www.paraview.org/paraview-guide/) + diff --git a/collections/_software_and_simulation/CIPHER_input_file.md b/collections/_software_and_simulation/CIPHER_input_file.md new file mode 100644 index 0000000..566dd88 --- /dev/null +++ b/collections/_software_and_simulation/CIPHER_input_file.md @@ -0,0 +1,170 @@ +--- +title: Input File +author: Sakina Rehman, Pratheek Shanthraj +tags: + - simulation + - csf + - cipher +toc: true +subcollection: CIPHER (Calphad Integrated Phase-field solvER) +published: true +--- +## Input .yaml file + +> This example is used to briefly describe the purpose of each block contained within a CIPHER input .yaml file. The example used is from the shared GitHub micmog group for modelling grain boundary precipitation (GrainBoundaryPrecipitate.yaml). + +### header block + +The first block is named the ```header``` block: + +``` +header: + grid : [64, 64, 64] + size : [64, 64, 64] + n_phases : 3 + materials : [matrix, precipitate] + interfaces : [grainboundary, pptboundary] + components : [al, cu, mg, zn] + outputs : [al_c, cu_c, mg_c, zn_c, phaseid] + ``` + +The ```grid``` section defines the grid size of the model and ```size``` defines the physical dimensions of the model. In this example, the model is 3-dimensional; however, the model can be defined as 2-dimensional or 1-dimensional by altering the dimensions in the [x, y, z] array. The ```n_phases``` section defines the number of phases used in the simulation and the ```materials``` section lists the materials present in the model. The ```interfaces``` section defines the interfaces present in the model. For example, ```pptboundary``` defines an interface at a precipitate boundary and ```grainboundary``` defines a grain boundary. The ```components``` section can be used to define the components present in this model and the ```outputs``` are the results that will be written out to a ```.vtu``` file. + +### solution_parameters block + +Usually found below the header block is the ```solution_parameters``` block: +``` +solution_parameters: + time : 72000.0 + interfacewidth : 4 + initblocksize : [2, 2, 2] + initrefine : 5 + maxnrefine : 5 + minnrefine : 3 + initcoarsen : 3 + amrinterval : 200 + outputfreq : 5000 + outfile : GBP + interpolation: cubic + petscoptions : -ts_adapt_monitor + ``` +A detailed understanding of this block is perhaps not necessary when running simple simulations. The main sections include the ```time```, which is the physical time in seconds (note that the physical time is not the same as the simulation time), the ```outputfreq```, which is the number of steps after which the results will be written to a ```.vtu``` file, and the ```outfile```, which is the name of the output file. + +### material block + +In this example, there are two possible materials in the simulation: the matrix and the precipitate.
The material block takes the usual structure of: + +``` +material: + matrix: + chemicalenergy : calphaddis + molarvolume : 1e-5 + temperature0: 393.0 + chempot_ex_kineticcoeff : 1.0 + c0 : [0.90, 0.02, 0.04, 0.04] + mobilityc : + al : + mobility0 : 1.0e+18 + unary_migration : + al : + t_coefficient : [-127200.0, -92.98] + t_exponent: [0, 1] + cu : + t_coefficient : [-181583.4, -99.8] + t_exponent: [0, 1] + mg : + t_coefficient : [-127200.0, -92.98] + t_exponent: [0, 1] + zn : + t_coefficient : [-83255.0, -92.92] + t_exponent: [0, 1] + unary_enthalpy : + al : + t_coefficient : [-7976.15, 137.093038, -1.884662e-3, -8.77664e-6, 74092.0] + t_exponent: [0, 1, 2, 3, -1] + tlnt_coefficient : -24.3671976 + cu : + t_coefficient : [-7770.458, 130.485235, -2.65684e-3, 1.29223e-6, 52478.0] + t_exponent: [0, 1, 2, 3, -1] + tlnt_coefficient : -24.112392 + mg : + t_coefficient : [-5767.34, 142.775547, 4.858e-3, -1.393669e-6, 78950.0] + t_exponent: [0, 1, 2, 3, -1] + tlnt_coefficient : -26.1849782 + zn : + t_coefficient : [-4315.967, 116.900389, -1.712034e-3, 1.264963e-6] + t_exponent: [0, 1, 2, 3] + tlnt_coefficient : -23.701314 +``` +This block defines the ```matrix``` material properties. The ```chemicalenergy``` section is used to define whether the material is disordered (```calphaddis```), a sub-lattice model (```calphad2sl```) etc. The ```temperature``` is also defined in this block. This temperature can be kept the same in each material for an isothermal simulation or can be set differently in each material to allow thermal diffusion to occur. The ```c0``` array defines the alloy compositions of the components present in the simulation. The sum of these compositions must be 1. This block also allows the addition of the atomic mobilities (```unary_migration```, ```binary_migration``` etc.) by inputting the temperature coefficients and temperature exponents. The enthalpy terms can also be added to this block (```unary_enthalpy```, ```binary_enthalpy```, ```ternary_enthalpy``` etc.) in the same manner. The atomic mobilities and energy parameters are obtained from a diffusion database and a thermodynamic database respectively. A basic understanding of the CALPHAD method is required to be able to input the correct parameters into this block [4]. + +### interface block + +The interface block usually takes the structure of: +``` +interface : + grainboundary : + energy : + e0: 5.0e+8 + mobility : + m0: 2.0e+2 + activation_energy: + t_coefficient : -1.0e+5 + t_exponent: 0 + potential : [0.175e+4, -0.180e+4, -0.235e+4] +``` + +The interface type mentioned here is the ```grainboundary```. Here the ```energy``` and ```mobility``` terms are inputted, which should be normalised to the appropriate length scale. Further interface types, e.g. ```pptboundary```, can be added with the same block structure. +### boundary block + +To allow either a chemical or thermal influx into the simulation, a boundary block must be included: + +``` +boundary: + influx: + boundary_id: 3 + chem: + type: neumann + value: [0.0, 0.0, 1e7] + ``` + +This ```boundary_id``` defines the boundary at which the influx will be applied and ```type``` defines the type of boundary condition, which, in this case, is set as the Neumann or second-type boundary condition. In the ```value``` section the value of the influx at the boundary condition can be added, e.g. an oxygen influx in partial pressure units.
Please note that the ```boundary``` section must be added to the ```header``` block in order for the boundary condition to be applied: + +``` +boundaries : influx +``` +### mappings block + +Each of the phases present must be assigned a material type; this is where the ```mappings``` block comes into use: + +``` +mappings: + phase_material_mapping : |- + 2 of 1 + 2 + voxel_phase_mapping: |- + 116510 of 1 + 4 of 3 + 59 of 1 + 6 of 3 + 57 of 1 + ... + interface_mapping: |- + 2 of 1 + 7 of 2 +``` +For the GrainBoundaryPrecipitate.yaml example, in the ```phase_material_mapping```, '2 of 1' indicates 1 being repeated twice, so phase 1 is material 1 and phase 2 is material 1. The second line '2' shows that phase 3 is material 2. If there was '2 to 1' written in this block, this would create a list, i.e. 2, 1. For example, if '10 to 5' was written, this would list 10, 9, 8, 7, 6, 5. If there are $n$ phases, then there are $n^2$ possible interfaces between them. As this example has 3 phases, there are 9 possible boundaries. This is why there are 9 interfaces mapped under ```interface_mapping```. + +In terms of mapping, it is ideal to orientate a model so that it is easier to assign phases (i.e. compact models). For more complex geometries there are automatic scripts to generate the mappings. + +## Contact + +This code is maintained by the Microstructure Modelling Group at the University of Manchester. For questions, comments, bug-reports or contributions please email Dr. Pratheek Shanthraj at [pratheek.shanthraj@manchester.ac.uk](mailto:pratheek.shanthraj@manchester.ac.uk) or Sakina Rehman at [sakina.rehman@postgrad.manchester.ac.uk](mailto:sakina.rehman@postgrad.manchester.ac.uk). + +## References + +[1] Grand-canonical phase-field implementation: [https://arxiv.org/abs/1906.10503](https://arxiv.org/abs/1906.10503) +[2] p4est: [http://www.p4est.org](http://www.p4est.org/) +[3] PETSc: [https://www.mcs.anl.gov/petsc/](https://www.mcs.anl.gov/petsc/)
    [4] Ursula R. Kattner and Carelyn E. Campbell. Invited review: modelling of thermodynamics and diffusion in multicomponent systems. Materials Science and Technology, 25(4):443–459, 2009.
+[5] The Paraview Guide: [https://www.paraview.org/paraview-guide/](https://www.paraview.org/paraview-guide/) diff --git a/collections/_software_and_simulation/CIPHER_output.md b/collections/_software_and_simulation/CIPHER_output.md new file mode 100644 index 0000000..75a804c --- /dev/null +++ b/collections/_software_and_simulation/CIPHER_output.md @@ -0,0 +1,29 @@ +--- +title: Output File +author: Sakina Rehman, Pratheek Shanthraj +tags: + - simulation + - csf + - cipher +toc: true +subcollection: CIPHER (Calphad Integrated Phase-field solvER) +published: true +--- + +## Viewing output files + +> Please note that this is a simple run-through of how to view the output ```.vtu``` files in Paraview. For further detailed documentation on how to use Paraview, please see [5]. + +WIP + +## Contact + +This code is maintained by the Microstructure Modelling Group at the University of Manchester. For questions, comments, bug-reports or contributions please email Dr. Pratheek Shanthraj at [pratheek.shanthraj@manchester.ac.uk](mailto:pratheek.shanthraj@manchester.ac.uk) or Sakina Rehman at [sakina.rehman@postgrad.manchester.ac.uk](mailto:sakina.rehman@postgrad.manchester.ac.uk). + +## References + +[1] Grand-canonical phase-field implementation: [https://arxiv.org/abs/1906.10503](https://arxiv.org/abs/1906.10503) +[2] p4est: [http://www.p4est.org](http://www.p4est.org/) +[3] PETSc: [https://www.mcs.anl.gov/petsc/](https://www.mcs.anl.gov/petsc/)
    [4] Ursula R. Kattner and Carelyn E. Campbell. Invited review: modelling of thermodynamics and diffusion in multicomponent systems. Materials Science and Technology, 25(4):443–459, 2009.
+[5] The Paraview Guide: [https://www.paraview.org/paraview-guide/](https://www.paraview.org/paraview-guide/) diff --git a/collections/_software_and_simulation/Calibration_of_DAMASK_crystal_plasticity_models_using_MatFlow.md b/collections/_software_and_simulation/Calibration_of_DAMASK_crystal_plasticity_models_using_MatFlow.md new file mode 100644 index 0000000..4a7acf2 --- /dev/null +++ b/collections/_software_and_simulation/Calibration_of_DAMASK_crystal_plasticity_models_using_MatFlow.md @@ -0,0 +1,14 @@ +--- +title: Calibration of DAMASK crystal plasticity models using MatFlow +author: Matty Warner +toc: true +published: true +tags: +- damask +- matflow +subcollection: DAMASK +--- +This page aims to show how to calibrate crystal plasticity models in DAMASK using stress-strain data from an experimental tensile test with a MatFlow workflow. + +## Crystal Plasticity Models +Phenomenological and dislocation density-based crystal plasticity models are both used to simulate deformation in crystalline structures. The phenomenological model uses a macroscopic picture of deformation, with a power law associating the resolved shear stress on a given slip system with the shear rate, typically of the form $\dot{\gamma}^{\alpha} = \dot{\gamma}_0 \left| \tau^{\alpha}/\tau_c^{\alpha} \right|^{n} \operatorname{sign}(\tau^{\alpha})$. The dislocation density model is a physics-based formulation that calculates the shear rate from the interaction between dislocations and slip systems. Both models involve fitting parameters that must be calibrated with respect to experimental data in order to run physically meaningful simulations. diff --git a/collections/_software_and_simulation/Creating_ODF_slices_of_evolving texture_from_simulated_data.md b/collections/_software_and_simulation/Creating_ODF_slices_of_evolving texture_from_simulated_data.md new file mode 100644 index 0000000..3dbd806 --- /dev/null +++ b/collections/_software_and_simulation/Creating_ODF_slices_of_evolving texture_from_simulated_data.md @@ -0,0 +1,105 @@ +--- +title: Creating ODF slices of evolving texture from simulated data +author: Aiden Ha, Guy Bowker +tags: + - MTEX + - ma + - EBSD +published: true +subcollection: MTEX +---
+ +Assuming you have a list of orientations for every time-step as quaternions as a txt file: +It should be formatted like this: + +``` +xyzw + 0.550647107000000 0.759967622000000 -0.236600486000000 -0.251509817000000 + 0.550647107000000 0.759967622000000 -0.236600486000000 -0.251509817000000 + 0.550647107000000 0.759967622000000 -0.236600486000000 -0.251509817000000 + 0.550647107000000 0.759967622000000 -0.236600486000000 -0.251509817000000 + 0.550647107000000 0.759967622000000 -0.236600486000000 -0.251509817000000 + 0.550647107000000 0.759967622000000 -0.236600486000000 -0.251509817000000 + 0.550647107000000 0.759967622000000 -0.236600486000000 -0.251509817000000 + 0.550647107000000 0.759967622000000 -0.236600486000000 -0.251509817000000 + 0.575272627000000 0.741162387000000 -0.231873625000000 -0.256854711000000 + 0.575272627000000 0.741162387000000 -0.231873625000000 -0.256854711000000 + 0.575272627000000 0.741162387000000 -0.231873625000000 -0.256854711000000 + 0.575272627000000 0.741162387000000 -0.231873625000000 -0.256854711000000 + 0.575272627000000 0.741162387000000 -0.231873625000000 -0.256854711000000 + 0.575272627000000 0.741162387000000 -0.231873625000000 -0.256854711000000 + 0.575272627000000 0.741162387000000 -0.231873625000000 -0.256854711000000 + 0.575272627000000 0.741162387000000 -0.231873625000000 -0.256854711000000 + 0.575272627000000 0.741162387000000 -0.231873625000000 -0.256854711000000 + 0.575272627000000 0.741162387000000 -0.231873625000000 -0.256854711000000 + 0.575272627000000 0.741162387000000 -0.231873625000000 -0.256854711000000 + 0.575272627000000 0.741162387000000 -0.231873625000000 -0.256854711000000 + 0.575272627000000 0.741162387000000 -0.231873625000000 -0.256854711000000 +``` +My files are named QDF_quat_list_{increment} and I have 121 time steps, so to read these in MTEX... + +```matlab +CS = { ... + 'notIndexed', ... + crystalSymmetry('6/mmm', [3 3 4.7], 'X||a*', 'Y||b', 'Z||c*', 'mineral', 'Ti-Hex', 'color', [0.53 0.81 0.98]), ... + crystalSymmetry('m-3m', [3.2 3.2 3.2], 'mineral', 'Titanium cubic', 'color', [0.56 0.74 0.56]) ... +}; + +% Select beta phase (Titanium cubic) +CS_beta = CS{3}; + +% Set specimen symmetry +SS = specimenSymmetry('orthorhombic'); + + +%% +setMTEXpref('xAxisDirection','north'); +setMTEXpref('zAxisDirection','intoplane'); + +%% --- Define path to quaternion.txts --- %% + +path_to_txts = '/path/to/txts/'; +n_incs = 121; +%% +for inc = 1:1:n_incs + + increment = string(inc); + fprintf("\nInc %s:\n", increment); + +% % Define path to quaternion.txt files.. + + ori_path = strcat(path_to_txts, 'ODF_quat_list_', increment, '.txt'); + fprintf("Reading: %s\n", ori_path); + fid = fopen(ori_path,'r'); + quat_data = textscan(fid, '%f%f%f%f', 'HeaderLines', 1, 'CollectOutput', 1); + quat_data = quat_data{:}; + fid = fclose(fid); + % q = quaternion(transpose(quat_data(:, :, inc))); % from HDF5 + q = quaternion(transpose(quat_data)); % from quat.txt + + % Estiamte an ODF from the orientations + ori = orientation(q, CS_beta, SS); + %psi = calcKernel(ori); + + %odf = calcDensity(ori, 'kernel', psi); + odf = calcDensity(ori, 'kernel', SO3DeLaValleePoussinKernel, 'halfwidth', 10*degree); + + setMTEXpref('FontSize', 31); + + figure() + plot(odf, 'phi2', 45*degree, ... + 'antipodal', ... + 'linewidth', 1, ... + 'colorbar', 'cs', 'ss', ... + 'minmax', 0:0.1:10, ... 
+        'colorRange', [0, 30]);
+
+    mtexColorbar('location', 'southOutSide', 'title', 'mrd');
+    saveas(gcf, strcat(path_to_txts, 'ODF_quat_list_', increment, '.png'))
+    close(gcf);
+end
+```
+This will loop through the quaternion lists to produce a 45-degree ODF slice for each time step.
+
+
diff --git a/collections/_software_and_simulation/Dream3D.md b/collections/_software_and_simulation/Dream3D.md
new file mode 100644
index 0000000..690da48
--- /dev/null
+++ b/collections/_software_and_simulation/Dream3D.md
@@ -0,0 +1,10 @@
+---
+title: Using DREAM3D for DAMASK
+author: Inigo Howe
+tags:
+  - dream3d
+subcollection: DAMASK
+---
+## Installation and setup
+
+DREAM3D is an open-source software package used to generate virtual microstructures. It is available to download from [here](http://dream3d.bluequartz.net/Download/).
diff --git a/collections/_software_and_simulation/Driving Force Example.md b/collections/_software_and_simulation/Driving Force Example.md
new file mode 100644
index 0000000..3bb99c0
--- /dev/null
+++ b/collections/_software_and_simulation/Driving Force Example.md
@@ -0,0 +1,122 @@
+---
+title: Driving Force (Gibbs Energy Fit) Example
+author: Sakina Rehman
+tags:
+  - thermocalc
+  - morton pc
+  - python
+toc: true
+subcollection: TC Python
+published: true
+---
+# Driving Force Example
+
+Using TC-Python, the Gibbs energy for a specific phase can be calculated for a multivariate system to help calculate driving forces.
+
+Please check that the modules below are installed and available on your PC. These are all installed on the Morton PC; it is recommended that you use the Morton PC with the PyCharm IDE.
+
+```
+from mpl_toolkits.mplot3d import Axes3D
+import matplotlib.pyplot as plt
+from matplotlib import cm
+import numpy as np
+from tc_python import *
+import pandas as pd
+```
+To check which phases are present in the system, the following function can be utilised. This is useful when the Gibbs energy of a specific phase is required.
+
+```
+def list_stable_phases(result):
+
+    stable_phases = result.get_stable_phases()
+    print(stable_phases)
+```
+The function below produces a 3D plot of the Gibbs energy as a function of two independent variables. In this case, a ternary Al-Cr-Ni system is considered, where the Gibbs energy is calculated for changing solute concentrations of Al and Cr.
+```
+def plot_3d(list_of_x, list_of_y, list_of_z, xlabel, ylabel, zlabel, title):
+
+    # Plot a 3d figure using matplotlib given data and labels on the three axes.
+
+    fig = plt.figure()
+    fig.suptitle(title, fontsize=14, fontweight='bold')
+    ax = plt.subplot(projection='3d')
+    z = np.empty([len(list_of_x), len(list_of_y)])
+    k = 0
+    for index_x, x in enumerate(list_of_x):
+        for index_y, y in enumerate(list_of_y):
+            z[index_x, index_y] = list_of_z[k]
+            k = k + 1
+
+    xx, yy = np.meshgrid(list_of_x, list_of_y, indexing='ij')
+    ax.plot_surface(xx, yy, z, cmap=cm.coolwarm, linewidth=1, antialiased=True)
+    ax.set_xlabel(xlabel)
+    ax.set_ylabel(ylabel)
+    ax.set_zlabel(zlabel)
+    for spine in ax.spines.values():
+        spine.set_visible(False)
+
+    plt.show()
+
+
+```
+This is the main script, which is a single equilibrium calculation using the NIDEMO database.
+>Please note that there are many databases available for Ni, Al and Ti based alloys. To check, please open the Thermo-Calc 2022a GUI and look at the available databases.
+
+The temperature has been fixed at 973.15 K and all phases (```ALL_PHASES```) have been set to dormant, except the ```FCC_L12#2``` phase.
This is the phase of interest (the ordered precipitate phase in this system). Please refer to the database manual/documentation for phase naming information for your system.
+```
+with TCPython() as start:
+    # single equilibrium calculation
+    calculation = (
+        start
+        .set_cache_folder(os.path.basename(__file__) + "_cache")
+        .select_database_and_elements("NIDEMO", ["Ni", "Al", "Cr"])
+        .get_system()
+        .with_single_equilibrium_calculation()
+        .set_condition(ThermodynamicQuantity.temperature(), 973.15)
+        .set_phase_to_dormant(ALL_PHASES)
+        .set_phase_to_entered('FCC_L12#2', 1.0)
+        .disable_global_minimization()
+    )
+```
+A range of solute mole fractions is considered; do not set the minimum to zero, but to a very small number instead.
+```
+    list_of_x_Al = np.linspace(1e-4, 15e-2, 10)
+    list_of_x_Cr = np.linspace(1e-4, 30e-2, 10)
+
+    list_of_gibbs_gp = []
+    x_Als = []
+    x_Crs = []
+
+    for x_Al in list_of_x_Al:
+        for x_Cr in list_of_x_Cr:
+            calc_result = (calculation
+                           .set_condition(ThermodynamicQuantity.mole_fraction_of_a_component("Al"), x_Al)
+                           .set_condition(ThermodynamicQuantity.mole_fraction_of_a_component("Cr"), x_Cr)
+                           .calculate()
+                           )
+
+```
+This section focuses on retrieving the Gibbs energy of the given phase for the range of solute concentrations. The results are printed to the terminal and a 3D plot is provided.
+```
+            gibbs_gp = calc_result.get_value_of('G')
+            list_of_gibbs_gp.append(gibbs_gp)
+            x_Als.append(x_Al)
+            x_Crs.append(x_Cr)
+
+            print("X(Al)={0:.2f}".format(x_Al) + ", X(Cr)={0:.2f}".format(x_Cr) + ", Gibbs = {0:.2f}".format(
+                gibbs_gp) + "[kJ/mol]")
+
+    list_stable_phases(calc_result)
+    plot_3d(list_of_x_Al, list_of_x_Cr, list_of_gibbs_gp, 'X(Al)', 'X(Cr)', 'Gibbs Energy [kJ/mol]',
+            "Gibbs Energy for Ni-Al-Cr alloy at 973.15K")
+
+```
+This saves the data as a .csv file, which can be used in further post-processing, especially when obtaining the multivariate polynomial fits.
+```
+    data_Gp = [x_Als, x_Crs, list_of_gibbs_gp]
+    data_Gp = pd.DataFrame(data_Gp)
+    data_Gp = data_Gp.T
+    #data_Gp.columns = ['x_al', 'x_cr','G_gp']
+    data_Gp.to_csv('data_Gp.csv', index=False)
+```
diff --git a/collections/_software_and_simulation/General Information.md b/collections/_software_and_simulation/General Information.md
new file mode 100644
index 0000000..f892ba5
--- /dev/null
+++ b/collections/_software_and_simulation/General Information.md
@@ -0,0 +1,76 @@
+---
+title: General Information
+author: Sakina Rehman
+tags:
+  - thermocalc
+  - morton pc
+  - python
+toc: true
+subcollection: TC Python
+published: true
+---
+
+# Getting Started with TC-Python
+
+TC-Python is a Python™ language-based SDK available with Thermo-Calc which allows for easy and flexible coupling of Thermo-Calc calculations with other numerical packages like NumPy, SciPy, and TensorFlow. The majority of calculations found within the Thermo-Calc graphical interface and console mode are available in TC-Python. This includes:
+- Single equilibrium
+- Phase diagrams
+- Property diagrams
+- Scheil solidification simulations
+- Batch equilibrium (significant performance improvements when calculating multiple fast single equilibria)
+- Property models
+- Precipitation simulations
+- Diffusion simulations
+- Steel and Nickel libraries (set of Property Models designed to help experts working in the steel and nickel industry)
+- Process Metallurgy Module calculations
+
+## Installation
+
+> TC-Python is readily available and up to date on the Morton PC. Please log on to the Morton PC and open PyCharm.
+> If you run into any issues, please see the information below or contact support.
+
+Install an IDE (PyCharm is recommended) at https://www.jetbrains.com/pycharm/download. Once installed, open PyCharm to configure the bundled Python interpreter. Go to **File -> Settings** and navigate in the tree to **Project: YourProjectName** and choose **Project Interpreter**. Click on the **Settings** symbol and select **Add**. Choose **System Interpreter** and add the bundled Thermo-Calc Python 3 interpreter. For the Morton PC operating system, this would be:
+```
+C:\Program Files\Thermo-Calc\2022a\python\python.exe
+```
+For MacOS:
+```
+/Applications/Thermo-Calc-2022a.app/Contents/Resources/python/bin/python3
+```
+For Linux OS:
+```
+/home/UserName/Thermo-Calc/2022a/python/bin/python3
+```
+
+You should now be ready to run a TC-Python script; it is recommended that you first run some of the examples provided by Thermo-Calc. Go to **File -> Open** and navigate to the path of the TC-Python installation. Click on the **Examples** folder and run an example from any subfolder. Below is an example (```pyex_P_01_Precipitation_Al-Sc_AL3SC.py```) from the Precipitation subfolder, which simulates the kinetics of precipitation of Al3Sc from an FCC_A1 solution phase and shows some results, with minimally required settings. Default values are used for unspecified settings.
+
+![alt text](https://github.com/LightForm-group/wiki/blob/master/collections/_software_and_simulation/tc_example.png)
+
+## Architecture
+
+TC-Python contains classes of these types:
+
+**TCPython** – this is where you start with general settings.
+
+**SystemBuilder and System** – where you choose database and elements etc.
+
+**Calculation** – where you choose and configure the calculation.
+
+**Result** – where you get the results from a calculation you have run.
+
+
+Please refer to https://download.thermocalc.com/docs/tc-python/2022a/html/architecture.html#tcpython for further information.
+
+## Contact
+
+For questions, comments, bug reports or contributions please email Sakina Rehman at [sakina.rehman@postgrad.manchester.ac.uk](mailto:sakina.rehman@postgrad.manchester.ac.uk). The University of Manchester also holds a subscription to Thermo-Calc support, so please feel free to email [support@thermocalc.com](mailto:support@thermocalc.com). Please ensure you include the following information in your email:
+
+- Name:
+- Organisation Name: University of Manchester
+- Department:
+- Country:
+- Phone Number:
+- Software version:
+- Databases:
+- OS: Windows (Morton PC)
+- Version: Windows 7 Enterprise (Morton PC)
+- Description of Problem:
diff --git a/collections/_software_and_simulation/JMatPro.md b/collections/_software_and_simulation/JMatPro.md
new file mode 100644
index 0000000..70b768e
--- /dev/null
+++ b/collections/_software_and_simulation/JMatPro.md
@@ -0,0 +1,27 @@
+---
+title: Thermophysical Properties - What is JMatPro actually doing?
+author: James Butler
+toc: true
+tags:
+  - python
+  - matflow-new
+  - csf
+  - csf3
+published: true
+subcollection: JMatPro
+---
+
+# Thermophysical Properties - Extended General
+
+If you have JMatPro installed (congratulations for surviving that process) and have your alloy / composition in question loaded, you will be confronted by a vast array of calculations that can be applied to that alloy.
+This page focuses purely on the "Thermo-Physical Properties, Extended General" tab, which produces plots showing how the properties of your chosen alloy are affected by a change in temperature.
+
+After clicking on the Extended General tab, you will be greeted by three parameters you need to fill in: the heat treatment temperature, the upper limit and your step size. I'd like to address each parameter one by one, going from the simplest to the most complicated, while providing visuals to explain how each parameter will affect the data in the plots produced.
+
+## Step Size
+
+
+
+
+
diff --git a/collections/_software_and_simulation/KWN_damask.md b/collections/_software_and_simulation/KWN_damask.md
new file mode 100644
index 0000000..daee99d
--- /dev/null
+++ b/collections/_software_and_simulation/KWN_damask.md
@@ -0,0 +1,303 @@
+---
+title: Damask version including KWN model for precipitation kinetics
+author: Madeleine Bignon
+tags:
+  - damask
+  - csf
+  - Crystal Plasticity
+  - Modelling
+toc: false
+subcollection:
+  - KWN precipitation model
+  - DAMASK
+published: true
+---
+This version of Damask includes a constitutive behaviour law for dynamic precipitation. The program is available [here](https://github.com/LightForm-group/Damask-KWN).
+
+The constitutive law called ```kwnpowerlaw``` makes it possible to model the evolution of a precipitate distribution under deformation. The evolution of the precipitate distribution is calculated with a multi-class KWN precipitation kinetics model. The strengthening effect of precipitates is taken into account in the calculation of the critical resolved shear stress for dislocation glide. The accelerating effect of deformation on precipitation kinetics is considered using a phenomenological model for excess vacancy production.
+
+The documentation of the model, which describes the equations used and the corresponding entries in the input files, can be downloaded [here](https://github.com/LightForm-group/Damask-KWN/blob/main/model_documentation/fullfield-kwn.pdf).
+
+## Installation
+
+The following installation instructions are specific to users of the University of Manchester's Computational Shared Facility (CSF).
+
+- First connect to the CSF
+- If it does not already exist, create a ```software``` folder in your ```home``` by running the following command:
+
+```bash
+mkdir software
+```
+
+- Download the files ```load_DAMASK.sh``` and ```load_DAMASK_processing.sh``` from the ```env-script``` folder ([here](https://github.com/LightForm-group/Damask-KWN)) and copy them into your ```home``` folder
+- In your home directory, run the following command:
+
+```bash
+source load_DAMASK_processing.sh
+```
+
+- Download the folder called ```damask-kwn``` from [this link](https://github.com/LightForm-group/Damask-KWN) and copy it into your ```software``` directory (or clone it directly from GitHub)
+- Go to the ```software/damask-kwn/``` directory:
+
+```bash
+cd software/damask-kwn/
+```
+- Go to the ```src``` directory and remove the hidden files that might have appeared when copying the ```damask-kwn``` folder into the ```software``` directory (this step is not necessary if you used GitHub to clone the folder):
+
+```bash
+cd src
+rm ._*
+cd ..
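+# note: '._*' files are AppleDouble metadata that macOS can create when
+# copying files from a Mac; they can break the build if left in src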
+```
+
+- Create a ```build``` directory in ```software/damask-kwn/``` and navigate into it:
+
+```bash
+mkdir build
+cd build
+```
+
+- Run the following command:
+
+```bash
+cmake ../ -DDAMASK_SOLVER=GRID -DCMAKE_INSTALL_PREFIX=../
+```
+
+- Run the following command:
+
+```bash
+make all install
+```
+
+## Running simulations
+
+First of all, open the CSF and run the following command:
+
+```bash
+source /mnt/eps01-rds/jf01-home01/shared/load_DAMASK_processing.sh
+```
+### Running a job
+
+Place the following ```jobscript.sh``` file into a directory containing `DAMASK` input files (`geom.vtr`, `load.yaml`, and `material.yaml`) and submit it with:
+```bash
+qsub jobscript.sh
+```
+
+where ```jobscript.sh``` looks like this:
+```sh
+#!/bin/bash --login
+#$ -cwd              # Submit in the current working directory
+#$ -pe smp.pe 4      # Use a parallel environment with four cores
+
+source ~/load_DAMASK.sh
+
+mpirun -n $NSLOTS DAMASK_grid -l load.yaml -g geom.vtr
+```
+Running a job on the CSF will create two files in the working directory it is run within: a `jobscript.sh.o0000000` file, which contains generic job output, and `jobscript.sh.e0000000`, which contains detail on errors that occurred during the run.
+
+A set of example files (`geom.vtr`, `load.yaml`, `material.yaml` and `jobscript.sh`) is available in the ```example_of_use``` directory [here](https://github.com/LightForm-group/Damask-KWN).
+### Input files examples
+
+The ```material.yaml``` file contains all the material properties. It also contains all the parameters of the KWN dynamic precipitation model (please note that the temperature is defined in the ```load.yaml``` file). The documentation detailing the meaning of the inputs is available [here](https://github.com/LightForm-group/Damask-KWN/blob/main/model_documentation/fullfield-kwn.pdf).
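+
+Before editing values by hand, it can be convenient to inspect the file programmatically. A minimal Python sketch, assuming PyYAML is installed and the layout of the example file below:
+
+```python
+import yaml  # PyYAML
+
+# load the DAMASK material file and list the plasticity model of each phase
+with open('material.yaml') as f:
+    material = yaml.safe_load(f)
+
+for name, phase in material['phase'].items():
+    plastic = phase['mechanical']['plastic']
+    print(name, '->', plastic['type'])              # e.g. 'kwnpowerlaw'
+    print('  kwn_nsteps:', plastic.get('kwn_nsteps'))
+```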
+ +Example ```material.yaml``` file (for a simulation with 4 grains) +```yaml +homogenization: + SX: + N_constituents: 1 + mechanical: + type: pass + thermal: + type: pass + +material: +- constituents: + - O: + - 0.19813654684736873 + - -0.4003702989793671 + - 0.38708148638970563 + - -0.806606133991621 + phase: Aluminum + v: 1.0 + homogenization: SX +- constituents: + - O: + - 0.4131826988800702 + - -0.6684243946439803 + - 0.5135613787035137 + - 0.34459192720543513 + phase: Aluminum + v: 1.0 + homogenization: SX +- constituents: + - O: + - 0.21956653913118745 + - -0.3396582332002957 + - 0.5011663510132476 + - -0.765019678260156 + phase: Aluminum + v: 1.0 + homogenization: SX +- constituents: + - O: + - 0.2487765086598615 + - -0.8408812909074685 + - 0.44711671323124785 + - 0.17639599794237146 + phase: Aluminum + v: 1.0 + + +phase: + Aluminum: + lattice: cF + mechanical: + elastic: + C_11: 100000000000.0 + C_12: 60410000000.0 + C_44: 28340000000.0 + type: Hooke + output: + - F + - P + - F_e + - F_p + - L_p + - O + plastic: + N_sl: + - 12 + a_sl: 2.25 + atol_precipitate_density: 1e-12 + atol_solute: 1e-12 + atol_xi: 1.0 + atomic_volume: 1.66e-29 + burgers_vector: 2.9e-10 + c0_matrix: + - 0.02889 + - 0.02406 + ceq_matrix: + - 0.012082196019601959 + - 0.0005296453049830318 + ceq_precipitate: + - 0.27 + - 0.38 + dislocation_arrangement: 1 + dot_gamma_0_sl: 0.001 + gamma_coherent: 0.265 + h_0_sl_sl: 589020809.2851744 + h_sl_sl: + - 1 + - 1 + - 1.4 + - 1.4 + - 1.4 + - 1.4 + - 1.4 + initial_dislocation_density: 100000000000000.0 + initial_mean_radius: 9.0e-10 + initial_volume_fraction: 0.007 + jog_formation_energy: 4.806529901999999e-20 + kwn_nsteps: 60 + kwn_step0: 5 + kwn_stepsize: 0.5 + lattice_parameter: 4.0695e-10 + misfit_energy: 0 + molar_volume: 1e-05 + n_sl: 50.0 + output: + - gamma_sl + - phi_total + - phi + - solute_c + - r_avg + - xi_sl + - vacancy_c + - f + precipitate_strength_constant: 0.03535 + transition_radius: 3.3e-9 + shear_modulus: 28340000000.0 + solute_diffusion0: 1.49e-05 + solute_migration_energy: 2.0923e-19 + solute_strength: 683000000.0 + standard_deviation: 2.01322e-10 + stoechiometry: + - 5 + - 7 + - 6 + type: kwnpowerlaw + vacancy_diffusion0: 1e-05 + vacancy_energy: 8.331e-20 + vacancy_generation: 0.035 + vacancy_migration_energy: 1.49e-19 + vacancy_sink_spacing: 5e-05 + xi_0_sl: + - 7800000.0 + xi_inf_sl: + - 210330547.0653053 + rho: 1.0 + thermal: + C_p: 1.0 + +``` + +The ```load.yaml``` file contains the deformation conditions as well as the temperature. + +Example ```load.yaml``` file +```yaml +initial_conditions: + thermal: + T: 423.0 +loadstep: +- boundary_conditions: + mechanical: + P: + - x + - x + - x + - x + - 0 + - x + - x + - x + - 0 + dot_F: + - 0.0001 + - 0 + - 0 + - 0 + - x + - 0 + - 0 + - 0 + - x + discretization: + N: 10000 #discretisation for the calculation (N time steps) + t: 2000 #total deformation time in [s] + f_out: 100 #frequency of the outputs : one file every f_out calculations + + +solver: + mechanical: spectral_basic + thermal: spectral + ``` +## Post-processing + +Some post-processing tools are available in the ```env_scripts``` directory from [this repository](https://github.com/LightForm-group/Damask-KWN). +To use them, copy the ```post_processing.py``` and ```notebook_post_processing.ipynb``` in the folder containing the result file (which has ```.hdf5``` extension). + +To create textfiles containing the results of the calculation, as well as ```.vtr``` files allowing to display the result in 3D (e.g. 
with Paraview), navigate on the CSF to the folder containing the result file (```geom_load.hdf5``` in the example below) and run the following command:
+
+```bash
+python post_processing.py geom_load.hdf5
+```
+This will create a text file per variable and per time increment, containing the value of the given variable for all voxels of the simulation. For example, if the simulation is run with a 16x16x16 (=4096) box, the file ```vf_100``` contains 4096 lines for the calculated volume fractions of the 4096 elements of the simulation at time increment 100.
+This also creates ```.vtr``` files that can be visualised in Paraview.
+
+Additionally, the jupyter notebook ```notebook_post_processing.ipynb``` can be copied into the folder containing the result file and run to display some plots.
+
diff --git a/collections/_software_and_simulation/OsxFUSE_Error.md b/collections/_software_and_simulation/OsxFUSE_Error.md
new file mode 100644
index 0000000..aa59cc5
--- /dev/null
+++ b/collections/_software_and_simulation/OsxFUSE_Error.md
@@ -0,0 +1,35 @@
+---
+title: OsxFUSE Folder Error for macOS BigSur
+author: Yichao Yao
+subcollection: Misc
+---
+
+## OsxFUSE Error
+After the installation of macOS Big Sur version 11.0.1 on your MacBook, you will be unable to mount the CSF OsxFUSE folder using **sshfs**.
+This problem can be solved as follows:
+
+Step 1: Install the new version of OsxFUSE:
+Download the .dmg file and install it following the guidance; here is the link:
+```
+https://github.com/osxfuse/osxfuse/releases/tag/macfuse-4.0.3
+```
+
+Step 2: When you try to mount the folder using sshfs after the installation of MacFuse 4.0.3, you may have this error:
+```
+dyld: Library not loaded: /usr/local/lib/libosxfuse_i64.2.dylib
+```
+
+Step 3: You need to run the following command in Terminal to create a symlink pointing to the new version of the library:
+```
+cd /usr/local/lib/
+ln -s "libosxfuse.2.dylib" "/usr/local/lib/libosxfuse_i64.2.dylib"
+```
+
+If permission is denied, use this command and enter the admin password:
+```
+sudo ln -s "libfuse.2.dylib" "/usr/local/lib/libosxfuse_i64.2.dylib"
+```
+
+Step 4: **sshfs** should work well now; mount the folder as normal
+
+*Thanks to Benjamin Fleischer (member of FUSE for macOS) for his help*
diff --git a/collections/_software_and_simulation/PETSC_DAMASK_install.md b/collections/_software_and_simulation/PETSC_DAMASK_install.md
new file mode 100644
index 0000000..fb5c088
--- /dev/null
+++ b/collections/_software_and_simulation/PETSC_DAMASK_install.md
@@ -0,0 +1,157 @@
+---
+title: Installing PETSc and DAMASK on the CSF3
+author: Samuel Engel
+tags:
+  - DAMASK
+  - PETSc
+  - CSF3
+  - Crystal Plasticity
+  - Modelling
+toc: false
+subcollection: DAMASK
+published: true
+---
+
+This page aims to detail how to install PETSc (https://petsc.org/release/) and DAMASK (https://damask-multiphysics.org/) for simulating crystal plasticity.
+
+
+## Edit Bash Profile
+Before we begin, it is important to define some environment variables; these should be added to your `.bash_profile`, which can be accessed using the following commands. NOTE: make sure you do not have any conda initialisation in your bash profile, as this will interfere with installation; for more information see https://ri.itservices.manchester.ac.uk/csf3/software/applications/anaconda-python/.
+
+```
+cd
+
+vim .bash_profile
+```
+With the editor open, add the following lines to the bottom:
+
+```
+export HDF5_USE_FILE_LOCKING=FALSE
+
+export PETSC_DIR=$HOME/software/petsc
+export PETSC_ARCH=linux-gnu
+export PATH=$PETSC_DIR/$PETSC_ARCH/bin:$PATH
+```
+
+Make sure to save and exit; you will need to log out and log back in for the changes to take effect.
+
+## Install PETSc
+
+Now we need to download and install PETSc. If you don't already have a software folder on the CSF3, then make one in your home directory. Once downloaded, it's important to make sure you are on the correct branch.
+
+```
+cd
+
+mkdir software
+
+cd software
+
+git clone https://github.com/petsc/petsc.git
+
+cd petsc
+
+git checkout release-3.19
+```
+
+Now that we have PETSc downloaded, we need to configure it for use with DAMASK. We need to create a folder within PETSc to hold the extra packages required by DAMASK. This filepath should look like this: `software/petsc/linux-gnu`. Since we already defined a variable called `$PETSC_DIR`, we can just use this to go directly to the folder.
+
+```
+cd $PETSC_DIR
+
+mkdir linux-gnu
+```
+
+When configuring PETSc, it's best to either do so interactively or using a submission script. Here, we use a submission script that we can call `configure_petsc.sh`.
+
+```
+#!/bin/bash --login
+
+#$ -cwd
+#$ -N make_petsc
+#$ -l short
+
+# Exports
+export HDF5_USE_FILE_LOCKING=FALSE
+export OMP_NUM_THREADS=1
+export PETSC_ARCH=linux-gnu
+
+# Modules
+module purge
+module load compilers/gcc/14.1.0
+module load tools/gcc/cmake/3.28.6
+
+cd $PETSC_DIR
+
+## For CSF3
+./configure \
+--with-cc=$CC \
+--with-cxx=$CXX \
+--with-fc=$FC \
+--download-mpich \
+--download-fftw \
+--download-hdf5 \
+--download-hdf5-fortran-bindings=1 \
+--download-libfyaml \
+--download-zlib \
+--download-fblaslapack \
+COPTFLAGS="-O2 -march=znver4" \
+CXXOPTFLAGS="-O2 -march=znver4" \
+FOPTFLAGS="-O2 -march=znver4" \
+PETSC_ARCH=$PETSC_ARCH \
+PETSC_DIR=$PETSC_DIR
+
+make PETSC_DIR=$PETSC_DIR PETSC_ARCH=$PETSC_ARCH all
+make PETSC_DIR=$PETSC_DIR PETSC_ARCH=$PETSC_ARCH check
+```
+This should run successfully and install PETSc. There might be some issues when running the tests, which is usually to do with MPI not being loaded properly.
+
+## Install DAMASK
+
+Again, we can use git to download DAMASK. Here, we install the `release` branch, but you can use any branch or tag that you prefer.
+```
+git clone https://github.com/damask-multiphysics/DAMASK.git
+
+# move into the cloned repository before switching branch
+cd DAMASK
+
+git checkout release
+```
+We will also use a submission script to make and install DAMASK, which we can call something like `install_damask.sh`.
+
+```
+#!/bin/bash --login
+
+#$ -cwd
+#$ -N install_damask
+
+# Exports
+export HDF5_USE_FILE_LOCKING=FALSE
+export OMP_NUM_THREADS=1
+export DAMASK_ROOT=$HOME/software/DAMASK
+
+source $DAMASK_ROOT/env/DAMASK.sh
+PATH=$PETSC_DIR/$PETSC_ARCH/bin:$PATH
+LD_LIBRARY_PATH=$PETSC_DIR/$PETSC_ARCH/lib:$LD_LIBRARY_PATH
+
+# Modules
+module purge
+module load compilers/gcc/14.1.0
+module load tools/gcc/cmake/3.28.6
+
+PETSC_DIR=$HOME/software/petsc
+PETSC_ARCH=linux-gnu
+
+cd $DAMASK_ROOT
+
+mkdir build
+cd build
+
+cmake -DCMAKE_INSTALL_PREFIX=../ -DDAMASK_SOLVER=GRID ../
+make all install
+
+```
+
+This should make and install DAMASK fairly quickly. Now we can define another environment variable (add this to your bash profile as well).
+
+```
+export DAMASK_grid=$HOME/software/DAMASK/bin/DAMASK_grid
+```
+
+DAMASK can now be run on the command line by simply executing `$DAMASK_grid`. You will also need to specify the various input files; information about these can be found on the DAMASK website (https://damask-multiphysics.org/).
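+
+Once a run has produced an HDF5 result file, the `damask` Python library (which ships with the DAMASK sources) can be used to take a first look at it. A minimal sketch, assuming the library is installed; the result file name here is hypothetical:
+
+```python
+import damask
+
+# open a result file produced by DAMASK_grid and print an overview
+# of the increments and output fields it contains
+result = damask.Result('geom_load.hdf5')
+print(result)
+```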
diff --git a/collections/_software_and_simulation/Steel b/collections/_software_and_simulation/Steel
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/collections/_software_and_simulation/Steel
@@ -0,0 +1 @@
+
diff --git a/collections/_software_and_simulation/Ti_modelling_parameters.md b/collections/_software_and_simulation/Ti_modelling_parameters.md
new file mode 100644
index 0000000..fe7bb14
--- /dev/null
+++ b/collections/_software_and_simulation/Ti_modelling_parameters.md
@@ -0,0 +1,135 @@
+---
+title: Ti Modelling Parameters
+author: Guy Bowker, Adam Plowman
+tags:
+  - Titanium
+  - DAMASK
+  - Crystal Plasticity
+  - Modelling
+  - Parameters
+toc: false
+subcollection: Titanium
+published: true
+---
+The model presented uses a constitutive law based on a phenomenological crystal plasticity model described by Peirce et al. (Peirce, 1983) as part of the DAMASK framework (Roters, 2019).
+A phenomenological model attempts to predict the response one variable has on another, but is not derived from first principles.
+The DAMASK full-field crystal plasticity model considers a representative volume element as a continuous body $\mathcal{B}$ consisting of material points $\textbf{x}$ located in the reference configuration $\mathcal{B}_0$, which move to the current configuration $\textbf{y}$ in $\mathcal{B}_t$ with deformation.
+An infinitesimal line segment $d\textbf{x}$ is moved by the application of a deformation gradient tensor $F$, which maps $d\textbf{x}$ in the reference configuration to $d\textbf{y}$ in the current configuration: $d\textbf{y} = F(\textbf{x})\cdot d\textbf{x}$.
+Multiplicative decomposition of the deformation gradient tensor splits $F$ into the elastic deformation gradient tensor $F_{e}$ and the plastic deformation gradient tensor $F_{p}$
+
+$$
+F = F_{e} \cdot F_{p}
+$$
+
+The elastic deformation gradient tensor $F_{e}$ is related to the Green-Lagrange strain $E$ through Hooke's law
+
+$$
+S = \mathbb{C}:E
+$$
+
+where $S$ is the second Piola-Kirchhoff stress tensor and $\mathbb{C}$ is the elastic stiffness tensor. $E$ may then be expressed as
+
+$$
+E = \frac{(\boldsymbol{F_{e}}^{T}\boldsymbol{F_{e}}-\boldsymbol{I})}{2}
+$$
+
+The plastic deformation gradient tensor $F_{p}$ is calculated using constitutive equations.
+A constitutive equation describes the response of a specific material to external stimuli. The slip rate $\dot{\gamma}^i$ on slip system $i$ for a given load case is determined by a phenomenological power law, in which $\dot{\gamma}^i$ depends on the initial shear rate $\dot{\gamma_{0}}^i$, the ratio between the resolved shear stress $\tau^i$ and the critical resolved shear stress (CRSS) $\xi^i$, and the inverse of the strain rate sensitivity $n_{sl}=\frac{1}{m}$ (also known as the stress exponent):
+
+$$
+\dot{\gamma}^i = \dot{\gamma_{0}}^{i}\displaystyle\left\vert\frac{\tau^i}{\xi^i}\right\vert^{n_{sl}}\text{sgn}(\tau^i)
+$$
+
+The CRSS $\xi^i$ in the equation above is analogous to the yield stress of slip system $i$. When the resolved shear stress becomes greater than the CRSS of the slip system, $\dot{\gamma}^{i}\neq0$ and the slip system begins to slip.
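+
+To get a feel for what the stress exponent implies, consider an illustrative case (the numbers are chosen purely for demonstration): with $\dot{\gamma_{0}}^i = 10^{-3}\,\text{s}^{-1}$ and $n_{sl} = 20$, a resolved shear stress just 10% above the CRSS gives
+
+$$
+\dot{\gamma}^i = 10^{-3} \times (1.1)^{20} \approx 6.7 \times 10^{-3}\,\text{s}^{-1},
+$$
+
+i.e. a 10% overstress increases the slip rate roughly sevenfold, which is why slip activity switches on sharply once $\tau^i$ exceeds $\xi^i$.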
+The resolved shear stress on system $i$, $\tau^i$, is the second Piola-Kirchhoff stress tensor, $\boldsymbol{S}$, projected by the corresponding Schmid tensor, itself given by the dyadic product of the unit vectors along the slip direction, $\boldsymbol{b}^i$, and the slip plane normal, $\boldsymbol{n}^i$
+
+$$
+\tau^i = \boldsymbol{S}\cdot\boldsymbol{b}^i\otimes\boldsymbol{n}^i
+$$
+
+The following power law is used to determine the change of the CRSS $\xi^i$ from its initial value $\xi_0^i$ to the defined saturated CRSS $\xi_\infty^{i^{\prime}}$ with flow hardening, as shown below
+
+$$
+\dot{\xi}^i = \dot{h}_0^{s-s}\sum_{i^{\prime}=1}^{N_s} \displaystyle\left\vert\dot{\gamma}^{i^{\prime}}\right\vert \displaystyle\left\vert1-\frac{\xi^{i^{\prime}}}{\xi_\infty^{i^{\prime}}}\right\vert^{w}\text{sgn}\left(1-\frac{\xi^{i^{\prime}}}{\xi_\infty^{i^{\prime}}}\right)h^{ii^{\prime}}
+$$
+
+where $\dot{h}_0^{s-s}$ is the initial hardening rate, $w$ is a fitting parameter and $h^{ii^{\prime}}$ are the components of the slip-slip interaction matrix. $h_{ij}$ is 1.0 for self-hardening and 1.4 for latent hardening.
+
+Please find below a collection of single-crystal property parameters for titanium and its alloys from a variety of literature sources.
+Please add to this list should your literature review include these parameters, to aid future work on modelling of titanium and its alloys.
+
+
+## Alpha phase (Ti-α)
+The Ti-$\alpha$ phase possesses a hexagonal close-packed (HCP) unit cell with $c/a$ ratio 1.587, smaller than the ideal ratio of 1.633 (Lütjering, 2007)[^1].
+### Elastic properties
+
+| $C_{11}$ | $C_{12}$ | $C_{13}$ | $C_{33}$ | $C_{44}$ | $C_{66}$ | Source | Comments |
+| -------- | -------- | -------- | -------- | -------- | -------- | --------- | ---------------------------------------------------------------------------------------------------------------------------- |
+| 162.4 | 92.0 | 69.0 | 180.7 | 46.7 | - | (Fisher, 1964)[^2] | Ultrasonic wave interference tests of CP-Ti at room temperature. Measurement of $C_{33}$ was interrupted but still included. |
+| 160.0 | 95.0 | 45.0 | 181.0 | 55.0 | 55.0 | (Naimon, 1974)[^3] | 'Pulse superposition' of Ti64 at room temperature. |
+| 160.0 | 90.0 | 66.0 | 181.0 | 46.5 | - | (Hearmon, 1984)[^4] | Collection of crystal parameters from legacy papers. |
+
+### Plastic properties - Initial and saturated Critical Resolved Shear Stresses (CRSS)
+Be aware that some are given as ratios.
+
+| Slip system | \{ 0002 \}\< 11-20 \> | \{ 10-10 \}\< 11-20 \> | \{ 10-11 \}\< 11-23 \> | Source | Comments |
+| ----------- | --------------------- | --------------------- | ---------------------- | --------- | -------- |
+| CRSS ratio | 1.5 | 1 | 3 | (Dunst, 1996)[^5] | Texture predictions validated against experiment. |
+| CRSS | 420.0 | 370.0 | 590.0 | (Bridier, 2006)[^6] | In-situ fatigue tests of room-temperature Ti64. |
+| CRSS | 349.0 | 150.0 | 1107.0 | (Zambaldi, 2012)[^7] | 'Simplex algorithm' used to match results of a MARC CPFE model with compression tests of room-temperature CP-Ti. |
+| CRSS | 444.0 | 392.0 | 631.0 | (Jones, 1981)[^8] | 'Ball model' - estimations of CRSS were validated against uniaxial compression and tensile tests of CP-Ti at room temperature. |
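+
+Where only CRSS ratios are reported (as in the Dunst row above), absolute values for a simulation input have to be recovered by pinning one slip system to a known or fitted reference. A minimal Python sketch (the 370 MPa prismatic reference is purely illustrative):
+
+```python
+# CRSS ratios (basal : prismatic : pyramidal <c+a>) as reported by Dunst (1996)
+ratios = {'basal': 1.5, 'prismatic': 1.0, 'pyramidal_ca': 3.0}
+
+reference_mpa = 370.0  # illustrative prismatic CRSS; fit this to your own data
+crss_mpa = {system: ratio * reference_mpa for system, ratio in ratios.items()}
+print(crss_mpa)  # {'basal': 555.0, 'prismatic': 370.0, 'pyramidal_ca': 1110.0}
+```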
+
+### Plastic properties - Hardening equation parameters
+
+| $a$ | $n_{sl}=\frac{1}{m}$ | $\dot{\gamma}_0$ | $h_0$ | $h_{ij}$ | Source | Comments |
+| --- | -------------------- | --------------- | ----- | ---------- | --------- | -------- |
+| 2.0 | 0.05 | 0.001 | 200.0 | [1.4 (non-coplanar), 1.0 (coplanar)] | (Zambaldi, 2012)[^7] | Set arbitrarily for use in the 'simplex algorithm' to determine CRSS |
+| | | | | | | |
+
+
+## Beta phase (Ti-β)
+Because single-crystal properties of the beta phase cannot be determined directly at room temperature, some assumptions may be made in order to model its deformation response. For example, interstitial-free steel is a good approximation for the $\beta$-phase, due to possessing similar slip modes.
+### Elastic properties
+
+| $C_{11}$ | $C_{12}$ | $C_{44}$ | Source | Comments |
+| ------- | -------- | -------- | ------ | -------- |
+| 97.7 | 87.2 | 37.5 | (Ledbetter, 2004)[^9] | Non-contacting electromagnetic-acoustic resonance at 1030 °C. |
+| | | | | |
+
+### Plastic properties - Initial and saturated Critical Resolved Shear Stresses (CRSS)
+
+| Pencil glide | \{ 110 \}\< 111 \> | \{ 112 \}\< 111 \> | \{ 123 \}\< 111 \> | Source | Comments |
+| ------------ | ------------------ | ----------------- | ------------------ | ------ | -------- |
+| CRSS ratio | 1/3 | 1/3 | 1/3 | (Dunst, 1996)[^5] | Hot texture predictions compared against experiment. |
+| | | | | | |
+
+### Plastic properties - Hardening equation parameters
+
+| $w$ | $n_{sl}$ | $\dot{\gamma}_0$ | $h_0$ | $h_{ij}$ | Source | Comments |
+| --- | -------- | --------------- | ----- | -------- | ------ | -------- |
+| | | | | | | |
+| | | | | | | |
+
+
+## Contact
+
+This code is maintained by the Microstructure Modelling Group at the University of Manchester. For questions, comments, bug reports or contributions please email Dr. Adam Plowman at [Adam.plowman@manchester.ac.uk](mailto:Adam.plowman@manchester.ac.uk) or Guy Bowker at [guy.bowker@postgrad.manchester.ac.uk](mailto:guy.bowker@postgrad.manchester.ac.uk).
+
+## References
+
+[^1]: Lütjering, G. and Williams, J.C., 2007. Titanium. Springer Science & Business Media.
+[^2]: Fisher, E.S. and Renken, C.J., 1964. Single-crystal elastic moduli and the hcp→bcc transformation in Ti, Zr, and Hf. Physical Review, 135(2A), p.A482.
+[^3]: Naimon, E.R., Weston, W.F. and Ledbetter, H.M., 1974. Elastic properties of two titanium alloys at low temperatures. Cryogenics, 14(5), pp.246-249.
+[^4]: Hearmon, R.F.S., 1984. The elastic constants of crystals and other anisotropic materials. Landolt-Börnstein Tables, III/18, 1154.
+[^5]: Dunst, D. and Mecking, H., 1996. Analysis of experimental and theoretical rolling textures of two-phase titanium alloys. International Journal of Materials Research, 87(6), pp.498-507.
+[^6]: Bridier, F., 2006. Analyse expérimentale des modes de déformation et d'endommagement par fatigue à 20 °C d'alliage de titane: aspects cristallographiques à différentes échelles (Doctoral dissertation, Poitiers).
+[^7]: Zambaldi, C., Yang, Y., Bieler, T.R. and Raabe, D., 2012. Orientation informed nanoindentation of α-titanium: Indentation pileup in hexagonal metals deforming by prismatic slip. Journal of Materials Research, 27(1), pp.356-367.
+[^8]: Jones, I.P. and Hutchinson, W.B., 1981. Stress-state dependence of slip in Titanium-6Al-4V and other HCP metals. Acta Metallurgica, 29(6), pp.951-968.
+[^9]: Ledbetter, H., Ogi, H., Kai, S., Kim, S. and Hirao, M., 2004. Elastic constants of body-centered-cubic titanium monocrystals. Journal of Applied Physics, 95(9), pp.4642-4644.
+
+
+
+
diff --git a/collections/_software_and_simulation/UNIX_guide.md b/collections/_software_and_simulation/UNIX_guide.md
new file mode 100644
index 0000000..32ed035
--- /dev/null
+++ b/collections/_software_and_simulation/UNIX_guide.md
@@ -0,0 +1,34 @@
+---
+title: UNIX for dummies - How to navigate mac OS terminal and the CSF
+author: Guy Bowker
+tags:
+  - simulation
+  - csf
+  - UNIX
+toc: true
+published: true
+subcollection: CSF
+---
+
+## UNIX interface
+
+The terminal is a very powerful interface, based on the UNIX shell command language, with which you can do everything you could usually do using a mouse cursor and windows. Using a variety of commands, directories and files can be created, moved and copied. Programs can be installed, run and debugged from the command line. Here is where you can start to appreciate the utility of typing commands over clicking and dragging icons.
+
+## Accessing the command line
+To access the terminal on a Mac, go to Launchpad and search 'Terminal'. This uses 'bash'. Opening this will bring up a black window with some lines of code. Let's walk you through it:
+```bash
+Last login: Fri Mar 12 10:38:17 on ttys002
+Y15576GB@C-LOSX1EVL410 ~ %
+```
+This is my terminal. The first line is telling me the date and time of my last login, while the second line is telling me the user (Y15576GB) and the machine (LOSX1EVL410). Here <> is used to indicate where you could type a command; the <> should be omitted when running the command.
+
+On Windows, the equivalent is PowerShell. You can use CMD; however, some commands are different. Search for PowerShell in the search bar at the bottom left of Windows 10 and open it.
+
+## How to navigate the grid and create files
+
+You can navigate file directories as you would in the Windows or Mac file explorers. In both Windows and Mac, you begin at your user directory, which contains Desktop, Pictures, Downloads etc. To move to Desktop and view its contents as you would by double-clicking the icon, type `cd Desktop` and press enter. Remember that filenames are case-sensitive and will not open unless exact. To move back to the previously-viewed directory (as if clicking the back button) use `cd -`. To list the contents of the directory you are currently in, use `ls` (sometimes `dir` on Windows). To move from a directory to the directory containing it, use `cd ..`. To create a new directory (similar to clicking 'new folder') use `mkdir <directory_name>`. Remember to use a name with words separated by underscores (_) rather than spaces. To delete a directory use `rm -r <directory_name>`. *Be very careful here*, as deleting a file from the command line is permanent (unlike sending it to the recycle bin).
+
+## Helpful YouTube resources
+
+- [A playlist on basics of using the terminal](https://www.youtube.com/playlist?list=PLII6oL6B7q78PKy6_R6JTkkYjVXZBZcVq)
+- [Installing Anaconda by Joao](https://www.youtube.com/watch?v=EbYGBANqDdY)
diff --git a/collections/_software_and_simulation/abaqus_on_csf.md b/collections/_software_and_simulation/abaqus_on_csf.md
index 098f1be..796b1bc 100644
--- a/collections/_software_and_simulation/abaqus_on_csf.md
+++ b/collections/_software_and_simulation/abaqus_on_csf.md
@@ -1,14 +1,20 @@
 ---
 title: How to run ABAQUS software as a GUI on the CSF via a Linux remote desktop
 author: Kevin Tanswell
+subcollection: ABAQUS
 ---
 1. First contact the IT Services RI team: its-ri-team@manchester.ac.uk to setup a CSF user profile and provide the following details as per: http://ri.itservices.manchester.ac.uk/csf3/getting-started/user-accounts/
-a. The name of the system you wish to access – in this case the CSF
-b. A brief description of why you want to use the CSF
-c. your University IT Username (not password)
-d. your library card number
-e. the name of your PI/supervisor/line manager
+
+- The name of the system you wish to access – in this case the CSF
+
+- A brief description of why you want to use the CSF
+
+- your University IT Username (not password)
+
+- your library card number
+
+- the name of your PI/supervisor/line manager
 
 2. Download X2Go client from https://wiki.x2go.org/doku.php this is a Virtual Desktop Service required to access the CSF via a remote Linux desktop.
diff --git a/collections/_software_and_simulation/call_matlab_from_python.md b/collections/_software_and_simulation/call_matlab_from_python.md
new file mode 100644
index 0000000..b6d4e37
--- /dev/null
+++ b/collections/_software_and_simulation/call_matlab_from_python.md
@@ -0,0 +1,7 @@
+---
+title: Running Matlab code from Python
+author: Adam Plowman
+subcollection: MTEX
+---
+
+Do this: [https://uk.mathworks.com/help/matlab/matlab_external/install-the-matlab-engine-for-python.html](https://uk.mathworks.com/help/matlab/matlab_external/install-the-matlab-engine-for-python.html)
diff --git a/collections/_software_and_simulation/comp_phase.md b/collections/_software_and_simulation/comp_phase.md
new file mode 100644
index 0000000..b262e5b
--- /dev/null
+++ b/collections/_software_and_simulation/comp_phase.md
@@ -0,0 +1,39 @@
+---
+title: Fraction of a Phase
+author: Sakina Rehman
+tags:
+  - thermocalc
+  - morton pc
+  - python
+toc: true
+subcollection: TC Python
+published: true
+---
+# Fraction of a Phase
+
+This example prints the composition of a phase in a single equilibrium calculation for an Fe-based alloy (Fe-Cr-C). The mass fractions of both Cr and C have been set (please note, TC-Python automatically calculates the mass fraction of Fe) and the temperature has been fixed. For the phase ```FCC_A1```, the composition as a weight fraction is output by this script.
+
+```
+from tc_python import *
+
+with TCPython() as session:
+    # create equilibrium calculation object and set conditions
+    eq_calculation = (
+        session.
+        set_cache_folder(os.path.basename(__file__) + "_cache").
+        select_database_and_elements("FEDEMO", ["Fe", "Cr", "C"]).
+        get_system().
+        with_single_equilibrium_calculation().
+        set_condition(ThermodynamicQuantity.temperature(), 1300.0).
+        set_condition(ThermodynamicQuantity.mass_fraction_of_a_component("Cr"), 0.1).
+        set_condition(ThermodynamicQuantity.mass_fraction_of_a_component("C"), 0.01)
+
+    )
+
+    calc_result = eq_calculation.calculate()
+
+    compo_phase = calc_result.get_value_of(ThermodynamicQuantity.
+        composition_of_phase_as_weight_fraction('FCC_A1', 'Cr'))
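+    # compo_phase now holds the Cr content of the FCC_A1 phase as a weight fraction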
+    print(compo_phase)
+
+```
diff --git a/collections/_software_and_simulation/cropping_binary_files.md b/collections/_software_and_simulation/cropping_binary_files.md
index f84ff74..c72b5f9 100644
--- a/collections/_software_and_simulation/cropping_binary_files.md
+++ b/collections/_software_and_simulation/cropping_binary_files.md
@@ -5,6 +5,7 @@ tags:
   - Python
   - EBSD
 published: true
+subcollection: Misc
 ---
 
 # Cropping CRC files
diff --git a/collections/_software_and_simulation/debugging.md b/collections/_software_and_simulation/debugging.md
new file mode 100644
index 0000000..c1a9bdf
--- /dev/null
+++ b/collections/_software_and_simulation/debugging.md
@@ -0,0 +1,34 @@
+---
+title: Debugging tips
+author: Gerard Capes
+tags:
+  - debugging
+subcollection: General
+---
+If you run or write code as part of your research, sooner or later something isn't going to work as expected.
+
+# Debugging checklist
+- [ ] Compare your code against the last working version (`git diff`).
+  If it previously worked, then something you have changed will most likely have brought about the current error.
+- [ ] Google the error message.
+  It's quite possible that someone else has encountered the same problem as you,
+  and had it answered on Stack Overflow.
+- [ ] Are you running the latest version of the software?
+  You might have found a bug that has subsequently been fixed.
+  Updating your software might resolve the problem.
+- [ ] Check the software's issue tracker.
+  Many software packages have a publicly accessible issue tracker (on GitHub or elsewhere) that you can search,
+  or use to create a new issue for the problem you're facing.
+
+# How to ask for help
+When asking for help (e.g. from colleagues, on Stack Overflow), you should aim to make it easy for others to help you. This means:
+- [ ] Give some context - what were you trying to do? This is more useful than just asking what an error message means.
+- [ ] Give them something they can run to reproduce your error.
+  A [minimal working example](https://en.wikipedia.org/wiki/Minimal_reproducible_example) is the target here.
+  A link to your code on GitHub would be great - maybe you want to create an issue too which explains everything
+  so the conversation can stay with the code rather than getting lost on Slack.
+- [ ] What versions of which software are you using?
+- [ ] Show the whole error message - the important clue might not be where you think it is.
+- [ ] What platform are you running on (laptop/CSF etc.)?
+
+Very often the process of verbally explaining / writing down the problem causes you to fix the problem on your own. This is known as [rubber duck debugging](https://en.wikipedia.org/wiki/Rubber_duck_debugging).
diff --git a/collections/_software_and_simulation/developing_python_packages.md b/collections/_software_and_simulation/developing_python_packages.md
index 1f5fc25..ab8621a 100644
--- a/collections/_software_and_simulation/developing_python_packages.md
+++ b/collections/_software_and_simulation/developing_python_packages.md
@@ -5,6 +5,7 @@ toc: true
 tags:
   - python
 published: true
+subcollection: Python
 ---
 
 This is a short guide on how to develop and publish Python packages. This is just my workflow; you may wish to modify to suit your needs!
diff --git a/collections/_software_and_simulation/getting_matflow_help.md b/collections/_software_and_simulation/getting_matflow_help.md
new file mode 100644
index 0000000..2e67dc1
--- /dev/null
+++ b/collections/_software_and_simulation/getting_matflow_help.md
@@ -0,0 +1,153 @@
+---
+title: Getting help with MatFlow
+author: Adam Plowman
+toc: true
+tags:
+  - python
+  - matflow-old
+published: true
+subcollection: MatFlow old
+---
+
+## Getting help with MatFlow workflows
+
+First of all, make sure your packages are up to date with this command on the CSF:
+
+```bash
+/mnt/eps01-rds/jf01-home01/shared/matflow/update_matflow.sh
+```
+
+If you are having problems with loading your workflow locally (e.g. in a Jupyter notebook), make sure your local packages are up to date (run this on your computer):
+
+```bash
+pip install -U matflow damask-parse formable matflow-damask matflow-formable matflow-defdap matflow-mtex matflow-neper matflow-demo-extension
+```
+
+If you are still having problems, post a new GitHub issue in the installation repository ([UoM-CSF-Matflow](https://github.com/LightForm-group/UoM-CSF-matflow)), using the ["workflow problem" issue template](https://github.com/LightForm-group/UoM-CSF-matflow/issues/new/choose). If you are certain that a bug exists in one of the MatFlow extension packages, or [MatFlow](https://github.com/LightForm-group/matflow) or [HPCFlow](https://github.com/LightForm-group/hpcflow), then please create the GitHub issue in one of the respective repositories.
+
+## Suggestions for new extensions/tasks/methods
+
+Please add a new issue to the installation repository ([UoM-CSF-Matflow](https://github.com/LightForm-group/UoM-CSF-matflow)).
+
+## Importing large parameters
+
+You may have issues when using `import` to re-use workflow parameters from an existing workflow, if the parameters are larger than the available memory on the login node at submission time. To prevent this, you can first submit, using `qsub`, a jobscript that runs `matflow make`:
+
+```sh
+#!/bin/bash --login
+
+#$ -cwd
+#$ -N mf_make
+#$ -pe smp.pe 6 # specify whatever resources are required to access sufficient memory
+
+export HDF5_USE_FILE_LOCKING=FALSE
+export OMP_NUM_THREADS=1
+
+matflow make workflow_file.yml
+
+```
+
+Once this has run, a new workflow directory should be generated. You can then submit the workflow via this directory with `matflow go /path/to/workflow/directory`.
+
+## FAQs
+**When I submit a workflow I get a message like "*The following schemas are invalid...*"; what does this mean?**
+
+This indicates that some of the task schemas cannot be used, given the extension packages that you currently have installed. This is not a problem, unless you want to use one of those tasks. If you do try to use one of those tasks in a workflow profile, you will receive a more obvious error from MatFlow.
+
+## Troubleshooting
+**Installation failed due to an error message: `damask-parse current_version has requirement numpy>=1.17.5 but you'll have numpy other_version which is incompatible`**
+
+1. Type `pip list --user` into the command line.
+   - This should give you a list of modules installed on your user account.
+2. Find the version of numpy in the list and check if it is the correct version.
+3. If the version is incorrect, then type `pip uninstall numpy`.
+   - This should prompt a Y/N answer; say yes.
+4. Type `pip install --user numpy==1.17.5` to reinstall the correct version of numpy.
+5. Check the installation with `matflow validate`.
+
+**My workflow didn't run**
+
+1. Type `matflow validate` into the terminal.
+   - This ensures that matflow is installed correctly
+   - If your workflow does not work, go to 2.
+2. Run the following in the terminal: `/mnt/eps01-rds/jf01-home01/shared/matflow/update_matflow.sh`.
+   - This ensures that you are using the latest stable version of matflow
+   - If your workflow does not work, go to 3.
+3. Check if there is an error with line numbers displayed in the CSF interface.
+   - If yes, that means that there is likely an error in the format of your YAML file; go to 4.
+   - If no, go to 5.
+4. Check the YAML file on [https://yamlvalidator.com](https://yamlvalidator.com). Make sure there are no indentation errors.
+   - If your workflow still does not work, go to 5.
+5. Go to `~/.matflow/` and check `config.yml`. It should look like this:
+
+   ```sh
+   task_schema_sources:
+     - /mnt/eps01-rds/jf01-home01/shared/matflow/task_schemas.yml
+   software_sources:
+     - /mnt/eps01-rds/jf01-home01/shared/matflow/software.yml
+   parallel_modes:
+     MPI:
+       command: mpirun -np <>
+     OpenMP:
+       env: export OMP_NUM_THREADS=<>
+
+   default_preparation_run_options:
+     l: short
+
+   default_processing_run_options:
+     l: short
+
+   default_iterate_run_options:
+     l: short
+
+   ```
+   - If it is correct, go to 6.
+6. Look for `stderr.log` in the `simulate_volume_element` directory.
+   - If you've found it, go to 7.
+   - If there is no such directory, the workflow did not run at all; go to 8.
+7. Read the error at the bottom of the log file. Comment out the relevant tasks, starting from the one at the bottom of the error message. This is to isolate the problem.
+   - If `stderr.log` is all 0s, then go to 8.
+   - If you have tried commenting out all of the tasks, go to 8.
+   - If your workflow worked after commenting out tasks, go to 10.
+8. Use one of the [example workflows](https://github.com/LightForm-group/UoM-CSF-matflow/tree/master/workflows).
+   - If the example workflow is not working, repeat with a different example.
+   - If you have tried all of the examples, go to 1 or contact a member of the team.
+   - If the example workflow works, go to 9.
+9. Compare your workflow against the working example workflow at [https://text-compare.com](https://text-compare.com) and see what the differences are in the relevant task(s).
+   - This is to isolate and fix the relevant task(s).
+10. You have now identified the relevant task(s) that failed. Please refer to the corresponding troubleshooting section for that task.
+
+**There's no visualisation of the results / There's no .vtr in the simulate_volume_element_loading task directory --WIP (more errors needed and more thorough solutions needed)**
+
+1. Navigate to the `output` directory of your simulation (the directory address should look like this: `#/scratch/Your_Task_Name_and_Date/output`)
+2. Read `t5_pro.o-------` (or `t4_pro.o-------` / `t6_pro.o-------` depending on how many tasks you have; it should be the final task)
+3. Read the error message:
+   - If it is something like `Failed to execute the output map for output "volume_element_response". Exception was: Unable to allocate 72.0 MiB for an array with shape (1048576, 3, 3) and data type float64`, go to 4
+   - If it is something else (I have not encountered other errors yet, please provide additional errors)
+4. Try lowering your modelling domain size (bearing in mind the z-dimension must be divisible by the number of cores), or lower the number of increments to be visualised (e.g. visualising every other increment rather than all of them).
+***If the above is not an option, go to 5.***
+5. Open your YAML file and add the following lines into the `simulate_volume_element_loading` task:
+```sh
+    run_options:
+      num_cores: 8
+      processing:
+        l: mem256
+```
+and try again.
+
+6. If it still fails in the same way, then replace the above with:
+```sh
+    run_options:
+      num_cores: 16
+      processing:
+        l: mem512
+```
+
+7. If that still doesn't work, try:
+```sh
+    run_options:
+      num_cores: 16
+      processing:
+        num_cores: 4
+        l: mem512
+```
+Other tasks' troubleshooting WIP
diff --git a/collections/_software_and_simulation/image_magick_animation.md b/collections/_software_and_simulation/image_magick_animation.md
new file mode 100644
index 0000000..b9b1c8b
--- /dev/null
+++ b/collections/_software_and_simulation/image_magick_animation.md
@@ -0,0 +1,12 @@
+---
+title: Animations with ImageMagick
+author: Adam Plowman
+tags: []
+toc: true
+published: true
+subcollection: Misc
+---
+
+ImageMagick command to convert a series of PNGs (named `ani.*.png`; as produced by ParaView) to an animated GIF:
+
+`convert -delay 10 ani.*.png ani.gif`
diff --git a/collections/_software_and_simulation/install_dev_version.md b/collections/_software_and_simulation/install_dev_version.md
new file mode 100644
index 0000000..a1fccec
--- /dev/null
+++ b/collections/_software_and_simulation/install_dev_version.md
@@ -0,0 +1,20 @@
+---
+title: Installing development versions of Python packages (e.g. MatFlow extensions) on the CSF
+author: Adam Plowman
+toc: true
+tags:
+  - python
+  - matflow-old
+published: true
+subcollection: MatFlow old
+---
+
+1. On the CSF, uninstall any "published" version of the package you wish to install from GitHub. For example, if you want to install a development version of the `matflow-damask` extension, first do `pip uninstall matflow-damask`. (Not sure if this step is required.)
+2. Find the GitHub repository of the package you would like to install
+3. Click on the green `Code` button and then copy the given URL:
+
+   ![](/wiki/assets/images/posts/github_clone.png)
+
+4. On the CSF, go to your home directory: `cd ~`, and then type: `git clone [GITHUB_URL]`, where you should replace `[GITHUB_URL]` with the URL copied in step 3.
+5. On the CSF, change directory into the new git repository that was just downloaded from GitHub, e.g. `cd matflow-damask`, and then use `git checkout` to switch to the desired branch like this: `git checkout [DEV_BRANCH]`, where you should replace `[DEV_BRANCH]` with the name of the development branch you wish to install.
+6. On the CSF, install the package in "editable" mode like this: `pip install --user -e .` (note the ending full-stop). Using editable mode means that you can subsequently `git pull` changes and the changes will be reflected in your installed package.
diff --git a/collections/_software_and_simulation/jupyter_CSF.md b/collections/_software_and_simulation/jupyter_CSF.md
new file mode 100644
index 0000000..32ece5a
--- /dev/null
+++ b/collections/_software_and_simulation/jupyter_CSF.md
@@ -0,0 +1,56 @@
+---
+title: Accessing Jupyter notebooks on the UoM iCSF (interactive Computational Shared Facility)
+author: Rhys Thomas
+tags:
+  - python
+  - CSF
+  - jupyter
+published: true
+subcollection: Python
+---
+
+If you are planning on analysing large datasets using DefDAP (or any external Python package in fact), you may consider using the [University's iCSF service](http://ri.itservices.manchester.ac.uk/icsf/). As of Aug 2020, each node has two 14-core Intel Broadwell CPUs with 256 GB RAM.
+This is significantly more powerful than a university-provided laptop and has more RAM than the Linux/Windows workstations at the office. The following instructions are a simple guide on how to use Jupyter Notebook via x2go on the iCSF, including how to install additional Python packages and access your files. These instructions are based on various IT Services guides, which are linked below.
+
+Note: You can also run Jupyter Notebook on the CSF (Computational Shared Facility) if you require more computational power and RAM, but this system is more complex to set up and you may need to wait for access. For the purposes of what we want to achieve, running on the iCSF is sufficient (for now anyway). Details of how to run Jupyter Notebook on the CSF are [here](http://ri.itservices.manchester.ac.uk/csf3/software/applications/jupyter-notebook/).
+
+## Running Jupyter Notebook on the iCSF via x2go
+
+1. First contact the IT Services RI team: its-ri-team@manchester.ac.uk to set up an iCSF/x2go user profile and provide the following details (as per [this guide by IT Services](http://ri.itservices.manchester.ac.uk/icsf/getting-started-on-icsf/user-accounts/)):
+   - the name of the system you wish to access – in this case the iCSF and Virtual Desktop Service (x2go)
+   - a brief description of why you want to use the iCSF - in this case using Jupyter Notebook on the iCSF via x2go
+   - your University IT Username (not password)
+   - your library card number
+   - the name of your PI/supervisor/line manager
+
+#### Before continuing, make sure you are connected to the VPN
+
+2. Follow the x2go setup guides here: [Windows](http://ri.itservices.manchester.ac.uk/virtual-desktop-service/x2go/windows/) \ [MacOS](http://ri.itservices.manchester.ac.uk/virtual-desktop-service/x2go/mac/) \ [Linux](http://ri.itservices.manchester.ac.uk/virtual-desktop-service/x2go/linux/). This will hopefully give you access to a Linux virtual machine like this:
+![](https://www.dropbox.com/s/26e94pp5cc2pdim/x2go.jpg?raw=1)
+
+#### Note: Avoid running computationally expensive software directly on this virtual machine; it is used in this case as a means to access the iCSF (aka Incline).
+
+3. Open the Terminal from the Applications menu bar in the top left (Applications > System Tools > MATE Terminal). Note: I would recommend adding a shortcut to the Terminal to the desktop (right-click the shortcut in the menu bar and click 'Add this launcher to desktop').
+4. Run `jupyter-notebook-icsf 3/2019.07` and enter your password when prompted. You will be provided with a URL in the terminal, with a format similar to:
+`http://localhost:7777/?token=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx`
+Right-click on this link in the terminal window and select 'Open Link'.
+
+## To install additional packages, e.g. DefDAP:
+
+1. First, open another Terminal window and write `ssh incline256`. Type your password to log in.
+2. Run the following two commands so that the internet is accessible:
+`export http_proxy=http://webproxy.its.manchester.ac.uk:3128`
+`export https_proxy=http://webproxy.its.manchester.ac.uk:3128`
+3. Run the following command to load anaconda: `module load apps/binapps/anaconda3/2019.07`
+4. Run the following command to install DefDAP: `pip install --user defdap`. You can install any package from the [Python Package Index (PyPI)](https://pypi.org) by changing the final argument.
+5. You can then quit this terminal and go back to your browser window.
+6. You will need to restart the Python kernel to use the new package (Kernel > Restart)
+![](https://www.dropbox.com/s/rsrn1tsxh5elr43/restart.jpg?raw=1)
+7. Check you can import the package by typing `import defdap` into a cell and running the cell. If you don't get any errors then it's worked.
+
+## Accessing your files
+When you register to use the iCSF, you will be provided with storage on the university servers. This is what you will see when you first open Jupyter Notebook (hence why it will be empty apart from a Scratch folder). Instructions for accessing this storage on your local PC are given [here](http://ri.itservices.manchester.ac.uk/userdocs/file-transfer/). Make sure you use `incline.itservices.manchester.ac.uk` as the hostname.
+To access my files, I personally use a program called [CyberDuck](https://cyberduck.io), which works well (on Windows and Mac) and is very easy to set up. The settings are shown below (replace username and password with your own, of course):
+
+![](https://www.dropbox.com/s/syfn5n8osa4o5cm/cyberduck.png?raw=1)
+
+You will then be shown a list of your files, where you can drag and drop between the university storage and your local PC.
diff --git a/collections/_software_and_simulation/matflow-damask.md b/collections/_software_and_simulation/matflow-damask.md
new file mode 100644
index 0000000..9a3f4ec
--- /dev/null
+++ b/collections/_software_and_simulation/matflow-damask.md
@@ -0,0 +1,33 @@
+---
+title: Running DAMASK with MatFlow
+author: Guy Bowker
+toc: true
+tags:
+  - python
+  - matflow-old
+  - damask
+published: true
+subcollection: DAMASK
+---
+
+## Running DAMASK with MatFlow
+
+DAMASK can be used in combination with Dream3D, DefDAP, and MTEX with MatFlow. See [here](https://github.com/LightForm-group/UoM-CSF-matflow/blob/master/workflows/tension_DAMASK_Al.yml) for an example and [here](https://lightform-group.github.io/wiki/software_and_simulation/matflow-first-time) for a guide.
+
+## Common issues with matflow-damask
+
+Before running your MatFlow-DAMASK workflow, ensure your `workflow.yaml` file is valid with [this free online tool](https://www.yamllint.com/) (or locally; see the sketch at the end of this page).
+Ensure you use the `spectral_polarization` flag to use the latest solver:
+```yaml
+  - name: simulate_volume_element_loading
+    method: CP_FFT
+    software: DAMASK
+    base:
+      solver:
+        mechanical: spectral_polarization # spectral_basic (default), spectral_polarization, FEM
+      numerics:
+        grid:
+          itmax: 100 # number of allowed attempts to converge
+          maxCutBack: 1 # number of cutbacks allowed
+          derivative: FWBW_difference # uses a smoother differential
+```
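+
+If you would rather check the file locally than paste it into a website, a minimal sketch using PyYAML (an assumption: install it with `pip install pyyaml`; any YAML parser works) performs the same syntax check:
+
+```python
+import yaml  # PyYAML
+
+# Parse the workflow file; a syntax problem raises yaml.YAMLError
+with open("workflow.yaml") as f:
+    try:
+        yaml.safe_load(f)
+        print("workflow.yaml parsed OK")
+    except yaml.YAMLError as err:
+        print(f"YAML syntax error: {err}")
+```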
diff --git a/collections/_software_and_simulation/matflow-new_install.md b/collections/_software_and_simulation/matflow-new_install.md
new file mode 100644
index 0000000..1e6c1cf
--- /dev/null
+++ b/collections/_software_and_simulation/matflow-new_install.md
@@ -0,0 +1,56 @@
+---
+title: Installing Matflow
+author: Guy Bowker, Gerard Capes, Samuel Engel
+toc: true
+tags:
+  - python
+  - matflow-new
+  - csf
+  - csf3
+published: true
+subcollection: MatFlow
+---
+# Matflow: An API for fully reproducible computational material science workflows
+This page is intended to be the new place for all information on installing and reconfiguring matflow installations.
+
+For instructions on how to write your own workflow, see [Gerard's repository here](https://github.com/LightForm-group/matflow-user-documentation)
+
+## Installing Python virtual environments
+
+## Installing matflow-damask
+Steps I've done to install matflow-new with damask functionality on my CSF space:
+
+Download this folder:
+[matflow-new.zip](https://github.com/user-attachments/files/18733560/matflow-new.zip)
+
+Create an environment of `python=3.10` using conda: `conda create -n damask-env python=3.10`
+
+Activate the environment: `conda activate damask-env`
+
+Install the requirements: `pip install -r requirements.txt`
+
+Test that the matflow CLI works by just typing `matflow` in the command line and pressing enter.
+
+Install `damask` and `damask-parse`: `pip install damask==3.0.0a7.post0`, then `pip install damask-parse==0.2.30`. (These NEED to be separate commands, because pip raises an error if you try to install them together, since `3.0.0a7.post0` isn't considered `>3.0.0`!)
+
+Download the config files `.matflow-new/` and put this dir in home (`~/`).
+
+Ensure line 12 of the `.matflow-new/config.yaml` file is `/mnt/iusers01/jf01/[username]/.matflow-new/envs_CSF3.yaml`, replacing `[username]` with yours (mine is y15576gb), e.g. `/mnt/iusers01/jf01/y15576gb/.matflow-new/envs_CSF3.yaml`.
+
+Point all `setup:` variables in `.matflow-new/envs_CSF3.yaml` to your newly created environment (for example, see how the entries in the `envs_CSF3.yaml` given here all have `conda activate damask-env`).
+
+Navigate to scratch and test by running the lightest demo workflow: `matflow demo-workflow go tension_DAMASK_Al`.
+
+ABOVE ALL, CHECK that `damask==3.0.0a7.post0` and `damask-parse==0.2.30` are present, using `pip list`!
+
+## Installing matflow-MTEX
+
+## Workflow examples
+
+## Development examples
+
+## FAQ
+### Q. Why is this page here?
+
+**A**. Because Guy Bowker says so.
diff --git a/collections/_software_and_simulation/matflow-quickstart-install.md b/collections/_software_and_simulation/matflow-quickstart-install.md
new file mode 100644
index 0000000..d7fe4c9
--- /dev/null
+++ b/collections/_software_and_simulation/matflow-quickstart-install.md
@@ -0,0 +1,343 @@
+---
+title: Matflow - Quickest Quickstart in the West (QQTW)
+author: Samuel Engel
+tags:
+  - CSF3
+  - Matflow
+  - Modelling
+toc: false
+published: true
+subcollection: MatFlow
+---
+
+This page details how to install Matflow (https://docs.matflow.io) using centrally installed packages on the CSF3.
+
+# Quickstart
+## 1. Edit Bash Profile
+Before we begin, it is important to add the following lines to your bash profile; this will allow you to use the modulefiles that are stored in the Clari/Lightform shared space on the CSF3.
+
+```
+cd
+
+vim .bash_profile
+```
+With the editor open, add the following line to the bottom:
+
+```
+module use --append /mnt/eps01-rds/Fonseca-Lightform/shared/software/modulefiles
+```
+
+Make sure to save and exit; you will need to log out and log back in for the changes to take effect.
+
+## 2. Create your MatFlow Python Environment
+
+Now we need to create a MatFlow environment; this can be either a Python virtual environment or a conda environment.
+
+Firstly, load a conda installation, then create an environment called 'matflow-env' or something similar. You can also create a local `envs` folder if you want to keep things neat and tidy.
+
+```
+module load apps/binapps/anaconda3/2023.09
+
+python -m venv matflow-env
+
+source matflow-env/bin/activate
+```
+Remember, never run the `conda init` command, no matter what Anaconda might tell or promise you. This will cause problems, as it makes Anaconda always load in the background, even when you haven't directly loaded the module.
+
+Now that our MatFlow environment is activated, we can install the required packages.
+
+```
+pip install matflow-new==0.3.0a137
+pip install hpcflow-new2==0.2.0a189
+```
+Ignore any warning it gives you. You can now run the `matflow` command to check if things have installed correctly.
+
+## 3. Create your MatFlow Config File
+
+We now just need to add a `config.yaml` file to tell MatFlow how to schedule jobs on the CSF3.
+
+Go to your home directory and create a folder called `.matflow-new` (don't forget to add the full stop at the start). Then go into that directory and create the configuration file.
+
+```
+cd
+
+mkdir .matflow-new
+cd .matflow-new
+
+vim config.yaml
+```
+
+You will want to add the following into this file once you have it open.
+
+```
+configs:
+  CSF3:
+    invocation:
+      environment_setup:
+      match:
+        hostname: login*.pri.csf3.alces.network
+    config:
+      machine: CSF3
+      telemetry: true
+      log_file_path: logs/<>_v<>.log
+      environment_sources:
+        - /mnt/eps01-rds/Fonseca-Lightform/shared/software/envs/matflow-damask-3.0.0alpha7/envs.yaml
+      task_schema_sources: []
+      command_file_sources: []
+      parameter_sources: []
+      default_scheduler: SGE
+      default_shell: bash
+      schedulers:
+        direct:
+          defaults: {}
+        SGE:
+          defaults:
+            shebang_args: --login
+          parallel_environments:
+            null:
+              num_cores: [1, 1, 1]
+            amd.pe:
+              num_cores: [2, 2, 64]
+              num_nodes: [1, 1, 1]
+
+      shells:
+        bash:
+          defaults: {}
+```
+
+Now MatFlow should be fully installed and working.
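+
+As a quick smoke test, you can try running the lightest demo workflow from your scratch space (the same demo workflow used on the install page above):
+
+```
+matflow demo-workflow go tension_DAMASK_Al
+```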
+
+# Building a Custom Install (Advanced Users)
+
+The method above installs released versions of MatFlow. If you want to use a different version of MatFlow, perhaps one on a GitHub branch, you will need to use this example.
+
+## 1. Install Matflow
+In this example, we will use pip to install an experimental version of MatFlow that uses the release version of the crystal plasticity software DAMASK. You should repeat steps 1 and 2, but stop before you pip install anything. Instead, navigate to the shared space on the CSF3.
+
+```
+cd /mnt/eps01-rds/Fonseca-Lightform/shared/software
+```
+Each bit of software used by MatFlow is installed here, with different versions installed inside each folder; for example, the `damask` folder contains versions `3.0.0` and `3.0.0alpha7` of DAMASK. This ensures we always have a static copy of the software available, as well as the most relevant versions needed.
+
+Go into the MatFlow folder and pip install it into your current MatFlow environment, which shouldn't include any version of MatFlow yet.
+
+```
+cd matflow/matflow-damask_beta_0/
+
+pip install -e .
+```
+After that has installed, we also want to install a different version of HPCFlow.
+```
+pip install hpcflow-new2==0.2.0a189
+```
+Ignore any warning it gives you.
+
+## 2. Change the Configuration File
+
+Now navigate back to your home directory; we need to change the configuration file to use a different version of DAMASK.
+```
+cd
+
+cd .matflow-new
+
+vim config.yaml
+```
+
+Now change the configuration file to look like the following.
+
+```
+configs:
+  CSF3:
+    invocation:
+      environment_setup:
+      match:
+        hostname: login*.pri.csf3.alces.network
+    config:
+      machine: CSF3
+      telemetry: true
+      log_file_path: logs/<>_v<>.log
+      environment_sources:
+        - /mnt/eps01-rds/Fonseca-Lightform/shared/software/envs/matflow-damask-3.0.0/envs.yaml
+      task_schema_sources: []
+      command_file_sources: []
+      parameter_sources: []
+      default_scheduler: SGE
+      default_shell: bash
+      schedulers:
+        direct:
+          defaults: {}
+        SGE:
+          defaults:
+            shebang_args: --login
+          parallel_environments:
+            null:
+              num_cores: [1, 1, 1]
+            amd.pe:
+              num_cores: [2, 2, 64]
+              num_nodes: [1, 1, 1]
+
+      shells:
+        bash:
+          defaults: {}
+```
+
+After that is done, you should be able to use the release version of DAMASK in MatFlow. You need to be careful, as there have been some changes that will affect the demo workflows and cause them not to work correctly. This is usually because some parameters have become lists instead of single values, and the input files therefore need some minor tweaking.
+
+# Creating your own Environments (Super-Advanced Users)
+
+Currently, this process links to pre-existing environments on the CSF3. You might have a version of some software that is not currently supported, and therefore need to make your own custom environment from scratch. In this example we will look at how to create a custom DAMASK environment, and the best practice for installing it centrally for everyone to use. If you are shy about sharing your MatFlow environment, or uncomfortable installing in a shared space, then you can simply create these files in your home directory, ideally in a folder called `software`, `envs` or something similar.
+
+## 1. Create Custom Environments
+Now navigate to the `envs` folder on the shared space. This is located at `/mnt/eps01-rds/Fonseca-Lightform/shared/software/envs` if you get lost. You will need to create an aptly named folder to hold your custom MatFlow installation; for example, we will call this folder `matflow-damask-3.0.0temperature`. For further clarity, add a `README` in this folder to describe the specific versions of each piece of software you are installing.
+
+```
+cd /mnt/eps01-rds/Fonseca-Lightform/shared/software/envs
+
+mkdir matflow-damask-3.0.0temperature
+
+cd matflow-damask-3.0.0temperature
+```
+
+Now that you are in this folder, you should create a local Python virtual environment.
+
+```
+python -m venv matflow-damask-env
+```
+Make sure to call the environment `matflow-damask-env` to avoid any confusion about its purpose; the README and enclosing folder name should give enough information to discern this. Now activate the environment.
+
+```
+source matflow-damask-env/bin/activate
+```
+Now that we have an environment, we need to install MatFlow, DAMASK and DAMASK-parse. For DAMASK 3.0.0 we will need to use the `matflow-damask_beta_0` installation in the `matflow` folder.
+
+```
+cd /mnt/eps01-rds/Fonseca-Lightform/shared/software/matflow/matflow-damask_beta_0
+
+pip install -e .
+```
+
+Now we can install DAMASK-parse.
+
+```
+cd /mnt/eps01-rds/Fonseca-Lightform/shared/software/damask-parse/damask-parse-damask_beta0_with_T
+
+pip install -e .
+```
+
+And finally, we install the DAMASK Python package. Note that we need to go to the `python` folder within the enclosing folder.
+
+```
+cd /mnt/eps01-rds/Fonseca-Lightform/shared/software/damask/DAMASK-3.0.0-beta/python
+
+pip install -e .
+```
+You will now have a custom MatFlow-DAMASK environment that is ready to go. Deactivate the environment and go to your home directory. You will need to create a `matflow-env` that uses the same version of MatFlow that we have installed above, following the same method as in Step 2, but making sure to use the `pip install -e .` method we have used here.
+
+```
+cd
+
+python -m venv matflow-env
+
+source matflow-env/bin/activate
+
+cd /mnt/eps01-rds/Fonseca-Lightform/shared/software/matflow/matflow-damask_beta_0
+
+pip install -e .
+
+pip install hpcflow-new2==0.2.0a189
+```
+
+## 2. Edit the Configuration File
+
+Now that we have this set up, we just need to tell our `config.yaml` where our custom installation is. Open the config file and change the environment sources to point to the folder we have created. You will notice that this points to an `envs.yaml` file, which we will create next.
+
+```
+configs:
+  CSF3:
+    invocation:
+      environment_setup:
+      match:
+        hostname: login*.pri.csf3.alces.network
+    config:
+      machine: CSF3
+      telemetry: true
+      log_file_path: logs/<>_v<>.log
+      environment_sources:
+        - /mnt/eps01-rds/Fonseca-Lightform/shared/software/envs/matflow-damask-3.0.0temperature/envs.yaml
+      task_schema_sources: []
+      command_file_sources: []
+      parameter_sources: []
+      default_scheduler: SGE
+      default_shell: bash
+      schedulers:
+        direct:
+          defaults: {}
+        SGE:
+          defaults:
+            shebang_args: --login
+          parallel_environments:
+            null:
+              num_cores: [1, 1, 1]
+            amd.pe:
+              num_cores: [2, 2, 64]
+              num_nodes: [1, 1, 1]
+
+      shells:
+        bash:
+          defaults: {}
+```
+
+## 3. Create an Environments File
+Now go back to the `envs` folder that we created.
+
+```
+cd /mnt/eps01-rds/Fonseca-Lightform/shared/software/envs/matflow-damask-3.0.0temperature
+```
+Now let's create the `envs.yaml` file.
+
+```
+- name: damask_parse_env
+  setup: |
+    module load apps/binapps/anaconda3/2023.09
+    source /mnt/eps01-rds/Fonseca-Lightform/shared/software/envs/matflow-damask-3.0.0temperature/matflow-damask-env/bin/activate
+  executables:
+    - label: python_script
+      instances:
+        - command: python <> <>
+          num_cores:
+            start: 1
+            stop: 32
+          parallel_mode: null
+
+- name: damask_env
+  setup: |
+    module load damask/3.0.0-beta
+  executables:
+    - label: damask_grid
+      instances:
+        - command: $DAMASK_grid
+          num_cores: 1
+          parallel_mode: null
+
+        - command: mpirun -n $NSLOTS $DAMASK_grid
+          num_cores:
+            start: 2
+            stop: 32
+          parallel_mode: null
+
+- name: python_env
+  setup: |
+    module load apps/binapps/anaconda3/2023.09
+  executables:
+    - label: python_script
+      instances:
+        - command: python <> <>
+          num_cores:
+            start: 1
+            stop: 32
+          parallel_mode: null
+```
+
+You can add extra sections for other software, but the default values from other installations should be fine to use, and therefore we won't discuss them here.
+
+Now we are all done and you should have a working custom MatFlow.
diff --git a/collections/_software_and_simulation/matflow_development.md b/collections/_software_and_simulation/matflow_development.md
new file mode 100644
index 0000000..fe7bf9c
--- /dev/null
+++ b/collections/_software_and_simulation/matflow_development.md
@@ -0,0 +1,24 @@
+---
+title: Developing Matflow
+author: Guy Bowker, Gerard Capes
+toc: true
+tags:
+  - python
+  - matflow-new
+  - csf
+  - csf3
+published: true
+subcollection: MatFlow
+---
+# Matflow: An API for fully reproducible computational material science workflows
+
+## A guide to developing your own tasks and schemas for matflow
+Gerard's repository on this is an excellent place to start: https://github.com/LightForm-group/matflow-user-documentation
+
+1. Configure matflow-new as in the [instructions here](https://lightform-group.github.io/wiki/software_and_simulation/matflow-new-install), using [`requirements.txt`](https://github.com/LightForm-group/Do-more-with-less/blob/main/requirements.txt).
+2. Navigate to a local dir where you have read/write access and git clone matflow-new into it: `git clone https://github.com/hpcflow/matflow-new.git`
+3. Install the locally cloned `matflow-new` in editable ("egg") mode using pip: `pip install -e ./matflow-new` (editable mode means that if you make changes, Python will use the latest code you have written for this library).
+4. Best practice is to first change branch to a new one describing your addition/change to the code. Do this off the develop branch, as that's usually the furthest ahead (see the sketch after this list).
+5. Now you can make changes to the code freely. Best practice is to test the code before committing.
+6. When you've added your change and it works, commit your changes, push your branch to GitHub and submit a pull request to merge your feature/change into the develop branch of the repo.
+7. Ask the owner of the repo to pull the change into main if it's a feature worth having in the software and it's been thoroughly tested.
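+
+A minimal sketch of the branching flow in steps 4 to 7 (the branch name is illustrative):
+
+```bash
+# start a feature branch off develop (usually the furthest ahead)
+git checkout develop
+git pull
+git checkout -b my-new-feature
+# ...edit and test your changes...
+git add -A
+git commit -m "describe your change"
+git push -u origin my-new-feature  # then open a pull request against develop on GitHub
+```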
diff --git a/collections/_software_and_simulation/matflow_first_time.md b/collections/_software_and_simulation/matflow_first_time.md
new file mode 100644
index 0000000..3244b05
--- /dev/null
+++ b/collections/_software_and_simulation/matflow_first_time.md
@@ -0,0 +1,58 @@
+---
+title: Running your first MatFlow simulation
+author: Peter Crowther
+toc: true
+tags:
+  - python
+  - matflow-old
+  - workflow
+  - yaml
+  - yml
+published: true
+subcollection: MatFlow old
+---
+
+## Installing MatFlow
+For installation on the CSF follow the instructions here:
+
+## Running a workflow
+MatFlow workflows are in the form of `.yaml` or `.yml` text files. The file lists the details of the individual tasks to perform, in the order that they appear in the file.
+
+A good example workflow is [tension_DAMASK_Al.yml](https://github.com/LightForm-group/UoM-CSF-matflow/blob/master/workflows/tension_DAMASK_Al.yml), a simulation of a uniaxial tensile test of aluminium using DAMASK. This example workflow is in the "workflows" folder of the [UoM-CSF-matflow repository](https://github.com/LightForm-group/UoM-CSF-matflow). For convenience, this repository is also synchronised to the group shared directory on the research data storage (RDS). You can either git clone the repository to your user space on the CSF, or you can directly use the workflow from the shared drive at `/mnt/eps01-rds/jf01-home01/shared/matflow/`.
+
+Computationally complex workflows should not be run in your home directory (`~/.`), but in the scratch space. A link to the scratch space can be found in your home directory.
+
+A MatFlow script can be run using the command `matflow go script_name`, where `script_name` is the path to the script you want to run. The workflow will be run and the results stored in the directory you are in when you run this command (not the directory the script is stored in).
+
+Putting these steps together to run the workflow might look something like:
+
+```
+cd ~
+git clone https://github.com/LightForm-group/UoM-CSF-matflow
+cd scratch
+mkdir matflow_simulations
+cd matflow_simulations
+matflow go ~/UoM-CSF-matflow/workflows/tension_DAMASK_Al.yml
+```
+
+## Managing the queue
+If it works successfully, MatFlow should process the job in a matter of seconds. MatFlow only schedules the work to be done; it doesn't do it directly. Jobs are added to the queue on the CSF and run sequentially. You can see the status of any queued or running jobs using the `qstat` command.
+
+Once the job starts, the output will go into a directory labelled with the workflow name and the date. If you submit a job and later want to cancel it, you can use the command `matflow kill /path/to/workflow/directory`.
+
+## Looking at the output
+The sample job `tension_DAMASK_Al.yml` should take 5 to 10 minutes to run. After it is complete, you can see the results in the generated output directory.
+- A copy of the original submission script is placed in the output directory for reference, to show which commands were run.
+- There is one folder for each task in the workflow; these contain files generated as part of that task and the console output from running it.
+- The output folder contains the log files for the job submission scripts.
+- The main output is then stored in the `workflow.hdf5` file.
+
+## Understanding the output
+The output of the MatFlow run is stored in the HDF5 format. This is a compressed, hierarchical binary format. For a quick preview of the contents, the program [hdfview](https://www.hdfgroup.org/downloads/hdfview/) can be used. However, for most purposes it is likely you will want to use a Python script to parse the results (a minimal starting point is sketched at the end of this page).
+
+## Using Dropbox synchronisation
+MatFlow provides the option of synchronising the completed workflow to a Dropbox directory. In order to do this you must first tell MatFlow about Dropbox by adding some lines to your matflow config. Instructions to do this can be found here: . You need to add a folder to the `path: ` key that already exists in your Dropbox. A good idea would be to create a dedicated folder in your Dropbox named something like `matflow-outputs` and use this.
+
+After this you must initiate a connection between MatFlow and your Dropbox account. You can do this using the command `matflow cloud-connect --provider Dropbox`. MatFlow will prompt you to follow a link and authenticate the app on the Dropbox website. It is recommended that you attach MatFlow to your university Dropbox account, as this is likely to have more storage space than your personal account.
+
+After authentication you can enable Dropbox archiving by adding the key `archive: dropbox` to your workflows. In the case of the test workflow [tension_DAMASK_Al.yml](https://github.com/LightForm-group/UoM-CSF-matflow/blob/master/workflows/tension_DAMASK_Al.yml), you can see that the `archive: dropbox` key is commented out. You can activate it by removing the `#` symbol and space before the `archive` key. If you run the simulation again, you should see that the workflow result is synchronised to your Dropbox after completion.
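+
+As a minimal starting point for such a script (assuming only the `h5py` package, installed with `pip install h5py`), the following lists every group and dataset in the output file without assuming its layout:
+
+```python
+import h5py
+
+# Print the path of every group and dataset in the workflow output
+with h5py.File("workflow.hdf5", "r") as f:
+    f.visit(print)
+```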
diff --git a/collections/_software_and_simulation/matflow_post-processing.md b/collections/_software_and_simulation/matflow_post-processing.md
new file mode 100644
index 0000000..b422ffa
--- /dev/null
+++ b/collections/_software_and_simulation/matflow_post-processing.md
@@ -0,0 +1,29 @@
+---
+title: Post-processing a Matflow Workflow
+author: Guy Bowker
+toc: true
+tags:
+  - python
+  - jupyter
+  - matflow-old
+  - damask
+published: true
+subcollection: MatFlow old
+---
+
+## Exploring matflow workflow metadata and results
+Matflow outputs all metadata, volume element dimensions, orientation data, single crystal parameters and simulation results in an HDF5 file named `workflow.hdf5`. This file can be explored in two ways.
+
+Firstly, using the HDFView program, which can be downloaded [here](https://www.hdfgroup.org/downloads/hdfview/#download).
+Right-click the `workflow.hdf5` file in your file explorer and select `open with HDFView`.
+In the dropdown select `element_data`. Simulation results can be found under `simulate_volume_element_loading`.
+
+Secondly, using Python. Open a Jupyter notebook and define the filepath to the directory containing the `workflow.hdf5` file as a string.
+Import the matflow library and use `load_workflow` to extract the `workflow.hdf5` file's data as a Python dictionary:
+```python
+from matflow import load_workflow
+workflow_dir = "some_workflow_2023-02-07-153700/"
+workflow = load_workflow(workflow_dir)
+ve_response = workflow.tasks.simulate_volume_element_loading.elements[0].outputs.volume_element_response
+```
+You can then explore and save aspects of the metadata and results to variables in Python, as shown for `ve_response`.
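+
+Since the loaded data behaves like a nested dictionary (as described above), a quick way to see what the response contains is simply:
+
+```python
+# List the top-level fields of the volume element response
+for key in ve_response:
+    print(key)
+```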
diff --git a/collections/_software_and_simulation/matflow_presentations.md b/collections/_software_and_simulation/matflow_presentations.md
new file mode 100644
index 0000000..084e8ab
--- /dev/null
+++ b/collections/_software_and_simulation/matflow_presentations.md
@@ -0,0 +1,23 @@
+---
+title: MatFlow presentations
+author: Adam Plowman
+toc: true
+tags:
+  - matflow-old
+published: true
+subcollection: MatFlow old
+---
+
+Click to download a PowerPoint presentation on MatFlow:
+
+- [LightForm Research showcase (October 2020)][1]
+- [MatFlow Introduction (March 2021)][2]
+- [Using MTEX for crystal plasticity with MatFlow - MTEX workshop (March 2021)][3]
+- [MatFlow workflow changes for DAMASK v3a3 (June 2021)][4]
+- [MatFlow Troubleshooting (December 2021)][5]
+
+[1]:{{ site.url | append: site.baseurl }}/_includes/ppt_templates/LF_research_showcase_Oct_2020.pptx
+[2]:{{ site.url | append: site.baseurl }}/_includes/ppt_templates/2021.03.05_matflow_intro.pptx
+[3]:{{ site.url | append: site.baseurl }}/_includes/ppt_templates/MTEX_crystal_plasticity_March_2021.pptx
+[4]:{{ site.url | append: site.baseurl }}/_includes/ppt_templates/DAMASK_v3a3_workflow_changes.pdf
+[5]:{{ site.url | append: site.baseurl }}/_includes/ppt_templates/2021.12.01_some_matflow_problems.pptx
diff --git a/collections/_software_and_simulation/matrix_multiplication.md b/collections/_software_and_simulation/matrix_multiplication.md
index 2acbeb1..59dedcc 100644
--- a/collections/_software_and_simulation/matrix_multiplication.md
+++ b/collections/_software_and_simulation/matrix_multiplication.md
@@ -7,7 +7,7 @@ tags:
 toc: true
 published: true
 layout: jupyter_notebook
-
+subcollection: Python
 ---
 
 {% include jupyter_notebooks/matrix_multiplication/matrix_multiplication.md %}
diff --git a/collections/_software_and_simulation/mtex.md b/collections/_software_and_simulation/mtex.md
deleted file mode 100644
index 0085392..0000000
--- a/collections/_software_and_simulation/mtex.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: Common workflows in MTEX
-author: Adam Plowman, Sumeet Mishra
----
-
-MTEX is a Matlab tool for texture analysis. Here are some common workflows in MTEX.
-
-## Generating representative textures
-
-```matlab
-clear;
-close;
-cs = crystalSymmetry('cubic');
-ss = specimenSymmetry('orthorhombic');
-
-cube = orientation('Euler',[0,0,0]*degree,cs,ss);
-odf_Cube = unimodalODF(cube,'halfwidth',5*degree);
-ori1 = calcOrientations(odf_Cube,500);plotPDF(odf_Cube,[Miller(1,1,1,cs)],'antipodal','complete');
-```
diff --git a/collections/_software_and_simulation/mtex_common_workflows.md b/collections/_software_and_simulation/mtex_common_workflows.md
new file mode 100644
index 0000000..bd7ae06
--- /dev/null
+++ b/collections/_software_and_simulation/mtex_common_workflows.md
@@ -0,0 +1,74 @@
+---
+title: Common workflows in MTEX
+author: Adam Plowman, Sumeet Mishra, Christopher Daniel, Nicholas Byres
+subcollection: MTEX
+---
+
+MTEX is a Matlab tool for texture analysis. Here are some common workflows in MTEX.
+
+## Crystal orientations and spatial data
+
+Each EBSD data point has various data associated with it. The spatial data is represented by 'X,Y' coordinates in Cartesian space. The crystal orientation data is represented by the three Euler angles. MTEX can handle these data independently.
+
+It is easy to get confused with the crystal and sample orientations in MTEX. It can be useful to first compare the results with Aztec Channel 5 or Aztec Crystal, to check they agree.
+
+## Plotting convention
+
+The usual convention for a rolled sample is that X = RD, Y = TD, Z = ND, and the typical convention for pole figures is to have RD (X axis) NORTH and TD (Y axis) EAST, which can be done by setting ND (Z axis) into the plane.
+
+```matlab
+% plotting convention
+setMTEXpref('xAxisDirection','north');
+setMTEXpref('zAxisDirection','intoPlane');
+```
+
+Or, alternatively, you can set the Y-axis.
+
+```matlab
+setMTEXpref('yAxisDirection','east')
+```
+
+Note, this plotting convention affects both the plotting of pole figures AND maps.
+
+## Rotating data
+
+There are different ways to apply rotations to the orientation and spatial data. The most common method is explained here.
+
+A default sample acquisition surface has X (RD) horizontal, Y (TD) vertical and Z (ND) out-of-plane. However, in our example case, when the EBSD data was collected in the SEM microscope, the sample was aligned with RD out-of-plane, TD horizontal and ND vertical. So, we apply a phi_1 = 90, PHI = 90 and phi_2 = 0 rotation to realign the Euler angle reference frame for the orientation data. We apply the command `keepXY` to keep the map coordinates as they are and to only change the Euler angle reference frame.
+
+However, due to our plotting convention (X is north), we find our spatial data (the map) also needs to be rotated; we can do that too. We apply the command `keepEuler` to preserve the Euler angles.
+
+By applying these two rotations, we can be sure that the orientations shown in our pole figures match with the ODF, and that our map is orientated as we would like to view it.
+
+```matlab
+rot = rotation('Euler', 90*degree, 90*degree, 0*degree);
+ebsd = rotate(ebsd,rot,'keepXY'); % rotate the orientation data
+ebsd = rotate(ebsd,90*degree,'keepEuler'); % rotate the spatial data
+```
+
+## Defining directions
+
+Once you have aligned the orientation data with the correct specimen coordinate system, it can be useful to define the directions in MTEX using the following:
+
+```matlab
+% as per convention
+RD = vector3d.X;
+TD = vector3d.Y;
+ND = vector3d.Z;
+```
+
+You can then use these definitions to define model orientations, for example, without confusing vector directions etc.
+
+## Generating representative textures
+
+There are a number of ways to generate model textures.
+
+```matlab
+cs = crystalSymmetry('cubic');
+ss = specimenSymmetry('orthorhombic');
+
+cube = orientation('Euler',[0,0,0]*degree,cs,ss);
+odf_cube = unimodalODF(cube,'halfwidth',5*degree);
+ori = calcOrientations(odf_cube,500);
+plotPDF(odf_cube,[Miller(1,1,1,cs)],'antipodal','complete');
+```
diff --git a/collections/_software_and_simulation/mtex_compilation.md b/collections/_software_and_simulation/mtex_compilation.md
new file mode 100644
index 0000000..34bee6c
--- /dev/null
+++ b/collections/_software_and_simulation/mtex_compilation.md
@@ -0,0 +1,61 @@
+---
+title: Compiling binary files for MTEX
+author: Peter Crowther
+subcollection: MTEX
+---
+
+MTEX relies on some other programs for doing maths. These programs are included with MTEX for several common systems, including Windows and MacOS. However, it may be the case that these programs are not valid for your system.
+
+If this is the case, you will see a message about an "Invalid MEX-file" when you first load MTEX. This means that the supporting programs need to be compiled from source for your system.
+
+This guide is specific to the iCSF (incline) HPC machine at the University of Manchester, though the procedure will be broadly the same for all UNIX systems. The URLs and package versions were valid in August 2021, but you should check and see if there have been updates since then.
+
+### Go to home directory and load important modules
+```bash
+cd ~
+module add apps/binapps/matlab/R2019a
+module add compilers/gcc/8.3.0
+```
+
+### Get mtex
+```bash
+wget https://github.com/mtex-toolbox/mtex/releases/download/mtex-5.7.0/mtex-5.7.0.zip
+unzip -q mtex-5.7.0.zip
+rm mtex-5.7.0.zip
+cd mtex-5.7.0
+matlab
+# Matlab will try and initialise mtex but fail to do so because it can't load the NFFT library.
+exit
+cd ~
+```
+
+### Get, compile and install FFTW
+```bash
+wget http://www.fftw.org/fftw-3.3.9.tar.gz
+tar -xf fftw-3.3.9.tar.gz
+rm fftw-3.3.9.tar.gz
+cd fftw-3.3.9
+./configure --prefix=${HOME}/fftw --enable-openmp --enable-threads --enable-shared
+make
+make install
+cd ~
+```
+
+### Get, compile and install NFFT
+```bash
+wget https://www-user.tu-chemnitz.de/~potts/nfft/download/nfft-3.5.2.tar.gz
+tar -xf nfft-3.5.2.tar.gz
+rm nfft-3.5.2.tar.gz
+cd nfft-3.5.2
+./configure --enable-nfsoft --enable-nfsft --enable-openmp --enable-portable-binary --with-matlab=/opt/gridware/apps/binapps/matlab1/R2019a --prefix=${HOME}/nfft --with-fftw3=${HOME}/fftw
+make
+```
+
+### Copy compiled mex files to mtex folder
+```bash
+cp matlab/nfsoft/nfsoftmex.mex* ~/mtex-5.7.0/extern/nfft_openMP/
+cp matlab/nfsft/nfsftmex.mex* ~/mtex-5.7.0/extern/nfft_openMP/
+```
+
+If you want, you can now delete the nfft and fftw folders from your home directory, as the compiled .mex files are all that are needed.
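+
+To confirm that MATLAB now picks up the new binaries, you can re-initialise MTEX; a minimal check (assuming MTEX 5.7.0 in your home directory, as above) is:
+
+```matlab
+% From the MATLAB prompt, after copying the mex files in:
+cd ~/mtex-5.7.0
+startup_mtex   % should now initialise without the "Invalid MEX-file" message
+```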
\ No newline at end of file
diff --git a/collections/_software_and_simulation/mtex_intro.md b/collections/_software_and_simulation/mtex_intro.md
index f1b2613..ad819b3 100644
--- a/collections/_software_and_simulation/mtex_intro.md
+++ b/collections/_software_and_simulation/mtex_intro.md
@@ -6,11 +6,12 @@ tags:
   - MATLAB
   - EBSD
 published: true
+subcollection: MTEX
 ---
 
 # Introduction to MTEX
 
-This guide is designed to be a basic introduction to the MTEX toolbox - a series of MATLAB scripts which can be uses for the analysis of EBSD data.
+This guide is designed to be a basic introduction to the MTEX toolbox - a series of MATLAB scripts which can be used for the analysis of EBSD data.
 Here is a link to a useful video that will guide you through the setup process and help you get started plotting your data: https://www.dropbox.com/s/vj59i0vy67lc68y/28May2020.mp4?dl=0
 
 ## Installing MTEX
diff --git a/collections/_software_and_simulation/mtex_misorientation_crystal.md b/collections/_software_and_simulation/mtex_misorientation_crystal.md
new file mode 100644
index 0000000..ee96469
--- /dev/null
+++ b/collections/_software_and_simulation/mtex_misorientation_crystal.md
@@ -0,0 +1,18 @@
+---
+title: Plotting misorientation in the crystal reference frame
+author: Mia Maric, Christopher Daniel
+tags:
+  - MTEX
+  - MATLAB
+  - EBSD
+published: true
+subcollection: MTEX
+---
+
+# Plotting misorientation in the crystal reference frame
+
+To visualise misorientation, we can plot it in the crystal reference frame, which allows us to determine the active slip systems within each grain.
+
+![](/wiki/assets/images/posts/Slip System IPF.png)
+
+The notes in [Ben Britton's tutorial](https://www-user.tu-chemnitz.de/~rahi/download/mtexWorkshop2019/exerciseBritton/MTEX_Demo_Britton2019.pdf) contain the code to do this, and the technique itself is explained in this [paper](https://arxiv.org/abs/1803.00236).
diff --git a/collections/_software_and_simulation/mtex_nice_figures.md b/collections/_software_and_simulation/mtex_nice_figures.md
new file mode 100644
index 0000000..d4e94ab
--- /dev/null
+++ b/collections/_software_and_simulation/mtex_nice_figures.md
@@ -0,0 +1,94 @@
+---
+title: Producing nice looking figures
+author: Christopher Daniel
+tags:
+  - MTEX
+  - MATLAB
+  - EBSD
+published: true
+subcollection: MTEX
+---
+
+# Producing nice looking figures
+
+To produce nice-looking figures in MTEX, the default settings need to be changed. Unfortunately, this needs to be done each time you download a new version of MTEX, and the lines where these changes are located also change with each version. The current changes apply to version 5.2.8.
+
+## Changes to mtex_settings.m
+
+**Change the fontsize**
+
+The default MTEX fontsize of the pole figures is too small for journal figures. The fontsize is a function of the figure size.
+
+```matlab
+fontSize = round(15 * ppi/100);
+```
+
+You can either increase this value, or we suggest commenting this line out (% means comment in MATLAB) and then setting the default font size in the line below.
+
+```matlab
+fontSize = 35;
+```
+
+**Remove the default annotations**
+
+The default annotations in MTEX cover the pole figure data. We can remove these by uncommenting this line.
+
+```matlab
+pfAnnotations = @(varargin) [];
+```
+
+and then reapplying the annotations.
+
+*Note, this is found in the section.*
+
+```matlab
+%% default global plotting options
+```
+
+**Change the spacing between plots**
+
+To fit in the new annotations, we need to change the spacing of the pole figures. The value 30 works well to leave room between the individual pole figures. The value 50 is needed to leave enough room at the edge of the plot.
+
+```matlab
+setMTEXpref('outerPlotSpacing',50);
+setMTEXpref('innerPlotSpacing',30);
+```
+
+**Set the default colour map**
+
+The default colour map for MTEX is rainbow. This is not ideal, since it is not perceptually uniform, which causes problems when printing figures in black and white, or for people who are colour blind. A much better option is a perceptually uniform colour map such as viridis or MATLAB's 'parula', but others are available if you're looking for something different.
+
+```matlab
+setMTEXpref('defaultColorMap','parula');
+```
+
+*Note, this is found in the section.*
+
+```matlab
+%% Default ColorMap
+```
+
+## Changes to phi2sections.m
+
+The `phi2sections.m` file is located in the `ODFSections` folder, in the `plotting` folder, within the MTEX package.
+
+To remove the labels that cover the ODF contours, and to change the size of the values and fonts on the ODF slices, lines 110-113 need deleting.
+
+```matlab
+% plot data
+h = plot(v,data{:},oS.sR,'TR',[int2str(oS.phi2(sec)./degree),'^\circ'],...
+  'parent',ax,'projection','plain','xAxisDirection','east',...
+  'xlabel','$\varphi_1$','ylabel','$\Phi$','dynamicMarkerSize',...
+  'zAxisDirection','intoPlane',varargin{:},'doNotDraw');
+```
+
+And the following should be added.
+
+```matlab
+% plot data
+h = plot(v,data{:},oS.sR,...
+  'parent',ax,'projection','plain','xAxisDirection','east',...
+  'xlabel','$\varphi_1$','ylabel','$\Phi$','dynamicMarkerSize',...
+  'zAxisDirection','intoPlane',varargin{:},'doNotDraw');
+set(ax,'FontSize',35,'LineWidth',1); % additional figure settings
+```
diff --git a/collections/_software_and_simulation/mtex_separating_orientation.md b/collections/_software_and_simulation/mtex_separating_orientation.md
new file mode 100644
index 0000000..5422a2d
--- /dev/null
+++ b/collections/_software_and_simulation/mtex_separating_orientation.md
@@ -0,0 +1,89 @@
+---
+title: Separating data by orientation
+author: Mia Maric, Christopher Daniel, Nicholas Byres
+tags:
+  - MTEX
+  - MATLAB
+  - EBSD
+published: true
+subcollection: MTEX
+---
+
+# Separating data by orientation - the workflow
+
+Data can be separated with respect to its orientation, either for a texture component or for a fibre. To do this it is important to know the specimen directions for your data set, i.e. RD, TD and ND for a rolled material. Data can be separated point by point using the raw data, or by grain. The workflow is as follows:
+
+1. Import your data (you can do this using the import GUI) and rotate/define the specimen directions.
+
+2. Calculate a list of grains and clean the data (if separating by grain).
+
+3. Define the texture orientations (by component/fibre) to be separated.
+
+4. Separate the data.
+
+# Defining the texture orientations
+
+Orientation data can be separated either as a component, that is, a single orientation where the crystal plane and a specific direction within that plane are defined, or as a fibre, where only the orientation of the plane normal is defined. A complete fibre includes all the orientations that share the common plane alignment, whilst a partial fibre is all the orientations that exist between two pre-defined components, i.e. a fibre in orientation space that connects the two. Examples of these are:
+
+1. The rotated cube component - Miller indices {001}<110> and Euler angles phi1=45, PHI=0 and phi2=0.
+
+2. A truncated fibre between the {113}<110> and {111}<110> orientations, and
+3. The gamma fibre {111} || ND.
+
+There are various ways to define these orientations using different commands in MTEX. They are:
+
+- `orientation.byMiller` - defines a component by Miller indices
+- `orientation.byEuler` - defines a component by Euler angles
+- `orientation.map` - defines a component or fibre by mapping/projecting the plane normal onto a vector direction
+- `fibre` - defines the full fibre by the plane and specimen direction
+
+The syntax for using each of these commands is as follows:
+
+```matlab
+% components
+rotated_cube = orientation.byMiller([0 0 1],[1 1 0],crystal_symmetry);
+% or
+rotated_cube = orientation.byEuler(45*degree,0*degree,0*degree,crystal_symmetry);
+% or
+rotated_cube = orientation.map(Miller(0, 0, 1,crystal_symmetry), ND, Miller(1, 1, 0,crystal_symmetry), RD);
+
+% a partial fibre
+ori1_113_110 = orientation.map(Miller(1, -1, 3,ebsd_Cub), ND, Miller(1, 1, 0,ebsd_Cub), RD);
+ori2_111_110 = orientation.map(Miller(1, -1, 1,ebsd_Cub), ND, Miller(1, 1, 0,ebsd_Cub), RD);
+f = fibre(ori1_113_110,ori2_111_110);
+
+% a full fibre
+Gamma_Fibre = fibre(Miller(1,1,1,crystal_symmetry),ND);
+
+% note - RD, TD and ND in the previous syntax must be predefined, e.g.
+RD = vector3d.X;
+ND = vector3d.Y;
+TD = vector3d.Z;
+```
+
+# Separating data by orientation
+Once the orientation has been defined in the appropriate way, the data can be separated. MTEX achieves this by calculating the angle between the orientation of interest and the orientation of the data points passed to the command. If this lies below a predefined tolerance threshold, the data is allocated to the variable. The easiest way to do this is using the command `findByOrientation`, with the following example syntax:
+
+```matlab
+ebsd_rotated_cube = ebsd('Titanium cubic').findByOrientation(rotated_cube,20*degree);
+```
+
+# Calculating volume fraction of orientations
+
+The volume fraction of your component/fibre can be calculated from the number of ebsd points within the separated variable. To find the number of ebsd points within a variable, use the command `numel`:
+
+```matlab
+vol_rotated_cube = numel(ebsd_rotated_cube.x);
+```
+
+(Dividing this count by the total number of indexed points in the map then gives the fraction.)
+
+# Plotting KAM and orientation on IPF maps
diff --git a/collections/_software_and_simulation/mtex_slicing_maps.md b/collections/_software_and_simulation/mtex_slicing_maps.md
new file mode 100644
index 0000000..4e0f63a
--- /dev/null
+++ b/collections/_software_and_simulation/mtex_slicing_maps.md
@@ -0,0 +1,118 @@
+---
+title: Slicing maps and plotting texture variation
+author: Christopher Daniel
+tags:
+  - MTEX
+  - MATLAB
+  - EBSD
+published: true
+subcollection: MTEX
+---
+
+# Slicing maps and plotting texture variation
+
+## Selecting a sub-region
+
+An initial plot of the EBSD map can help to determine the x and y points which define the sub-region. The `inpolygon` function is used to define the points that lie within the region.
+
+```matlab
+x_top = 0;
+y_left = 0;
+x_bottom = 100;
+y_right = 100;
+
+x_width = x_bottom-x_top;
+y_width = y_right-y_left;
+
+region = [x_top, y_left, x_width, y_width];
+condition = inpolygon(ebsd,region);
+ebsd_cropped = ebsd(condition);
+ori_cropped = ebsd_cropped('Ti-Hex').orientations;
+```
+
+The orientations from the cropped map can then be used to plot pole figures, ODFs, etc.
+
+The map can also be exported to ctf using:
+
+```matlab
+ebsd_cropped.export('myFile.ctf')
+```
+
+## Slicing a map into strips
+
+To slice a map into strips, the x,y positions need to be defined and iterated through, with the maps sequentially saved.
+
+An example script used to do this is given below:
+
+```matlab
+num_strips = 10; % number of strips to cut the map into (resolution)
+
+% define the size of the EBSD map
+% note: x_origin and y_origin (the map origin coordinates) must be defined beforehand
+ebsd_grid = ebsd.gridify;
+ebsd_shape = size(ebsd_grid.id);
+original_y = ebsd_shape(1);
+original_x = ebsd_shape(2);
+stepSize = ebsd_grid.dx;
+
+x_min = (sqrt(x_origin * x_origin)/stepSize);
+x_max = original_x + (sqrt(x_origin * x_origin)/stepSize);
+x_length = x_max - x_min;
+
+y_min = (sqrt(y_origin * y_origin)/stepSize);
+y_max = original_y + (sqrt(y_origin * y_origin)/stepSize);
+y_length = y_max - y_min;
+
+% used if splitting into strips along y
+y_width = floor(y_length / num_strips); % round to nearest integer
+y_axis = (1:num_strips);
+
+% used if splitting into strips along x
+x_width = floor(x_length / num_strips); % round to nearest integer
+x_axis = (1:num_strips);
+
+cutmap = containers.Map('KeyType', 'int32', 'ValueType', 'any'); % creates an empty Map object
+
+for strip_index = 0:num_strips-1
+    % separate the map section
+
+    % set out the coordinates for the edge of the region
+    % note, region is defined as x,y origin and an x,y width which is added onto the origin
+
+    % if splitting into strips along y (breaking up y)
+    % y_min_strip = strip_index * y_width;
+    % region = [x_min*stepSize, y_min_strip*stepSize, x_length*stepSize, y_width*stepSize];
+
+    % if splitting into strips along y (breaking up y) and x is negative
+    % y_min_strip = strip_index * y_width;
+    % region = [-x_min*stepSize, y_min_strip*stepSize, -x_length*stepSize, y_width*stepSize];
+
+    % if splitting into strips along x (breaking up x)
+    % x_min_strip = strip_index * x_width;
+    % region = [x_min_strip*stepSize, y_min*stepSize, x_width*stepSize, y_length*stepSize];
+
+    % if splitting into strips along x (breaking up x) and x is negative
+    x_min_strip = strip_index * x_width + x_min;
+    region = [-x_min_strip*stepSize, y_min*stepSize, -x_width*stepSize, y_length*stepSize];
+
+    % Cut the EBSD map
+    condition = inpolygon(ebsd,region); % points located within region
+    ebsd_strip = ebsd(condition); % create ebsd map for region
+    cutmap(strip_index) = ebsd_strip; % store strip in Map object with index
+    ebsd_cutmap = cutmap(strip_index); % read out ebsd_cutmap from the Map object
+
+    % plot the IPF map to check the slices
+    % note: IPF_map_plot is a user-defined plotting function; analysis_path,
+    % sample_name, phase and visible must also be defined beforehand
+    outputFileName = strcat(analysis_path,sample_name,'_IPF_map_strip_',num2str(strip_index));
+    IPF_map_plot(phase, ebsd_cutmap, outputFileName, visible)
+
+end
+```
+
+## Example code
+
+An example analysis for slicing EBSD maps into strips and plotting the texture variation is available [here on GitHub](https://github.com/LightForm-group/MTEX-texture-slice-analysis).
+
+This example shows the texture variation across a Ti compression sample, outputting maps, pole figures and ODFs for the different strips, as well as plotting the variation of different texture strength values (as shown in the figure below). The code allows the user to choose any number of strips and select the orientation of those strips. This code includes fixes for any negative x or y values that might result due to rotation of the data.
+
+![](/wiki/assets/images/texture_variation_FE_results.png)
diff --git a/collections/_software_and_simulation/mtex_user_group.md b/collections/_software_and_simulation/mtex_user_group.md
new file mode 100644
index 0000000..54160ef
--- /dev/null
+++ b/collections/_software_and_simulation/mtex_user_group.md
@@ -0,0 +1,67 @@
+---
+title: MTEX User Group
+author: Christopher Daniel
+tags:
+  - MTEX
+  - MATLAB
+  - EBSD
+published: true
+subcollection: MTEX
+---
+
+# MTEX User Group
+
+## Aim
+
+The aim of the MTEX User Group is to support anyone at Manchester who uses or will be using MTEX to analyse their EBSD, X-ray, Synchrotron or Neutron Diffraction data. We aim to do this by:
+
+- **Supporting new users with more documentation and example code.** The documentation will be collated on this LightForm Wiki page. Example code will be included in the documentation, either written within the page, or as links to a GitHub repository.
+- **Keeping up-to-date with the latest methods for complex analyses.** Every month we will have a presentation from one of our users about how they are using MTEX to analyse their data, or to present a particular MTEX concept, to provide the group with greater insight into the MTEX features. We also aim to foster collaboration within the group, by discussing what projects our users are currently working on.
+- **Helping troubleshoot users' problems.** There will be support provided during the meeting, to see if our user group can offer any solutions to the problem. We also have a #mtex Slack channel where users can post their issues for input from the entire group.
+
+## Useful Links
+
+**MTEX User Group Support**
+
+- [LightForm Wiki](https://lightform-group.github.io/wiki/software_and_simulation/) : We will record documentation about particular analyses that have been done at Manchester on the Wiki. This will be continually updated. Example code will be included in the documentation, either written within the page, or as links to a GitHub repository. We hope that this will provide a resource for new users to quickly get up to speed with MTEX and to build on what has already been done at Manchester. We also encourage our users to contribute their novel techniques to the documentation on the main MTEX site.
+- [Presentations and Meeting Minutes](https://www.dropbox.com/sh/rslr5wpnjo8roqc/AADr1_nmn2UcN_7diF8Ss38Ga?dl=0) : We will keep a record of any previous meeting's presentations and minutes on Dropbox.
+- [#mtex Slack Channel](https://join.slack.com/t/ebsdmanchester/shared_invite/zt-dlb2m3b9-QkfqAYpERzV15hRoIFUjSA) : Our #mtex Slack Channel is hosted by the EBSD User Group; please click on the link to join. Please post any questions or updates related to MTEX here and our users will be quick to respond to help out.
+
+**MTEX Support**
+
+There is also already great support and documentation from Ralf and the wider MTEX community.
+
+- [MTEX Website](https://mtex-toolbox.github.io) : This is the best resource to learn about MTEX; there is a great amount of documentation and examples available, and the site is being continually updated.
+- [MTEX Forum](https://github.com/mtex-toolbox/mtex/discussions) : Please sign up to the MTEX GitHub forum, where you can view a record of past issues from the wider MTEX community. You can also opt to receive email updates containing a summary of users' issues and Ralf's solutions. Note, this was recently moved from the previous [Google Forum](https://groups.google.com/forum/?fromgroups=#!forum/mtexmail).
+- [MTEX Community Scripts](https://gist.github.com/search?utf8=✓&q=%23mtexScript) : A collection of MTEX scripts from the wider community is now being shared and collated on GitHub Gist. This is a good place to check out more complex analysis code.
+
+## Meetings
+
+The MTEX User Group meeting will be held once a month at 10.00am - 11.00am, with the aim to share our MTEX analyses in a relaxed and informal environment. The meetings consist of:
+
+1. A **presentation** from one of our users that will introduce a new analysis, concept, or way of working with MTEX. The presentation can be in any format, such as powerpoint or even working through some example code. *Please let us know if you would like to present at the next meeting.*
+2. An **overview of current projects** will share what everyone is currently working on and provide an opportunity for users to help each other out and collaborate.
+3. A **troubleshooting** session will give an opportunity to discuss any problems, allowing other users to suggest their solutions, or an 'expert' can nominate themselves to take a look after the meeting.
+
+The meeting calendar can be viewed here: https://lightform-group.github.io/wiki/
+
+*Meeting Record*
+
+22-07-2020 : 'MTEX User Group Introduction' by Christopher Daniel and 'Using MTEX to Visualise Crystal Orientations' by Nick Byres
+
+19-08-2020 : 'Slicing EBSD Maps and Separating Different Texture Components' by Nick Byres
+
+16-09-2020 : 'Separating Data based on Grain Orientation' and 'Understanding Dominant Slip Mechanisms using MTEX' by Mia Maric
+
+14-10-2020 : 'Novel Ways to Present and Analyse Data in MTEX with Additional Plots' by Sam Armson
+
+11-11-2020 : TBC
+
+## Organisers
+
+The current organisers of the MTEX User Group are:
+[Christopher Daniel](https://lightform.org.uk/people/dr-christopher-stuart-daniel) - christopher.daniel@manchester.ac.uk,
+[Wayne Heatman](https://lightform.org.uk/people/wayne-heatman) - wayne.heatman@manchester.ac.uk
+and [Nick Byres](https://lightform.org.uk/people/nicholas-byres) - nicholas.byres@postgrad.manchester.ac.uk.
+
+*Please feel free to contact us if you have something interesting to present at our next meeting, would like to be part of the organising team, or if you need help contributing to this Wiki.*
diff --git a/collections/_software_and_simulation/new_damask.md b/collections/_software_and_simulation/new_damask.md
index b78f51b..dcd37bd 100644
--- a/collections/_software_and_simulation/new_damask.md
+++ b/collections/_software_and_simulation/new_damask.md
@@ -7,85 +7,93 @@ tags:
   - damask
 toc: true
 published: true
+subcollection: DAMASK
 ---
 
-The HDF5-compatible version of DAMASK (v2.0.3) should now be working on the CSF. We have installed it in the group RDS space (`/mnt/eps01-rds/jf01-home01/shared/DAMASK`).
+The HDF5-compatible version of DAMASK (v3.0.0) should now be working on the CSF. We have installed it in the group RDS space (`/mnt/eps01-rds/jf01-home01/shared/DAMASK-master`).
 
 More information about DAMASK, including detail about input and output files etc. may be found at [damask.mpie.de](https://damask.mpie.de).
 
-## Usage
+Not familiar with Manchester University's Computational Shared Facility 3 (CSF3)? You can find more info on how to navigate directories and run computational jobs here: [ri.itservices.manchester.ac.uk/csf3/](http://ri.itservices.manchester.ac.uk/csf3/)
 
-To make the `DAMASK_spectral` executable available, run the following command on the CSF:
+## Running a DAMASK simulation from the command line
+
+To make the `DAMASK_grid` (v3.0) executable available (it has the same function as `DAMASK_spectral` in v2), run the following command on the CSF:
 
 ```bash
-source /mnt/eps01-rds/jf01-home01/shared/load_DAMASK.sh
+source /mnt/eps01-rds/jf01-home01/shared/load_DAMASK-master.sh
 ```
 
-To make the `DAMASK_spectral` executable available in addition to the pre-/post-processing commands, run the following command on the CSF:
+To make the `DAMASK_grid` executable available in addition to the pre-/post-processing commands, run the following command on the CSF:
 
 ```bash
-source /mnt/eps01-rds/jf01-home01/shared/load_DAMASK_processing.sh
+source /mnt/eps01-rds/jf01-home01/shared/load_DAMASK-master_processing.sh
 ```
 
 ### Example simulation jobscript
 
 #### Serial job
 
-Place the following jobscript into a directory in which there are `DAMASK_spectral` input files (a geometry file, a load file and a `material.config` file) and submit it with `qsub jobscript_name`.
+Place the following jobscript into a directory containing `DAMASK_grid` input files (`.geom`, `.load`, and `material.yaml`) and submit it with `qsub jobscript_name.sh`. Further customisation of the solver may be added in a `numerics.yaml` file if necessary.
 
 ```sh
 #!/bin/bash --login
 
-source /mnt/eps01-rds/jf01-home01/shared/load_DAMASK.sh
+source /mnt/eps01-rds/jf01-home01/shared/load_DAMASK-master.sh
 
 #$ -N damask_run   # Name of the job
 #$ -cwd            # Submit in the current working directory
 
-DAMASK_spectral -g geom_file_name -l load_case_file_name
+DAMASK_grid -g geom_file_name -l load_case_file_name
 ```
 
 #### Parallel job
 
-Place the following jobscript into a directory in which there are `DAMASK_spectral` input files (a geometry file, a load file and a `material.config` file) and submit it with `qsub jobscript_name`.
+Place the following jobscript into a directory containing `DAMASK_grid` input files (`.geom`, `.load`, and `material.yaml`) and submit it with `qsub jobscript_name`.
 
 ```sh
 #!/bin/bash --login
 
-source /mnt/eps01-rds/jf01-home01/shared/load_DAMASK.sh
+source /mnt/eps01-rds/jf01-home01/shared/load_DAMASK-master.sh
 
 #$ -N damask_run       # Name of the job
 #$ -cwd                # Submit in the current working directory
 #$ -pe smp.pe 4        # Use a parallel environment with four cores
 
-mpirun -n $NSLOTS DAMASK_spectral -g geom_file_name -l load_case_file_name
+mpirun -n $NSLOTS DAMASK_grid -g geom_file_name -l load_case_file_name
 ```
+Running a job on the CSF will create two files in the working directory it is run within: a `jobname.o0000000` file, which contains generic job output, and a `jobname.e0000000` file, which contains details of any errors that occurred during the run.
 
 ### Example post-processing
 
-Using the HDF5 file output, we can do processing within Python or a Jupyter notebook instance.
-
-Firstly, load the processing environment on the CSF using (as above):
+DAMASK outputs the following files: `geom_load.C_ref`, `geom_load.hdf5`, and `geom_load.sta`. Using the `.hdf5` file output, we can do processing within Python or a Jupyter notebook instance.
+To calculate some useful values from the outputs and create a visual representation of the results (`.vtr`), first load the processing environment on the CSF using (as above):
 
 ```sh
-source /mnt/eps01-rds/jf01-home01/shared/load_DAMASK_processing.sh
+source /mnt/eps01-rds/jf01-home01/shared/load_DAMASK-master_processing.sh
 ```
-
+First of all, the .hdf5 output may be navigated from the command line using the command `h5ls`, e.g. `h5ls geom_load.hdf5/` will list the increments the job has completed.
 We can then start a Python instance and, assuming our output HDF5 is called `geom_load.hdf5`, load the HDF5 file into Python. In the following example,
-we add the Cauchy stress (which will have a label of `sigma`), and then the von Mises Cauchy stress:
+we add the Cauchy stress, which will have a dataset label of `sigma`, and calculate the von Mises stress from this (the dataset path can be found using `h5ls geom_load.hdf5/inc0/phase/(user defined phase)/generic/`).
+These new datasets are written into the .hdf5 file and may then be exported as a .vtr file that can be opened in ParaView:
 
 ```python
-import damask
+import damask  # Necessary for the add_Cauchy, add_Mises and to_vtk commands.
 
-f = damask.DADF5('geom_load.hdf5')
+f = damask.Result('geom_load.hdf5')
 
-f.add_Cauchy()
-f.add_Mises('sigma')
+f.add_Cauchy()        # Calculate Cauchy stress and write into .hdf5 file
+f.add_Mises('sigma')  # Calculate von Mises stress and write into .hdf5 file
+
+f.to_vtk(labels=["sigma", "sigma_vM"])  # Write defined datasets into .vtr for each increment
 ```
+Be aware that running this processing script writes the calculated values into the .hdf5 file, so upon re-running there may be an error that the defined values have already been written in. *Don't panic!* These calculations will simply be skipped and any newly defined calculations will still be added. It is recommended that the user first backs up their .hdf5 output file before performing post-processing on it. This ensures that, if there are any errors, the original job output file can still be post-processed without having to run the job all over again.
 
-The above must be run on the CSF. If we are processing a large output file, we should write a processing script in Python (like that above), and then submit it as a jobscript. If we name our Python script `processing.py`, a processing jobscript might look like this:
+The above *must* be run on the CSF! If we are processing a large output file, we should write a processing script in Python (like that above), and then submit it as a jobscript. If we name our Python script `processing.py`, a processing jobscript might look like this:
 
 ```sh
 #!/bin/bash --login
-source /mnt/eps01-rds/jf01-home01/shared/load_DAMASK_processing.sh
+source /mnt/eps01-rds/jf01-home01/shared/load_DAMASK-master_processing.sh
 
 #$ -N damask_processing   # Name of the job
 #$ -cwd                   # Submit in the current working directory
@@ -95,6 +103,25 @@ python processing.py
 
 Note that we can also extract data from the HDF5 file without requiring the DAMASK processing environment (e.g. on our local computer). To do this we need the `h5py` Python package installed. This can be installed using `pip install h5py`. See the [documentation](http://docs.h5py.org/en/stable/) for `h5py` for more details concerning loading an HDF5 file.
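+For quickly pulling raw data out on a local machine, a minimal `h5py` sketch might look like the following (the file name is a placeholder, and the group layout varies between DAMASK versions, so print the keys first to confirm the structure of your own file):
+
+```python
+# Minimal sketch: inspect a DAMASK HDF5 output without the DAMASK environment.
+import h5py
+
+with h5py.File('geom_load.hdf5', 'r') as f:
+    print(list(f.keys()))  # top-level groups, e.g. the completed increments
+    f.visit(print)         # walk and print every group/dataset path in the file
+```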
+## Differences between the current version (3.0.0) and the previous installed version (2.0.3)
+
+- `material.config` files have now been replaced by `material.yaml` files. The syntax of a yaml file can be verified using this tool: [yamlvalidator](https://yamlvalidator.com)
+
+### `material.yaml` file
+
+- Orientations must now be defined using quaternions. Each quaternion component should be given to at least 15 decimal places (machine precision), e.g.:
+
+```yaml
+- constituents:
+  - fraction: 1.0
+    orientation: [1.000000000000000, 0.000000000000000, 0.000000000000000, 0.000000000000000]
+    phase: Titanium_alpha
+  homogenization: SX
+```
+
+- The file must now follow .yaml/.yml syntax: keys must finish with a colon `:`, after which the value can be defined, e.g. `c/a: 1.587`.
+- More detail to follow
+
 ## Differences between this version (2.0.3) and the version installed centrally on the CSF (2.0.2)
 
 When running `DAMASK_spectral`, we've found the following differences between this development version and the older version that is installed centrally on the CSF (version `2.0.2`):
diff --git a/collections/_software_and_simulation/openfoam-cp-matflow.md b/collections/_software_and_simulation/openfoam-cp-matflow.md
new file mode 100644
index 0000000..9135e63
--- /dev/null
+++ b/collections/_software_and_simulation/openfoam-cp-matflow.md
@@ -0,0 +1,190 @@
+---
+title: Setting up a Matflow workflow for a CP simulation in the Eulerian_Mechanics OpenFOAM application
+author: Maria Yankova
+toc: true
+tags:
+  - openfoam
+  - matflow
+  - csf3
+  - modelling
+published: true
+subcollection: OpenFOAM
+---
+
+This page goes through getting started with the crystal plasticity code Eulerian_Mechanics, which is an OpenFOAM application, on the CSF, and setting up a Matflow workflow for a simple simulation.
+
+The instructions have been written assuming the user is on CSF3. Please see further instructions on using OpenFOAM on [CSF3](https://ri.itservices.manchester.ac.uk/csf3/software/applications/openfoam/) and [CSF4](https://ri.itservices.manchester.ac.uk/csf4/software/applications/openfoam/). Please note, there are some errors in the current CSF3 instructions after the transition to SLURM.
+
+# 1. Getting started with Eulerian_Mechanics
+## 1.1. Download and compile
+Download the code from the [Slip branch](https://github.com/micmog/Eulerian_Mechanics/tree/Slip) and extract it in your home directory.
+
+Load the relevant OpenFOAM module:
+
+```
+module purge
+module load apps/gcc/openfoam/10
+source $foamDotFile
+```
+Navigate to `solver` and compile via:
+
+```
+cd Eulerian_Mechanics-Slip/solver
+wclean
+wmake
+```
+Note Aug 2025: If the most current version does not compile, see below for an alternative.
+
+## 1.2. Alternatively, copy a precompiled version
+You can find the necessary files in `/mnt/eps01-rds/jf01-home01/shared/Eulerean_Mechanics`.
+
+Copy the `Euler_Solid_MechFoam_Oct24` file to the following location in your home directory on the CSF:
+```
+/OpenFOAM/<userID>-10/platforms/linux64GccDPInt32Opt/bin
+```
+*(where `<userID>` stands for your actual user ID)*
+
+Run `chmod +x Euler_Solid_MechFoam_Oct24`.
+
+Copy the other files starting with `lib*` into
+```
+/OpenFOAM/<userID>-10/platforms/linux64GccDPInt32Opt/lib
+```
+
+# 2. Setting up Matflow
+
+Make a new Python virtual environment for all your MatFlow work:
+
+```
+module load apps/binapps/anaconda3/2024.10
+python -m venv matflow_venv
+```
+Activate the environment:
+```
+source ~/matflow_venv/bin/activate
+```
+Create a `matflow_requirements.txt` file in your home directory containing the dependencies:
+```
+damask==3.0.0a7.post0
+damask-parse
+numpy<2
+matflow-new
+```
+
+Install the dependencies:
+```
+pip install -r matflow_requirements.txt
+```
+
+Clone the Matflow repository, switch to the OpenFOAM branch and install in editable mode:
+```
+git clone https://github.com/hpcflow/matflow-new/
+cd matflow-new
+git switch software/open-foam
+pip install --editable .
+```
+
+Configure matflow for CSF3:
+```
+matflow config import github://hpcflow:matflow-configs@main/manchester-CSF3.yaml
+```
+
+Make a Matflow environment definitions file at `~/.matflow-new/envs.yaml`, which will define the Matflow environments needed as follows:
+
+```
+- name: python_env
+  executables:
+    - label: python_script
+      instances:
+        - command: /path/to/matflow_venv/bin/python "<>" <>
+          num_cores:
+            start: 1
+            stop: 168
+          parallel_mode: null
+
+- name: damask_parse_env
+  executables:
+    - label: python_script
+      instances:
+        - command: /path/to/matflow_venv/bin/python "<>" <>
+          num_cores:
+            start: 1
+            stop: 168
+          parallel_mode: null
+
+- name: OpenFOAM
+  setup: |
+    module purge
+    module load apps/gcc/openfoam/10
+    source $foamDotFile
+  executables:
+    - label: openfoam_app
+      instances:
+        - command: $APP_PATH <>
+          num_cores:
+            start: 1
+            stop: 168
+          parallel_mode: null
+```
+Further information about Matflow environments can be found [here](https://docs.matflow.io/stable/installation.html#environments).
+
+Tell MatFlow about the new file:
+```
+matflow config append environment_sources ~/.matflow-new/envs.yaml
+```
+
+Check the configuration file via:
+```
+matflow config get --all
+```
+# 3. Running a workflow
+Copy the `open_foam_CP.yaml` workflow from `$HOME/matflow-new/matflow/data/workflows` to an appropriate location on `scratch`, where you plan to run your simulations.
+
+Currently, the workflow provides a path (called `root_path`) to the inputs from an example simulation, where these inputs are copied from, i.e. the `system`, `initial` and `constant` folders. Please adjust `root_path` accordingly. To get started, you can find some examples in `Eulerian_Mechanics/tutorials/`.
+
+Another parameter that might need adjusting is the `app_name`. If the code is compiled as is, the default name is `Euler_Solid_MechFoam`. If either using the pre-compiled version (`Euler_Solid_MechFoam_Oct24`) or your own, please adjust.
+
+Please note, once the development work is finished, the `template_components` part will go in the data template components directory under Matflow, but for ease it is currently kept within the workflow.
+
+The workflow consists of three tasks:
+1) `generate_volume_element_from_voronoi` - this task creates a box for the volume element, then discretises it according to `VE_grid_size`. `Position` defines the positions of the seed points, from which a Voronoi tessellation is then applied using DAMASK's Python processing library (see the sketch after this list).
+Please note, the orientations are directly passed to avoid the issue of the currently different formats between Matflow and Eulerian_Mechanics.
+2) `visualise_VE_VTK` - the geometry `.vti` file is created, which can be visualised in ParaView.
+3) `simulate_VE_loading_OpenFOAM` - perform the OpenFOAM simulation.
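+To get a feel for what the first task does, an equivalent volume element can be built directly with the DAMASK Python package. This is only a rough sketch: the cell counts, number of seeds and output name are placeholder values, and the exact class names (`Grid` here) depend on the DAMASK version pinned above.
+
+```python
+import numpy as np
+import damask
+
+cells = np.array([16, 16, 16])  # placeholder grid resolution (cf. VE_grid_size)
+size = np.ones(3)               # physical edge lengths of the box
+seeds = damask.seeds.from_random(size, 20, cells)  # 20 random seed points
+
+# Assign each cell to its nearest seed, i.e. a Voronoi tessellation
+grid = damask.Grid.from_Voronoi_tessellation(cells, size, seeds)
+grid.save('voronoi_VE')  # writes a .vti file that can be opened in ParaView
+```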
+The steps that the task follows can be traced in the `task_schemas`, where firstly the `input_file_generators` are run, followed by the `commands`.
+Currently, there are three input file scripts, which can be found in `matflow/data/scripts/open_foam`; as the workflow is expanded to programmatically generate the rest of the OpenFOAM input files, further scripts will be needed/might be combined into one. As an example, `write_phase_ID_files.py` uses `from_inputs: [volume element]` to generate the input files defined in `input_file: open_foam_phase_bool_files`, i.e. the individual grain field files `n.*`. The commands then follow the commands we would usually run in a jobscript file. Please note, further parametrisation is still needed; for instance, if you wish to adjust the number of cores for the parallel simulation, you need to manually adjust the parameters in `decomposeParDict`, where `numberOfSubdomains` corresponds to the number of cores, and these need to be split correctly in the different directions in `simpleCoeffs`, e.g. `{ n (1 2 1); }`.
+
+To get started, first activate the Matflow environment:
+```
+source ~/matflow_venv/bin/activate
+```
+
+Next, run the workflow via:
+```
+matflow go open_foam_CP.yaml
+```
+
+You can track the workflow progress via:
+```
+matflow show
+```
+You can find the outputs under `execute`:
+* `t_1` contains the VTK file of the created geometry
+* `t_2` contains the OpenFOAM simulation inputs and outputs
+
+To cancel a running workflow use the workflow ID as found using `matflow show` (or the path):
+```
+matflow cancel
+```
+
diff --git a/collections/_software_and_simulation/opt_param.md b/collections/_software_and_simulation/opt_param.md
new file mode 100644
index 0000000..93ba2bc
--- /dev/null
+++ b/collections/_software_and_simulation/opt_param.md
@@ -0,0 +1,105 @@
+---
+title: Property Model Parameter Fitting
+author: Sakina Rehman
+tags:
+  - thermocalc
+  - morton pc
+  - python
+toc: true
+subcollection: TC Python
+published: true
+---
+# Property Model Parameter Fitting
+
+This example shows how to optimize the parameters of a Property Model. Typically this approach is used in order to fit a Property Model to experimental data. That can be useful for any application where no analytical correlation between the input parameters (composition, temperature and other parameters) and the material property can be established. Examples might be solid solution strength models or martensite temperature models where the transformation barrier is modelled.
+
+For the purpose of this example, the fitting of a linear model to noisy data is shown.
+
+A 'model parameter' is a value that can be changed from outside the model in the way demonstrated here. Internally it can be represented in any way; the only requirement is that the Property Model implements the 'Python Property Model Development Framework' interface methods `provide_model_parameters` and `set_model_parameter`.
+ + +``` +from tc_python import * +import numpy as np +import matplotlib.pyplot as plt +from scipy import optimize + + + +dependent_element = "Fe" +composition = {"C": 0.1} # in wt-% + + +def model(p, temps, calculator): + """Representation of the Property Model, primarily to set the model parameters `a` and `b` during the solver + iterations.""" + + # replace the model parameters by the current trial + (calculator + .set_model_parameter("a", p[0]) + .set_model_parameter("b", p[1])) + + # looping over the complete 'experimental' dataset + result = [] + for temp in temps: + calculator.set_temperature(temp) + this_result = calculator.calculate() + result.append(this_result.get_value_of("result")) + + print("Intermediate fitting result: a = {}, b= {}".format(p[0], p[1])) + return result + + +with TCPython() as session: + system = (session + .select_database_and_elements("FEDEMO", [dependent_element] + list(composition.keys())) + .get_system()) + + # the custom Property Model is not located in the default Property Model directory, but in the specified + # subdirectory + print("Available property models : {}".format(session.get_property_models("property_models"))) + calculator = (system + .with_property_model_calculation("Simple linear model", "property_models") + .set_composition_unit(CompositionUnit.MASS_PERCENT)) + + for element in composition: + calculator.set_composition(element, composition[element]) + + print("Model parameters: {}".format(calculator.get_model_parameters())) + print("Currently set value of model parameter a = {}".format(calculator.get_model_parameter_value("a"))) + print("Currently set value of model parameter b = {}".format(calculator.get_model_parameter_value("b"))) + + # generate data points with noise + num_points = 150 + temps_x = np.linspace(200, 1000, num_points) # in K + + # parameter `a` is about 1000 and parameter `b` is about 0.5 + experiment_y = 1000 + 0.5 * temps_x + np.random.uniform(-1, 1, num_points) * 100 + + # setup the initial guess of `a` and `b` and define the fitting functions + fit_func = lambda p, x, calc: model(p, x, calc) # target function + err_func = lambda p, x, y: fit_func(p, x, calculator) - y # distance to the target function + params_0 = [100.0, 0.01] # some initial guess for the parameters being far off + + # run the fitting + opt_result = optimize.least_squares(err_func, params_0, args=(temps_x, experiment_y)) # type: dict + params_opt = opt_result["x"] + print("Best optimized parameters: a = {}, b = {}".format(params_opt[0], params_opt[1])) + + # plot of the data and the fit + fig, ax = plt.subplots() + fig.suptitle('Fitting of linear Property Model', fontsize=14, fontweight='bold') + ax.plot(temps_x, experiment_y, "ro", temps_x, fit_func(params_opt, temps_x, calculator), "r-") + ax.set_xlabel("temperature / K") + ax.set_ylabel("'experimental' property") + plt.show() + +``` +![alt text](https://github.com/LightForm-group/wiki/blob/master/collections/_software_and_simulation/param_fit.png) diff --git a/collections/_software_and_simulation/orientations.md b/collections/_software_and_simulation/orientations.md index 5dce363..be6091b 100644 --- a/collections/_software_and_simulation/orientations.md +++ b/collections/_software_and_simulation/orientations.md @@ -7,7 +7,7 @@ tags: toc: true published: true layout: jupyter_notebook - +subcollection: Misc --- {% include jupyter_notebooks/orientations/orientations.md %} diff --git a/collections/_software_and_simulation/param_fit.png b/collections/_software_and_simulation/param_fit.png new file 
mode 100644
index 0000000..36783a4
Binary files /dev/null and b/collections/_software_and_simulation/param_fit.png differ
diff --git a/collections/_software_and_simulation/paraview.md b/collections/_software_and_simulation/paraview.md
new file mode 100644
index 0000000..3423565
--- /dev/null
+++ b/collections/_software_and_simulation/paraview.md
@@ -0,0 +1,38 @@
+---
+title: Visualising .vtr outputs using ParaView
+author: Guy Bowker
+tags:
+  - post-processing
+  - paraview
+  - damask
+toc: true
+published: true
+subcollection: DAMASK
+---
+
+# ParaView: how to visualise DAMASK results
+
+ParaView is data analysis and visualisation software that produces 3D representations of `.vtk` and `.vtr` files. More information, as well as a link to download, can be found [here](https://www.paraview.org), and a full tutorial on its use can be found [here](https://www.paraview.org/Wiki/images/b/bc/ParaViewTutorial56.pdf).
+
+## Transferring the DAMASK output
+After post-processing a DAMASK `geom_load.hdf5` output using a `processing.py` script as shown [here](https://lightform-group.github.io/wiki/software_and_simulation/new-damask), outputs with the conventional name `geom_load_inc####.vtr` will be placed in the current working directory (cwd), or within a sub-directory named `postProc/`. To visualise these files, the user is advised to move them to a directory on their local machine. This may be done in one of two ways: transfer of files via Dropbox, or using the `rsync` command in your local machine's terminal:
+
+```bash
+rsync -avz username@csf3.itservices.manchester.ac.uk:~/myresult.dat .
+```
+
+Remember to change `username` to your University login username (e.g. a#####bc) and `~/myresult.dat` to the filepath of the directory you would like to transfer. Ensure also to add a `.` to specify that the files should be stored in the cwd on your local machine; this can be changed to another filepath.
+More info on how rsync works, as well as how to set up Dropbox for transferring files to and from the CSF, can be found [here](http://ri.itservices.manchester.ac.uk/csf3/filesystems/file-transfer/).
+
+## Using ParaView
+### Opening `.vtk` and `.vtr` files
+Now that the `.vtk` or `.vtr` files are present on the local machine, start up ParaView from the taskbar or .exe file. The desired files can be dragged into the left-hand 'Pipeline Browser' pane of the interface. When dragging multiple files in at once, ParaView will group them together, with each one representing a time increment. This allows for some nice animations later. For now click 'Apply' to apply changes.
+
+[comment]: <> (image/gif of paraview)
+
+### Applying filters to the data
+From here the data can be manipulated in various ways; here is one example that is useful for DAMASK output:
+
+[comment]: <> (image/gif of selecting filters in paraview)
+
+First select the dataset you'd like to filter. With this highlighted, from the 'Filters' dropdown on the top-left menu (on Mac; this may be different on Windows), select the 'Alphabetical' dropdown. From here scroll all the way down to 'Warp By Vector'. This will allow us to show the evolution of the shape change (tension or compression). It will insert a new branch into the pipeline named `WarpByVector1`. Highlight this and click 'Apply'. Now that this is applied and highlighted, the dropdown on the centre-top toolbar will contain the datasets defined in `processing.py`. Interacting with the 'play' buttons will now scrub through each time increment of the simulation, displaying the shape change during the test.
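+The same steps can also be scripted with ParaView's Python client (`pvpython`). The following is a rough sketch only: the file name and the vector array name `u` are placeholders, and the array you warp by must actually exist in your `.vtr` output.
+
+```python
+# Run with pvpython: a scripted equivalent of the GUI steps above.
+from paraview.simple import *
+
+reader = OpenDataFile('geom_load_inc0016.vtr')  # placeholder file name
+warp = WarpByVector(Input=reader)               # the 'Warp By Vector' filter
+warp.Vectors = ['POINTS', 'u']                  # placeholder vector array name
+
+Show(warp)
+Render()
+SaveScreenshot('warped_inc0016.png')            # save the current view to file
+```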
diff --git a/collections/_software_and_simulation/particle_RVEs_DAMASK.md b/collections/_software_and_simulation/particle_RVEs_DAMASK.md
new file mode 100644
index 0000000..85dabc9
--- /dev/null
+++ b/collections/_software_and_simulation/particle_RVEs_DAMASK.md
@@ -0,0 +1,56 @@
+---
+title: Generating RVEs with particle distributions
+author: Adam Plowman
+tags:
+  - simulation
+  - python
+  - damask
+toc: true
+subcollection: DAMASK
+published: true
+---
+
+## Generalisation of particle-containing RVEs
+
+In the dual-phase Ti simulation work in which we will ultimately develop a [coupled crystal-plasticity/phase-field model](/wiki/blog/ti-cp-pf-overview), we have currently been using an RVE containing three $\alpha$-phase precipitate particles. To enable more general control over the morphology of the RVE, and, by extension, to support LightFORM researchers simulating other materials with multi-phase particles, we generalised the code to generate such RVEs. In particular, we made good use of the DAMASK Python package to implement a `ParticleRVE` class, which is parametrised according to one or more `ParticleDistribution` objects. In this code, a particle distribution is a set of ellipsoidal particles, which are characterised by their diameter lengths, major axis direction vector and major plane normal direction vector. The diameters (and other properties) can be specified in terms of normal distribution means and standard deviations. To generate a `ParticleDistribution`, we can specify two of the following three parameters: `major_axis_length`, `number`, `target_volume_fraction`, where the unspecified parameter will be calculated from the other two, and the volume of the RVE.
+
+The code is included in our *damask-parse* Python package (in the `particles` module[^1]), and will soon be integrated with MatFlow.
+
+The following animation demonstrates the construction of a particle-RVE using the code below. In this code, we generate an RVE with five grains, seeded in random positions. Voxels are assigned according to a Voronoi tessellation. We define two particle distributions, one representing "alloying elements" (in this case, fewer larger particles whose minor axes lengths are 1/2 and 1/4 of their major axes lengths), and another representing "impurities" (in this case, these are smaller and more numerous spherical particles).
+
+![particleRVE_camera_ani](/wiki/assets/images/posts/particleRVE_camera_ani.gif)
+
+```python
+import numpy as np
+from damask_parse.particles import ParticleDistribution, ParticleRVE  # see [^1]
+
+n_grains = 5
+size = np.array([2, 1, 1])
+cells = np.array([256, 128, 128])
+seeds = size * np.random.rand(n_grains, 3)
+
+pdist_1 = ParticleDistribution(
+    label='alloying_elements',
+    major_axis_length=0.5,
+    major_axis_length_stddev=0.005,
+    target_volume_fraction=0.01,
+    minor_axis_ratios=[0.5, 0.25],
+    major_axis_dir=[1, 1, 0],
+)
+pdist_2 = ParticleDistribution(
+    label='impurities',
+    major_axis_length=0.2,
+    major_axis_length_stddev=0.001,
+    number=10,
+    major_axis_dir=[1, 1, 0],
+)
+
+RVE = ParticleRVE.from_voronoi_tessellation(
+    size=size,
+    cells=cells,
+    seeds=seeds,
+    particle_distributions=[pdist_1, pdist_2],
+)
+RVE.save('RVE.vtr')
+```
+
+## References
+
+[^1]: [https://github.com/LightForm-group/damask-parse/blob/master/damask_parse/particles.py](https://github.com/LightForm-group/damask-parse/blob/master/damask_parse/particles.py)
diff --git a/collections/_software_and_simulation/pip_and_conda_on_CSF.md b/collections/_software_and_simulation/pip_and_conda_on_CSF.md
index 88c5729..df55950 100644
--- a/collections/_software_and_simulation/pip_and_conda_on_CSF.md
+++ b/collections/_software_and_simulation/pip_and_conda_on_CSF.md
@@ -4,10 +4,7 @@ author: Adam Plowman
 tags:
 - python
 published: true
----
-
-**UPDATE**: Looks like there is no problem with conda detecting the correct pip as long as the shell/environment is correctly configured using `conda init`. Original below kept for posterity.
-
+subcollection: Python
 ---
 
 Installing Python packages via pip within a conda environment on the CSF:
diff --git a/collections/_software_and_simulation/post_processing_on_CSF.md b/collections/_software_and_simulation/post_processing_on_CSF.md
new file mode 100644
index 0000000..3167a66
--- /dev/null
+++ b/collections/_software_and_simulation/post_processing_on_CSF.md
@@ -0,0 +1,77 @@
+---
+title: Using Python on the CSF for Post-processing
+author: Yuchen Zheng
+tags:
+  - simulation
+  - csf
+  - damask
+  - python
+toc: true
+published: true
+subcollection: DAMASK
+---
+
+This content is an addition to the previous section [here](https://lightform-group.github.io/wiki/software_and_simulation/new-damask).
+
+After getting the `geom_load.hdf5` file as the result of a DAMASK simulation, we can do the processing with Python code on the CSF.
+
+### Output .vti File from .hdf5
+A `.vti` file is used to store series of image data, which can be exported during post-processing and visualised in ParaView. Here is a detailed example, which can be named `postprocessing.py` (you may want to create a new folder for the file):
+
+```python
+import damask       # Necessary for the .add and .export_VTK commands.
+import numpy as np  # Necessary for sorting out the axes.
+
+f = damask.Result('/path/to/geom_load.hdf5')  # Find the .hdf5 file.
+
+l1 = np.array([1,0,0])  # Represents the X axis.
+l2 = np.array([0,1,0])  # Represents the Y axis.
+l3 = np.array([0,0,1])  # Represents the Z axis.
+for inc in [0, 10, 20, 30, 40]:  # Depends on which time-points you want to visualise, e.g. the 5 time-points from step 0 to 40.
+    f_inc = f.view(increments=inc)
+    f_inc.add_stress_Cauchy()                       # Calculate Cauchy stress and write into .hdf5 file
+    f_inc.add_equivalent_Mises('sigma')             # Calculate von Mises stress and write into .hdf5 file
+    f_inc.add_equivalent_Mises('epsilon_V^0.0(F)')  # Calculate equivalent strain and write into .hdf5 file
+    f_inc.add_IPF_color(l=l1)  # Apply IPF colours along X to RVE.
+    f_inc.add_IPF_color(l=l2)  # Apply IPF colours along Y to RVE.
+    f_inc.add_IPF_color(l=l3)  # Apply IPF colours along Z to RVE.
+    f_inc.export_VTK(output=['sigma', 'epsilon_V^0.0(F)', 'O', 'IPFcolor_(1 0 0)','IPFcolor_(0 1 0)','IPFcolor_(0 0 1)', 'phi'])  # Export .vti with the parameters added above.
+    print(f_inc.list_data())  # Show the existing datasets.
+```
+We can modify this by adding more parameters we need to analyse. It is worth mentioning that, although the command is called `.export_VTK`, the output file is in the format of `.vti`, not `.vtk` or `.vtr`. This is the standard output format of the latest DAMASK module.
+
+Don't `.add` the same parameters twice, otherwise there will be errors. A good way to check if a dataset already exists is `print(f_inc.list_data())`, which lists all the parameters we added before.
+
+### Output Stress vs Strain Curve from .hdf5
+Write another Python script or just add the following lines to `postprocessing.py`:
+
+```python
+import damask
+import numpy as np
+import matplotlib.pyplot as plt
+
+f = damask.Result('/path/to/geom_load.hdf5')  # Find the .hdf5 file.
+f.view(increments=-1).get().keys()  # List the datasets at the last step.
+sigma = [np.average(s) for s in f.get('sigma_vM').values()]  # Access the stress.
+epsilon = [np.average(e) for e in f.get('epsilon_V^0.0(F)_vM').values()]  # Access the strain.
+plt.plot(epsilon,sigma)  # Plot the stress vs strain curve.
+plt.savefig('stress_vs_strain_plot.png')  # Save the picture.
+```
+
+### Submit task to CSF
+`postprocessing.py` can be run with the command `python postprocessing.py`; however, when the .hdf5 file is big in size, it would take ages to complete the processing. Therefore, it is better to use a parallel environment to run the task by creating another file, `postprocessing_jobscript.sh`:
+
+```sh
+#!/bin/bash --login
+#$ -cwd          # Submit in the current working directory
+#$ -pe smp.pe 4  # Use a parallel environment with 4 cores
+#$ -l short      # Select type of core, use mem512 when the task is huge.
+
+module load apps/binapps/anaconda3/2021.11  # Load Anaconda
+source activate /mnt/eps01-rds/jf01-home01/shared/.conda/damask_v3a7  # Activate the conda environment, can be changed to other envs.
+
+python postprocessing.py interactive  # Run with Python
+```
+Then we can type the command `qsub postprocessing_jobscript.sh` to submit the task to the CSF.
diff --git a/collections/_software_and_simulation/python_venv.md b/collections/_software_and_simulation/python_venv.md
new file mode 100644
index 0000000..cc79f46
--- /dev/null
+++ b/collections/_software_and_simulation/python_venv.md
@@ -0,0 +1,113 @@
+---
+title: Python virtual environments
+author: Gerard Capes
+tags:
+  - python
+  - CSF
+published: true
+subcollection: Python
+---
+
+## What are virtual environments and why should I use them?
+
+A virtual environment is a way to manage dependencies separately for different projects. This can help to avoid conflicts between python packages, and makes your code [easier to reproduce](#using-a-requirementstxt-file) because others will know which packages (and maybe even which versions of them) are needed to run your code. The virtual environment is isolated from other virtual environments, so packages installed in one project's virtual environment won't interfere with those in a different project's virtual environment.
+
+## How to create a virtual environment
+
+The easiest way is to use `venv` to create the environment, and `pip` to install packages.
These tools come with any python installation, so it doesn't matter whether you installed python from https://www.python.org/, Anaconda, or your OS already came with python installed.
+
+The method is very slightly different on Linux/macOS and Windows, so read the relevant section below and/or read the [official docs](https://packaging.python.org/en/latest/guides/installing-using-pip-and-virtual-environments/).
+
+*In your project directory*, you create the virtual environment like this for Linux/macOS:
+
+```bash
+python -m venv .venv
+```
+
+and like this on Windows:
+
+```
+py -m venv .venv
+```
+
+### Activating the virtual environment
+
+In order to use your venv and to install packages into it, you first need to activate it. For Linux/macOS:
+
+```bash
+source .venv/bin/activate
+```
+
+and on Windows:
+
+```
+.venv\Scripts\activate
+```
+
+Your prompt might change to indicate that you've activated a virtual environment, but you can check with `which python` (Linux/macOS) or `where python` (Windows). This will return the path to the python installation you're currently using, which should be in the venv you just created.
+
+### Installation of packages
+
+*Having first activated your virtual environment*, you install packages with `pip` e.g.
+
+```bash
+pip install numpy
+pip install matflow-new
+```
+
+### Deactivating the venv
+
+When you want to switch projects, you first need to deactivate (shut down) the active virtual environment using
+
+```bash
+deactivate
+```
+
+### Reactivating an existing virtual environment
+
+Each time you want to use the virtual environment (e.g. after restarting your computer, closing your terminal etc.) you need to reactivate it using either
+
+```bash
+source .venv/bin/activate
+```
+
+or
+
+```
+.venv\Scripts\activate
+```
+
+### Using a `requirements.txt` file
+
+It is good practice to save a list of dependencies for your project in a `requirements.txt` file, so that the project dependencies can be installed from it like this:
+
+```bash
+pip install -r requirements.txt
+```
+
+The requirements file is just a list of packages (and versions) to install e.g.
+
+```
+matflow-new=="0.3.0a138"
+numpy
+pandas
+```
+This aids in reproducibility, and makes it easier for others to run your code (and thus easier for them to help you).
+
+### Installing a local package for development
+
+If you are developing a package, you can install it in "editable" mode, which means that any changes you make to the source code will be reflected in your `venv` as soon as you save them. Assuming you're already in the directory containing the package to edit, you would use:
+
+```bash
+pip install --editable .
+```
+
+Note that the above command ends with a dot, which refers to the current directory.
+
+### Considerations for HPC (e.g. CSF)
+
+When recreating the venv on an HPC cluster, you would first want to load a recent version of python, because the system version of python is likely to be a bit out of date. Typically you would load a recent version of python using a module file, then follow the notes from the top of this page.
On CSF3 you might use a command similar to this:
+
+```bash
+module load apps/binapps/anaconda3/2024.10
+```
diff --git a/collections/_software_and_simulation/steel_CCT_predictor.md b/collections/_software_and_simulation/steel_CCT_predictor.md
new file mode 100644
index 0000000..0a59697
--- /dev/null
+++ b/collections/_software_and_simulation/steel_CCT_predictor.md
@@ -0,0 +1,14 @@
+---
+title: Steel CCT Predictor
+author: Joshua Collins
+tags:
+  - Steel
+  - CCT
+toc: false
+subcollection: Steel
+published: true
+---
+
+## Low Alloy Steel CCT Predictor
+
+Link to Python code: [https://github.com/JoshUoM/Steel-CCT-Predictor](https://github.com/JoshUoM/Steel-CCT-Predictor)
diff --git a/collections/_software_and_simulation/tc_example.png b/collections/_software_and_simulation/tc_example.png
new file mode 100644
index 0000000..edc5582
Binary files /dev/null and b/collections/_software_and_simulation/tc_example.png differ
diff --git a/collections/_software_and_simulation/test_2.md b/collections/_software_and_simulation/test_2.md
deleted file mode 100644
index b595607..0000000
--- a/collections/_software_and_simulation/test_2.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Test 2 notebook
-author: Adam Plowman
-tags:
-  - python
-  - numpy
-toc: true
-published: true
-layout: jupyter_notebook
-
----
-
-{% include jupyter_notebooks/test_2/test_2.md %}
diff --git a/collections/_software_and_simulation/test_md_export.md b/collections/_software_and_simulation/test_md_export.md
deleted file mode 100644
index bd37bdf..0000000
--- a/collections/_software_and_simulation/test_md_export.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Test Notebook with image
-author: Adam Plowman
-tags:
-  - python
-  - numpy
-toc: true
-published: true
-layout: jupyter_notebook
-
----
-
-{% include jupyter_notebooks/test_md_export/test.md %}
diff --git a/collections/_software_and_simulation/thermodynamics.md b/collections/_software_and_simulation/thermodynamics.md
new file mode 100644
index 0000000..e897e4b
--- /dev/null
+++ b/collections/_software_and_simulation/thermodynamics.md
@@ -0,0 +1,69 @@
+---
+title: Thermodynamic Software
+author: Joe Robson
+toc: true
+---
+
+## Thermodynamic Modelling Software at Manchester
+
+The following thermodynamic software packages are available within the CLARI group:
+
+- JMatPro – phase equilibria and solidification modelling (+ physical property prediction)
+- ThermoCalc – phase diagrams and thermodynamics
+
+This software is installed on the modelling PC. Please contact the #jmatpro_users or #mortonpc group on Slack if you have problems accessing this machine. Ask your supervisor to add you to Slack if you are not a member already. To use this software from elsewhere, you need to remote log-in or install it on your own machine (JMatPro only).
+
+## JMatPro
+
+The current version of JMatPro can either be run using remote desktop (see later) or by installing on your own computer. Installing on your computer is preferred as multiple users are allowed. Up to 5 users can use JMatPro simultaneously. For it to work, the USB key must be inserted in the modelling PC in Royce (if you get a licence error, post a message on the Slack jmatpro channel).
+
+## Multiuser Method
+
+To install JMatPro on your own machine (Windows only), first download it at:
+You will need the **login details** - please see the pinned post on the **#jmatpro_users** Slack group (ask your supervisor to add you to the Slack group #jmatpro_users if you are not a member).
+
+You can then download and install the latest version (you want the standard version, not the node-locked version). Providing you are on a Materials-based PC or VPN, the USB key should be found when you run JMatPro. There is a small possibility that your computer cannot find the licence on the network. In this case, if you get a licence error, try the following procedure:
+
+```
+1. Open a web browser and follow the link
+2. Make sure the option "Allow Access to Remote Licenses" is checked (if not, check it)
+3. Uncheck the option "Broadcast Search for Remote Licenses", and check the option "Aggressive Search for Remote Licenses"
+4. Under "Specify Search Parameters", enter the IP address of the license server, i.e. 130.88.44.27:56224
+5. Click "Submit"
+```
+
+You may need to wait a few minutes before the changes take effect. You may also need to find the 'JMatPro\configs\system.def' file in your installation directory and put the IP address in the [ContactServer] field.
+
+## Remote Desktop Method
+
+The most straightforward way to use all of the above software is to use Windows Remote Desktop. This allows you to be effectively sat at the modelling PC without being there. You can use this from outside the University provided you connect via VPN to the University system first.
+
+For Windows 10, the Remote Desktop connection is straightforward.
+
+Details of the remote machine you will need to enter to log-in are:
+
+```
+PC name: b-11uxx52773bx
+IP address: 130.88.44.27:56224
+```
+
+**Please see the pinned post for the log-in password on the Slack channel #jmatpro_users**
+
+You may get a warning message, which you can ignore and proceed.
+**Note: if another user is also trying to use the same computer, you may find you get logged out during your session. If this happens, you will need to wait and try again another time when the computer is not in use. Please check the booking form on the home screen and make a booking for the time you want to use the PC. If another user is booked for the date/time you log in, please book another slot and log off.**
+After logging in, you will see the desktop. Please do not close any other modelling windows if it appears they are still running a model (i.e. calculations not complete) as this will be another user's work. For this reason it is an important discipline to close all modelling windows once you have finished, and log out.
+If you use the machine remotely and wish to get access to your files, the easiest way is to email them to yourself using Outlook webmail. You can then open the email on your own computer and get the files.
+
+## Thermodynamic Databases
+
+We have the following additional databases for use in JMatPro. If you want to do multicomponent calculations, these should be the most accurate databases to use.
+```
+ALDATA – Aluminium alloy database
+MGDATA – Magnesium alloy database
+ZRDATA – Zirconium alloy database
+TIDATA – Titanium alloy database
+```
+
+## ThermoCalc
+
+ThermoCalc can be accessed through the Remote Desktop method only. Follow the instructions for this as for JMatPro (above).
diff --git a/collections/_software_and_simulation/ttt_cct.png b/collections/_software_and_simulation/ttt_cct.png
new file mode 100644
index 0000000..13b452c
Binary files /dev/null and b/collections/_software_and_simulation/ttt_cct.png differ
diff --git a/collections/_software_and_simulation/ttt_cct_al_ni_cr.md b/collections/_software_and_simulation/ttt_cct_al_ni_cr.md
new file mode 100644
index 0000000..61e5053
--- /dev/null
+++ b/collections/_software_and_simulation/ttt_cct_al_ni_cr.md
@@ -0,0 +1,73 @@
+---
+title: TTT and CCT Curves of Ni-Al-Cr System
+author: Sakina Rehman
+tags:
+  - thermocalc
+  - morton pc
+  - python
+toc: true
+subcollection: TC Python
+published: true
+---
+# TTT and CCT Curves of Ni-Al-Cr System
+
+This example simulates the precipitation of the gamma-prime phase from the gamma phase in a Ni-Al-Cr alloy. It demonstrates how to run TTT (temperature-time-transformation) and CCT (continuous-cooling-transformation) diagrams.
+It also shows (1) how to set up objects (the system, precipitate phase, and matrix phase) and then reuse them, and (2) how to set an alias for the precipitate phase.
+```
+import os
+
+from tc_python import *
+import matplotlib.pyplot as plt
+
+with TCPython():
+    system = (SetUp()
+              .set_cache_folder(os.path.basename(__file__) + "_cache")
+              .select_thermodynamic_and_kinetic_databases_with_elements("NIDEMO", "MNIDEMO", ["Ni", "Al", "Cr"])
+              .select_phase("DIS_FCC_A1")
+              .select_phase("FCC_L12#2")
+              .get_system()
+              )
+
+    precip = PrecipitatePhase("FCC_L12#2").set_interfacial_energy(0.023).set_alias('GAMMA_PRIME')
+    matrix = MatrixPhase("DIS_FCC_A1").add_precipitate_phase(precip)
+
+    ttt_results = (system.with_ttt_precipitation_calculation()
+                   .set_composition_unit(CompositionUnit.MOLE_PERCENT)
+                   .set_composition("Al", 10)
+                   .set_composition("Cr", 10)
+                   .with_matrix_phase(matrix)
+                   .set_min_temperature(1000)
+                   .set_max_temperature(1160)
+                   .set_temperature_step(10)
+                   .set_max_annealing_time(1.0e6)
+                   .stop_at_volume_fraction_of_phase(1.e-4)
+                   .calculate()
+                   )
+
+    cct_results = (system.with_cct_precipitation_calculation()
+                   .set_composition_unit(CompositionUnit.MOLE_PERCENT)
+                   .set_composition("Al", 10)
+                   .set_composition("Cr", 10)
+                   .with_matrix_phase(matrix)
+                   .set_min_temperature(1000)
+                   .set_max_temperature(1200)
+                   .set_cooling_rates([1., 1.e1, 1.e2, 1.e3])
+                   .stop_at_volume_fraction_of_phase(1.e-4)
+                   .calculate()
+                   )
+
+    time_1, temperature_1 = ttt_results.get_result_for_precipitate("GAMMA_PRIME")
+    time_2, temperature_2 = cct_results.get_result_for_precipitate("GAMMA_PRIME")
+
+# Plot result
+fig, ax = plt.subplots()
+fig.suptitle('TTT and CCT', fontsize=14, fontweight='bold')
+ax.set_xlabel('Time [s]')
+ax.set_ylabel('Temperature [K]')
+ax.semilogx(time_1, temperature_1, 'b-', label="GAMMA_PRIME (TTT)")
+ax.semilogx(time_2, temperature_2, 'r--', label="GAMMA_PRIME (CCT)")
+ax.legend()
+plt.show()
+```
+![alt text](https://github.com/LightForm-group/wiki/blob/master/collections/_software_and_simulation/ttt_cct.png)
diff --git a/collections/_software_and_simulation/workflows.md b/collections/_software_and_simulation/workflows.md
index 5dcc767..1863bbd 100644
--- a/collections/_software_and_simulation/workflows.md
+++ b/collections/_software_and_simulation/workflows.md
@@ -2,9 +2,11 @@
 title: Towards fully reproducible scientific workflows
 author: Adam Plowman
 tags:
+  - matflow-old
 - simulation
 published: true
 toc: true
+subcollection: MatFlow old
 ---
 
 ![matflow.png]({{site.baseurl}}/assets/images/posts/matflow.png)
diff --git a/collections/_software_and_simulation/yaml.md b/collections/_software_and_simulation/yaml.md
new file mode 100644
index 0000000..33b7aa3
--- /dev/null
+++ b/collections/_software_and_simulation/yaml.md
@@ -0,0 +1,62 @@
+---
+title: Tips for using YAML
+author: Gerard Capes
+tags:
+  - matflow-new
+  - yaml
+  - debugging
+published: true
+subcollection: MatFlow
+---
+
+# Useful things to know about yaml
+
+## Indentation
+YAML can use indented, or zero-indented lists (sequences), where the hyphen is
+at the same level of indentation as the previous line.
+
+```yaml
+top_level_key:
+- zero_indented_item
+- another_zero_indented_item
+
+different_top_level_key:
+  - indented_list_item
+  - another_indented_list_item
+```
+
+The creators recommend the zero-indented approach, but you can use either.
+It can help to view the indentation by considering the text rather than the hyphen.
+As ever, it's best to stick to one convention within the same file, for reasons of clarity.
+
+## Splitting a long path
+If you have a long path that you want to split over more than one line,
+the best approach is to use a quoted string, and use a backslash to concatenate lines
+without spaces e.g.
+
+```
+script: "/a/very/long/path/\
+to/some/script.py"
+```
+
+## Multi-line strings
+Block scalars using the literal style ("|") are useful for multi-line strings
+e.g.
+
+```
+environments:
+- name: abaqus_env
+  setup: |
+    source /mnt/iusers01/support/mbexegc2/scratch/Abaqus_bayesian_matflow/.venv/bin/activate
+    module load apps/binapps/abaqus/2022
+```
+
+## Null = None
+In YAML, you use `Null` if you want to set a (python) value of `None`.
+
+## Checking your YAML file
+If you're struggling to understand a new error from MatFlow,
+it's worth checking you're using valid syntax in your yaml file(s)
+using something like an online YAML validator, e.g. [yamlvalidator](https://yamlvalidator.com).
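+You can also do a quick syntax check from Python itself with the `pyyaml` package (a small sketch; assumes `pip install pyyaml`, and the file name is a placeholder):
+
+```python
+# yaml.safe_load raises a YAMLError with a line/column pointer
+# if the file is not valid YAML.
+import yaml
+
+with open('envs.yaml') as f:
+    try:
+        yaml.safe_load(f)
+        print('YAML syntax is valid')
+    except yaml.YAMLError as err:
+        print(err)
+```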
diff --git a/collections/_tutorials/automated_optical_microscope_ZEN_2.md b/collections/_tutorials/automated_optical_microscope_ZEN_2.md
new file mode 100644
index 0000000..550b083
--- /dev/null
+++ b/collections/_tutorials/automated_optical_microscope_ZEN_2.md
@@ -0,0 +1,106 @@
+---
+title: Automated Optical Microscopy with ZEN 2
+author: Bernadeta Karnasiewicz
+tags:
+  - Microscopy
+subcollection: Optical microscopy
+---
+
+# ZEISS Axio Imager 2 microscope combined with ZEN 2 image-processing and analysis software for digital microscopy (D 010)
+
+![](/wiki/assets/images/posts/Zenn_Picture1.jpg)
+
+The ZEISS Axio Imager 2 microscope offers a couple of contrasting techniques for reflected light mode, which is used for imaging metal specimens: *Brightfield, Darkfield, Circular Differential Interference Contrast (C-DIC), Polarization Contrast and Polarization with Additional Lambda Plate*. Illustrative images for all of them are shown below. For Ti and its alloys, the common way to reveal the microstructure is to prepare and etch the specimen in Kroll's reagent and to use the brightfield technique to look at it. However, microstructural details in Ti alloys can also be seen using polarized light, without etching. This is possible when the surfaces contain structures that alter the state of polarization during the reflection process, which is the case for hexagonal α Ti, where individual grains in a polycrystalline sample reflect different light intensities depending on their orientation. Most often polarization contrast is used for this purpose. Although the C-DIC technique uses polarized light too, by using the Nomarski prism it reveals small differences on the surface of the specimen and therefore focuses more on the topography of the surface.
+
+![](/wiki/assets/images/posts/Zenn_Picture2.png)
+
+There are also other advantages of this microscope. The first is the motorized stage, which enables reproducible illumination settings together with constant image quality. The second is the stage carrier, which has been designed as a vibration-free unit, isolated from the rest of the stand, creating good measurement conditions for adequate, multiple measurements. This unit is combined with the ZEN 2 software, which is image-processing and analysis software for digital microscopy. In addition to basic functionality for image acquisition, elementary image processing, annotations, etc., it offers additional modules like the Tiles module, which acquires a number of individual images and combines them together to create a high-resolution map of the entire sample or a region of interest, using a suitable focus strategy. In LightForm this is the main purpose of using this microscope; for individual imaging other microscopes need to be used. An example of a map acquired using the Tiles mode is shown below (Brightfield, RGB, 5X). This is a Ti6Al4V specimen after a specific heat treatment. The overview of the specimen at a given magnification gives us the opportunity to look at the entire sample instead of at individual frames, zoom into a chosen area without losing resolution (the limit is of course the magnification the map was acquired at) and look for microstructural features that are of interest. If a particular region is interesting for us, we can do another map of this region at higher magnification, capturing finer properties of the given microstructure.
+
+![](/wiki/assets/images/posts/Zenn_Picture3.jpg)
+
+# Manual
+
+Switch on the power supply and the microscope first.
+
+Log into the PC and open up ZEN Pro (don't place the specimen on the stage yet).
+
+Click 'calibrate now' to calibrate the stage.
+
+Before setting up the microscope, make sure that the surface of your specimen is flat by using the squeezing tool and some plasticine, which is a very important step in acquiring a stitched map of your specimen. Lower the stage of the microscope and safely place the specimen on the stage. Choose the smallest magnification on the microscope's screen and the contrasting technique you wish to use: Bright Field, Dark Field, C-DIC, Pol.
+
+![](/wiki/assets/images/posts/Zenn_Picture4.png)
+
+Focus on the surface of the sample carefully by looking through the eyepiece.
+
+In the interface of ZEN Pro you will see 4 main tabs:
+
+![](/wiki/assets/images/posts/Zenn_Picture5.png)
+
+Under the 'Locate' tab, click the live image to see your specimen. Adjust the intensity of the light to your sample on the microscope (knob under the display screen as shown above).
+
+In the Locate tab you need to make sure that the microscope settings are suitable for your material. One of the important parameters you can adjust is the exposure time, which measures how long the camera will be exposed to the light (photons) reflected from your sample. The longer the exposure time, the more photons the detector will receive, resulting in increased pixel intensity and a "brighter" image. Ideally, when you acquire an image you want to use the longest possible exposure time, without saturating any of the pixels.
On the histogram under the 'Display' tab below the live image you want to use the entire dynamic range of your camera without saturating any pixels. Below you will find an example of a well-adjusted (left) and an oversaturated (right) image together with the histogram below it.
+
+![](/wiki/assets/images/posts/Zenn_Picture6.png)
+
+You can adjust the colour channels in the 'Camera' tab and choose the colour mode (RGB – red green blue or BW – black white) in the 'Mode' tab.
+
+![](/wiki/assets/images/posts/Zenn_Picture7.png)
+
+Under the live image area, you can adjust the general view options on the 'Dimensions', 'Graphics' and 'Display' tabs. The 'Display' tab shows the settings for brightness and contrast. You can move the controls under the plot to the left and right in order to adjust the values for brightness and contrast. Anything below this value (small triangle – Black) will be shown as black and anything above that value (small triangle – White) will be shown as white, and anything in between will be contrast scaled, using the gamma value for the display contrast curve that you enter. A gamma value of 0.45 will set the optimum colour presentation. The 'Best Fit' option should give you good results too.
+
+![](/wiki/assets/images/posts/Zenn_Picture8.png)
+
+When you are happy with the settings, move to the 'Acquisition' tab, then New and give it a name (usually your name and something regarding the sample that you are looking at). Click on the 'Channels' tab and +WF and add the channel that you wish to use (e.g. Reflected Light RL brightfield, which corresponds to the mode of the microscope that you chose), click 'add' and close. The programme will remember the channel you chose under the name you gave, so if you use the microscope multiple times, just open the one with your name from this drop-down menu. Activate the **Tiles** tool by clicking the Tiles checkbox in the Acquisition tab. Then click 'live' (if the image appears black then check the light intensity on the microscope).
+
+![](/wiki/assets/images/posts/Zenn_Picture9.png)
+
+Make sure that the area you want to scan is within the range of the stage movement in the X and Y directions. On the joystick there are various speed options available (F1 – F4); generally the slow speed is recommended for small samples and for higher magnifications, while for smaller magnifications and bigger samples a faster speed can be used.
+
+The purpose of the **Tiles** tool is to acquire images that are made up of a number of individual images. To do a Tiles experiment, we define the tile region and points with different Z-positions within the region, using a suitable focus strategy. You can now set up the Tiles experiment. In the 'Tiles' tab select 'Stake' (this option defines the tile region by specific marker points located at the corners of the desired region. In the Tile Regions section you can define the tile region by other options too – 'Tiles' and 'Size', where the first determines the number of tiles you want in the X and Y input fields, e.g. X = 3, Y = 3 equals a tile region containing 9 tiles, so that you can control how big one focus region will be. Alternatively, you can enter the size of the tile region that you want to add. These two options are for advanced users).
+
+![](/wiki/assets/images/posts/Zenn_Picture10.png)
+
+In the Contour option, the square-shaped scan requires you to determine the 4 corners of the region you want to acquire.
Rotate your sample in such a way that the edges of the region you want to scan are along the X and Y axes of the stage, and move to the first corner of your region. Focus there and click ![](/wiki/assets/images/posts/Zenn_Picture11.png) to add the first marker position. To add the other corners of the region, move the stage to another position on the sample and repeat the steps. The added region (TR1, TR2 – tile region 1, etc.) is displayed in the tile regions list. There you can see the shape of the region, the number of tiles and the size of your region, as well as the averaged Z-position of the tile region.
+
+![](/wiki/assets/images/posts/Zenn_Picture12.png)
+
+To ensure that the individual Z-positions of the tile regions are taken into account during acquisition, you need to use a focus strategy. If not, the software will use the current Z-position at the time the experiment is started for all the tile regions.
+
+Advanced Setup makes it easier for you to create a tile region with individual Z-positions by displaying the distribution and dimensions of tile regions. You need to generate a Preview Scan (you can use an objective with a lower magnification so it will be quicker – for advanced users). Click on 'Tiles', the 'Advanced Setup' button and 'start preview scan'. A series of snap images is acquired to generate a preview of the marked region, shown in yellow. Before you start the preview scan, make sure that the intensity is adjusted correctly, otherwise the image will be black.
+
+![](/wiki/assets/images/posts/Zenn_Picture13.png)
+
+Once the preview scan is finished, you need to acquire the tile region with different Z-positions. From the list of tile regions (TR1, etc.) select the one that you are interested in. Select the *'Support Points'* tab from the Tiles - Advanced Setup view options. Under *Distribute Support Points on Selected Tile Regions*, indicate the number of columns and rows for the distribution of the reference points and click distribute. These will be shown as yellow points in the stage view. You can adjust the distribution of the support points manually, as well as add individual support points by clicking on the *Add Support Points at Current Stage and Focus Position* button on the *'Support Points'* tab.
+
+![](/wiki/assets/images/posts/Zenn_Picture14.png)
+
+Distribute the support points across your tile region and adjust their number to the surface of your sample, but also keep in mind the purpose of your map. A high reference-point density leads to a more precise result, although the maximum useful density is one reference point per tile.
+
+Now you will verify the Z-positions of the support points. Click on the Verify Support Points button in the Focus Surface section of the Tiles tool. Click on the Move to Current Point button. Use the Live mode to set the Z-positions using the focus drive. Click Set Z & Move to Next on the first point, repeat for all your support points, and close. Click 'Start Experiment' to begin the scan.
+
+![](/wiki/assets/images/posts/Zenn_Picture15.png)
+
+When the experiment is finished, you need to stitch the individual images together, so the tiles are aligned and constitute a coherent map. The software uses a program to detect where pixels are misaligned in adjoining tiles and fuses them properly. Click the 'Processing' tab and then under 'Method' click 'Stitching'. Under the 'Input' tab choose the experiment you want to stitch and then Apply (at the top). Because of stitching, you might get uneven edges on the map, which you might want to crop out.
To do that, right-click on the scan you want to crop, click Create Subset from R.O.I. (region of interest) and crop the image out. Then right-click on the image again and click Create Subset from R.O.I. Before you save the image, you can adjust some settings to your liking on the histogram placed below the scan, and add a scale bar by clicking 'scale bar' in the top menu (marked in red in the above image). You can also change the properties of the scale bar itself by right-clicking on it and clicking Format Graphical Elements.
+
+![](/wiki/assets/images/posts/Zenn_Picture16.png)
+
+If you don't want to change anything more on the image you can now save it. You can save the experiment in CZI format, which will allow you to open it again in ZEN Pro. In order to do that, click File and save as CZI to your external storage. You can also save a JPEG image by clicking 'image export' under the 'Method' tab. Under the 'Parameters' tab select the destination folder for the export and change the image format to JPEG. Choose the input image and click Apply at the top. Check if your JPEG image appears in your destination folder. Remember to save all your images to your own USB stick, as the data on the University computer is regularly cleaned. The JPEG format compresses the image, so the resultant map is not of full resolution. In principle you should save the map in TIFF format, as it keeps the original resolution and is therefore appropriate for image analyses; however, there is often a problem when trying to save TIFF on this computer (this needs to be resolved).
+
+![](/wiki/assets/images/posts/Zenn_Picture17.png)
+
+If the software comes up with a memory issue during saving (this usually happens with big samples or areas scanned at high magnification, both resulting in a large map), click File, 'save as with options', change the file type to TIFF and then save. However, this means that you have a lower-quality image, as the software applies different compression in this case.
+
+Once you are done, lower the stage with the sample on it (to the maximum), go back to the lowest magnification and take your sample off the stage. Close the software, turn the microscope off, turn the power supply off and log off the PC.
+
diff --git a/collections/_tutorials/beta_reconstruction.md b/collections/_tutorials/beta_reconstruction.md
new file mode 100644
index 0000000..dd79173
--- /dev/null
+++ b/collections/_tutorials/beta_reconstruction.md
@@ -0,0 +1,39 @@
+---
+title: Beta reconstruction
+author: Christopher Daniel
+tags:
+  - EBSD
+subcollection: EBSD
+---
+
+# Beta reconstruction
+
+## Running reconstruction.exe
+
+The reconstruction.exe can be downloaded [here](https://github.com/LightForm-group/beta-reconstruction-archive/releases/tag/v1.1); this includes bug fixes of the source code to improve the file read speed and the size of maps that can be reconstructed.
+
+*Notes on cleaning data using Aztec Channel 5 or Aztec Crystal* - Before running the beta reconstruction using the executable (reconstruction.exe), the EBSD data will need to be cleaned. During cleaning, the indexed beta phase will need to be deleted, the alpha grains will need to be grown out so there are no non-indexed points, and the file will need to be exported in .ctf format. The step-by-step process is described in the `Tutorial for cleaning EBSD data`.
+
+Open the reconstruction.exe on any Windows computer.
+
+Click File, Import CTF to import the data. This will take a few minutes to load the data.
Once loaded, the screen will appear blank.
+
+Click File, New Map, Beta Reconstruction. This will load an image of the map.
+
+You can change the Min. Disor (minimum disorientation) and the Recon Dev. (reconstruction deviation) values in the cells, but the defaults of 2 and 3 degrees are usually fine for reconstruction in Ti alloys. These values set the maximum misorientation between orientations for them to be considered the same alpha variant, and the maximum misorientation from the Burgers relationship for the reconstruction to be accepted.
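+
+For reference, the Burgers orientation relationship between the bcc β phase and the hcp α phase, against which the reconstruction deviation is measured, can be written in its standard form as:
+
+$$\{110\}_{\beta} \parallel (0001)_{\alpha}, \qquad \langle 111 \rangle_{\beta} \parallel \langle 11\bar{2}0 \rangle_{\alpha}$$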
+
+Make sure you tick the Hide Progress (faster) cell, otherwise the reconstruction will take much longer to complete. Then click Reconstruct.
+
+When the reconstruction is complete, a new beta map will appear. Click Save Beta CTF to save it.
+
+The reconstructed .ctf file can then be loaded in Channel 5 by first importing the data - Project, Import, Channel Text File (\*.ctf).
+
+## Possible errors with reconstruction.exe and how to solve them
+
+## Beta reconstruction in Python
+
+A python package to run the beta reconstruction is also available [here](https://github.com/LightForm-group/beta-reconstruction), which currently uses the existing functions of [DefDap](https://github.com/MechMicroMan/DefDAP). This python package has a number of advantages compared with the executable - it is a lot faster, the statistics for the grain reconstruction can be checked, and the results can be weighted by particular factors, such as any indexed beta orientations recorded at room temperature.
+
+## Beta reconstruction in MTEX
+
+A beta reconstruction feature is also available in the latest version of MTEX, as explained in the documentation - [Parent Beta Phase Reconstruction in Titanium Alloys](https://mtex-toolbox.github.io/TiBetaReconstruction.html).
diff --git a/collections/_tutorials/cleaning_EBSD_data.md b/collections/_tutorials/cleaning_EBSD_data.md
new file mode 100644
index 0000000..f63e08b
--- /dev/null
+++ b/collections/_tutorials/cleaning_EBSD_data.md
@@ -0,0 +1,106 @@
+---
+title: Cleaning EBSD data
+author: Christopher Daniel
+tags:
+  - EBSD
+subcollection: EBSD
+---
+
+# Tutorial for cleaning EBSD data
+
+## Why do we need to clean EBSD data? [Chris]
+
+- Producing nice figures
+- Beta reconstruction
+- Reproduction of features (twins, precipitate phases, misindexed orientations, etc.)
+
+## Important examples of cleaned data
+
+Cast Mg alloy - soft orientated grains deform heavily during preparation and do not index.
+
+![](/wiki/assets/images/EBSD_misindexing_1.png)
+
+To fix (see magnified image): you can use consistently indexed points to assign an assumed true orientation to the misindexed grains. Using software such as CHANNEL5, copy and paste the trusted orientation around the grain edge, then 'grow out' the orientations to fill in the unindexed grains.
+
+Alternatively, re-prepare the sample with less/no deformation using less physically aggressive polishing methods, such as vibration polishing or electropolishing.
+
+Cold rolled Mg alloy - deformation twins too small to index during EBSD due to overlapping EBSD patterns.
+
+![](/wiki/assets/images/EBSD_misindexing_2.png)
+
+Unfortunately, there is no easy way to fix this. A lower current/smaller electron beam spot size may be able to index these twins, but the scan would run too slowly for this to be feasible. Instead, transmission Kikuchi diffraction (TKD) can be used, since it has a much higher resolution than EBSD. (Even higher resolution mapping can be obtained using scanning precession electron diffraction in the TEM.)
+
+Ti-6Al-4V / commercially pure titanium additively manufactured alloy-alloy composite.
+
+![](/wiki/assets/images/EBSD_misindexing_3.png)
+
+The soft CP Ti does not scan well due to deformation introduced during polishing, but the (relatively hard) Ti64 scans well, producing the regions of unindexed/misindexed points. This map of the Ti alpha phase was then reconstructed to the beta phase using the UoS beta reconstruction software. The unindexed and misindexed alpha data points are carried through the beta reconstruction and reproduce the regions of CP Ti in the final EBSD map (shown above on the left). In the magnified insert, the misindexed points can be seen. These cannot simply be 'grown out' to clean the map and must be removed manually using software like CHANNEL5 - the true beta grains can be seen clearly on either side of the misindexed regions, so the grain boundaries can be outlined using copy and paste of trusted orientations, and the grains can then be grown out to produce a good estimate of the true beta microstructure. For more information, please see:
+
+https://doi.org/10.1016/j.msea.2019.138289
+Davis et al. Mat. Sci. Eng. A 765 (2019), 1 - 16.
+
+## Different cleaning algorithms [Nick]
+
+- Removing wild spikes
+- Nearest neighbour
+- Kuwahara
+
+## Cleaning data using Aztec Channel 5 for beta reconstruction
+
+Open up the Aztec Channel 5 Project Manager - this manages the Tango (map), Mambo (pole figure) and Salsa (ODF) packages. Load the Channel 5 `.cpr` project file, which allows Channel 5 to read the binary `.crc` data files, by clicking on the Project and then Open... tabs.
+
+Click on Statistics at the bottom of the window, select the Titanium Cubic phase, and click the Remove Phase from Project tab. This will remove all of the beta phase from the data. *Note, do not save this project as the original .cpr file, as this will permanently delete any indexed beta phase.*
+
+Drag the project to the Tango icon to open up the map. At this point it can be useful to plot an IPF map to view the orientations, by clicking on the Create New Map icon.
+
+Click on the Perform Noise Reduction icon to clean the map. Clicking on Wild Spikes: Extrapolate will remove any single indexed points that are not surrounded by similar orientations. Depending on the resolution of the data, it may be necessary to remove all wild spikes, but doing so can also detrimentally affect the beta reconstruction; our group tends to avoid removing wild spikes.
+
+Use the standard nearest neighbour extrapolation to grow the alpha grains. Start with low level (8 nearest neighbour) extrapolation and make sure the iterate box is ticked. This will perform noise reduction and show the remaining fraction of non-indexed points. Allow a refinement to finish, then reduce the level to 7 nearest neighbours, then to 6, etc., until the remaining non-indexed points are exactly 0%. In cases where the map has a border, because the sample was smaller than the scan area, you may need to re-click on Extrapolate at 1 nearest neighbour to grow out the edges. Then click Apply to apply this refinement to the data; this will change the map.
+
+To save the data, click on Project, Export, as Channel Text File (\*.ctf).
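+
+The exported .ctf file is just tab-separated text, so it is easy to sanity-check before running the reconstruction. A minimal Python sketch (the file name here is hypothetical, and the header length varies between files, so the column-header row is located first);
+
+```python
+import pandas as pd
+
+ctf_path = "cleaned_map.ctf"  # hypothetical exported file name
+
+# the .ctf header length varies, so find the row of column names first
+with open(ctf_path) as ctf_file:
+    header_row = next(i for i, line in enumerate(ctf_file)
+                      if line.startswith("Phase"))
+
+# one row per map pixel: Phase, X, Y, Euler angles, etc.
+data = pd.read_csv(ctf_path, sep="\t", skiprows=header_row)
+
+# phase 0 is non-indexed, so this should be 0% after cleaning
+print(f"{(data['Phase'] == 0).mean():.1%} non-indexed points")
+```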
+
+*Note, if the data is too big to load into the beta reconstruction software, or if there is only a small region of the map you are interested in, you can use the subset selection tool, select a rectangle, and crop the area.*
+
+## Cleaning data using Aztec Crystal for beta reconstruction
+
+Open AZtec Crystal and load your EBSD map.
+
+Select the ‘Clean up’ tab:
+
+![](/wiki/assets/images/Crystal_cleaning_tutorial_1.png)
+
+On the right side you will find the ‘Settings’ tab for clean up:
+
+![](/wiki/assets/images/Crystal_cleaning_tutorial_2.png)
+
+Select the phases and dataset you wish to clean up. The ‘Exclude Voids’ tab will identify features such as sample edges and cracks and avoid those during clean up. How Crystal identifies these features is not trivial – please see the help file for more information. Finally, ‘Auto-Clean Up’ is self-explanatory – it is not recommended. Click this button at your own peril.
+
+Below this section is the ‘Wild Spike Removal’ tab:
+
+![](/wiki/assets/images/Crystal_cleaning_tutorial_3.png)
+
+The help file is particularly useful regarding this feature:
+
+![](/wiki/assets/images/Crystal_cleaning_tutorial_4.png)
+
+The next step is ‘Zero Solution Removal’:
+
+![](/wiki/assets/images/Crystal_cleaning_tutorial_5.png)
+
+The ‘level of cleaning’ is scaled from 1 – the least aggressive – to 8 – the most aggressive. Level 1 requires 8 indexed pixels surrounding a non-indexed point to ‘fill’ it in, while level 8 requires only 1 neighbouring pixel to fill in an un-indexed point. Selecting the ‘Iterate’ function simply repeats the zero solution removal until there are no points left to fill that fulfil your selected level criteria, unless you input a desired number of iterations into the ‘max’ field.
+
+A good place to start with this function is to run level 1 until no more points can be filled, then run level 2, then level 3, and so on. However, care must be taken not to skew EBSD maps and data to the point where they are unrepresentative of your material.
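+
+To make the level setting concrete, the sketch below implements one pass of this kind of nearest-neighbour fill on a toy boolean map (an illustration of the idea only, not Crystal's actual algorithm);
+
+```python
+import numpy as np
+
+def zero_solution_pass(indexed, level):
+    """One fill pass: level 1 needs 8 indexed neighbours, level 8 needs just 1."""
+    required = 9 - level
+    padded = np.pad(indexed, 1, constant_values=False)
+    # count the 8 neighbours of every pixel (the False padding handles edges)
+    neighbours = sum(np.roll(np.roll(padded, dy, axis=0), dx, axis=1)
+                     for dy in (-1, 0, 1) for dx in (-1, 0, 1)
+                     if (dy, dx) != (0, 0))[1:-1, 1:-1]
+    # fill any non-indexed pixel that has enough indexed neighbours
+    return indexed | (neighbours >= required)
+
+# toy 4x4 map with one non-indexed point, filled at the least aggressive level
+indexed = np.ones((4, 4), dtype=bool)
+indexed[1, 2] = False
+print(zero_solution_pass(indexed, level=1))
+```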
+
+Each cleaning step you select in Crystal will be conveniently listed at the bottom of the cleaning tab; e.g.
+
+![](/wiki/assets/images/Crystal_cleaning_tutorial_6.png)
+
+The number of non-indexed points remaining after each step is listed in parentheses, making it easy to delete unnecessary steps. Unfortunately, at the time of writing, Crystal cannot store a particular cleaning ‘routine’ for use on other maps.
+
+Once a cleaning routine has been selected, it can be applied at the top of the tab using the ‘Apply’ button:
+
+![](/wiki/assets/images/Crystal_cleaning_tutorial_7.png)
+
+Note that there is also a ‘Restore’ option, allowing you to return to your original data if you are unhappy with the cleaning results.
+
+## Cleaning data using MTEX [Nick]
diff --git a/collections/_tutorials/dilatometer_hot.md b/collections/_tutorials/dilatometer_hot.md
deleted file mode 100644
index e4eeb03..0000000
--- a/collections/_tutorials/dilatometer_hot.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-title: Tutorial for dilatometer hot-compression tests
-author: Adam Plowman
----
-
-Example tutorial.
diff --git a/collections/_tutorials/hydra wedge_psc.md b/collections/_tutorials/hydra wedge_psc.md
new file mode 100644
index 0000000..f3b4428
--- /dev/null
+++ b/collections/_tutorials/hydra wedge_psc.md
@@ -0,0 +1,53 @@
+---
+title: Hydrawedge plane strain compression tests
+author: Muzamul Nawaz, Christopher Daniel
+tags:
+  - Compression
+subcollection: Compression Testing
+---
+
+# Tutorial for Hydrawedge plane strain compression tests
+
+## Setting up the Gleeble Hydrawedge
+### Turning on the Gleeble Hydrawedge
+1) First, go to the controller of the chiller/cooler. It should read “Stby”. Press and hold down the snowflake button until it displays the temperatures.
+
+2) Pull down the lever on the mains switch for the Gleeble to turn it on.
+
+3) Go over to the mains switch for the hydraulic system and pull the lever down to also turn it on. Then take the padlock off the on/off switch for the hydraulics.
+
+4) Head over to the Gleeble power unit and hold down the white power switch until it turns on.
+
+5) Wait for all the configurations to load, check the thermocouple readout is set to ‘K’ and press Accept.
+
+6) Hit the blue reset button.
+
+7) Press ‘OpnDr’ and lift the door of the unit. Then close it and check that the icon in the top right corner turns from a green unlocked padlock to a red locked padlock.
+
+8) Switch on the PC and make sure it is connected to the Gleeble.
+
+### Setting up the Gleeble Hydrawedge for experiments
+1) Take measurements of the sample.
+
+2) Weld thermocouples to the sample using the spot welder, getting as close to the centre as possible.
+
+3) Insert the sample into the Gleeble Hydrawedge with graphite paper or tantalum stuck to the anvils using nickel paste. Make sure the green wire goes into the red thermocouple reader and the white wire into the white thermocouple reader.
+
+4) Push the autoloader forward so it clicks into place.
+
+5) Turn on the hydraulics and turn the mechanical system on. Move the stroke piston into full compression (0.00 mm), resulting in the anvil touching the specimen in the autoloader.
+
+6) Turn AirRam on and AirRamComp on, and adjust the Air Ram pressure using the yellow knob on the Gleeble system. It should be between 0.5 and 1 kN for small plane strain samples.
+
+7) Move the wedge as close to the sample as possible without touching it.
+
+8) Turn off the mechanical system and AirRam, but keep AirRamComp on.
+
+9) Close the door of the chamber and turn VacOn on, making sure to close the lever on the chamber.
+
+10) If you want to use HighVac, follow step 9 and wait for the chamber reading to go down to 7×10^-1 Torr. Once this happens, turn on HighVac (this will lead to VacOn turning off). Wait for the ChamberRead to show “VacStandby”. Once this is shown, turn VacOn back on.
+
+11) Open up QuikSim and enter values to create a script. Once the script has been created, run the script and start the test.
diff --git a/collections/_tutorials/sample_preparation.md b/collections/_tutorials/sample_preparation.md
new file mode 100644
index 0000000..7b4ced2
--- /dev/null
+++ b/collections/_tutorials/sample_preparation.md
@@ -0,0 +1,234 @@
+---
+title: EBSD Sample preparation
+author: Bernadeta Karnasiewicz and Alec E Davis
+tags:
+  - EBSD
+subcollection: EBSD
+---
+
+# Metallurgical preparation of Ti6Al4V specimens for EBSD examination (B12 lab)
+
+First, the specimen needs to be mounted in resin using the hot compression mounting machine.
If you are preparing the sample for EBSD examination, remember that if your sample is not flat on both sides, it will be very difficult to obtain an EBSD map from it, regardless of how faultless the preparation is.
+
+Turn the Struers TetraPol grinder on and turn the water on at the main tap. For grinding we use SiC abrasive papers, starting from 180, 320, 600, 800, 1200 and 2400, and ending at 4000 grade, under flowing water with a sliding speed between 200 and 300 rpm. Regarding the details of the grinding technique, people often have different methods for the force and time that they grind for; these vary from a couple of seconds to 10 minutes on each paper, with the manual force adjusted to the size and material state of the specimen (for example, CP Ti is softer and more prone to scratches than Ti alloys, and a Ti64 specimen in the deformed state will be more difficult to prepare than one that has been cast or annealed at β-phase temperatures). The rule of thumb is that after grinding on each paper you should not see any scratches from the previous paper (by eye). You should use *User Defined* mode (you can control the speed and the water supply).
+
+![](/wiki/assets/images/posts/prep_Picture1.jpg)
+
+When the machine is set, press the green button (by pressing the red one, you can always stop the grinder). Place the specimen on the disc, but not in the centre. Hold your specimen so that your hands are aligned with the net force on the disc but counteract it (left image). You can also use the purpose-designed tool to keep the sample on the rotating disc (right image).
+
+![](/wiki/assets/images/posts/prep_Picture2.jpg)
+![](/wiki/assets/images/posts/prep_Picture3.jpg)
+
+After grinding, chemical-mechanical polishing with a mixture of colloidal silica (OP-S) and hydrogen peroxide should be performed on a polishing cloth, in the proportions 4 parts OP-S to 1 part hydrogen peroxide. Put on gloves. Mix the liquids in a small container, install the polishing cloth on the magnetic disc, turn the water off, adjust the speed and press the green button. Pour some of the mixture onto the cloth and place your sample on the rotating disc in the same way as for grinding. Adjust the force and the polishing time to the material state (cast, deformed, hot annealed, etc.) and size of your sample. As with grinding, people often have different methods for the force and time that they apply for polishing too; these vary from 2-5 minutes on OP-S with a “hard” force to 20 minutes of “light” pressure. When you are done, apply ethanol and dry your sample. If you see any kind of residual smear from the OP-S mixture, you can try to remove it by turning the water on and, while the remaining OP-S mixture is still on the cloth, lightly pressing the sample on the rotating disc.
+
+![](/wiki/assets/images/posts/prep_Picture4.jpg)
+![](/wiki/assets/images/posts/prep_Picture5.jpg)
+
+Titanium and titanium alloys should normally look very clean after polishing, so you should not see any dirt on the surface. Due to the hexagonal crystal lattice of the α phase, the microstructural details in titanium can be seen using cross-polarized light on a light optical microscope, without etching. This can be used as a rapid and easy assessment of the polish quality. Remember that highly deformed materials are in general more difficult to prepare and exhibit a poorer response to cross-polarized light (lack of sharp detail and poor contrast).
+
+Once you are satisfied with the surface quality of your sample, you need to break the sample out of the resin using a handsaw (make 4 cuts around your sample), a glove and a vice. Be careful not to scratch the surface of your sample. Adhere the sample to the SEM holder using the smallest amount of super glue. Clean with a stream of ethanol and dry thoroughly using the dryer on the wall - this is important as it helps keep the vacuum chambers of our SEMs in good condition. Coat your sample and sample holder with silver paint to minimise charging during EBSD examination (give it enough time to dry before continuing with EBSD).
+
+![](/wiki/assets/images/posts/prep_Picture6.jpg)
+
+# Light Alloy Sample Preparation for Optical and Electron Microscopy
+
+### Disclaimer: these preparation guides are a recommended starting point for your preparation routines – successful parameters will vary depending on your alloy composition and thermomechanical processing.
+
+# 1. Mg
+
+Sample mounting: for EBSD, avoid resin if possible – mechanical (e.g. sawing) removal can cause scratching and distortion of the sample. For cold-mount resin, if sample precipitation/heat treatment is not important, the sample can be removed from the resin by heating to ~150°C, when the resin becomes soft. Brass/aluminium holders are recommended. Conductive resin can be successful, but this depends on what voltage/beam/EBSD scanning parameters you use.
+
+## 1.1. Grinding and Polishing
+
+### 1.1.1. Grinding with SiC Paper and Water
+
+-180/320 grit to shape the sample.
+
+-Can then jump straight to 4000 grit (make sure all remaining scratches are fine).
+
+-Clean with ethanol and dry with an air dryer immediately.
+
+### 1.1.2. Polishing with Diamond Suspension & Fumed Silica Particles
+
+-NO WATER.
+
+-Use the dedicated 3μm diamond paste pad – top up with 3μm diamond suspension spray when necessary. Use a few drops of oil-based lubricant for each sample. Usually 2 minutes is enough time.
+
+-Clean with ethanol in an ultrasonic bath. Dry immediately with an air dryer.
+
+-Use the dedicated 1μm diamond paste pad – top up with 1μm diamond suspension spray when necessary. Use a few drops of oil-based lubricant for each sample. Usually 2 minutes is enough time.
+
+-Clean with ethanol in an ultrasonic bath. Dry immediately with an air dryer.
+
+-Use the dedicated final polishing step pad – clean it twice by dousing the pad with ethanol and running a Mg/Al rounded scraper from centre to edge while the pad is spinning, cleaning the scraper with tissue paper between each scrape. Pour a small amount of fumed silica polishing solution (~0.25 μm) onto the pad where you intend to polish your sample – there should not be any excess. 4 minutes should be enough polishing time. Repeat this step for every sample.
+
+-Clean in an ultrasonic bath with fresh ethanol mixed with ethylene glycol in a ~10:1 ratio. Give the sample a final clean with ethanol and dry immediately with an air dryer.
+
+Note: the fumed silica can be difficult to remove. It does not need to be removed if followed by etching/broad ion beam polishing. If this is not the case, dedicate the outer rim of the polishing pad to this cleaning by not applying the fumed silica in this region, then douse constantly with ethanol and gently polish the sample surface for a few seconds.
+
+## 1.2. EBSD Preparation Steps
+
+Polishing Mg often creates a thin deformed top layer on the sample, which can significantly reduce the intensity/contrast of EBSD patterns, leading to low indexing. This needs to be removed.
+### 1.2.1. Manual Electropolishing (electrochemical material removal)
+
+-Place the electrolyte in the centre beaker and stir with a magnetic stirrer.
+
+-If reduced temperature operation is required, pour liquid nitrogen into the outer beaker.
+
+-The positive current wire from the DC power source attaches to the tweezers, and the negative current wire to a conductive plate placed in the centre beaker.
+
+-For material removal, use the tweezers to submerge the sample in the electrolyte.
+
+-To ensure the sample surface is electropolished as much as possible, reduce the depth of the sample in the electrolyte (otherwise electropolishing focuses on the sample sides) and submerge as little of the tweezers as possible in the electrolyte.
+
+-After removal from the electrolyte, transfer to ethanol as quickly as possible. Once clean, dry immediately with an air dryer.
+
+-Electrolyte composition and temperature, and electropolishing time, are alloy/heat treatment dependent. A good place to start is a 30% nitric acid concentration in methanol (methanol cools much quicker than ethanol) at -40°C (this must be kept below -20°C at all times once mixed, for safety), electropolishing at 9V for 3s at a time and checking the sample surface. Note: add the nitric acid to the methanol at -40°C a little at a time, as the reaction is exothermic and the solution heats up quickly.
+
+-Once electropolished, you will not be able to tell by eye whether the sample will scan by EBSD, so you will have to experiment and test. Note: sometimes a slightly corroded sample surface will still give good EBSD patterns. The trick is to balance the material removal and corrosion rates – if the sample is too corroded, use a lower temperature or less acid concentration to slow the corrosion rate.
+
+### 1.2.2. Broad Ion Beam (BIB) Preparation Steps
+
+-Ensure your sample will fit in the BIB holder before you start preparation.
+
+-Again, the preparation parameters are material dependent, but a good place to start is: 1) 7kV, 12.5° beam angle, 15 minutes polishing; 2) 7kV, 5° beam angle, 15 minutes polishing.
+
+## 1.3. TEM 3mm Disc Grinding and Electropolishing
+
+-Grind the material to a foil ~80-150μm thick (thinner is usually better).
+
+-Twin-jet polish using an electrolyte of 10% nitric acid in methanol, at -40°C with a voltage of 16V. Place the sample under vacuum as soon as possible.
+
+-Corrosion product is common and sometimes unavoidable after twin-jet polishing, so follow with a precision ion polishing system (PIPS) using a decreasing voltage of 1 – 0.1kV and an angle of 5-1° (depending on the starting disc thickness). This can take up to 24 hours to obtain sufficient clean, electron-transparent material. Place the sample under vacuum as soon as possible.
+
+-PIPS can also be used to thin the disc from the start without the use of electropolishing (using 3kV or higher), but this is time consuming and not recommended.
+
+# 2. Al
+
+Sample mounting: for EBSD, avoid resin if possible – mechanical (e.g. sawing) removal can cause scratching and distortion of the sample. For cold-mount resin, if sample precipitation/heat treatment is not important, the sample can be removed from the resin by heating to ~150°C, when the resin becomes soft. Brass/aluminium holders are recommended. Conductive resin can be successful, but this depends on what voltage/beam/EBSD scanning parameters you use.
+
+## 2.1. Grinding and Polishing
+
+### 2.1.1. Grinding with SiC Paper and Water
+
+-180/320 grit to shape the sample.
+
+-Can then jump straight to 4000 grit (make sure all remaining scratches are fine).
+
+-Clean with ethanol/methanol and dry with an air dryer immediately.
+### 2.1.2. Polishing with Diamond Suspension & OPS
+
+-NO WATER for corrosion-sensitive alloys (e.g. high-Zn 7xxx series alloys).
+
+-Use the dedicated 3μm diamond paste pad – top up with 3μm diamond suspension spray when necessary. Use a few drops of water/oil-based lubricant for each sample. Usually 2 minutes is enough time.
+
+-Clean with ethanol in an ultrasonic bath. Dry immediately with an air dryer.
+
+-Use the dedicated 1μm diamond paste pad – top up with 1μm diamond suspension spray when necessary. Use a few drops of water/oil-based lubricant for each sample. Usually 2 minutes is enough time.
+
+-Clean with ethanol in an ultrasonic bath. Dry immediately with an air dryer.
+
+-Use the dedicated final polishing step pad – clean it twice by dousing the pad with ethanol/methanol and running a Mg/Al rounded scraper from centre to edge while the pad is spinning, cleaning the scraper with tissue paper between each scrape. Pour a small amount of neat OPS (~0.25μm) onto the pad where you intend to polish your sample – there should not be any excess. 1-2 minutes should be enough polishing time. If polishing a corrosion-sensitive sample, reduce the polishing time as much as possible, since even neat OPS contains water. Repeat this step for every sample. Note: fine scratches can be removed with the BIB.
+
+-Clean with fresh ethanol in an ultrasonic bath. Give the sample a final clean with ethanol and dry immediately with an air dryer.
+
+## 2.2. EBSD Preparation Steps
+
+Polishing Al often creates a thin deformed top layer on the sample, which can significantly reduce the intensity/contrast of EBSD patterns, leading to low indexing. This needs to be removed.
+
+### 2.2.1. Manual Electropolishing
+
+– see Mg (§1.2.1.) for the setup.
+
+-Electrolyte composition and temperature, and electropolishing time, are alloy/heat treatment dependent. A good place to start is a 30% nitric acid concentration in methanol (methanol cools much quicker than ethanol) at -40°C (this must be kept below -20°C at all times once mixed, for safety), electropolishing at 9V for 3s at a time and checking the sample surface. Note: add the nitric acid to the methanol at -40°C a little at a time, as the reaction is exothermic and the solution heats up quickly.
+
+-Once electropolished, you will not be able to tell by eye whether the sample will scan by EBSD, so you will have to experiment and test. Note: sometimes a slightly corroded sample surface will still give good EBSD patterns. The trick is to balance the material removal and corrosion rates – if the sample is too corroded, use a lower temperature or less acid concentration to slow the corrosion rate.
+
+### 2.2.2. BIB Preparation Steps
+
+-Ensure your sample will fit in the BIB holder before you start preparation.
+
+-Again, the preparation parameters are material dependent, but a good place to start is: 1) 7kV, 12.5° beam angle, 15 minutes polishing; 2) 7kV, 5° beam angle, 15 minutes polishing.
+
+## 2.3. TEM 3mm Disc Grinding and Electropolishing
+
+-Grind the material to a foil ~80-150μm thick (thinner is usually better).
+
+-Twin-jet polish using an electrolyte of 30% nitric acid in methanol, at -40°C with a voltage of 12V. The trick is to balance the material removal and corrosion rates – if the sample is too corroded, use a lower temperature or less acid concentration to slow the corrosion rate. Place the sample under vacuum as soon as possible.
+
+-PIPS can also be used to thin the disc from the start without the use of electropolishing (using 3kV or higher), but this is time consuming and not recommended.
+
+# 3. Ti
+
+Sample mounting: for EBSD, avoid resin when putting the sample in the SEM, but samples can be prepared in it, as breaking them out rarely damages the sample surface. Do not use cold-mount resin – it is too soft and causes rounded sample surfaces. Conductive resin can be successful, but this depends on what voltage/beam/EBSD scanning parameters you use.
+
+## 3.1. Grinding and Polishing
+### 3.1.1. Grinding with SiC Paper and Water
+
+-180/320 grit to shape the sample.
+
+-Grind at 600, 1200 and 4000 grit for about 30s each, making sure all remaining scratches are approximately the same width.
+
+-Clean with ethanol/methanol and air dry immediately.
+
+### 3.1.2. Polishing with OPS
+
+#### 3.1.2.1. Harder alloys (~350HV or higher, including (100%) basketweave microstructure Ti-6Al-4V and other α+β alloys, and aged β-stabilised alloys)
+
+-Use the dedicated final polishing step pad – clean it twice by dousing the pad with water and running a Mg/Al rounded scraper from centre to edge while the pad is spinning, cleaning the scraper with tissue paper between each scrape. Clean again in this manner with ethanol/methanol. Pour a small amount of a water:OPS:hydrogen peroxide, 4:1:1 solution onto the pad where you intend to polish your sample – there should not be any excess. Push hard on your sample for 1-2 minutes, then push lightly for ~30s.
+
+-OPS is tough to remove completely from Ti. Run the sample under hot water then, with a clean gloved finger, gently rub standard dish washing soap (e.g. Fairy Liquid) onto the surface. Run under the hot tap again while continuing to rub the surface gently, then clean with ethanol/methanol and air dry.
+
+#### 3.1.2.2. Softer alloys (~350HV or lower, including coarse, single-variant colony and bimodal microstructure Ti-6Al-4V and other α+β alloys, α alloys, and CP Ti)
+
+-Use the dedicated final polishing step pad – clean it twice by dousing the pad with water and running a Mg/Al rounded scraper from centre to edge while the pad is spinning, cleaning the scraper with tissue paper between each scrape. Clean again in this manner with ethanol/methanol. Pour a small amount of a water:OPS:hydrogen peroxide, 4:1:1 solution onto the pad where you intend to polish your sample – there should not be any excess. Push softly on your sample for 15 minutes.
+
+-OPS is tough to remove completely from Ti. Run the sample under hot water then, with a clean gloved finger, gently rub standard dish washing soap (e.g. Fairy Liquid) onto the surface. Run under the hot tap again while continuing to rub the surface gently, then clean with ethanol/methanol and air dry.
+
+### 3.1.3. Alternative Polishing Methods
+
+- For EBSD of bi-modal Ti64, the grinding process is as described above; however, the polishing process used to achieve good indexing in this instance is quite different to that previously described. Instead of a water:OPS:hydrogen peroxide, 4:1:1 solution softly polished for 15 minutes, I have found that better indexing is achieved for the Ti64 fan blade samples investigated when using a solution of 4:1 hydrogen peroxide:OPS for at least 25 minutes. The polishing step is immediately followed by a 5-minute period of polishing under running water, in order to prevent OPS from sticking to the surface. After polishing under water, the same cleaning process as previously described in 3.1.2.2 is conducted.
+
+- For EBSD of fully lamellar Ti64, a polishing solution of hydrogen peroxide and OPS in a 1:1 ratio is initially used for 5 minutes; the sample surface is then cleaned as previously described in section 3.1.2.2, before polishing is continued using a polishing solution of hydrogen peroxide:OPS:water in a 1:1:4 ratio until there are no visible scratches or defects in dark-field imaging on an optical microscope.
+
+## 3.2. EBSD Preparation Steps
+
+For soft Ti alloys, polishing often creates a thin deformed top layer on the sample, which can significantly reduce the intensity/contrast of EBSD patterns, leading to low indexing. This needs to be removed.
+
+### 3.2.1. Manual Electropolishing
+
+– see Mg (§1.2.1.) for the setup.
+
+-Electrolyte composition and temperature, and electropolishing time, are alloy/heat treatment dependent. A good place to start is a 5% perchloric acid concentration in methanol at -40°C, electropolishing at 12V for 3s at a time and checking the sample surface. Note: add the perchloric acid to the methanol at -40°C a little at a time, as the reaction is exothermic and the solution heats up quickly.
+
+-Once electropolished, you will not be able to tell by eye whether the sample will scan by EBSD, so you will have to experiment and test. Note: sometimes a slightly corroded sample surface will still give good EBSD patterns. The trick is to balance the material removal and corrosion rates – if the sample is too corroded, use a lower temperature or less acid concentration to slow the corrosion rate.
+
+### 3.2.2. BIB Preparation Steps
+
+-Ensure your sample will fit in the BIB holder before you start preparation.
+
+-Again, the preparation parameters are material dependent, but a good place to start is: 1) 7kV, 12.5° beam angle, 30 minutes polishing; 2) 7kV, 5° beam angle, 30 minutes polishing.
+
+### 3.2.3. TEM 3mm Disc Grinding and Electropolishing
+
+-Grind the material to a foil ~80-150μm thick (thinner is usually better).
+
+-Twin-jet polish using an electrolyte of 5% perchloric acid in methanol, at -40°C with a voltage of 17V. The trick is to balance the material removal and corrosion rates – if the sample is too corroded, use a lower temperature or less acid concentration to slow the corrosion rate. Place the sample under vacuum as soon as possible.
+
diff --git a/collections/_tutorials/start-notebook.sh b/collections/_tutorials/start-notebook.sh
new file mode 100644
index 0000000..59880a0
--- /dev/null
+++ b/collections/_tutorials/start-notebook.sh
@@ -0,0 +1,123 @@
+#!/bin/bash --login
+
+# ---------------------------------------------------------------------------
+# This script should be called by the jupyter-notebook-icsf script
+# from nyx5,6,7.
+#
+# Load the anaconda modulefile and start a Jupyter Notebook server
+# without popping up a browser. It will listen on the port supplied
+# as the first arg to this script (the port num is required!)
+#
+# Jupyter is the new name for iPython (the language-agnostic part).
+#
+# Given we will be called from another host (nyx3 mainly) via ssh,
+# we need to trap SIGINT and send the Jupyter server two
+# CTRL+C's (SIGINTs) to shut down. There is no nice way to shut down
+# a Jupyter server other than with two CTRL+C's.
+#
+# Arg2 is optional - you can supply the version number of Anaconda Python
+# to load. This should be the version number used by the modulefiles
+# on iCSF. For example: 1.9.2 or 3/2.3.0
+# If not supplied, the default (latest) modulefile will be loaded.
+#
+# We now also set an env var JUPYTER_EXAMPLE pointing to the example_job dir
+#
+# george.leaver@manchester.ac.uk, RI Team, Mar 2018
+# ---------------------------------------------------------------------------
+
+# This will be called when the script receives an interrupt
+# from the ssh call that launched us.
+function finish {
+  if [ -n "$IPID" ]; then
+    # Give the Jupyter server two CTRL+C's to shutdown.
+    # This seems to have stopped working with python3
+    #kill -s SIGINT $IPID 2>/dev/null
+    #kill -s SIGINT $IPID 2>/dev/null
+    kill -s TERM $IPID 2>/dev/null
+    if [ "$?" -eq "0" ]; then
+      wait $IPID
+      echo ""
+      echo "Terminated Jupyter Notebook on $HOSTNAME (PID $IPID)."
+      echo "If the command-prompt does not appear, press Ctrl+C again."
+      echo ""
+      exit
+    fi
+  fi
+}
+# Note: SIGKILL cannot be trapped, so it is not listed here.
+trap finish EXIT SIGINT SIGTERM SIGHUP
+
+if [ $# -lt 1 ]; then
+  echo First arg must be a port number
+  exit 1;
+fi
+
+# Optional second arg (Anaconda version). Use -ge so the version is still
+# picked up when a third (venv) arg is also supplied.
+if [ $# -ge 2 ]; then
+  CONDAVER=$2
+fi
+
+# Optional virtual environment to activate
+if [ $# -ge 3 ]; then
+  VENV=$3
+fi
+
+# Report backend iCSF node
+echo "Setting up Jupyter Notebook on iCSF node `hostname -s` ..."
+echo ""
+
+# Load the given version of anaconda python (gives us Jupyter)
+if [ -n "$CONDAVER" ]; then
+  echo "Starting Jupyter Notebook server (in Anaconda Python $CONDAVER) on $HOSTNAME ..."
+
+  if [ `echo $CONDAVER | grep -c "/"` -eq 1 ]; then
+    module load apps/binapps/anaconda$CONDAVER
+  else
+    module load apps/binapps/anaconda/$CONDAVER
+  fi
+  MODCOUNT=`module list 2>&1 | grep -c $CONDAVER`
+  if [ $MODCOUNT -eq 0 ]; then
+    echo "Unable to load Anaconda version $CONDAVER. Using latest installed version..."
+    module load apps/binapps/anaconda3/2019.07
+  fi
+else
+  echo "Starting Jupyter Notebook server (in Anaconda Python) on $HOSTNAME ..."
+
+  module load apps/binapps/anaconda3/2019.07
+fi
+
+if [ ! -z $VENV ]; then
+  if source $VENV/bin/activate; then
+    echo "Loaded virtual environment $VENV"
+  else
+    echo "venv '$VENV' not found. Did not load venv."
+  fi
+fi
+
+# Load the latest version of R so that %Rmagic commands work
+module load apps/gcc/R
+module load tools/env/proxy
+
+# Set env vars we can use in online docs to locate an example script
+export JUPYTER_EXAMPLE=/opt/gridware/scripts/example_job
+export JUPYTER_HOME=/opt/gridware/scripts
+
+# Start the server.
+
+# Mar 2018 - I'm commenting this out and will force users on to incline256 (el7) nodes.
+
+# Aug-2017. Looks like libreadline.so.6.2 is broken in anaconda (again).
+# Use the system-wide version (we've needed this trick before with rpy2.)
+# env LD_PRELOAD=/lib64/libreadline.so.6.0 jupyter notebook --no-browser --port $1 &
+
+python -m jupyter notebook --no-browser --port $1 &
+IPID=$!
+
+# Output a reminder about shutting down after the server starts
+sleep 8
+echo ""
+echo "======= ** IMPORTANT - YOU MUST DO THIS WHEN FINISHED WITH Jupyter ** ======="
+echo " Please use CTRL-C in this window to terminate Jupyter Notebook on $HOSTNAME"
+echo " (PID $IPID) ..."
+echo ""
+# Wait for the server to finish
+wait $IPID
diff --git a/collections/_tutorials/sxrd-caking-dioptas.md b/collections/_tutorials/sxrd-caking-dioptas.md
new file mode 100644
index 0000000..185d0f7
--- /dev/null
+++ b/collections/_tutorials/sxrd-caking-dioptas.md
@@ -0,0 +1,185 @@
+---
+title: Caking SXRD diffraction pattern images using Dioptas and PyFAI
+author: Christopher Daniel
+subcollection: SXRD
+---
+
+## Caking diffraction pattern images using Dioptas and pyFAI on the iCSF
+
+### iCSF
+
+You will first need to contact IT services to get set up with an iCSF account. The iCSF ([interactive Computational Shared Facility](http://ri.itservices.manchester.ac.uk/icsf/)) gives us access to the University's high performance computing environment. This allows us to store and analyse our large Synchrotron X-ray Diffraction (SXRD) datasets as a group.
+
+### What is caking?
+
+Before we can analyse the diffraction pattern rings for changes in phase fraction, micromechanical response and texture, we need to *cake* the data. *Caking* converts our 2-dimensional image into slices (of particular azimuthal angles) to produce a number of intensity profiles versus 2-theta angle (or pixel position). We can then investigate how the intensity peak profiles of particular lattice plane peaks change in particular directions over time. We can also run a full *azimuthal integration*, which sums up the intensities around the whole image, to produce a single intensity profile versus 2-theta angle. This can then be used to calculate the phase fraction, for instance.
+
+### Opening Dioptas on the iCSF
+
+To run the *caking* and *azimuthal integration* we have set up the program [Dioptas](http://www.clemensprescher.com/programs/dioptas) on the iCSF. Dioptas is a Python-based data analysis and processing program used for processing synchrotron data. The program was developed at the DESY beamline in Germany. It can do the same job as DAWN, but is slightly easier to use, and once the calibration files are set up, the caking and azimuthal integration can be automated using a Python script and the [PyFAI](https://pyfai.readthedocs.io/en/latest/) package.
+
+Log onto the iCSF by opening the terminal and using the secure shell (ssh) protocol;
+
+```bash
+ssh -X mbcx9cd4@incline256.itservices.manchester.ac.uk
+```
+
+You will then be prompted to enter your password.
+
+![](/wiki/assets/images/posts/DAWN_screenshot1.png)
+
+Load the Dioptas module using the following commands in the terminal;
+
+[TODO]
+
+This will then open the Dioptas graphical user interface (GUI).
+
+### Creating a .poni calibration file in Dioptas
+
+The documentation for using Dioptas can be found [here](https://dioptas.readthedocs.io/en/stable/). However, the following instructions should be enough for a simple calibration.
+
+**Start Values**
+
+Click on *'Load File'* and select a calibration diffraction pattern.
+
+Select a 'Custom' detector and set the pixel width and height (e.g. 172 μm x 172 μm).
+
+Input the distance (e.g. 750 mm) and leave the box ticked, as we want to refine this value.
+
+Input the wavelength (e.g. 0.12423 Å) and leave the box unticked, as we don't want to refine this value.
+
+Leave the polarization as is (0.990).
+
+Select the calibrant (e.g. CeO2).
+
+**Peak Selection**
+
+Untick automatic increase.
+
+Select automatic peak search.
+
+Click 'clear all peaks'.
+
+Select the current ring number as 1.
+
+Then click on the map to select the ring closest to the centre, which will become highlighted with points.
You will need to click more than once to select points around the entire ring.
+
+Once a ring is fully selected, select the next ring number (2), then click around the next ring out from the centre.
+
+Repeat until 10 or so rings have been selected.
+
+**Refinement Options**
+
+Make sure automatic refinement is ticked.
+
+Leave the default values in this window, but be sure to change the number of rings to match the number of rings that have been highlighted.
+
+**Masking**
+
+There is also an option to use a mask, which can filter out any dead pixels or grid lines from the pixel array.
+
+On the far left panel, click on the Mask tab.
+
+Type 1 in the 'below thresh' box and click the 'below thresh' button. This will highlight all points below this threshold.
+
+Note, there is an option to apply a transparent threshold, as well as the standard fill threshold.
+
+**Run Calibration**
+
+Click 'Calibrate' to run a calibration.
+
+**Save calibration as .poni file**
+
+Click the 'save calibration' button at the bottom right of the window and save the calibration as a .poni file.
+
+These two images show the setup just before and just after running a calibration in Dioptas;
+
+![](/wiki/assets/images/Dioptas_calibration_1.png)
+
+![](/wiki/assets/images/Dioptas_calibration_2.png)
+
+### Caking and Azimuthal Integration using PyFAI
+
+Dioptas uses elements of [FabIO](https://pythonhosted.org/fabio/) and [PyFAI](https://pyfai.readthedocs.io/en/latest/) to read in the diffraction images and to perform parts of the calibration. Dioptas also uses PyFAI to perform image integration. However, it is easy to set up a Python script using PyFAI to perform the caking or a full azimuthal integration on a set of diffraction image data. A notebook explaining how to do this and the features of PyFAI is available at this [link](https://github.com/LightForm-group/pyFAI-integration-caking). Some interesting videos explaining how PyFAI works are available at this [link](http://www.silx.org/doc/pyFAI/dev/index.html#).
+
+**Load Calibration**
+
+First, we load the .poni calibration file, which contains information about the beamline setup. This gives us an azimuthal integrator object, or `ai`, which we will use to perform an azimuthal integration or caking on the rest of our as-yet 'uncalibrated' data.
+
+```python
+import pyFAI
+
+# load the beamline geometry from the .poni calibration file
+ai = pyFAI.load("calibration/DLS_CeO2_1200mm.poni")
+```
+
+**Azimuthal Integration**
+
+An azimuthal integration can be performed using the `integrate1d` function.
+
+* The number of points in 2-theta is defined by the user.
+* The azimuthal range runs from -180 to 180, or -pi to pi, rather than 0 to 360 as in DAWN.
+* An output .dat file can be saved, which contains a header of metadata.
+* The result is returned as a numpy array of 2-theta and intensity.
+
+An azimuthal integration can be performed like this.
+
+```python
+result = ai.integrate1d(pattern_image_array,
+                        npt=10000,
+                        azimuth_range=(-180, 180),
+                        unit="2th_deg",
+                        correctSolidAngle=True,
+                        polarization_factor=0.99,
+                        method='full_csr',
+                        filename="analysis/integrated.dat")
+```
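+
+The returned `result` also behaves like a `(two_theta, intensity)` tuple, so it can be unpacked directly for a quick check of the profile (a minimal sketch, assuming matplotlib is installed);
+
+```python
+import matplotlib.pyplot as plt
+
+# unpack the radial (2-theta) and intensity arrays from the result
+two_theta, intensity = result
+
+plt.plot(two_theta, intensity)
+plt.xlabel("2-theta (degrees)")
+plt.ylabel("Intensity")
+plt.show()
+```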
+
+**Caking**
+
+The `integrate2d` function is designed for caking the data. The input arguments are similar to above, but now a number of azimuthal cakes can be chosen.
+
+The following script uses a loop to iterate through some images, create an array for the data and then save it as a text file.
+
+```python
+import logging
+import math
+import pathlib
+import warnings
+
+import fabio
+import numpy as np
+
+# suppress warnings when TIFFs are read
+logging.getLogger("fabio.TiffIO").setLevel(logging.ERROR)
+
+# user inputs
+number_of_points = 10000
+number_of_cakes = 36
+
+# rotate the detector so that the cardinal direction is in the centre of the first cake.
+first_cake_angle = 360 / number_of_cakes
+ai.rot3 = (first_cake_angle / 2) * (math.pi / 180)  # convert rotation to radians
+
+# get a list of the files
+image_list = sorted(pathlib.Path("data/").glob("pixium*"))
+
+for image_path in image_list:
+    # read the image and cake the data
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")
+        image = fabio.open(image_path)
+    pattern_image_array = image.data
+    result2d = ai.integrate2d(pattern_image_array,
+                              number_of_points,
+                              number_of_cakes,
+                              unit="2th_deg",
+                              polarization_factor=0.99,
+                              method='full_csr')
+
+    # flip the intensity data to order cakes clockwise rather than anticlockwise
+    intensity = np.flip(result2d.intensity.T, axis=1)
+
+    # reshape radial labels to a 2D array so they can be attached to the intensity data
+    radial = np.reshape(result2d.radial, (-1, 1))
+
+    result_array = np.hstack((radial, intensity))
+
+    # write out the caked data to a text file
+    output_path = f"analysis/{image_path.stem}.dat"
+    np.savetxt(output_path, result_array)
+```
+
+This caked dataset is now saved in a format that can be used in [xrdfit](https://xrdfit.readthedocs.io/en/stable/) to analyse how the single peak profiles change over time.
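+
+Each saved .dat file can be read straight back into numpy, e.g. to pull out a single cake for fitting or plotting (a minimal sketch; the file name is a hypothetical output of the loop above);
+
+```python
+import numpy as np
+
+# columns: 2-theta, then one intensity column per cake
+caked_data = np.loadtxt("analysis/pixium_00001.dat")
+
+two_theta = caked_data[:, 0]
+first_cake_intensity = caked_data[:, 1]
+print(two_theta.shape, first_cake_intensity.shape)
+```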
diff --git a/collections/_tutorials/sxrd-caking.md b/collections/_tutorials/sxrd-caking.md
index 885236b..d86ff59 100644
--- a/collections/_tutorials/sxrd-caking.md
+++ b/collections/_tutorials/sxrd-caking.md
@@ -1,6 +1,7 @@
 ---
-title: Tutorial for caking SXRD diffraction pattern images
+title: Caking SXRD diffraction pattern images using DAWN
 author: Christopher Daniel
+subcollection: SXRD
 ---
 
 ## Caking diffraction pattern images using DAWN on the iCSF
diff --git a/collections/_tutorials/sxrd_analysis_guide.md b/collections/_tutorials/sxrd_analysis_guide.md
new file mode 100644
index 0000000..c14234f
--- /dev/null
+++ b/collections/_tutorials/sxrd_analysis_guide.md
@@ -0,0 +1,11 @@
+---
+title: Synchrotron X-ray diffraction analysis guide
+author: Christopher Daniel
+subcollection: SXRD
+---
+
+## Synchrotron X-ray diffraction analysis guide
+
+### Flowchart for analysis packages
+
+The following flowchart should be used to help guide your analysis of synchrotron data and your choice of analysis packages. The analysis packages contain example notebooks, including example data, which can teach you about the principles of synchrotron diffraction data analysis. The notebooks are also based on text (YAML) file inputs, which should make the process of analysing data as simple as possible. Eventually the aim is to remove as much of the interaction as possible, so that the data can be analysed automatically. However, this automation is difficult due to the variety of different beamlines and different thermomechanical testers, and hence different datasets.
diff --git a/collections/_tutorials/sxrd_analysis_iCSF.md b/collections/_tutorials/sxrd_analysis_iCSF.md
new file mode 100644
index 0000000..50b9de7
--- /dev/null
+++ b/collections/_tutorials/sxrd_analysis_iCSF.md
@@ -0,0 +1,270 @@
+---
+title: Synchrotron X-ray diffraction analysis on iCSF / CSF
+author: Christopher Daniel, Guy Bowker
+subcollection: SXRD
+---
+
+## Synchrotron X-ray diffraction analysis on iCSF / CSF
+
+### iCSF
+
+You will first need to contact IT services to get set up with an iCSF account. The iCSF (interactive Computational Shared Facility) gives us access to the University’s high performance computing environment. IT can also give you access to the Research Data Storage (RDS) space, which is a safe, secure and backed-up space to save all of our invaluable data. This allows us to store and analyse our large Synchrotron X-ray Diffraction (SXRD) datasets as a group and to keep it all in one place.
+
+### Interacting with the iCSF using remote desktop
+
+The iCSF is usually accessed via a remote desktop client. You will first need to download the [X2Go Client](https://wiki.x2go.org/doku.php/download:start). Within X2Go you can set up a host to the nyx connection nodes and set the session preferences like this;
+
+*Host: incline.itservices.manchester.ac.uk
+User Name: (your university username e.g. mbcx9cd4)
+Password: (your university password)
+Port: 22
+Tick ‘Follow Symbolic Links’, ‘Auto Cache’ and ‘Defer Permissions’
+Mount Point: (where you want to view the virtual desktop files i.e. /Users/(mac user name)/iCSF-Home)
+Volume Name: iCSF-Home
+Tick ‘Show in Finder Sidebar’
+[TODO] - Check session preferences.*
+
+Once this is set up you can connect to the remote desktop Linux environment, from which you can launch connections with the iCSF. To connect with the iCSF you will need to open the terminal and use Linux bash commands.
+
+### Basic commands
+
+#### Launching a basic Jupyter Notebook
+
+A Jupyter notebook can be launched like so *(note the version number may change as Python is updated)*;
+
+```bash
+jupyter-notebook-icsf 3/2019.07
+```
+
+This allows you to interact with and analyse data stored on your iCSF (incline) data storage space.
+
+#### Launching the iCSF
+
+The connection with the iCSF is created through a secure shell protocol like so **(note, replace username with your actual username)**;
+
+```bash
+ssh -X username@incline256.itservices.manchester.ac.uk
+```
+
+From here it is possible to view and search through your data storage space using the *ls* (list) and *cd* (change directory) commands. You can also *mv* (move) and *rm -rf* (delete) files, *mkdir* (make directory), and do pretty much whatever you like with the bash commands.
+
+#### Connecting iCSF (incline) storage with RDS storage
+
+Our group has allocated funds to purchase space for research data storage (RDS) from the university, which is stored and backed up on the servers. You will need to contact IT services to be given a portion of this space, which you will have read/write access to, and which others in the group will be able to read and download. You will also be given access to a shared space, where everyone has read/write access. For instance;
+
+`/mnt/eps01-rds/Fonseca-Lightform/username`
+
+and
+
+`/mnt/eps01-rds/Fonseca-Lightform/shared`
+
+To access the RDS data on the iCSF (incline) storage space you will first need to set up some symbolic links.
A symbolic link, also known as a symlink or a soft link, is a special type of file that points to another file or directory (like a shortcut in Windows); this will allow you to view the contents of the folder. First, remote desktop into the interactive Linux environment, launch the terminal, and log in to the iCSF using the command;
+
+```bash
+ssh -X username@incline256.itservices.manchester.ac.uk
+```
+
+Then, it is probably a good idea to make a new directory in your home folder on the iCSF where you would like to store this data. For instance,
+
+```bash
+mkdir rds_lightform
+```
+
+Then, create the symbolic link;
+
+```bash
+ln -s /mnt/eps01-rds/Fonseca-Lightform/mbcx9cd4 ~/rds_lightform
+```
+
+In case there is a mistake in your filename, you can delete a symbolic link using;
+
+```bash
+unlink ~/rds_lightform
+```
+
+#### Loading modules
+
+The most useful thing about the iCSF is the ability to launch interactive modules. The most useful modules for analysing synchrotron data are MATLAB, DAWN, DIOPTAS and MAUD;
+
+```bash
+module load apps/binapps/matlab/R2019a
+matlab
+```
+
+```bash
+module load apps/binapps/dawn/2.16.1
+dawn
+```
+
+```bash
+source activate dioptas
+dioptas
+```
+
+```bash
+module load apps/binapps/maud/2.93
+maud.sh
+```
+
+When using the iCSF for the first time, you will need to ensure that the lines to enable an internet connection and the use of Python are contained within the `.bash_profile` file of your iCSF home directory - please see the FAQs at the bottom of the page.
+
+#### Memory Check
+
+If you are experiencing memory issues on the iCSF at any time, you can check the memory usage of the home partition (space shared by all users):
+
+```bash
+df -h /home
+```
+
+You can also check how much space your home directory is using;
+
+```bash
+du -skh $HOME
+```
+
+### Interacting with the iCSF externally
+
+It is also possible to interact with the iCSF and RDS space through an external terminal. From here it is possible to view the data stored on the RDS, and it is also possible to create port links to launch analyses and notebooks that run on the iCSF processors. To view your iCSF (incline) storage space you can use the secure shell protocol like so;
+
+```bash
+ssh -t -L 7779:localhost:7779 mbcx9cd4@incline256.itservices.manchester.ac.uk
+```
+
+From here, **and on your own terminal**, it is possible to view and search through your data storage space using the *ls* (list) and *cd* (change directory) commands. You can also *mv* (move) and *rm -rf* (delete) files, *mkdir* (make directory), and do pretty much whatever you like with the bash commands.
+
+#### Viewing files on iCSF (incline) using OS X Fuse
+
+To view files on your computer from the iCSF **(just like you would on an external hard drive)**, you can install the [OS X Fuse](https://osxfuse.github.io) package. Then, in the terminal, you can create a directory within which you can view all of the folders and files on the iCSF *(including symbolic links to the RDS space)*;
+
+```bash
+cd Desktop
+mkdir iCSF-Home
+sshfs -o follow_symlinks username@incline.itservices.manchester.ac.uk: iCSF-Home
+```
+
+In cases where this does not connect (usually due to the node being overloaded), you can instead use the slightly different command;
+
+```bash
+sshfs -o follow_symlinks username@incline256.itservices.manchester.ac.uk: iCSF-Home
+```
+
+Sometimes, there can also be an issue with permissions to a shared folder, such as the symbolic link to the RDS shared folder, which shows up as permission denied.
In this case, you can view the files in shared sym-linked folders using;
+
+```bash
+sshfs -o defer_permissions username@incline256.itservices.manchester.ac.uk:/mnt/eps01-rds/Fonseca-Lightform RDS
+```
+
+or;
+
+```bash
+sshfs -o defer_permissions username@rds-ssh.itservices.manchester.ac.uk:/mnt/eps01-rds/Fonseca-Lightform RDS
+```
+
+To unmount the directory, you used to be able to use;
+
+```bash
+umount -f /Users/mbcx9cd4/Desktop/iCSF-Home
+```
+
+However, this seems to cause problems with the latest Mac OS, so it is now better to use;
+
+```bash
+diskutil unmount /Users/mbcx9cd4/Desktop/iCSF-Home
+```
+
+#### Copying data to the iCSF (incline)
+
+For copying bulk experimental data from synchrotron beamlines such as Diamond or DESY, please see the tutorial `Transferring data from Diamond or DESY beamlines`, found under the `SXRD running experiment` tab on the LightForm Wiki.
+
+For general copying of data to and from the iCSF (incline) and RDS space, you can use the `cp` command *(for copying everything without comparison)*, or the `rsync` command *(for only copying changes to the source data)*;
+
+```bash
+cp -vr source destination
+```
+
+```bash
+rsync -azv source destination
+```
+
+*Note, a leading `~/` in a path means your home folder, and a leading `./` means the current working directory.*
+
+So, in this example the command is used to transfer data from a data refinement (TOPAS) from a personal computer onto the RDS space, which is sym-linked to the iCSF (incline) storage space.
+
+```bash
+rsync -azv ~/Documents/Dropbox\ \(The\ University\ of\ Manchester\)/Zr\ Computer/TOPAS/Data\ Transfer/Batch05/ username@incline256.itservices.manchester.ac.uk:rds_lightform/SXRD_analysis/desy_2021/experiment05-deformation/phase-fraction-TOPAS/Batch05
+```
+
+It would also be valid to use `username@rds-ssh.itservices.manchester.ac.uk:` instead of `username@incline256.itservices.manchester.ac.uk:`, to transfer the data to the RDS space directly.
+
+#### Setting up virtual environment for Jupyter Notebook
+
+To ensure that your packages do not get mixed up and interfere with each other during your analysis, it is important to set up a virtual Python environment, and to work within that environment. A virtual environment can be set up like so, using a list of the required packages *(found in requirements.txt)*;
+
+```bash
+cd python_package/
+python -m venv venv
+source venv/bin/activate
+pip install --upgrade pip
+pip install jupyter
+pip install -r requirements.txt
+```
+
+*Please note, if the command `python -m venv venv` does not work here, you may first need to ensure Python is usable from your iCSF/CSF space. This can be done by ensuring the following lines are included in the `.bash_profile` file of your iCSF/CSF home directory - please see the FAQs at the bottom of the page.*
+
+From here, you could launch Python (`python`) or launch a notebook (`python -m jupyter notebook`) within the virtual environment. **However, the notebook will not launch in the browser unless you are able to launch it via a port link (see below on how to set this up).**
+
+You can check what versions are downloaded using;
+
+```bash
+pip list
+```
+
+And if it is necessary to force upgrade some packages, then you can use;
+
+```bash
+pip install --upgrade -r requirements.txt
+pip install --upgrade specific_python_package
+```
+
+Note, in some instances, for developers, you may want to install in editable (egg) mode, so that package changes take effect instantly, for example using;
+
+```bash
+pip install -e .
+
+#### Launching Jupyter Notebook via port link
+
+First, you need to download the `start-notebook.sh` shell script and place it somewhere on your iCSF space for launching the port link (ideally in the same directory as the venv you would like to use), then make it executable:
+
+```bash
+chmod 744 start-notebook.sh
+```
+
+Then, after having set up a virtual environment and deactivated it, you can launch a notebook via a port link in the terminal, replacing `username` with your Manchester IT username and adjusting the path to `start-notebook.sh` and the path to your desired virtual environment, like so:
+
+```bash
+ssh -t -L 7780:localhost:7780 username@incline256.itservices.manchester.ac.uk "virtualenv-setup/start-notebook.sh 7780 . rds_lightform/SXRD_analysis_packages/pyFAI-integration-caking/venv && exit"
+```
+
+This will create a link to launch the notebook, which you can open on your own computer, but all processes will run on the iCSF, saving your own computer's processing power. The notebook does not become slow and laggy like one launched directly on the iCSF, and is therefore very useful for editing notebooks.
+
+You can also use this same command to launch the notebook on the linux remote desktop, which is useful for when you are not editing the notebook, but want to analyse larger datasets using the iCSF.
+
+If the command has not correctly loaded the desired virtual environment, it will instead default to your user-installed pip packages (installed using `pip install --user`). You can check which Python packages you have installed directly from the notebook by running `!pip list` in a cell.
+
+#### ADVANCED - Altering the bashrc file
+
+At some point you may need to alter the bashrc file. This is done by opening the file in a text editor:
+
+```bash
+nano ~/.bashrc   # open in nano
+vi ~/.bashrc     # or open in vi
+echo $PATH       # check the current search path
+```
+
+#### FAQs
+
+- Please note, if any commands using `python` do not work on the iCSF, you may first need to ensure Python is usable from your iCSF space. This can be done by making sure the following lines are included in the `.bash_profile` file of your iCSF home directory (note - changes to `.bash_profile` will not take effect until you next log in to the iCSF):
+
+```bash
+module load apps/binapps/anaconda3/2019.07
+module add tools/env/proxy2
+```
+
+The first line ensures you can use the currently installed version of Python available to all CSF and iCSF users.
+The second line loads a proxy tool that allows your iCSF space to connect to the internet.
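+
+If you would rather add these lines from the command line than with a text editor, something along these lines should work (assuming the lines are not already present in the file):
+
+```bash
+# append the module lines from the FAQ above to your .bash_profile
+cat >> ~/.bash_profile << 'EOF'
+module load apps/binapps/anaconda3/2019.07
+module add tools/env/proxy2
+EOF
+```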
diff --git a/collections/_tutorials/sxrd_etmt_setup.md b/collections/_tutorials/sxrd_etmt_setup.md
new file mode 100644
index 0000000..46693af
--- /dev/null
+++ b/collections/_tutorials/sxrd_etmt_setup.md
@@ -0,0 +1,67 @@
+---
+title: Tips for setting up ETMT at Diamond
+author: Christopher Daniel
+subcollection: SXRD
+---
+
+## Planning using the ETMT at Diamond
+
+The Instron Electro-Thermal Mechanical Testing System (ETMT) is stored at Diamond, either in the Harwell complex or in experimental hutch 2 on the I12 beamline. The beamline scientists can crane the ETMT into position on the I12 beamline, as well as connect up the cabling and water supply. This includes connecting 3x analogue outputs, which can be used for synchronising data from the ETMT with the recorded synchrotron patterns.
+
+However, the ETMT is not an easy piece of equipment to use, and the ETMT at Diamond is also slightly different to the one at Manchester. Therefore, it is advised to have at least one day of training with Andrew Pearce (Instron), followed by at least one or two days of practice experiments with your samples, before the beamline experiment.
+
+## Differences between operating Manchester ETMT and Diamond ETMT
+
+The Diamond ETMT has newer software for running the tests. Instead of the buttons 1, 2, 3 on the cabinet, there is now a switch on the controller. The furthest left position means no power to the machine; the middle (with a button hold to illuminate the green light) is for manual adjustment with the controller and for enabling sample protect; and the far right position is for control from the computer. To set up a test, you first need to set the gauge length manually with the middle position (after illuminating the green light). Then screw in the top of the sample, balance the load, and turn on sample protect. Then continue to screw in the sample as you see the load adjust.
+
+After connecting the thermocouple and resistivity wires, you can then put on the front of the chamber (a stupidly heavy door that makes a mockery of the safety assessment procedure). If you put this on at any other stage it can ruin your sample by sending the load control haywire.
+
+After the door is bolted on, you can then turn the switch to the far right position (and wait a few seconds). Set the load and current to zero in the setpoint tool on the computer. Then (as long as the power supply is on) you can press the green "System Enable" button, which gives heating power to the ETMT.
+
+## Practice check-list
+
+Practice sessions with the ETMT at Diamond should be used before the synchrotron beamtime to:
+
+- Set the gauge length of the machine. The samples at Diamond need to be tested in a chamber, which restricts the sample gauge length that can be tested. The largest gauge length of samples is about 40 mm, but samples should be less than this length if being deformed (for obvious reasons). To allow movement up to 40 mm it will be necessary to remove the vacuum seal to the chamber attached to the top grip, which loosens a spring baffle. This is done by loosening the two grips underneath the rubber seal and above the baffle, using two rods and turning with opposite rotation. Note, this will mean that you no longer have a vacuum seal on the chamber.
+- Connect the thermocouples. The chamber restricts the space, making it difficult to connect the thermocouples. For this reason, it is advised to have longer thermocouple and resistivity wires than usual (> 70 mm). It also helps to make the Pt/Rh (+ve terminal) wire slightly longer, by ~ 5 mm, than the Pt (-ve terminal) for easy identification and quick sample changeovers. Note, the Pt/Rh (+ve) wire also needs to come out from the top of the sample. The +ve on the top and -ve on the bottom prevents erroneous hot-spots forming along the sample length (no-one seems to know exactly why this happens).
+- Create holders to support the thermocouple and resistivity connectors. If the connectors are pulling on the thermocouple wires, this is likely to pull them off during the experiment. So, use tape/metal objects to support the thermocouple connectors. Having the connectors upside down also makes it easier to insert the wires into the connector during the experiment.
+- Check the connections into the computer are correct. There is a split connector for the thermocouple running into the back of the Eurotherm controller. In the case of an R-type thermocouple, the two orange connectors should connect into T/C-1 and T/C-2. However, in the case of a K-type thermocouple, it seems a connector with a looped wire is used to 'trick' the Eurotherm readout.
+Therefore, it is important to check the thermocouple connectors and the Eurotherm readout before starting an experiment.
+- Check that no drive limits have been enabled. These limits are found in the settings (two arrows, bottom left) and are sometimes changed by users.
+- Calibrate the PID settings. The PID settings for different materials and sample geometries can vary. Calibrate these prior to the experiment to reduce time on the beamline.
+- Check the ThermoLin settings for the temperature range you are testing at. The Diamond ETMT has a slightly different ThermoLin setting to the one used at Manchester. In this case, adjusting the "Thermocouple Range" temperature seems to largely affect the gradient at lower temperatures, whereas high temperatures remain largely unaffected. A typical value for "Thermocouple Range" is 1450C for tests running at high temperatures, but this can be adjusted to, say, 500C for more accurate temperature readouts for lower temperature tests. Again, the "Adjusted Min T" seems to have more of an effect on lower temperatures; a typical value for this is 0.
+- Check the analogue output settings for your data. Find the typical range of the Load (max N), Position (max mm), and Temperature (max C) from your tests. The output limit for the analogue output at Diamond is 10 volts. Therefore, using the maximum values allows you to set a scale for recording the Load (e.g. 200 N/V), Position (e.g. 0.5 mm/V), and Temperature (e.g. 150 C/V) - see the worked example after this list. You can check the output to the analogue signal in the WaveForm software.
+- Check you are recording the Eurotherm temperature in the data. The Eurotherm temperature is more accurate than the ETMT (controller) readout, with typically ~ 10 to 15 degrees difference. However, the Eurotherm temperature can only be recorded at ~ 20 Hz, which is too slow for controlling fast tests. Nevertheless, the Eurotherm temperature can be recorded as "Temperature (Controller 1)" in Channels for correcting the data after a test.
+- Check you can record the voltage output. The voltage output from two resistivity wires of the same type, e.g. Pt-Pt or Cu-Cu, spot-welded ~ 2 mm apart across the centre of the sample, can be used to record phase changes in the sample and plastic strain during a test. The voltage and current can be used to calculate electrical resistivity, which can in turn be used to calculate phase fraction and plastic strain - see [Christopher S. Daniel EngD Thesis](https://www.research.manchester.ac.uk/portal/en/theses/an-investigation-into-the-texture-development-during-hotrolling-of-dualphase-zirconium-alloys(416cf1d6-15de-41d0-ad7c-3d0db664ae84).html). A BNC cable will need to be connected to the back of the computer to record this voltage signal, and the calibration will need to be run with either High or Low voltage and a maximum of ~ 0.5 volts. Also, check that you are actually recording this voltage signal in the data.
+- Write out the testing methods. **Note, methods cannot be copied from the Manchester ETMT software**. Copying methods from different software versions can lead to errors running the tests and outputting the data.
+- Check the methods on the Diamond ETMT run correctly.
+- Check data output is in the correct format (correct acquisition frequency).
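+
+As a worked example of the scaling arithmetic (the maximum values here are purely illustrative): a test expected to reach at most 2000 N of load, 5 mm of position change and 1500 C would use
+
+```
+scale (units/V) = expected maximum / output limit (10 V)
+
+Load:        2000 N / 10 V = 200 N/V
+Position:    5 mm   / 10 V = 0.5 mm/V
+Temperature: 1500 C / 10 V = 150 C/V
+```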
+
+## Typical PID settings for Diamond ETMT
+
+A useful description of PID settings can be found [here](https://www.reddit.com/r/FromTheDepths/comments/kuyftc/pid_tuning_guide_for_dummies/).
+
+The PID settings on the ETMT can be tested by setting a waveform - using the waveform button, located in the bottom left panel. The output can be monitored using the scope tool. Note, it may be necessary to change the axes of the scope to check the output.
+
+Typical PID settings for the Linear Position Encoder are:
+- P = 30 dB
+- I = 1 /s
+- D = 0 ms
+- Lag = 0 ms
+
+Typical PID settings for the Load are:
+- P = 25.5 dB
+- I = 0.5 /s
+- D = 0 ms
+- Lag = 0.5 ms
+
+Typical PID settings for the Current are:
+- P = 10 dB
+- I = 15 /s
+- D = 0 ms
+- Lag = 20 ms
+
+Typical PID settings for the Temperature are:
+- P = 2 dB
+- I = 0.2 /s
+- D = 0 ms
+- Lag = 50 ms
diff --git a/collections/_tutorials/sxrd_operating_diamond.md b/collections/_tutorials/sxrd_operating_diamond.md
new file mode 100644
index 0000000..a257ea2
--- /dev/null
+++ b/collections/_tutorials/sxrd_operating_diamond.md
@@ -0,0 +1,80 @@
+---
+title: Operating the Diamond beamline
+author: Christopher Daniel
+subcollection: SXRD
+---
+
+## Setting up the beamline
+
+The beamline scientists will take you through the beam calibration. It is useful at this stage to know how many rings of a particular phase you would like to collect. The beamline scientist can then take you through the appropriate beam size (e.g. 0.5 x 0.5 mm) and detector distance (e.g. 750 mm). The new Pilatus detector has lots of gaps, which can be a problem for capturing the rings. It could also be an advantage to assume orthorhombic symmetry and adjust the detector height, so that you capture only the bottom half of the pattern. This can improve the resolution of the rings for better pattern fitting. However, it may be a problem for calculating accurate texture and phase fraction, which require full rings.
+
+## Operating the Diamond beamline
+
+The beamline scientists will also show you how to operate the beamline during your session. However, this can be quite an overwhelming experience. The notes here provide a checklist for running particular tests, particularly late at night, so that you do not miss recording important data.
+
+## Launching the controls
+
+In case the computer crashes late at night, here is where the beamline controls are found...
+
+Beamline controls: Start (top right corner) / Beamlines / EH1
+
+GDA (for running scripts to operate the beamline): Start / Data Acquisition / GDA Client
+GDA scripts: GDA / Scripts / src / users / "Experiment Number"
+
+Camera: Equipment / EH1 End
+Start camera: npeg / start / image mode / continuous
+See sample image: cam / QT Viewer
+
+## Method for standard acquisition
+
+1. Rotate ETMT to run experiment - change TAB2 from 45 to 0 degrees
+2. Change beamline setup to capture image of sample - MoveToEndOfDetectorDiagnostics
+3. Open slits (once move has completed) - press Reset, then Open
+4. Change camera diagnostic to continuously record - npeg, continuous
+5. Open camera to view sample - cam, open QT Viewer
+6. Adjust X and Y positions of TAB2 to move beam to centre of sample
+7. Take sample image at 0.2 second acquisition time - scan ix 1 1 1 camEH1end 0.2
+8. Change beamline setup to capture diffraction patterns - MoveToDiffractionMode
+9. Open slits (once move has completed) - press Open
+10. Start recording diffraction patterns at 2 second acquisition time - scan ix 1 10000 1 pilatus_eh1_sw 2
+11. Start ETMT test using a laptop with remote desktop link
+12. Once test has finished, stop recording diffraction patterns by pressing the stop button (top right of script window)
+13. Change beamline setup to capture image of sample - MoveToEndOfDetectorDiagnostics
+14. Open slits (once move has completed) - press Open
+15. Change camera diagnostic to continuously record - npeg, continuous
+16. Open camera to view sample - cam, open QT Viewer
+17. Take sample image at 0.2 second acquisition time - scan ix 1 1 1 camEH1end 0.2
+18. Close slits
+19. Rotate ETMT to take out sample - change TAB2 from 0 to 45 degrees
+20. Open hutch and replace sample to set up another test...
+
+## Method for fast acquisition
+
+For fast acquisition a new script had to be created. This script (Version 2) can be found on the beamline directory - .../2021/mg28894-1/processing/scripts/GDA - and Stefan Michalik should also have a copy. It seems there is a limit on how many frames can be recorded, so 100 Hz can be run for at most 60 seconds. There is also a delay after running fast acquisition mode while the data from the Pilatus detector is saved out to the Diamond storage. Additionally, it seems it takes longer than expected to record at a particular frequency, so setting the number of frames can cause the acquisition to overrun - Stefan Michalik can advise on this. The anatomy of the `scan` commands used in these methods is sketched after the list below.
+
+1. Rotate ETMT to run experiment - change TAB2 from 45 to 0 degrees
+2. Change beamline setup to capture image of sample - MoveToEndOfDetectorDiagnostics
+3. Open slits (once move has completed) - press Reset, then Open
+4. Change camera diagnostic to continuously record - npeg, continuous
+5. Open camera to view sample - cam, open QT Viewer
+6. Adjust X and Y positions of TAB2 to move beam to centre of sample
+7. Enter the X and Y positions into the xstage_pos and ystage_pos variables in the script
+8. Save the script
+9. Adjust the folderName and timeDuration variables in the script to match the test
+10. Take sample image at 0.2 second acquisition time - scan ix 1 1 1 camEH1end 0.2
+11. Change beamline setup to capture diffraction patterns - MoveToDiffractionMode
+12. Open slits (once move has completed) - press Open
+13. Record a single diffraction pattern at 5 second acquisition time - scan ix 1 1 1 pilatus_eh1_sw 5
+14. Prepare to start the ETMT test using a laptop with remote desktop link
+15. At the same time as starting the ETMT test, click 'run' on the script
+16. Once the test has finished, stop recording diffraction patterns by pressing the stop button (top right of script window)
+17. Record a single diffraction pattern at 5 second acquisition time - scan ix 1 1 1 pilatus_eh1_sw 5
+18. Change beamline setup to capture image of sample - MoveToEndOfDetectorDiagnostics
+19. Open slits (once move has completed) - press Open
+20. Change camera diagnostic to continuously record - npeg, continuous
+21. Open camera to view sample - cam, open QT Viewer
+22. Take sample image at 0.2 second acquisition time - scan ix 1 1 1 camEH1end 0.2
+23. Close slits
+24. Rotate ETMT to take out sample - change TAB2 from 0 to 45 degrees
+25. Open hutch and replace sample to set up another test...
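+
+As a rough guide to reading the `scan` commands above (our interpretation of the GDA syntax - confirm the exact meaning with the beamline scientists):
+
+```bash
+# scan <scannable> <start> <stop> <step> <detector> <exposure time, s>
+scan ix 1 1 1 camEH1end 0.2         # a single 0.2 s camera image
+scan ix 1 10000 1 pilatus_eh1_sw 2  # up to 10000 diffraction patterns, 2 s each
+```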
diff --git a/collections/_tutorials/sxrd_transfer.md b/collections/_tutorials/sxrd_transfer.md
new file mode 100644
index 0000000..df4fd3c
--- /dev/null
+++ b/collections/_tutorials/sxrd_transfer.md
@@ -0,0 +1,133 @@
+---
+title: Transferring data from Diamond or DESY beamlines
+author: Christopher Daniel
+subcollection: SXRD
+---
+
+## Transferring data from Diamond and DESY beamlines
+
+It is important to try and transfer your data from Diamond or DESY beamlines as soon as possible, before it is moved to tape (from which it can take time to restore). At Diamond, there is a **40 day limit** for storing data on their servers before it is moved to tape. At DESY, the data is kept for longer, but there is still a risk that the data could be lost.
+
+### iCSF
+
+You will first need to contact IT services to get set up with an iCSF account. The iCSF ([interactive Computational Shared Facility](http://ri.itservices.manchester.ac.uk/icsf/)) gives us access to the University's high performance computing environment. IT can also give you access to the Research Data Storage (RDS) space, which is a safe, secure and backed-up space to save all of our invaluable data. This allows us to store and analyse our large Synchrotron X-ray Diffraction (SXRD) datasets as a group.
+
+### Accessing Research Data Storage (RDS) space
+
+Our group has allocated funds to purchase space for Research Data Storage (RDS) from the university, which is stored and backed up on the servers. You will need to contact IT services to be given a portion of this space, which you will have read/write access to, and others in the group will be able to read and download. You will also be given access to shared space, where everyone has read/write access. For instance:
+
+`/mnt/eps01-rds/Fonseca-Lightform/username`
+
+and
+
+`/mnt/eps01-rds/Fonseca-Lightform/shared`
+
+To access this data on the iCSF you will first need to set up some symbolic links. A symbolic link, also known as a symlink or a soft link, is a special type of file that points to another file or directory (like a shortcut in Windows). This will allow you to view the contents of the folder. First, remote desktop into the interactive linux environment, launch the terminal, and log in to the iCSF using the command:
+
+`ssh -X username@incline256.itservices.manchester.ac.uk`
+
+Then, it is probably a good idea to make a new directory in your home folder on the iCSF where you would like to store this data. For instance:
+
+`mkdir rds_lightform`
+
+Then, create the symbolic link:
+
+`ln -s /mnt/eps01-rds/Fonseca-Lightform/mbcx9cd4 ~/rds_lightform`
+
+If there is a mistake in your filename, you can delete a symbolic link using:
+
+`unlink ~/rds_lightform`
+
+### RDS-SSH service
+
+It is not possible to transfer data within Manchester's iCSF terminal or using Diamond's remote desktop, due to security restrictions.
+
+So, you will then need to contact IT services to get set up with the RDS-SSH service. Your basic iCSF account will not allow you to transfer large amounts of data, which is why you will need the [RDS-SSH service](http://ri.itservices.manchester.ac.uk/rds/the-rds-ssh-service/). The basic commands for copying files using the RDS-SSH service are given on this [webpage](http://ri.itservices.manchester.ac.uk/rds/user-faq/copying-files-between-cifs-and-nfs-shares/).
+
+### Copying data from Diamond
+
+Once you have the RDS space and the sym links to the folders set up AND you have been given access to the RDS-SSH service, you can begin the transfer.
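+
+Before starting a large transfer, it can also be worth checking that there is enough free space left on the RDS allocation. Since the share is mounted on the RDS-SSH service, a standard disk usage check should work (using the path from the examples above):
+
+```bash
+df -h /mnt/eps01-rds/Fonseca-Lightform
+```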
+
+Open a terminal on your Mac or laptop and connect to the RDS-SSH service:
+
+`ssh username@rds-ssh.itservices.manchester.ac.uk`
+
+Change directory so you can view the folder you would like to transfer the data to.
+
+Then, run the following command, which will use rsync to transfer the data from the path on Diamond's servers to our RDS folder at Manchester. Note, this will not direct any data through your computer.
+
+`rsync -azvn username@ssh.diamond.ac.uk:/dls/i12/data/2021/mg25682-2 diamond_data_rds`
+
+The `n` in the flags after rsync performs a dry run, i.e. a test of the transfer without copying anything. If it is working, it will return a confirmation message such as:
+
+`sent 110,286 bytes received 907,569 bytes 15,306.09 bytes/sec
+total size is 87,034,154,221 speedup is 85,507.42 (DRY RUN)`
+
+Then, you are ready to run the full transfer:
+
+`rsync -azv username@ssh.diamond.ac.uk:/dls/i12/data/2021/mg25682-2 diamond_data_rds`
+
+The final confirmation should return something like:
+
+`sent 693,246 bytes received 65,792,616,969 bytes 11,708,036.34 bytes/sec
+total size is 87,034,154,221 speedup is 1.32`
+
+### Copying data from DESY
+
+For DESY the data transfer is slightly different.
+
+First, using DESY's data management portal (Gamma Portal), you will need to register for the FTP system. This is easily done by browsing the data on the portal and clicking a link for FTP registration. The registration remains active for 7 days. Here are the instructions for doing this using the [Gamma Portal](https://confluence.desy.de/display/ASAP3/The+Gamma+Portal).
+
+The FTP server provides access to the data using the FTP protocol. DESY has its own instructions for doing this - [accessing beamtime data using FTP](https://confluence.desy.de/display/ASAP3/Accessing+beamtime+data+using+FTP).
+
+However, we need to run this FTP protocol on the **RDS-SSH service** at Manchester.
+
+Open a terminal on your Mac or laptop and connect to the RDS-SSH service:
+
+`ssh username@rds-ssh.itservices.manchester.ac.uk`
+
+Change directory so you are within the folder you would like to transfer the data to.
+
+Then, run the following series of commands to connect to the FTP server:
+
+`mkdir -p ~/.lftp/certs`
+`curl https://pki.pca.dfn.de/dfn-ca-global-g2/pub/cacert/chain.txt > ~/.lftp/certs/desy.pem`
+`echo 'set ssl:ca-file ~/.lftp/certs/desy.pem' >> ~/.lftp/rc`
+`echo 'debug 3' >> ~/.lftp/rc`
+`lftp`
+`open ftp://psftp.desy.de`
+`user USERNAME`
+
+Note, the USERNAME should be your DESY (DOOR) username. You will then be prompted to enter your DESY (DOOR) password.
+
+Now you are in the home directory of the FTP server that you registered. Using the command `ls` you will see the list of folders. For example:
+
+``` bash
+---- Connecting to psftp.desy.de (2001:638:700:1004::1:37) port 21
+**** connect(control_sock): Network is unreachable
+---- Connecting to psftp.desy.de (131.169.4.55) port 21
+<--- 220-***
+<--- 220-Welcome to psftp.desy.de
+<--- 220-***
+<--- 220 This is a private system - No anonymous login
+<--- 230 OK. Current restricted directory is /
+dr-xr-x--- 6 26666 6666 4096 Feb 6 09:51 .
+dr-xr-x--- 6 26666 6666 4096 Feb 6 09:51 ..
+-rw-r--r-- 1 26666 6666 457 Mar 16 13:58 README.non-conformant-files.txt
+-r-------- 1 26666 6666 1581 Dec 6 17:05 beamtime-metadata-11010750.json
+dr-xr-x--- 2 26666 6666 4096 Dec 6 17:05 processed
+dr-xr-x--- 5 26666 6666 4096 Dec 7 22:10 raw
+dr-xr-x--- 2 26666 6666 4096 Dec 6 17:05 scratch_cc
+dr-xr-x--- 2 26666 6666 4096 Dec 6 17:05 shared
+```
+
+To copy the data, run the mirror command.
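+
+If a large transfer is interrupted, the mirror command can usually pick up where it left off rather than starting again. A minimal sketch (run `help mirror` inside lftp for the full option list):
+
+```bash
+mirror -c raw    # -c continues a previously interrupted mirror
+```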
+This will transfer the data from DESY's FTP server to the folder that you are working within on the RDS-SSH service.
+
+`mirror raw`
+
+After this data has transferred, you can run the next folder (note the folder names must match the listing above, e.g. `processed`):
+
+`mirror processed`
+
+etc.
+
diff --git a/collections/_tutorials/topas_phase_fraction.md b/collections/_tutorials/topas_phase_fraction.md
new file mode 100644
index 0000000..873bba0
--- /dev/null
+++ b/collections/_tutorials/topas_phase_fraction.md
@@ -0,0 +1,96 @@
+---
+title: Analysing phase fraction changes using TOPAS
+author: Christopher Daniel
+subcollection: SXRD
+---
+
+## Notes on manual fitting in TOPAS using the GUI
+
+**1. Load scan files**
+- In .raw format for XRD, or .xy format for SXRD.
+- The load scan files option is found under the *'File'* tab. You can load more than one scan at a time, or refine each pattern separately.
+- There are options to change settings globally (changing all scans at once), or you can change each scan individually.
+
+**2. Emission profile**
+- If Lab-XRD data, then load an *'emission profile'* for each of the scans (CoKa3.lam is the standard cobalt emission profile). This builds the profile of a polychromatic source by modelling the beam composition.
+- If SXRD data, only one profile is needed in *'emission profile'*, since the beam is monochromatic, so the second profile can be deleted.
+- Change the area to 0.01, wavelength to 0.1391986 angstrom, Lortz/area to 0.01 (lowest possible), and delete the other column.
+- Set Ymin - Ymax as 1e-5 in the emission profile options, which defines the ratio of peak to width. Sharp peaks in SXRD mean a low value of 1e-5 is appropriate.
+- *Note, @ is used to refine a parameter.*
+
+**3. Background**
+- Set Chebyshev order 5, tick 1/x bkg, and refine with 1000 steps. The 1/x background corrects for the large background at low scattering angles in L-XRD.
+
+**4. Instrument**
+- For XRD set a radius of 33 cm (330 mm) for the distance of the primary and secondary goniometer arms (for the Bruker D8-Discover). Select linear PSD and refine. Use the simple axial model, due to the effect of the conical beam of varying intensity, to account for the umbrella effect of the diffraction cone within the Ewald sphere, where intensity can vary depending on source/detector position.
+- For SXRD untick point detector. The beam delivering slits in EH1 at Diamond I12 mean R-primary = 47 m (47000 mm), and the sample-detector distance means R-secondary = 1500 mm for Zr or 12000 mm for Ti. Use the simple axial model with the default value of 12.
+
+**5. Corrections**
+- For XRD tick the Lorentz-Polarisation (LP) factor and change to 0. A graphite monochromator = 26.4.
+- For SXRD change the LP factor to 90 and fix.
+- 2th correction for sample displacement. This corrects for the sample Z height (defining the zero position). Start at zero, and only refine at the end. Not required for SXRD, but certainly needed for the XRD setup.
+- There is an option to tick zero order error in peak shift to improve the fit. Zero error occurs due to slight detector misalignment between the actual zero position of the goniometer versus what the microswitch believes is zero.
+
+**6. Miscellaneous**
+- 0.0001 calculation steps (finer will slow the refinement down, coarser could lead to square peaks).
+- Set start x = 2.4$^\circ$ and finish x = 8.94$^\circ$ for the range of 2-theta.
+
+**7. Load cifs and refine**
+- Load cifs by right clicking in the scan experiment.
+- For two-phase materials, first start with only the alpha-phase and refine the parameters, then tick the beta-phase cif to improve the fit to the second phase.
+- Untick crystal size steps (switch off).
+- Choose peak type = PV_TCHZ, which combines L and G (Lorentzian and Gaussian). The TCHZ Thompson-Cox-Hastings pseudo-Voigt is best for L-XRD and SXRD. Pseudo-Voigt is fast, but not the best fit. For synchrotron data you may also use PVII (Pearson VII), but this can go awry.
+- Firstly, fix the cell parameters and refine only the scale parameters. Scale governs the magnitude of the peak intensities based on a convolution of the various structural parameters. Leave preferred orientation turned off.
+- Then, refine the cell parameters and add in texture if needed. Try increasing the preferred orientation from order 2, to 4 and 6 (maybe 8 for very strong alignment).
+
+**8. Goodness of fit**
+- A value of Rwp < 15 is a good result. But the fitting quality is better determined by the goodness of fit (GOF). Generally GOF < 2 is considered excellent, < 3 is acceptable, and < 1 means you are beginning to over-constrain the model fitting.
+*Note, a possible error (cannot find file in isotopes.txt) is due to missing atomic sites, such as a Ti atom needing to be filled in in the SITES option. Another error can also arise due to a cubic phase missing a hyphen, i.e. Im3m should be Im-3m.*
+
+**9. Save analysis as .pro file**
+- For reloading in TOPAS.
+
+## Setting up TOPAS in batch mode
+
+**1. Download jEdit 4.3**
+- See the John Evans TOPAS jEdit install instructions - https://community.dur.ac.uk/john.evans/topas_academic/jedit_setup.htm
+- *Note, there is a problem with the plugins disappearing.*
+
+**2. Use TOPAS5 (version 5.0)**
+
+**3. Create TOPAS .inp analysis file**
+- Use the example .inp file for Ti-64 or Zr-2.5Nb materials and open it in jEdit.
+- *Note, commented instructions are included throughout the .inp file.*
+- In TOPAS input files, **'** signals a commented line. To load an individual fit, uncomment 'macro filename' and give the absolute path to the .xy data file. To run batch mode, comment out 'macro filename' so that different data files can be referenced.
+- *'#define report'* at the top writes out a report, with the contents defined at the bottom *'#ifdef report...'*
+- Results for named parameters can be included in the output report - **results.txt**
+- The command xdd runs the analysis on the given filename.
+- @ means refine, no @ means fix.
+- If a value is named, then ! is needed to fix it, i.e. ba1 will refine, !ba1 is fixed.
+- Set zero background (5 zeros for refining 5 parameters) - *bkg @ 0 0 0 0 0*
+- Set zero 1/x, zero error, scale.
+- Fix LP factor 90, specimen displacement 0, simple axial model 12, ymin - ymax 0.00001, wavelength 0.1391986, calculation step 0.0001.
+- Delete spherical harmonics and replace with *'PO_Spherical_Harmonics(sh_1, 6)'*
+- Biso refinement is not needed. *Note, to refine biso use Beq @ 0 min = 0*
+
+**4. Run refinement for first .inp file**
+- Open TOPAS5.
+- Click on launch mode (rocket icon, right hand side).
+- *'Launch Mode: C:\ChrisBatch\ ...inp'* should appear.
+- Click run (play icon, right hand side).
+
+**5. Write batch mode as a 'batch.bat' file, along the lines of the sketch below**
+- Assume working on the C-drive, so change directory to *C:\TOPAS5*.
+- *C:\TOPAS5\tc* launches an executable for refining external .inp files.
+- Batch files should be located in a separate folder on the C-drive - *C:\ChrisBatch\*
+- Each line of the batch file contains: the 'macro filename' of the current analysis, the absolute path to the next .xy data file, the filename for the current analysis output, and the filename for the current output .inp file.
+- *'pause'* leaves the command window open at the end (rather than *'end'*, which would close it).
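+
+A minimal sketch of what such a batch file might look like is given below. The file names are hypothetical and the macro arguments must match the macros defined in your own .inp file, so treat this as a template rather than a working script:
+
+```bat
+rem Minimal illustrative batch.bat - adapt the paths, file names and macros to your own analysis
+cd C:\TOPAS5
+rem Each line refines one .inp file, pointing its 'macro filename' at the next .xy data file
+tc C:\ChrisBatch\batch05_01.inp "macro filename { C:\ChrisBatch\Data\scan_002.xy }"
+tc C:\ChrisBatch\batch05_02.inp "macro filename { C:\ChrisBatch\Data\scan_003.xy }"
+pause
+```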
+
+**6. Data organisation**
+- C:\ChrisBatch\
+  - .bat batch mode file
+  - .inp file for first refined analysis
+  - Data\
+    - .xy data files
+
+**7. Run batch mode**
+- Double click the .bat file.
diff --git a/datalight_index.yml b/datalight_index.yml
deleted file mode 100644
index 38c513e..0000000
--- a/datalight_index.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-layout: none
----
-{%- capture new_line %}
-{% endcapture -%}
-
-{%- include get_checklists.html -%}
-{%- for cl in checklists -%}
-  {% assign cl_full = cl | append: ".yml" -%}
-  {% for i in site.static_files -%}
-    {% assign inc_url_split = i.path | split: "/_includes/" -%}
-    {% assign inc_url = inc_url_split[1] -%}
-    {% assign inc_dir_split = inc_url | split: "/" -%}
-    {% assign inc_dir = inc_dir_split[0] -%}
-    {% if inc_dir == "checklists" -%}
-      {% if i.name == cl_full -%}
-        {% include {{ inc_url }} -%}
-        {{ new_line -}}
-      {% endif -%}
-    {% endif -%}
-  {% endfor -%}
-{% endfor -%}
\ No newline at end of file
diff --git a/index.md b/index.md
index de892b4..f218468 100644
--- a/index.md
+++ b/index.md
@@ -1,12 +1,16 @@
 ---
 layout: post
-title: Welcome to the LightForm Wiki!
+title: Welcome to the CLARI (formerly LightForm) Wiki!
 published: true
 ---
 
 ## Purpose
 
-The purpose of this Wiki is to share our collective knowledge about procedures, methods and hacks that are useful for our research. Please contribute anything you think might be useful to others. We use Markdown to format the text in this Wiki. If you'd like to know more about Markdown, GitHub has a nice into [here](https://guides.github.com/features/mastering-markdown/).
+The purpose of this Wiki is to share our collective knowledge about procedures, methods and hacks that are useful for our research. Please contribute anything you think might be useful to others. We use Markdown to format the text in this Wiki. If you'd like to know more about Markdown, GitHub has a nice intro [here](https://guides.github.com/features/mastering-markdown/).
+
+## Announcements
+
+No new announcements at this time. Please check back here for new updates.
 
 ## Group Calendar
 
diff --git a/people.html b/people.html
index 56b2f61..aade90a 100644
--- a/people.html
+++ b/people.html
@@ -4,7 +4,7 @@
 toc: true
 ---
 
-Listed below are the pages that people have contributed to.
+Listed below are the pages on the LightForm wiki that people have contributed to.
 
 {% assign people_sorted = site.data.people | sort: "name" %}
 {% for person in people_sorted %}