From 4a3f6b8f9ad40a33c76d6ff1cf892bfbc19f0560 Mon Sep 17 00:00:00 2001 From: Zac Harrold Date: Wed, 20 Sep 2023 12:31:00 +1000 Subject: [PATCH 1/2] Added `unsafe_op_in_unsafe_fn` to Clippy CI This is a work-in-progress, there are thousands of violations of this lint that must be addressed before CI will complete successfully. --- crates/bevy_mikktspace/src/generated.rs | 1562 ++++++++++++----------- crates/bevy_ptr/src/lib.rs | 51 +- tools/ci/src/main.rs | 3 +- 3 files changed, 868 insertions(+), 748 deletions(-) diff --git a/crates/bevy_mikktspace/src/generated.rs b/crates/bevy_mikktspace/src/generated.rs index c1586235da489..a7022e2d786c6 100644 --- a/crates/bevy_mikktspace/src/generated.rs +++ b/crates/bevy_mikktspace/src/generated.rs @@ -230,110 +230,113 @@ pub unsafe fn genTangSpace(geometry: &mut I, fAngularThreshold: f32 let mut piTriListIn = vec![0i32; 3 * iNrTrianglesIn]; let mut pTriInfos = vec![STriInfo::zero(); iNrTrianglesIn]; - iNrTSPaces = GenerateInitialVerticesIndexList( - &mut pTriInfos, - &mut piTriListIn, - geometry, - iNrTrianglesIn, - ); - GenerateSharedVerticesIndexList(piTriListIn.as_mut_ptr(), geometry, iNrTrianglesIn); - iTotTris = iNrTrianglesIn; - iDegenTriangles = 0; - t = 0; - while t < iTotTris as usize { - let i0 = piTriListIn[t * 3 + 0]; - let i1 = piTriListIn[t * 3 + 1]; - let i2 = piTriListIn[t * 3 + 2]; - let p0 = get_position(geometry, i0 as usize); - let p1 = get_position(geometry, i1 as usize); - let p2 = get_position(geometry, i2 as usize); - if p0 == p1 || p0 == p2 || p1 == p2 { - pTriInfos[t].iFlag |= 1i32; - iDegenTriangles += 1 + unsafe { + iNrTSPaces = GenerateInitialVerticesIndexList( + &mut pTriInfos, + &mut piTriListIn, + geometry, + iNrTrianglesIn, + ); + GenerateSharedVerticesIndexList(piTriListIn.as_mut_ptr(), geometry, iNrTrianglesIn); + iTotTris = iNrTrianglesIn; + iDegenTriangles = 0; + t = 0; + while t < iTotTris as usize { + let i0 = piTriListIn[t * 3 + 0]; + let i1 = piTriListIn[t * 3 + 1]; + let i2 = piTriListIn[t * 3 + 2]; + let p0 = get_position(geometry, i0 as usize); + let p1 = get_position(geometry, i1 as usize); + let p2 = get_position(geometry, i2 as usize); + if p0 == p1 || p0 == p2 || p1 == p2 { + pTriInfos[t].iFlag |= 1i32; + iDegenTriangles += 1 + } + t += 1 } - t += 1 - } - iNrTrianglesIn = iTotTris - iDegenTriangles; - DegenPrologue( - pTriInfos.as_mut_ptr(), - piTriListIn.as_mut_ptr(), - iNrTrianglesIn as i32, - iTotTris as i32, - ); - InitTriInfo( - pTriInfos.as_mut_ptr(), - piTriListIn.as_ptr(), - geometry, - iNrTrianglesIn, - ); - iNrMaxGroups = iNrTrianglesIn * 3; + iNrTrianglesIn = iTotTris - iDegenTriangles; + DegenPrologue( + pTriInfos.as_mut_ptr(), + piTriListIn.as_mut_ptr(), + iNrTrianglesIn as i32, + iTotTris as i32, + ); + InitTriInfo( + pTriInfos.as_mut_ptr(), + piTriListIn.as_ptr(), + geometry, + iNrTrianglesIn, + ); + iNrMaxGroups = iNrTrianglesIn * 3; - let mut pGroups = vec![SGroup::zero(); iNrMaxGroups]; - let mut piGroupTrianglesBuffer = vec![0; iNrTrianglesIn * 3]; + let mut pGroups = vec![SGroup::zero(); iNrMaxGroups]; + let mut piGroupTrianglesBuffer = vec![0; iNrTrianglesIn * 3]; - iNrActiveGroups = Build4RuleGroups( - pTriInfos.as_mut_ptr(), - pGroups.as_mut_ptr(), - piGroupTrianglesBuffer.as_mut_ptr(), - piTriListIn.as_ptr(), - iNrTrianglesIn as i32, - ); + iNrActiveGroups = Build4RuleGroups( + pTriInfos.as_mut_ptr(), + pGroups.as_mut_ptr(), + piGroupTrianglesBuffer.as_mut_ptr(), + piTriListIn.as_ptr(), + iNrTrianglesIn as i32, + ); - let mut psTspace = vec![ - STSpace { - vOs: Vec3::new(1.0, 
0.0, 0.0), - fMagS: 1.0, - vOt: Vec3::new(0.0, 1.0, 0.0), - fMagT: 1.0, - ..STSpace::zero() - }; - iNrTSPaces - ]; + let mut psTspace = vec![ + STSpace { + vOs: Vec3::new(1.0, 0.0, 0.0), + fMagS: 1.0, + vOt: Vec3::new(0.0, 1.0, 0.0), + fMagT: 1.0, + ..STSpace::zero() + }; + iNrTSPaces + ]; - bRes = GenerateTSpaces( - &mut psTspace, - pTriInfos.as_ptr(), - pGroups.as_ptr(), - iNrActiveGroups, - piTriListIn.as_ptr(), - fThresCos, - geometry, - ); - if !bRes { - return false; - } - DegenEpilogue( - psTspace.as_mut_ptr(), - pTriInfos.as_mut_ptr(), - piTriListIn.as_mut_ptr(), - geometry, - iNrTrianglesIn as i32, - iTotTris as i32, - ); - index = 0; - f = 0; - while f < iNrFaces { - let verts_0 = geometry.num_vertices_of_face(f); - if !(verts_0 != 3 && verts_0 != 4) { - i = 0; - while i < verts_0 { - let mut pTSpace: *const STSpace = &mut psTspace[index] as *mut STSpace; - let mut tang = Vec3::new((*pTSpace).vOs.x, (*pTSpace).vOs.y, (*pTSpace).vOs.z); - let mut bitang = Vec3::new((*pTSpace).vOt.x, (*pTSpace).vOt.y, (*pTSpace).vOt.z); - geometry.set_tangent( - tang.into(), - bitang.into(), - (*pTSpace).fMagS, - (*pTSpace).fMagT, - (*pTSpace).bOrient, - f, - i, - ); - index += 1; - i += 1 + bRes = GenerateTSpaces( + &mut psTspace, + pTriInfos.as_ptr(), + pGroups.as_ptr(), + iNrActiveGroups, + piTriListIn.as_ptr(), + fThresCos, + geometry, + ); + if !bRes { + return false; + } + DegenEpilogue( + psTspace.as_mut_ptr(), + pTriInfos.as_mut_ptr(), + piTriListIn.as_mut_ptr(), + geometry, + iNrTrianglesIn as i32, + iTotTris as i32, + ); + index = 0; + f = 0; + while f < iNrFaces { + let verts_0 = geometry.num_vertices_of_face(f); + if !(verts_0 != 3 && verts_0 != 4) { + i = 0; + while i < verts_0 { + let mut pTSpace: *const STSpace = &mut psTspace[index] as *mut STSpace; + let mut tang = Vec3::new((*pTSpace).vOs.x, (*pTSpace).vOs.y, (*pTSpace).vOs.z); + let mut bitang = + Vec3::new((*pTSpace).vOt.x, (*pTSpace).vOt.y, (*pTSpace).vOt.z); + geometry.set_tangent( + tang.into(), + bitang.into(), + (*pTSpace).fMagS, + (*pTSpace).fMagT, + (*pTSpace).bOrient, + f, + i, + ); + index += 1; + i += 1 + } } + f += 1 } - f += 1 } return true; @@ -349,85 +352,88 @@ unsafe fn DegenEpilogue( let mut t: i32 = 0i32; let mut i: i32 = 0i32; t = iNrTrianglesIn; - while t < iTotTris { - let bSkip: bool = if (*pTriInfos.offset(t as isize)).iFlag & 2i32 != 0i32 { - true - } else { - false - }; - if !bSkip { - i = 0i32; - while i < 3i32 { - let index1: i32 = *piTriListIn.offset((t * 3i32 + i) as isize); - let mut bNotFound: bool = true; - let mut j: i32 = 0i32; - while bNotFound && j < 3i32 * iNrTrianglesIn { - let index2: i32 = *piTriListIn.offset(j as isize); - if index1 == index2 { - bNotFound = false - } else { - j += 1 + unsafe { + while t < iTotTris { + let bSkip: bool = if (*pTriInfos.offset(t as isize)).iFlag & 2i32 != 0i32 { + true + } else { + false + }; + if !bSkip { + i = 0i32; + while i < 3i32 { + let index1: i32 = *piTriListIn.offset((t * 3i32 + i) as isize); + let mut bNotFound: bool = true; + let mut j: i32 = 0i32; + while bNotFound && j < 3i32 * iNrTrianglesIn { + let index2: i32 = *piTriListIn.offset(j as isize); + if index1 == index2 { + bNotFound = false + } else { + j += 1 + } } + if !bNotFound { + let iTri: i32 = j / 3i32; + let iVert: i32 = j % 3i32; + let iSrcVert: i32 = + (*pTriInfos.offset(iTri as isize)).vert_num[iVert as usize] as i32; + let iSrcOffs: i32 = (*pTriInfos.offset(iTri as isize)).iTSpacesOffs; + let iDstVert: i32 = + (*pTriInfos.offset(t as isize)).vert_num[i as usize] as i32; + let 
iDstOffs: i32 = (*pTriInfos.offset(t as isize)).iTSpacesOffs; + *psTspace.offset((iDstOffs + iDstVert) as isize) = + *psTspace.offset((iSrcOffs + iSrcVert) as isize) + } + i += 1 } - if !bNotFound { - let iTri: i32 = j / 3i32; - let iVert: i32 = j % 3i32; - let iSrcVert: i32 = - (*pTriInfos.offset(iTri as isize)).vert_num[iVert as usize] as i32; - let iSrcOffs: i32 = (*pTriInfos.offset(iTri as isize)).iTSpacesOffs; - let iDstVert: i32 = (*pTriInfos.offset(t as isize)).vert_num[i as usize] as i32; - let iDstOffs: i32 = (*pTriInfos.offset(t as isize)).iTSpacesOffs; - *psTspace.offset((iDstOffs + iDstVert) as isize) = - *psTspace.offset((iSrcOffs + iSrcVert) as isize) - } - i += 1 } + t += 1 } - t += 1 - } - t = 0i32; - while t < iNrTrianglesIn { - if (*pTriInfos.offset(t as isize)).iFlag & 2i32 != 0i32 { - let mut vDstP = Vec3::new(0.0, 0.0, 0.0); - let mut iOrgF: i32 = -1i32; - let mut i_0: i32 = 0i32; - let mut bNotFound_0: bool = false; - let mut pV: *mut u8 = (*pTriInfos.offset(t as isize)).vert_num.as_mut_ptr(); - let mut iFlag: i32 = 1i32 << *pV.offset(0isize) as i32 - | 1i32 << *pV.offset(1isize) as i32 - | 1i32 << *pV.offset(2isize) as i32; - let mut iMissingIndex: i32 = 0i32; - if iFlag & 2i32 == 0i32 { - iMissingIndex = 1i32 - } else if iFlag & 4i32 == 0i32 { - iMissingIndex = 2i32 - } else if iFlag & 8i32 == 0i32 { - iMissingIndex = 3i32 - } - iOrgF = (*pTriInfos.offset(t as isize)).iOrgFaceNumber; - vDstP = get_position( - geometry, - face_vert_to_index(iOrgF as usize, iMissingIndex as usize), - ); - bNotFound_0 = true; - i_0 = 0i32; - while bNotFound_0 && i_0 < 3i32 { - let iVert_0: i32 = *pV.offset(i_0 as isize) as i32; - let vSrcP = get_position( + t = 0i32; + while t < iNrTrianglesIn { + if (*pTriInfos.offset(t as isize)).iFlag & 2i32 != 0i32 { + let mut vDstP = Vec3::new(0.0, 0.0, 0.0); + let mut iOrgF: i32 = -1i32; + let mut i_0: i32 = 0i32; + let mut bNotFound_0: bool = false; + let mut pV: *mut u8 = (*pTriInfos.offset(t as isize)).vert_num.as_mut_ptr(); + let mut iFlag: i32 = 1i32 << *pV.offset(0isize) as i32 + | 1i32 << *pV.offset(1isize) as i32 + | 1i32 << *pV.offset(2isize) as i32; + let mut iMissingIndex: i32 = 0i32; + if iFlag & 2i32 == 0i32 { + iMissingIndex = 1i32 + } else if iFlag & 4i32 == 0i32 { + iMissingIndex = 2i32 + } else if iFlag & 8i32 == 0i32 { + iMissingIndex = 3i32 + } + iOrgF = (*pTriInfos.offset(t as isize)).iOrgFaceNumber; + vDstP = get_position( geometry, - face_vert_to_index(iOrgF as usize, iVert_0 as usize), + face_vert_to_index(iOrgF as usize, iMissingIndex as usize), ); - if vSrcP == vDstP { - let iOffs: i32 = (*pTriInfos.offset(t as isize)).iTSpacesOffs; - *psTspace.offset((iOffs + iMissingIndex) as isize) = - *psTspace.offset((iOffs + iVert_0) as isize); - bNotFound_0 = false - } else { - i_0 += 1 + bNotFound_0 = true; + i_0 = 0i32; + while bNotFound_0 && i_0 < 3i32 { + let iVert_0: i32 = *pV.offset(i_0 as isize) as i32; + let vSrcP = get_position( + geometry, + face_vert_to_index(iOrgF as usize, iVert_0 as usize), + ); + if vSrcP == vDstP { + let iOffs: i32 = (*pTriInfos.offset(t as isize)).iTSpacesOffs; + *psTspace.offset((iOffs + iMissingIndex) as isize) = + *psTspace.offset((iOffs + iVert_0) as isize); + bNotFound_0 = false + } else { + i_0 += 1 + } } } + t += 1 } - t += 1 } } @@ -445,188 +451,194 @@ unsafe fn GenerateTSpaces( let mut g: i32 = 0i32; let mut i: i32 = 0i32; g = 0i32; - while g < iNrActiveGroups { - if iMaxNrFaces < (*pGroups.offset(g as isize)).iNrFaces as usize { - iMaxNrFaces = (*pGroups.offset(g as isize)).iNrFaces as 
usize + unsafe { + while g < iNrActiveGroups { + if iMaxNrFaces < (*pGroups.offset(g as isize)).iNrFaces as usize { + iMaxNrFaces = (*pGroups.offset(g as isize)).iNrFaces as usize + } + g += 1 + } + if iMaxNrFaces == 0 { + return true; } - g += 1 - } - if iMaxNrFaces == 0 { - return true; - } - let mut pSubGroupTspace = vec![STSpace::zero(); iMaxNrFaces]; - let mut pUniSubGroups = vec![SSubGroup::zero(); iMaxNrFaces]; - let mut pTmpMembers = vec![0i32; iMaxNrFaces]; + let mut pSubGroupTspace = vec![STSpace::zero(); iMaxNrFaces]; + let mut pUniSubGroups = vec![SSubGroup::zero(); iMaxNrFaces]; + let mut pTmpMembers = vec![0i32; iMaxNrFaces]; - iUniqueTspaces = 0; - g = 0i32; - while g < iNrActiveGroups { - let mut pGroup: *const SGroup = &*pGroups.offset(g as isize) as *const SGroup; - let mut iUniqueSubGroups = 0; - let mut s = 0; - i = 0i32; - while i < (*pGroup).iNrFaces { - let f: i32 = *(*pGroup).pFaceIndices.offset(i as isize); - let mut index: i32 = -1i32; - let mut iVertIndex: i32 = -1i32; - let mut iOF_1: i32 = -1i32; - let mut iMembers: usize = 0; - let mut j: i32 = 0i32; - let mut l: usize = 0; - let mut tmp_group: SSubGroup = SSubGroup { - iNrFaces: 0, - pTriMembers: Vec::new(), - }; - let mut bFound: bool = false; - let mut n = Vec3::new(0.0, 0.0, 0.0); - let mut vOs = Vec3::new(0.0, 0.0, 0.0); - let mut vOt = Vec3::new(0.0, 0.0, 0.0); - if (*pTriInfos.offset(f as isize)).AssignedGroup[0usize] == pGroup as *mut SGroup { - index = 0i32 - } else if (*pTriInfos.offset(f as isize)).AssignedGroup[1usize] == pGroup as *mut SGroup - { - index = 1i32 - } else if (*pTriInfos.offset(f as isize)).AssignedGroup[2usize] == pGroup as *mut SGroup - { - index = 2i32 - } - iVertIndex = *piTriListIn.offset((f * 3i32 + index) as isize); - n = get_normal(geometry, iVertIndex as usize); - let mut vOs = (*pTriInfos.offset(f as isize)).vOs - - (n.dot((*pTriInfos.offset(f as isize)).vOs) * n); - let mut vOt = (*pTriInfos.offset(f as isize)).vOt - - (n.dot((*pTriInfos.offset(f as isize)).vOt) * n); - if VNotZero(vOs) { - vOs = Normalize(vOs) - } - if VNotZero(vOt) { - vOt = Normalize(vOt) - } - iOF_1 = (*pTriInfos.offset(f as isize)).iOrgFaceNumber; - iMembers = 0; - j = 0i32; - while j < (*pGroup).iNrFaces { - let t: i32 = *(*pGroup).pFaceIndices.offset(j as isize); - let iOF_2: i32 = (*pTriInfos.offset(t as isize)).iOrgFaceNumber; - let mut vOs2 = (*pTriInfos.offset(t as isize)).vOs - - (n.dot((*pTriInfos.offset(t as isize)).vOs) * n); - let mut vOt2 = (*pTriInfos.offset(t as isize)).vOt - - (n.dot((*pTriInfos.offset(t as isize)).vOt) * n); - if VNotZero(vOs2) { - vOs2 = Normalize(vOs2) + iUniqueTspaces = 0; + g = 0i32; + while g < iNrActiveGroups { + let mut pGroup: *const SGroup = &*pGroups.offset(g as isize) as *const SGroup; + let mut iUniqueSubGroups = 0; + let mut s = 0; + i = 0i32; + while i < (*pGroup).iNrFaces { + let f: i32 = *(*pGroup).pFaceIndices.offset(i as isize); + let mut index: i32 = -1i32; + let mut iVertIndex: i32 = -1i32; + let mut iOF_1: i32 = -1i32; + let mut iMembers: usize = 0; + let mut j: i32 = 0i32; + let mut l: usize = 0; + let mut tmp_group: SSubGroup = SSubGroup { + iNrFaces: 0, + pTriMembers: Vec::new(), + }; + let mut bFound: bool = false; + let mut n = Vec3::new(0.0, 0.0, 0.0); + let mut vOs = Vec3::new(0.0, 0.0, 0.0); + let mut vOt = Vec3::new(0.0, 0.0, 0.0); + if (*pTriInfos.offset(f as isize)).AssignedGroup[0usize] == pGroup as *mut SGroup { + index = 0i32 + } else if (*pTriInfos.offset(f as isize)).AssignedGroup[1usize] + == pGroup as *mut SGroup + { + index = 
1i32 + } else if (*pTriInfos.offset(f as isize)).AssignedGroup[2usize] + == pGroup as *mut SGroup + { + index = 2i32 } - if VNotZero(vOt2) { - vOt2 = Normalize(vOt2) + iVertIndex = *piTriListIn.offset((f * 3i32 + index) as isize); + n = get_normal(geometry, iVertIndex as usize); + let mut vOs = (*pTriInfos.offset(f as isize)).vOs + - (n.dot((*pTriInfos.offset(f as isize)).vOs) * n); + let mut vOt = (*pTriInfos.offset(f as isize)).vOt + - (n.dot((*pTriInfos.offset(f as isize)).vOt) * n); + if VNotZero(vOs) { + vOs = Normalize(vOs) } - let bAny: bool = if ((*pTriInfos.offset(f as isize)).iFlag - | (*pTriInfos.offset(t as isize)).iFlag) - & 4i32 - != 0i32 - { - true - } else { - false - }; - let bSameOrgFace: bool = iOF_1 == iOF_2; - let fCosS: f32 = vOs.dot(vOs2); - let fCosT: f32 = vOt.dot(vOt2); - if bAny || bSameOrgFace || fCosS > fThresCos && fCosT > fThresCos { - let fresh0 = iMembers; - iMembers = iMembers + 1; - pTmpMembers[fresh0] = t + if VNotZero(vOt) { + vOt = Normalize(vOt) } - j += 1 - } - if iMembers > 1 { - let mut uSeed: u32 = 39871946i32 as u32; - QuickSort(pTmpMembers.as_mut_ptr(), 0i32, (iMembers - 1) as i32, uSeed); - } - tmp_group.iNrFaces = iMembers as i32; - tmp_group.pTriMembers = pTmpMembers.clone(); - bFound = false; - l = 0; - while l < iUniqueSubGroups && !bFound { - bFound = CompareSubGroups(&mut tmp_group, &mut pUniSubGroups[l]); - if !bFound { - l += 1 + iOF_1 = (*pTriInfos.offset(f as isize)).iOrgFaceNumber; + iMembers = 0; + j = 0i32; + while j < (*pGroup).iNrFaces { + let t: i32 = *(*pGroup).pFaceIndices.offset(j as isize); + let iOF_2: i32 = (*pTriInfos.offset(t as isize)).iOrgFaceNumber; + let mut vOs2 = (*pTriInfos.offset(t as isize)).vOs + - (n.dot((*pTriInfos.offset(t as isize)).vOs) * n); + let mut vOt2 = (*pTriInfos.offset(t as isize)).vOt + - (n.dot((*pTriInfos.offset(t as isize)).vOt) * n); + if VNotZero(vOs2) { + vOs2 = Normalize(vOs2) + } + if VNotZero(vOt2) { + vOt2 = Normalize(vOt2) + } + let bAny: bool = if ((*pTriInfos.offset(f as isize)).iFlag + | (*pTriInfos.offset(t as isize)).iFlag) + & 4i32 + != 0i32 + { + true + } else { + false + }; + let bSameOrgFace: bool = iOF_1 == iOF_2; + let fCosS: f32 = vOs.dot(vOs2); + let fCosT: f32 = vOt.dot(vOt2); + if bAny || bSameOrgFace || fCosS > fThresCos && fCosT > fThresCos { + let fresh0 = iMembers; + iMembers = iMembers + 1; + pTmpMembers[fresh0] = t + } + j += 1 } - } - if !bFound { - pUniSubGroups[iUniqueSubGroups].iNrFaces = iMembers as i32; - pUniSubGroups[iUniqueSubGroups].pTriMembers = tmp_group.pTriMembers.clone(); + if iMembers > 1 { + let mut uSeed: u32 = 39871946i32 as u32; + QuickSort(pTmpMembers.as_mut_ptr(), 0i32, (iMembers - 1) as i32, uSeed); + } + tmp_group.iNrFaces = iMembers as i32; + tmp_group.pTriMembers = pTmpMembers.clone(); + bFound = false; + l = 0; + while l < iUniqueSubGroups && !bFound { + bFound = CompareSubGroups(&mut tmp_group, &mut pUniSubGroups[l]); + if !bFound { + l += 1 + } + } + if !bFound { + pUniSubGroups[iUniqueSubGroups].iNrFaces = iMembers as i32; + pUniSubGroups[iUniqueSubGroups].pTriMembers = tmp_group.pTriMembers.clone(); - pSubGroupTspace[iUniqueSubGroups] = EvalTspace( - tmp_group.pTriMembers.as_mut_ptr(), - iMembers as i32, - piTriListIn, - pTriInfos, - geometry, - (*pGroup).iVertexRepresentitive, - ); - iUniqueSubGroups += 1 - } - let iOffs = (*pTriInfos.offset(f as isize)).iTSpacesOffs as usize; - let iVert = (*pTriInfos.offset(f as isize)).vert_num[index as usize] as usize; - let mut pTS_out: *mut STSpace = &mut psTspace[iOffs + iVert] as *mut 
STSpace; - if (*pTS_out).iCounter == 1i32 { - *pTS_out = AvgTSpace(pTS_out, &mut pSubGroupTspace[l]); - (*pTS_out).iCounter = 2i32; - (*pTS_out).bOrient = (*pGroup).bOrientPreservering - } else { - *pTS_out = pSubGroupTspace[l]; - (*pTS_out).iCounter = 1i32; - (*pTS_out).bOrient = (*pGroup).bOrientPreservering + pSubGroupTspace[iUniqueSubGroups] = EvalTspace( + tmp_group.pTriMembers.as_mut_ptr(), + iMembers as i32, + piTriListIn, + pTriInfos, + geometry, + (*pGroup).iVertexRepresentitive, + ); + iUniqueSubGroups += 1 + } + let iOffs = (*pTriInfos.offset(f as isize)).iTSpacesOffs as usize; + let iVert = (*pTriInfos.offset(f as isize)).vert_num[index as usize] as usize; + let mut pTS_out: *mut STSpace = &mut psTspace[iOffs + iVert] as *mut STSpace; + if (*pTS_out).iCounter == 1i32 { + *pTS_out = AvgTSpace(pTS_out, &mut pSubGroupTspace[l]); + (*pTS_out).iCounter = 2i32; + (*pTS_out).bOrient = (*pGroup).bOrientPreservering + } else { + *pTS_out = pSubGroupTspace[l]; + (*pTS_out).iCounter = 1i32; + (*pTS_out).bOrient = (*pGroup).bOrientPreservering + } + i += 1 } - i += 1 + iUniqueTspaces += iUniqueSubGroups; + g += 1 } - iUniqueTspaces += iUniqueSubGroups; - g += 1 } return true; } unsafe fn AvgTSpace(mut pTS0: *const STSpace, mut pTS1: *const STSpace) -> STSpace { - let mut ts_res: STSpace = STSpace { - vOs: Vec3::new(0.0, 0.0, 0.0), - fMagS: 0., - vOt: Vec3::new(0.0, 0.0, 0.0), - fMagT: 0., - iCounter: 0, - bOrient: false, - }; - if (*pTS0).fMagS == (*pTS1).fMagS - && (*pTS0).fMagT == (*pTS1).fMagT - && (*pTS0).vOs == (*pTS1).vOs - && (*pTS0).vOt == (*pTS1).vOt - { - ts_res.fMagS = (*pTS0).fMagS; - ts_res.fMagT = (*pTS0).fMagT; - ts_res.vOs = (*pTS0).vOs; - ts_res.vOt = (*pTS0).vOt - } else { - ts_res.fMagS = 0.5f32 * ((*pTS0).fMagS + (*pTS1).fMagS); - ts_res.fMagT = 0.5f32 * ((*pTS0).fMagT + (*pTS1).fMagT); - ts_res.vOs = (*pTS0).vOs + (*pTS1).vOs; - ts_res.vOt = (*pTS0).vOt + (*pTS1).vOt; - if VNotZero(ts_res.vOs) { - ts_res.vOs = Normalize(ts_res.vOs) - } - if VNotZero(ts_res.vOt) { - ts_res.vOt = Normalize(ts_res.vOt) + unsafe { + let mut ts_res: STSpace = STSpace { + vOs: Vec3::new(0.0, 0.0, 0.0), + fMagS: 0., + vOt: Vec3::new(0.0, 0.0, 0.0), + fMagT: 0., + iCounter: 0, + bOrient: false, + }; + if (*pTS0).fMagS == (*pTS1).fMagS + && (*pTS0).fMagT == (*pTS1).fMagT + && (*pTS0).vOs == (*pTS1).vOs + && (*pTS0).vOt == (*pTS1).vOt + { + ts_res.fMagS = (*pTS0).fMagS; + ts_res.fMagT = (*pTS0).fMagT; + ts_res.vOs = (*pTS0).vOs; + ts_res.vOt = (*pTS0).vOt + } else { + ts_res.fMagS = 0.5f32 * ((*pTS0).fMagS + (*pTS1).fMagS); + ts_res.fMagT = 0.5f32 * ((*pTS0).fMagT + (*pTS1).fMagT); + ts_res.vOs = (*pTS0).vOs + (*pTS1).vOs; + ts_res.vOt = (*pTS0).vOt + (*pTS1).vOt; + if VNotZero(ts_res.vOs) { + ts_res.vOs = Normalize(ts_res.vOs) + } + if VNotZero(ts_res.vOt) { + ts_res.vOt = Normalize(ts_res.vOt) + } } + return ts_res; } - return ts_res; } -unsafe fn Normalize(v: Vec3) -> Vec3 { +fn Normalize(v: Vec3) -> Vec3 { return (1.0 / v.length()) * v; } -unsafe fn VNotZero(v: Vec3) -> bool { +fn VNotZero(v: Vec3) -> bool { NotZero(v.x) || NotZero(v.y) || NotZero(v.z) } -unsafe fn NotZero(fX: f32) -> bool { +fn NotZero(fX: f32) -> bool { fX.abs() > 1.17549435e-38f32 } @@ -658,77 +670,81 @@ unsafe fn EvalTspace( res.fMagT = 0i32 as f32; face = 0i32; while face < iFaces { - let f: i32 = *face_indices.offset(face as isize); - if (*pTriInfos.offset(f as isize)).iFlag & 4i32 == 0i32 { - let mut n = Vec3::new(0.0, 0.0, 0.0); - let mut vOs = Vec3::new(0.0, 0.0, 0.0); - let mut vOt = Vec3::new(0.0, 0.0, 0.0); - let 
mut p0 = Vec3::new(0.0, 0.0, 0.0); - let mut p1 = Vec3::new(0.0, 0.0, 0.0); - let mut p2 = Vec3::new(0.0, 0.0, 0.0); - let mut v1 = Vec3::new(0.0, 0.0, 0.0); - let mut v2 = Vec3::new(0.0, 0.0, 0.0); - let mut fCos: f32 = 0.; - let mut fAngle: f32 = 0.; - let mut fMagS: f32 = 0.; - let mut fMagT: f32 = 0.; - let mut i: i32 = -1i32; - let mut index: i32 = -1i32; - let mut i0: i32 = -1i32; - let mut i1: i32 = -1i32; - let mut i2: i32 = -1i32; - if *piTriListIn.offset((3i32 * f + 0i32) as isize) == iVertexRepresentitive { - i = 0i32 - } else if *piTriListIn.offset((3i32 * f + 1i32) as isize) == iVertexRepresentitive { - i = 1i32 - } else if *piTriListIn.offset((3i32 * f + 2i32) as isize) == iVertexRepresentitive { - i = 2i32 - } - index = *piTriListIn.offset((3i32 * f + i) as isize); - n = get_normal(geometry, index as usize); - let mut vOs = (*pTriInfos.offset(f as isize)).vOs - - (n.dot((*pTriInfos.offset(f as isize)).vOs) * n); - let mut vOt = (*pTriInfos.offset(f as isize)).vOt - - (n.dot((*pTriInfos.offset(f as isize)).vOt) * n); - if VNotZero(vOs) { - vOs = Normalize(vOs) - } - if VNotZero(vOt) { - vOt = Normalize(vOt) - } - i2 = *piTriListIn.offset((3i32 * f + if i < 2i32 { i + 1i32 } else { 0i32 }) as isize); - i1 = *piTriListIn.offset((3i32 * f + i) as isize); - i0 = *piTriListIn.offset((3i32 * f + if i > 0i32 { i - 1i32 } else { 2i32 }) as isize); - p0 = get_position(geometry, i0 as usize); - p1 = get_position(geometry, i1 as usize); - p2 = get_position(geometry, i2 as usize); - v1 = p0 - p1; - v2 = p2 - p1; - let mut v1 = v1 - (n.dot(v1) * n); - if VNotZero(v1) { - v1 = Normalize(v1) - } - let mut v2 = v2 - (n.dot(v2) * n); - if VNotZero(v2) { - v2 = Normalize(v2) - } - let fCos = v1.dot(v2); + unsafe { + let f: i32 = *face_indices.offset(face as isize); + if (*pTriInfos.offset(f as isize)).iFlag & 4i32 == 0i32 { + let mut n = Vec3::new(0.0, 0.0, 0.0); + let mut vOs = Vec3::new(0.0, 0.0, 0.0); + let mut vOt = Vec3::new(0.0, 0.0, 0.0); + let mut p0 = Vec3::new(0.0, 0.0, 0.0); + let mut p1 = Vec3::new(0.0, 0.0, 0.0); + let mut p2 = Vec3::new(0.0, 0.0, 0.0); + let mut v1 = Vec3::new(0.0, 0.0, 0.0); + let mut v2 = Vec3::new(0.0, 0.0, 0.0); + let mut fCos: f32 = 0.; + let mut fAngle: f32 = 0.; + let mut fMagS: f32 = 0.; + let mut fMagT: f32 = 0.; + let mut i: i32 = -1i32; + let mut index: i32 = -1i32; + let mut i0: i32 = -1i32; + let mut i1: i32 = -1i32; + let mut i2: i32 = -1i32; + if *piTriListIn.offset((3i32 * f + 0i32) as isize) == iVertexRepresentitive { + i = 0i32 + } else if *piTriListIn.offset((3i32 * f + 1i32) as isize) == iVertexRepresentitive { + i = 1i32 + } else if *piTriListIn.offset((3i32 * f + 2i32) as isize) == iVertexRepresentitive { + i = 2i32 + } + index = *piTriListIn.offset((3i32 * f + i) as isize); + n = get_normal(geometry, index as usize); + let mut vOs = (*pTriInfos.offset(f as isize)).vOs + - (n.dot((*pTriInfos.offset(f as isize)).vOs) * n); + let mut vOt = (*pTriInfos.offset(f as isize)).vOt + - (n.dot((*pTriInfos.offset(f as isize)).vOt) * n); + if VNotZero(vOs) { + vOs = Normalize(vOs) + } + if VNotZero(vOt) { + vOt = Normalize(vOt) + } + i2 = *piTriListIn + .offset((3i32 * f + if i < 2i32 { i + 1i32 } else { 0i32 }) as isize); + i1 = *piTriListIn.offset((3i32 * f + i) as isize); + i0 = *piTriListIn + .offset((3i32 * f + if i > 0i32 { i - 1i32 } else { 2i32 }) as isize); + p0 = get_position(geometry, i0 as usize); + p1 = get_position(geometry, i1 as usize); + p2 = get_position(geometry, i2 as usize); + v1 = p0 - p1; + v2 = p2 - p1; + let mut v1 = v1 - 
(n.dot(v1) * n); + if VNotZero(v1) { + v1 = Normalize(v1) + } + let mut v2 = v2 - (n.dot(v2) * n); + if VNotZero(v2) { + v2 = Normalize(v2) + } + let fCos = v1.dot(v2); - let fCos = if fCos > 1i32 as f32 { - 1i32 as f32 - } else if fCos < -1i32 as f32 { - -1i32 as f32 - } else { - fCos - }; - fAngle = (fCos as f64).acos() as f32; - fMagS = (*pTriInfos.offset(f as isize)).fMagS; - fMagT = (*pTriInfos.offset(f as isize)).fMagT; - res.vOs = res.vOs + (fAngle * vOs); - res.vOt = res.vOt + (fAngle * vOt); - res.fMagS += fAngle * fMagS; - res.fMagT += fAngle * fMagT; - fAngleSum += fAngle + let fCos = if fCos > 1i32 as f32 { + 1i32 as f32 + } else if fCos < -1i32 as f32 { + -1i32 as f32 + } else { + fCos + }; + fAngle = (fCos as f64).acos() as f32; + fMagS = (*pTriInfos.offset(f as isize)).fMagS; + fMagT = (*pTriInfos.offset(f as isize)).fMagT; + res.vOs = res.vOs + (fAngle * vOs); + res.vOt = res.vOt + (fAngle * vOt); + res.fMagS += fAngle * fMagS; + res.fMagT += fAngle * fMagT; + fAngleSum += fAngle + } } face += 1 } @@ -748,17 +764,19 @@ unsafe fn EvalTspace( unsafe fn CompareSubGroups(mut pg1: *const SSubGroup, mut pg2: *const SSubGroup) -> bool { let mut bStillSame: bool = true; let mut i = 0; - if (*pg1).iNrFaces != (*pg2).iNrFaces { - return false; - } - while i < (*pg1).iNrFaces as usize && bStillSame { - bStillSame = if (*pg1).pTriMembers[i] == (*pg2).pTriMembers[i] { - true - } else { - false - }; - if bStillSame { - i += 1 + unsafe { + if (*pg1).iNrFaces != (*pg2).iNrFaces { + return false; + } + while i < (*pg1).iNrFaces as usize && bStillSame { + bStillSame = if (*pg1).pTriMembers[i] == (*pg2).pTriMembers[i] { + true + } else { + false + }; + if bStillSame { + i += 1 + } } } return bStillSame; @@ -781,31 +799,33 @@ unsafe fn QuickSort(mut pSortBuffer: *mut i32, mut iLeft: i32, mut iRight: i32, iR = iRight; n = iR - iL + 1i32; index = uSeed.wrapping_rem(n as u32) as i32; - iMid = *pSortBuffer.offset((index + iL) as isize); - loop { - while *pSortBuffer.offset(iL as isize) < iMid { - iL += 1 - } - while *pSortBuffer.offset(iR as isize) > iMid { - iR -= 1 - } - if iL <= iR { - iTmp = *pSortBuffer.offset(iL as isize); - *pSortBuffer.offset(iL as isize) = *pSortBuffer.offset(iR as isize); - *pSortBuffer.offset(iR as isize) = iTmp; - iL += 1; - iR -= 1 + unsafe { + iMid = *pSortBuffer.offset((index + iL) as isize); + loop { + while *pSortBuffer.offset(iL as isize) < iMid { + iL += 1 + } + while *pSortBuffer.offset(iR as isize) > iMid { + iR -= 1 + } + if iL <= iR { + iTmp = *pSortBuffer.offset(iL as isize); + *pSortBuffer.offset(iL as isize) = *pSortBuffer.offset(iR as isize); + *pSortBuffer.offset(iR as isize) = iTmp; + iL += 1; + iR -= 1 + } + if !(iL <= iR) { + break; + } } - if !(iL <= iR) { - break; + if iLeft < iR { + QuickSort(pSortBuffer, iLeft, iR, uSeed); } + if iL < iRight { + QuickSort(pSortBuffer, iL, iRight, uSeed); + }; } - if iLeft < iR { - QuickSort(pSortBuffer, iLeft, iR, uSeed); - } - if iL < iRight { - QuickSort(pSortBuffer, iL, iRight, uSeed); - }; } unsafe fn Build4RuleGroups( mut pTriInfos: *mut STriInfo, @@ -823,64 +843,66 @@ unsafe fn Build4RuleGroups( while f < iNrTrianglesIn { i = 0i32; while i < 3i32 { - if (*pTriInfos.offset(f as isize)).iFlag & 4i32 == 0i32 - && (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize].is_null() - { - let mut bOrPre: bool = false; - let mut neigh_indexL: i32 = 0; - let mut neigh_indexR: i32 = 0; - let vert_index: i32 = *piTriListIn.offset((f * 3i32 + i) as isize); - let ref mut fresh2 = (*pTriInfos.offset(f as 
isize)).AssignedGroup[i as usize]; - *fresh2 = &mut *pGroups.offset(iNrActiveGroups as isize) as *mut SGroup; - (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]) - .iVertexRepresentitive = vert_index; - (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).bOrientPreservering = - (*pTriInfos.offset(f as isize)).iFlag & 8i32 != 0i32; - (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).iNrFaces = 0i32; - let ref mut fresh3 = - (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).pFaceIndices; - *fresh3 = &mut *piGroupTrianglesBuffer.offset(iOffset as isize) as *mut i32; - iNrActiveGroups += 1; - AddTriToGroup((*pTriInfos.offset(f as isize)).AssignedGroup[i as usize], f); - bOrPre = if (*pTriInfos.offset(f as isize)).iFlag & 8i32 != 0i32 { - true - } else { - false - }; - neigh_indexL = (*pTriInfos.offset(f as isize)).FaceNeighbors[i as usize]; - neigh_indexR = (*pTriInfos.offset(f as isize)).FaceNeighbors - [(if i > 0i32 { i - 1i32 } else { 2i32 }) as usize]; - if neigh_indexL >= 0i32 { - let bAnswer: bool = AssignRecur( - piTriListIn, - pTriInfos, - neigh_indexL, - (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize], - ); - let bOrPre2: bool = - if (*pTriInfos.offset(neigh_indexL as isize)).iFlag & 8i32 != 0i32 { - true - } else { - false - }; - let bDiff: bool = if bOrPre != bOrPre2 { true } else { false }; - } - if neigh_indexR >= 0i32 { - let bAnswer_0: bool = AssignRecur( - piTriListIn, - pTriInfos, - neigh_indexR, - (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize], - ); - let bOrPre2_0: bool = - if (*pTriInfos.offset(neigh_indexR as isize)).iFlag & 8i32 != 0i32 { - true - } else { - false - }; - let bDiff_0: bool = if bOrPre != bOrPre2_0 { true } else { false }; + unsafe { + if (*pTriInfos.offset(f as isize)).iFlag & 4i32 == 0i32 + && (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize].is_null() + { + let mut bOrPre: bool = false; + let mut neigh_indexL: i32 = 0; + let mut neigh_indexR: i32 = 0; + let vert_index: i32 = *piTriListIn.offset((f * 3i32 + i) as isize); + let ref mut fresh2 = (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]; + *fresh2 = &mut *pGroups.offset(iNrActiveGroups as isize) as *mut SGroup; + (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]) + .iVertexRepresentitive = vert_index; + (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]) + .bOrientPreservering = (*pTriInfos.offset(f as isize)).iFlag & 8i32 != 0i32; + (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).iNrFaces = 0i32; + let ref mut fresh3 = + (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).pFaceIndices; + *fresh3 = &mut *piGroupTrianglesBuffer.offset(iOffset as isize) as *mut i32; + iNrActiveGroups += 1; + AddTriToGroup((*pTriInfos.offset(f as isize)).AssignedGroup[i as usize], f); + bOrPre = if (*pTriInfos.offset(f as isize)).iFlag & 8i32 != 0i32 { + true + } else { + false + }; + neigh_indexL = (*pTriInfos.offset(f as isize)).FaceNeighbors[i as usize]; + neigh_indexR = (*pTriInfos.offset(f as isize)).FaceNeighbors + [(if i > 0i32 { i - 1i32 } else { 2i32 }) as usize]; + if neigh_indexL >= 0i32 { + let bAnswer: bool = AssignRecur( + piTriListIn, + pTriInfos, + neigh_indexL, + (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize], + ); + let bOrPre2: bool = + if (*pTriInfos.offset(neigh_indexL as isize)).iFlag & 8i32 != 0i32 { + true + } else { + false + }; + let bDiff: bool = if bOrPre != bOrPre2 { true } else { false }; + } + if neigh_indexR >= 0i32 { + let bAnswer_0: bool = AssignRecur( + 
piTriListIn, + pTriInfos, + neigh_indexR, + (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize], + ); + let bOrPre2_0: bool = + if (*pTriInfos.offset(neigh_indexR as isize)).iFlag & 8i32 != 0i32 { + true + } else { + false + }; + let bDiff_0: bool = if bOrPre != bOrPre2_0 { true } else { false }; + } + iOffset += (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).iNrFaces } - iOffset += (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).iNrFaces } i += 1 } @@ -896,64 +918,68 @@ unsafe fn AssignRecur( iMyTriIndex: i32, mut pGroup: *mut SGroup, ) -> bool { - let mut pMyTriInfo: *mut STriInfo = - &mut *psTriInfos.offset(iMyTriIndex as isize) as *mut STriInfo; - // track down vertex - let iVertRep: i32 = (*pGroup).iVertexRepresentitive; - let mut pVerts: *const i32 = - &*piTriListIn.offset((3i32 * iMyTriIndex + 0i32) as isize) as *const i32; - let mut i: i32 = -1i32; - if *pVerts.offset(0isize) == iVertRep { - i = 0i32 - } else if *pVerts.offset(1isize) == iVertRep { - i = 1i32 - } else if *pVerts.offset(2isize) == iVertRep { - i = 2i32 - } - if (*pMyTriInfo).AssignedGroup[i as usize] == pGroup { - return true; - } else { - if !(*pMyTriInfo).AssignedGroup[i as usize].is_null() { - return false; + unsafe { + let mut pMyTriInfo: *mut STriInfo = + &mut *psTriInfos.offset(iMyTriIndex as isize) as *mut STriInfo; + // track down vertex + let iVertRep: i32 = (*pGroup).iVertexRepresentitive; + let mut pVerts: *const i32 = + &*piTriListIn.offset((3i32 * iMyTriIndex + 0i32) as isize) as *const i32; + let mut i: i32 = -1i32; + if *pVerts.offset(0isize) == iVertRep { + i = 0i32 + } else if *pVerts.offset(1isize) == iVertRep { + i = 1i32 + } else if *pVerts.offset(2isize) == iVertRep { + i = 2i32 } - } - if (*pMyTriInfo).iFlag & 4i32 != 0i32 { - if (*pMyTriInfo).AssignedGroup[0usize].is_null() - && (*pMyTriInfo).AssignedGroup[1usize].is_null() - && (*pMyTriInfo).AssignedGroup[2usize].is_null() - { - (*pMyTriInfo).iFlag &= !8i32; - (*pMyTriInfo).iFlag |= if (*pGroup).bOrientPreservering { - 8i32 - } else { - 0i32 + if (*pMyTriInfo).AssignedGroup[i as usize] == pGroup { + return true; + } else { + if !(*pMyTriInfo).AssignedGroup[i as usize].is_null() { + return false; } } - } - let bOrient: bool = if (*pMyTriInfo).iFlag & 8i32 != 0i32 { - true - } else { - false - }; - if bOrient != (*pGroup).bOrientPreservering { - return false; - } - AddTriToGroup(pGroup, iMyTriIndex); - (*pMyTriInfo).AssignedGroup[i as usize] = pGroup; - let neigh_indexL: i32 = (*pMyTriInfo).FaceNeighbors[i as usize]; - let neigh_indexR: i32 = - (*pMyTriInfo).FaceNeighbors[(if i > 0i32 { i - 1i32 } else { 2i32 }) as usize]; - if neigh_indexL >= 0i32 { - AssignRecur(piTriListIn, psTriInfos, neigh_indexL, pGroup); - } - if neigh_indexR >= 0i32 { - AssignRecur(piTriListIn, psTriInfos, neigh_indexR, pGroup); + if (*pMyTriInfo).iFlag & 4i32 != 0i32 { + if (*pMyTriInfo).AssignedGroup[0usize].is_null() + && (*pMyTriInfo).AssignedGroup[1usize].is_null() + && (*pMyTriInfo).AssignedGroup[2usize].is_null() + { + (*pMyTriInfo).iFlag &= !8i32; + (*pMyTriInfo).iFlag |= if (*pGroup).bOrientPreservering { + 8i32 + } else { + 0i32 + } + } + } + let bOrient: bool = if (*pMyTriInfo).iFlag & 8i32 != 0i32 { + true + } else { + false + }; + if bOrient != (*pGroup).bOrientPreservering { + return false; + } + AddTriToGroup(pGroup, iMyTriIndex); + (*pMyTriInfo).AssignedGroup[i as usize] = pGroup; + let neigh_indexL: i32 = (*pMyTriInfo).FaceNeighbors[i as usize]; + let neigh_indexR: i32 = + (*pMyTriInfo).FaceNeighbors[(if i > 0i32 { i - 
1i32 } else { 2i32 }) as usize]; + if neigh_indexL >= 0i32 { + AssignRecur(piTriListIn, psTriInfos, neigh_indexL, pGroup); + } + if neigh_indexR >= 0i32 { + AssignRecur(piTriListIn, psTriInfos, neigh_indexR, pGroup); + } } return true; } unsafe fn AddTriToGroup(mut pGroup: *mut SGroup, iTriIndex: i32) { - *(*pGroup).pFaceIndices.offset((*pGroup).iNrFaces as isize) = iTriIndex; - (*pGroup).iNrFaces += 1; + unsafe { + *(*pGroup).pFaceIndices.offset((*pGroup).iNrFaces as isize) = iTriIndex; + (*pGroup).iNrFaces += 1; + } } unsafe fn InitTriInfo( mut pTriInfos: *mut STriInfo, @@ -968,30 +994,36 @@ unsafe fn InitTriInfo( while f < iNrTrianglesIn { i = 0i32; while i < 3i32 { - (*pTriInfos.offset(f as isize)).FaceNeighbors[i as usize] = -1i32; - let ref mut fresh4 = (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]; - *fresh4 = 0 as *mut SGroup; - (*pTriInfos.offset(f as isize)).vOs.x = 0.0f32; - (*pTriInfos.offset(f as isize)).vOs.y = 0.0f32; - (*pTriInfos.offset(f as isize)).vOs.z = 0.0f32; - (*pTriInfos.offset(f as isize)).vOt.x = 0.0f32; - (*pTriInfos.offset(f as isize)).vOt.y = 0.0f32; - (*pTriInfos.offset(f as isize)).vOt.z = 0.0f32; - (*pTriInfos.offset(f as isize)).fMagS = 0i32 as f32; - (*pTriInfos.offset(f as isize)).fMagT = 0i32 as f32; - (*pTriInfos.offset(f as isize)).iFlag |= 4i32; + unsafe { + (*pTriInfos.offset(f as isize)).FaceNeighbors[i as usize] = -1i32; + let ref mut fresh4 = (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]; + *fresh4 = 0 as *mut SGroup; + (*pTriInfos.offset(f as isize)).vOs.x = 0.0f32; + (*pTriInfos.offset(f as isize)).vOs.y = 0.0f32; + (*pTriInfos.offset(f as isize)).vOs.z = 0.0f32; + (*pTriInfos.offset(f as isize)).vOt.x = 0.0f32; + (*pTriInfos.offset(f as isize)).vOt.y = 0.0f32; + (*pTriInfos.offset(f as isize)).vOt.z = 0.0f32; + (*pTriInfos.offset(f as isize)).fMagS = 0i32 as f32; + (*pTriInfos.offset(f as isize)).fMagT = 0i32 as f32; + (*pTriInfos.offset(f as isize)).iFlag |= 4i32; + } i += 1 } f += 1 } f = 0; while f < iNrTrianglesIn { - let v1 = get_position(geometry, *piTriListIn.offset((f * 3 + 0) as isize) as usize); - let v2 = get_position(geometry, *piTriListIn.offset((f * 3 + 1) as isize) as usize); - let v3 = get_position(geometry, *piTriListIn.offset((f * 3 + 2) as isize) as usize); - let t1 = get_tex_coord(geometry, *piTriListIn.offset((f * 3 + 0) as isize) as usize); - let t2 = get_tex_coord(geometry, *piTriListIn.offset((f * 3 + 1) as isize) as usize); - let t3 = get_tex_coord(geometry, *piTriListIn.offset((f * 3 + 2) as isize) as usize); + let a = unsafe { *piTriListIn.offset((f * 3 + 0) as isize) } as usize; + let b = unsafe { *piTriListIn.offset((f * 3 + 1) as isize) } as usize; + let c = unsafe { *piTriListIn.offset((f * 3 + 2) as isize) } as usize; + + let v1 = get_position(geometry, a); + let v2 = get_position(geometry, b); + let v3 = get_position(geometry, c); + let t1 = get_tex_coord(geometry, a); + let t2 = get_tex_coord(geometry, b); + let t3 = get_tex_coord(geometry, c); let t21x: f32 = t2.x - t1.x; let t21y: f32 = t2.y - t1.y; let t31x: f32 = t3.x - t1.x; @@ -1001,7 +1033,7 @@ unsafe fn InitTriInfo( let fSignedAreaSTx2: f32 = t21x * t31y - t21y * t31x; let mut vOs = (t31y * d1) - (t21y * d2); let mut vOt = (-t31x * d1) + (t21x * d2); - (*pTriInfos.offset(f as isize)).iFlag |= if fSignedAreaSTx2 > 0i32 as f32 { + unsafe { *pTriInfos.offset(f as isize) }.iFlag |= if fSignedAreaSTx2 > 0i32 as f32 { 8i32 } else { 0i32 @@ -1010,66 +1042,77 @@ unsafe fn InitTriInfo( let fAbsArea: f32 = fSignedAreaSTx2.abs(); let 
fLenOs: f32 = vOs.length(); let fLenOt: f32 = vOt.length(); - let fS: f32 = if (*pTriInfos.offset(f as isize)).iFlag & 8i32 == 0i32 { + let fS: f32 = if unsafe { *pTriInfos.offset(f as isize) }.iFlag & 8i32 == 0i32 { -1.0f32 } else { 1.0f32 }; - if NotZero(fLenOs) { - (*pTriInfos.offset(f as isize)).vOs = (fS / fLenOs) * vOs + unsafe { + if NotZero(fLenOs) { + (*pTriInfos.offset(f as isize)).vOs = (fS / fLenOs) * vOs + } } - if NotZero(fLenOt) { - (*pTriInfos.offset(f as isize)).vOt = (fS / fLenOt) * vOt + unsafe { + if NotZero(fLenOt) { + (*pTriInfos.offset(f as isize)).vOt = (fS / fLenOt) * vOt + } } - (*pTriInfos.offset(f as isize)).fMagS = fLenOs / fAbsArea; - (*pTriInfos.offset(f as isize)).fMagT = fLenOt / fAbsArea; - if NotZero((*pTriInfos.offset(f as isize)).fMagS) - && NotZero((*pTriInfos.offset(f as isize)).fMagT) + unsafe { *pTriInfos.offset(f as isize) }.fMagS = fLenOs / fAbsArea; + unsafe { *pTriInfos.offset(f as isize) }.fMagT = fLenOt / fAbsArea; + if unsafe { NotZero((*pTriInfos.offset(f as isize)).fMagS) } + && unsafe { NotZero((*pTriInfos.offset(f as isize)).fMagT) } { - (*pTriInfos.offset(f as isize)).iFlag &= !4i32 + unsafe { *pTriInfos.offset(f as isize) }.iFlag &= !4i32 } } f += 1 } while t < iNrTrianglesIn - 1 { - let iFO_a: i32 = (*pTriInfos.offset(t as isize)).iOrgFaceNumber; - let iFO_b: i32 = (*pTriInfos.offset((t + 1) as isize)).iOrgFaceNumber; + let iFO_a: i32 = unsafe { *pTriInfos.offset(t as isize) }.iOrgFaceNumber; + let iFO_b: i32 = unsafe { *pTriInfos.offset((t + 1) as isize) }.iOrgFaceNumber; if iFO_a == iFO_b { - let bIsDeg_a: bool = if (*pTriInfos.offset(t as isize)).iFlag & 1i32 != 0i32 { - true - } else { - false - }; - let bIsDeg_b: bool = if (*pTriInfos.offset((t + 1) as isize)).iFlag & 1i32 != 0i32 { + let bIsDeg_a: bool = if unsafe { *pTriInfos.offset(t as isize) }.iFlag & 1i32 != 0i32 { true } else { false }; - if !(bIsDeg_a || bIsDeg_b) { - let bOrientA: bool = if (*pTriInfos.offset(t as isize)).iFlag & 8i32 != 0i32 { - true - } else { - false - }; - let bOrientB: bool = if (*pTriInfos.offset((t + 1) as isize)).iFlag & 8i32 != 0i32 { + let bIsDeg_b: bool = + if unsafe { *pTriInfos.offset((t + 1) as isize) }.iFlag & 1i32 != 0i32 { true } else { false }; + if !(bIsDeg_a || bIsDeg_b) { + let bOrientA: bool = + if unsafe { *pTriInfos.offset(t as isize) }.iFlag & 8i32 != 0i32 { + true + } else { + false + }; + let bOrientB: bool = + if unsafe { *pTriInfos.offset((t + 1) as isize) }.iFlag & 8i32 != 0i32 { + true + } else { + false + }; if bOrientA != bOrientB { let mut bChooseOrientFirstTri: bool = false; - if (*pTriInfos.offset((t + 1) as isize)).iFlag & 4i32 != 0i32 { + if unsafe { *pTriInfos.offset((t + 1) as isize) }.iFlag & 4i32 != 0i32 { bChooseOrientFirstTri = true - } else if CalcTexArea(geometry, piTriListIn.offset((t * 3 + 0) as isize)) - >= CalcTexArea(geometry, piTriListIn.offset(((t + 1) * 3 + 0) as isize)) - { + } else if unsafe { + CalcTexArea(geometry, piTriListIn.offset((t * 3 + 0) as isize)) + } >= unsafe { + CalcTexArea(geometry, piTriListIn.offset(((t + 1) * 3 + 0) as isize)) + } { bChooseOrientFirstTri = true } let t0 = if bChooseOrientFirstTri { t } else { t + 1 }; let t1_0 = if bChooseOrientFirstTri { t + 1 } else { t }; - (*pTriInfos.offset(t1_0 as isize)).iFlag &= !8i32; - (*pTriInfos.offset(t1_0 as isize)).iFlag |= - (*pTriInfos.offset(t0 as isize)).iFlag & 8i32 + unsafe { + (*pTriInfos.offset(t1_0 as isize)).iFlag &= !8i32; + (*pTriInfos.offset(t1_0 as isize)).iFlag |= + (*pTriInfos.offset(t0 as isize)).iFlag & 8i32 + } } } 
t += 2 @@ -1079,12 +1122,14 @@ unsafe fn InitTriInfo( } let mut pEdges = vec![SEdge::zero(); iNrTrianglesIn * 3]; - BuildNeighborsFast( - pTriInfos, - pEdges.as_mut_ptr(), - piTriListIn, - iNrTrianglesIn as i32, - ); + unsafe { + BuildNeighborsFast( + pTriInfos, + pEdges.as_mut_ptr(), + piTriListIn, + iNrTrianglesIn as i32, + ); + } } unsafe fn BuildNeighborsFast( @@ -1104,97 +1149,115 @@ unsafe fn BuildNeighborsFast( while f < iNrTrianglesIn { i = 0i32; while i < 3i32 { - let i0: i32 = *piTriListIn.offset((f * 3i32 + i) as isize); - let i1: i32 = - *piTriListIn.offset((f * 3i32 + if i < 2i32 { i + 1i32 } else { 0i32 }) as isize); - (*pEdges.offset((f * 3i32 + i) as isize)).unnamed.i0 = if i0 < i1 { i0 } else { i1 }; - (*pEdges.offset((f * 3i32 + i) as isize)).unnamed.i1 = if !(i0 < i1) { i0 } else { i1 }; - (*pEdges.offset((f * 3i32 + i) as isize)).unnamed.f = f; + unsafe { + let i0: i32 = *piTriListIn.offset((f * 3i32 + i) as isize); + let i1: i32 = *piTriListIn + .offset((f * 3i32 + if i < 2i32 { i + 1i32 } else { 0i32 }) as isize); + (*pEdges.offset((f * 3i32 + i) as isize)).unnamed.i0 = + if i0 < i1 { i0 } else { i1 }; + (*pEdges.offset((f * 3i32 + i) as isize)).unnamed.i1 = + if !(i0 < i1) { i0 } else { i1 }; + (*pEdges.offset((f * 3i32 + i) as isize)).unnamed.f = f; + } i += 1 } f += 1 } - QuickSortEdges(pEdges, 0i32, iNrTrianglesIn * 3i32 - 1i32, 0i32, uSeed); + unsafe { + QuickSortEdges(pEdges, 0i32, iNrTrianglesIn * 3i32 - 1i32, 0i32, uSeed); + } iEntries = iNrTrianglesIn * 3i32; iCurStartIndex = 0i32; i = 1i32; while i < iEntries { - if (*pEdges.offset(iCurStartIndex as isize)).unnamed.i0 - != (*pEdges.offset(i as isize)).unnamed.i0 + if unsafe { (*pEdges.offset(iCurStartIndex as isize)).unnamed }.i0 + != unsafe { (*pEdges.offset(i as isize)).unnamed }.i0 { let iL: i32 = iCurStartIndex; let iR: i32 = i - 1i32; iCurStartIndex = i; - QuickSortEdges(pEdges, iL, iR, 1i32, uSeed); + unsafe { + QuickSortEdges(pEdges, iL, iR, 1i32, uSeed); + } } i += 1 } iCurStartIndex = 0i32; i = 1i32; while i < iEntries { - if (*pEdges.offset(iCurStartIndex as isize)).unnamed.i0 - != (*pEdges.offset(i as isize)).unnamed.i0 - || (*pEdges.offset(iCurStartIndex as isize)).unnamed.i1 - != (*pEdges.offset(i as isize)).unnamed.i1 + if unsafe { (*pEdges.offset(iCurStartIndex as isize)).unnamed }.i0 + != unsafe { (*pEdges.offset(i as isize)).unnamed }.i0 + || unsafe { (*pEdges.offset(iCurStartIndex as isize)).unnamed }.i1 + != unsafe { (*pEdges.offset(i as isize)).unnamed }.i1 { let iL_0: i32 = iCurStartIndex; let iR_0: i32 = i - 1i32; iCurStartIndex = i; - QuickSortEdges(pEdges, iL_0, iR_0, 2i32, uSeed); + unsafe { + QuickSortEdges(pEdges, iL_0, iR_0, 2i32, uSeed); + } } i += 1 } i = 0i32; while i < iEntries { - let i0_0: i32 = (*pEdges.offset(i as isize)).unnamed.i0; - let i1_0: i32 = (*pEdges.offset(i as isize)).unnamed.i1; - let f_0: i32 = (*pEdges.offset(i as isize)).unnamed.f; + let i0_0: i32 = unsafe { (*pEdges.offset(i as isize)).unnamed }.i0; + let i1_0: i32 = unsafe { (*pEdges.offset(i as isize)).unnamed }.i1; + let f_0: i32 = unsafe { (*pEdges.offset(i as isize)).unnamed }.f; let mut bUnassigned_A: bool = false; let mut i0_A: i32 = 0; let mut i1_A: i32 = 0; let mut edgenum_A: i32 = 0; let mut edgenum_B: i32 = 0i32; - GetEdge( - &mut i0_A, - &mut i1_A, - &mut edgenum_A, - &*piTriListIn.offset((f_0 * 3i32) as isize), - i0_0, - i1_0, - ); - bUnassigned_A = - if (*pTriInfos.offset(f_0 as isize)).FaceNeighbors[edgenum_A as usize] == -1i32 { - true - } else { - false - }; + unsafe { + GetEdge( + &mut 
i0_A, + &mut i1_A, + &mut edgenum_A, + &*piTriListIn.offset((f_0 * 3i32) as isize), + i0_0, + i1_0, + ); + } + bUnassigned_A = if unsafe { *pTriInfos.offset(f_0 as isize) }.FaceNeighbors + [edgenum_A as usize] + == -1i32 + { + true + } else { + false + }; if bUnassigned_A { let mut j: i32 = i + 1i32; let mut t: i32 = 0; let mut bNotFound: bool = true; while j < iEntries - && i0_0 == (*pEdges.offset(j as isize)).unnamed.i0 - && i1_0 == (*pEdges.offset(j as isize)).unnamed.i1 + && i0_0 == unsafe { (*pEdges.offset(j as isize)).unnamed }.i0 + && i1_0 == unsafe { (*pEdges.offset(j as isize)).unnamed }.i1 && bNotFound { let mut bUnassigned_B: bool = false; let mut i0_B: i32 = 0; let mut i1_B: i32 = 0; - t = (*pEdges.offset(j as isize)).unnamed.f; - GetEdge( - &mut i1_B, - &mut i0_B, - &mut edgenum_B, - &*piTriListIn.offset((t * 3i32) as isize), - (*pEdges.offset(j as isize)).unnamed.i0, - (*pEdges.offset(j as isize)).unnamed.i1, - ); - bUnassigned_B = - if (*pTriInfos.offset(t as isize)).FaceNeighbors[edgenum_B as usize] == -1i32 { - true - } else { - false - }; + t = unsafe { (*pEdges.offset(j as isize)).unnamed }.f; + unsafe { + GetEdge( + &mut i1_B, + &mut i0_B, + &mut edgenum_B, + &*piTriListIn.offset((t * 3i32) as isize), + (*pEdges.offset(j as isize)).unnamed.i0, + (*pEdges.offset(j as isize)).unnamed.i1, + ); + } + bUnassigned_B = if unsafe { *pTriInfos.offset(t as isize) }.FaceNeighbors + [edgenum_B as usize] + == -1i32 + { + true + } else { + false + }; if i0_A == i0_B && i1_A == i1_B && bUnassigned_B { bNotFound = false } else { @@ -1202,9 +1265,11 @@ unsafe fn BuildNeighborsFast( } } if !bNotFound { - let mut t_0: i32 = (*pEdges.offset(j as isize)).unnamed.f; - (*pTriInfos.offset(f_0 as isize)).FaceNeighbors[edgenum_A as usize] = t_0; - (*pTriInfos.offset(t_0 as isize)).FaceNeighbors[edgenum_B as usize] = f_0 + unsafe { + let mut t_0: i32 = (*pEdges.offset(j as isize)).unnamed.f; + (*pTriInfos.offset(f_0 as isize)).FaceNeighbors[edgenum_A as usize] = t_0; + (*pTriInfos.offset(t_0 as isize)).FaceNeighbors[edgenum_B as usize] = f_0 + } } } i += 1 @@ -1218,22 +1283,24 @@ unsafe fn GetEdge( i0_in: i32, i1_in: i32, ) { - *edgenum_out = -1i32; - if *indices.offset(0isize) == i0_in || *indices.offset(0isize) == i1_in { - if *indices.offset(1isize) == i0_in || *indices.offset(1isize) == i1_in { - *edgenum_out.offset(0isize) = 0i32; - *i0_out.offset(0isize) = *indices.offset(0isize); - *i1_out.offset(0isize) = *indices.offset(1isize) + unsafe { + *edgenum_out = -1i32; + if *indices.offset(0isize) == i0_in || *indices.offset(0isize) == i1_in { + if *indices.offset(1isize) == i0_in || *indices.offset(1isize) == i1_in { + *edgenum_out.offset(0isize) = 0i32; + *i0_out.offset(0isize) = *indices.offset(0isize); + *i1_out.offset(0isize) = *indices.offset(1isize) + } else { + *edgenum_out.offset(0isize) = 2i32; + *i0_out.offset(0isize) = *indices.offset(2isize); + *i1_out.offset(0isize) = *indices.offset(0isize) + } } else { - *edgenum_out.offset(0isize) = 2i32; - *i0_out.offset(0isize) = *indices.offset(2isize); - *i1_out.offset(0isize) = *indices.offset(0isize) - } - } else { - *edgenum_out.offset(0isize) = 1i32; - *i0_out.offset(0isize) = *indices.offset(1isize); - *i1_out.offset(0isize) = *indices.offset(2isize) - }; + *edgenum_out.offset(0isize) = 1i32; + *i0_out.offset(0isize) = *indices.offset(1isize); + *i1_out.offset(0isize) = *indices.offset(2isize) + }; + } } // /////////////////////////////////////////////////////////////////////////////////////////// 
///////////////////////////////////////////////////////////////////////////////////////////// @@ -1259,12 +1326,14 @@ unsafe fn QuickSortEdges( return; } else { if iElems == 2i32 { - if (*pSortBuffer.offset(iLeft as isize)).array[channel as usize] - > (*pSortBuffer.offset(iRight as isize)).array[channel as usize] - { - sTmp = *pSortBuffer.offset(iLeft as isize); - *pSortBuffer.offset(iLeft as isize) = *pSortBuffer.offset(iRight as isize); - *pSortBuffer.offset(iRight as isize) = sTmp + unsafe { + if (*pSortBuffer.offset(iLeft as isize)).array[channel as usize] + > (*pSortBuffer.offset(iRight as isize)).array[channel as usize] + { + sTmp = *pSortBuffer.offset(iLeft as isize); + *pSortBuffer.offset(iLeft as isize) = *pSortBuffer.offset(iRight as isize); + *pSortBuffer.offset(iRight as isize) = sTmp + } } return; } @@ -1280,18 +1349,20 @@ unsafe fn QuickSortEdges( iR = iRight; n = iR - iL + 1i32; index = uSeed.wrapping_rem(n as u32) as i32; - iMid = (*pSortBuffer.offset((index + iL) as isize)).array[channel as usize]; + iMid = unsafe { (*pSortBuffer.offset((index + iL) as isize)).array[channel as usize] }; loop { - while (*pSortBuffer.offset(iL as isize)).array[channel as usize] < iMid { + while unsafe { (*pSortBuffer.offset(iL as isize)).array[channel as usize] } < iMid { iL += 1 } - while (*pSortBuffer.offset(iR as isize)).array[channel as usize] > iMid { + while unsafe { (*pSortBuffer.offset(iR as isize)).array[channel as usize] } > iMid { iR -= 1 } if iL <= iR { - sTmp = *pSortBuffer.offset(iL as isize); - *pSortBuffer.offset(iL as isize) = *pSortBuffer.offset(iR as isize); - *pSortBuffer.offset(iR as isize) = sTmp; + unsafe { + sTmp = *pSortBuffer.offset(iL as isize); + *pSortBuffer.offset(iL as isize) = *pSortBuffer.offset(iR as isize); + *pSortBuffer.offset(iR as isize) = sTmp; + } iL += 1; iR -= 1 } @@ -1300,18 +1371,22 @@ unsafe fn QuickSortEdges( } } if iLeft < iR { - QuickSortEdges(pSortBuffer, iLeft, iR, channel, uSeed); + unsafe { + QuickSortEdges(pSortBuffer, iLeft, iR, channel, uSeed); + } } if iL < iRight { - QuickSortEdges(pSortBuffer, iL, iRight, channel, uSeed); + unsafe { + QuickSortEdges(pSortBuffer, iL, iRight, channel, uSeed); + } }; } // returns the texture area times 2 unsafe fn CalcTexArea(geometry: &mut I, mut indices: *const i32) -> f32 { - let t1 = get_tex_coord(geometry, *indices.offset(0isize) as usize); - let t2 = get_tex_coord(geometry, *indices.offset(1isize) as usize); - let t3 = get_tex_coord(geometry, *indices.offset(2isize) as usize); + let t1 = get_tex_coord(geometry, unsafe { *indices.offset(0isize) } as usize); + let t2 = get_tex_coord(geometry, unsafe { *indices.offset(1isize) } as usize); + let t3 = get_tex_coord(geometry, unsafe { *indices.offset(2isize) } as usize); let t21x: f32 = t2.x - t1.x; let t21y: f32 = t2.y - t1.y; let t31x: f32 = t3.x - t1.x; @@ -1336,22 +1411,25 @@ unsafe fn DegenPrologue( // locate quads with only one good triangle let mut t: i32 = 0i32; while t < iTotTris - 1i32 { - let iFO_a: i32 = (*pTriInfos.offset(t as isize)).iOrgFaceNumber; - let iFO_b: i32 = (*pTriInfos.offset((t + 1i32) as isize)).iOrgFaceNumber; + let iFO_a: i32 = unsafe { *pTriInfos.offset(t as isize) }.iOrgFaceNumber; + let iFO_b: i32 = unsafe { *pTriInfos.offset((t + 1i32) as isize) }.iOrgFaceNumber; if iFO_a == iFO_b { - let bIsDeg_a: bool = if (*pTriInfos.offset(t as isize)).iFlag & 1i32 != 0i32 { - true - } else { - false - }; - let bIsDeg_b: bool = if (*pTriInfos.offset((t + 1i32) as isize)).iFlag & 1i32 != 0i32 { + let bIsDeg_a: bool = if unsafe { 
*pTriInfos.offset(t as isize) }.iFlag & 1i32 != 0i32 { true } else { false }; + let bIsDeg_b: bool = + if unsafe { *pTriInfos.offset((t + 1i32) as isize) }.iFlag & 1i32 != 0i32 { + true + } else { + false + }; if bIsDeg_a ^ bIsDeg_b != false { - (*pTriInfos.offset(t as isize)).iFlag |= 2i32; - (*pTriInfos.offset((t + 1i32) as isize)).iFlag |= 2i32 + unsafe { + (*pTriInfos.offset(t as isize)).iFlag |= 2i32; + (*pTriInfos.offset((t + 1i32) as isize)).iFlag |= 2i32 + } } t += 2i32 } else { @@ -1362,7 +1440,7 @@ unsafe fn DegenPrologue( t = 0i32; bStillFindingGoodOnes = true; while t < iNrTrianglesIn && bStillFindingGoodOnes { - let bIsGood: bool = if (*pTriInfos.offset(t as isize)).iFlag & 1i32 == 0i32 { + let bIsGood: bool = if unsafe { *pTriInfos.offset(t as isize) }.iFlag & 1i32 == 0i32 { true } else { false @@ -1377,7 +1455,8 @@ unsafe fn DegenPrologue( let mut bJustADegenerate: bool = true; while bJustADegenerate && iNextGoodTriangleSearchIndex < iTotTris { let bIsGood_0: bool = - if (*pTriInfos.offset(iNextGoodTriangleSearchIndex as isize)).iFlag & 1i32 + if unsafe { *pTriInfos.offset(iNextGoodTriangleSearchIndex as isize) }.iFlag + & 1i32 == 0i32 { true @@ -1397,15 +1476,19 @@ unsafe fn DegenPrologue( let mut i: i32 = 0i32; i = 0i32; while i < 3i32 { - let index: i32 = *piTriList_out.offset((t0 * 3i32 + i) as isize); - *piTriList_out.offset((t0 * 3i32 + i) as isize) = - *piTriList_out.offset((t1 * 3i32 + i) as isize); - *piTriList_out.offset((t1 * 3i32 + i) as isize) = index; + unsafe { + let index: i32 = *piTriList_out.offset((t0 * 3i32 + i) as isize); + *piTriList_out.offset((t0 * 3i32 + i) as isize) = + *piTriList_out.offset((t1 * 3i32 + i) as isize); + *piTriList_out.offset((t1 * 3i32 + i) as isize) = index; + } i += 1 } - let tri_info: STriInfo = *pTriInfos.offset(t0 as isize); - *pTriInfos.offset(t0 as isize) = *pTriInfos.offset(t1 as isize); - *pTriInfos.offset(t1 as isize) = tri_info + unsafe { + let tri_info: STriInfo = *pTriInfos.offset(t0 as isize); + *pTriInfos.offset(t0 as isize) = *pTriInfos.offset(t1 as isize); + *pTriInfos.offset(t1 as isize) = tri_info + } } else { bStillFindingGoodOnes = false } @@ -1432,7 +1515,7 @@ unsafe fn GenerateSharedVerticesIndexList( let mut fMax: f32 = 0.; i = 1; while i < iNrTrianglesIn * 3 { - let index: i32 = *piTriList_in_and_out.offset(i as isize); + let index: i32 = unsafe { *piTriList_in_and_out.offset(i as isize) }; let vP = get_position(geometry, index as usize); if vMin.x > vP.x { vMin.x = vP.x @@ -1472,7 +1555,7 @@ unsafe fn GenerateSharedVerticesIndexList( i = 0; while i < iNrTrianglesIn * 3 { - let index_0: i32 = *piTriList_in_and_out.offset(i as isize); + let index_0: i32 = unsafe { *piTriList_in_and_out.offset(i as isize) }; let vP_0 = get_position(geometry, index_0 as usize); let fVal: f32 = if iChannel == 0i32 { vP_0.x @@ -1481,7 +1564,7 @@ unsafe fn GenerateSharedVerticesIndexList( } else { vP_0.z }; - let iCell = FindGridCell(fMin, fMax, fVal); + let iCell = unsafe { FindGridCell(fMin, fMax, fVal) }; piHashCount[iCell] += 1; i += 1 } @@ -1493,7 +1576,7 @@ unsafe fn GenerateSharedVerticesIndexList( } i = 0; while i < iNrTrianglesIn * 3 { - let index_1: i32 = *piTriList_in_and_out.offset(i as isize); + let index_1: i32 = unsafe { *piTriList_in_and_out.offset(i as isize) }; let vP_1 = get_position(geometry, index_1 as usize); let fVal_0: f32 = if iChannel == 0i32 { vP_1.x @@ -1502,7 +1585,7 @@ unsafe fn GenerateSharedVerticesIndexList( } else { vP_1.z }; - let iCell_0 = FindGridCell(fMin, fMax, fVal_0); + let iCell_0 = unsafe 
{ FindGridCell(fMin, fMax, fVal_0) }; piHashTable[(piHashOffsets[iCell_0] + piHashCount2[iCell_0]) as usize] = i as i32; piHashCount2[iCell_0] += 1; i += 1 @@ -1523,29 +1606,32 @@ unsafe fn GenerateSharedVerticesIndexList( k = 0; while k < g_iCells { // extract table of cell k and amount of entries in it - let pTable_0 = piHashTable.as_mut_ptr().offset(piHashOffsets[k] as isize); + let pTable_0 = unsafe { piHashTable.as_mut_ptr().offset(piHashOffsets[k] as isize) }; let iEntries = piHashCount[k] as usize; if !(iEntries < 2) { e = 0; while e < iEntries { - let mut i_0: i32 = *pTable_0.offset(e as isize); - let vP_2 = get_position( - geometry, - *piTriList_in_and_out.offset(i_0 as isize) as usize, - ); + let mut i_0: i32 = unsafe { *pTable_0.offset(e as isize) }; + let vP_2 = + get_position( + geometry, + unsafe { *piTriList_in_and_out.offset(i_0 as isize) } as usize, + ); pTmpVert[e].vert[0usize] = vP_2.x; pTmpVert[e].vert[1usize] = vP_2.y; pTmpVert[e].vert[2usize] = vP_2.z; pTmpVert[e].index = i_0; e += 1 } - MergeVertsFast( - piTriList_in_and_out, - pTmpVert.as_mut_ptr(), - geometry, - 0i32, - (iEntries - 1) as i32, - ); + unsafe { + MergeVertsFast( + piTriList_in_and_out, + pTmpVert.as_mut_ptr(), + geometry, + 0i32, + (iEntries - 1) as i32, + ); + } } k += 1 } @@ -1570,7 +1656,7 @@ unsafe fn MergeVertsFast( let mut fSep: f32 = 0i32 as f32; c = 0i32; while c < 3i32 { - fvMin[c as usize] = (*pTmpVert.offset(iL_in as isize)).vert[c as usize]; + fvMin[c as usize] = unsafe { *pTmpVert.offset(iL_in as isize) }.vert[c as usize]; fvMax[c as usize] = fvMin[c as usize]; c += 1 } @@ -1578,10 +1664,10 @@ unsafe fn MergeVertsFast( while l <= iR_in { c = 0i32; while c < 3i32 { - if fvMin[c as usize] > (*pTmpVert.offset(l as isize)).vert[c as usize] { - fvMin[c as usize] = (*pTmpVert.offset(l as isize)).vert[c as usize] - } else if fvMax[c as usize] < (*pTmpVert.offset(l as isize)).vert[c as usize] { - fvMax[c as usize] = (*pTmpVert.offset(l as isize)).vert[c as usize] + if fvMin[c as usize] > unsafe { *pTmpVert.offset(l as isize) }.vert[c as usize] { + fvMin[c as usize] = unsafe { *pTmpVert.offset(l as isize) }.vert[c as usize] + } else if fvMax[c as usize] < unsafe { *pTmpVert.offset(l as isize) }.vert[c as usize] { + fvMax[c as usize] = unsafe { *pTmpVert.offset(l as isize) }.vert[c as usize] } c += 1 } @@ -1600,8 +1686,8 @@ unsafe fn MergeVertsFast( if fSep >= fvMax[channel as usize] || fSep <= fvMin[channel as usize] { l = iL_in; while l <= iR_in { - let mut i: i32 = (*pTmpVert.offset(l as isize)).index; - let index: i32 = *piTriList_in_and_out.offset(i as isize); + let mut i: i32 = unsafe { *pTmpVert.offset(l as isize) }.index; + let index: i32 = unsafe { *piTriList_in_and_out.offset(i as isize) }; let vP = get_position(geometry, index as usize); let vN = get_normal(geometry, index as usize); let vT = get_tex_coord(geometry, index as usize); @@ -1609,8 +1695,8 @@ unsafe fn MergeVertsFast( let mut l2: i32 = iL_in; let mut i2rec: i32 = -1i32; while l2 < l && bNotFound { - let i2: i32 = (*pTmpVert.offset(l2 as isize)).index; - let index2: i32 = *piTriList_in_and_out.offset(i2 as isize); + let i2: i32 = unsafe { *pTmpVert.offset(l2 as isize) }.index; + let index2: i32 = unsafe { *piTriList_in_and_out.offset(i2 as isize) }; let vP2 = get_position(geometry, index2 as usize); let vN2 = get_normal(geometry, index2 as usize); let vT2 = get_tex_coord(geometry, index2 as usize); @@ -1631,8 +1717,10 @@ unsafe fn MergeVertsFast( } } if !bNotFound { - *piTriList_in_and_out.offset(i as isize) = - 
*piTriList_in_and_out.offset(i2rec as isize) + unsafe { + *piTriList_in_and_out.offset(i as isize) = + *piTriList_in_and_out.offset(i2rec as isize) + } } l += 1 } @@ -1643,28 +1731,32 @@ unsafe fn MergeVertsFast( let mut bReadyLeftSwap: bool = false; let mut bReadyRightSwap: bool = false; while !bReadyLeftSwap && iL < iR { - bReadyLeftSwap = !((*pTmpVert.offset(iL as isize)).vert[channel as usize] < fSep); + bReadyLeftSwap = + !(unsafe { *pTmpVert.offset(iL as isize) }.vert[channel as usize] < fSep); if !bReadyLeftSwap { iL += 1 } } while !bReadyRightSwap && iL < iR { - bReadyRightSwap = (*pTmpVert.offset(iR as isize)).vert[channel as usize] < fSep; + bReadyRightSwap = + unsafe { *pTmpVert.offset(iR as isize) }.vert[channel as usize] < fSep; if !bReadyRightSwap { iR -= 1 } } if bReadyLeftSwap && bReadyRightSwap { - let sTmp: STmpVert = *pTmpVert.offset(iL as isize); - *pTmpVert.offset(iL as isize) = *pTmpVert.offset(iR as isize); - *pTmpVert.offset(iR as isize) = sTmp; + unsafe { + let sTmp: STmpVert = *pTmpVert.offset(iL as isize); + *pTmpVert.offset(iL as isize) = *pTmpVert.offset(iR as isize); + *pTmpVert.offset(iR as isize) = sTmp; + } iL += 1; iR -= 1 } } if iL == iR { let bReadyRightSwap_0: bool = - (*pTmpVert.offset(iR as isize)).vert[channel as usize] < fSep; + unsafe { *pTmpVert.offset(iR as isize) }.vert[channel as usize] < fSep; if bReadyRightSwap_0 { iL += 1 } else { @@ -1672,10 +1764,14 @@ unsafe fn MergeVertsFast( } } if iL_in < iR { - MergeVertsFast(piTriList_in_and_out, pTmpVert, geometry, iL_in, iR); + unsafe { + MergeVertsFast(piTriList_in_and_out, pTmpVert, geometry, iL_in, iR); + } } if iL < iR_in { - MergeVertsFast(piTriList_in_and_out, pTmpVert, geometry, iL, iR_in); + unsafe { + MergeVertsFast(piTriList_in_and_out, pTmpVert, geometry, iL, iR_in); + } } }; } diff --git a/crates/bevy_ptr/src/lib.rs b/crates/bevy_ptr/src/lib.rs index f6212c505ba70..7cdcfaf6dc7ba 100644 --- a/crates/bevy_ptr/src/lib.rs +++ b/crates/bevy_ptr/src/lib.rs @@ -105,7 +105,8 @@ macro_rules! impl_ptr { #[inline] pub unsafe fn byte_offset(self, count: isize) -> Self { Self( - NonNull::new_unchecked(self.as_ptr().offset(count)), + // SAFETY: the caller must uphold the safety contract for `offset`. + unsafe { NonNull::new_unchecked(self.as_ptr().offset(count)) }, PhantomData, ) } @@ -125,7 +126,8 @@ macro_rules! impl_ptr { #[inline] pub unsafe fn byte_add(self, count: usize) -> Self { Self( - NonNull::new_unchecked(self.as_ptr().add(count)), + // SAFETY: the caller must uphold the safety contract for `add`. + unsafe { NonNull::new_unchecked(self.as_ptr().add(count)) }, PhantomData, ) } @@ -175,7 +177,10 @@ impl<'a, A: IsAligned> Ptr<'a, A> { /// for the pointee type `T`. #[inline] pub unsafe fn deref(self) -> &'a T { - &*self.as_ptr().cast::().debug_ensure_aligned() + let pointer = self.as_ptr().cast::().debug_ensure_aligned(); + + // SAFETY: the caller must uphold the safety contract for `deref`. + unsafe { &*pointer } } /// Gets the underlying pointer, erasing the associated lifetime. @@ -229,7 +234,10 @@ impl<'a, A: IsAligned> PtrMut<'a, A> { /// for the pointee type `T`. #[inline] pub unsafe fn deref_mut(self) -> &'a mut T { - &mut *self.as_ptr().cast::().debug_ensure_aligned() + let pointer = self.as_ptr().cast::().debug_ensure_aligned(); + + // SAFETY: the caller must uphold the safety contract for `deref`. + unsafe { &mut *pointer } } /// Gets the underlying pointer, erasing the associated lifetime. @@ -298,7 +306,10 @@ impl<'a, A: IsAligned> OwningPtr<'a, A> { /// for the pointee type `T`. 
#[inline] pub unsafe fn read(self) -> T { - self.as_ptr().cast::().debug_ensure_aligned().read() + let pointer = self.as_ptr().cast::().debug_ensure_aligned(); + + // SAFETY: the caller must uphold the safety contract for `read`. + unsafe { pointer.read() } } /// Consumes the [`OwningPtr`] to drop the underlying data of type `T`. @@ -309,10 +320,12 @@ impl<'a, A: IsAligned> OwningPtr<'a, A> { /// for the pointee type `T`. #[inline] pub unsafe fn drop_as(self) { - self.as_ptr() - .cast::() - .debug_ensure_aligned() - .drop_in_place(); + let pointer = self.as_ptr().cast::().debug_ensure_aligned(); + + // SAFETY: the caller must uphold the safety contract for `drop_in_place`. + unsafe { + pointer.drop_in_place(); + } } /// Gets the underlying pointer, erasing the associated lifetime. @@ -345,7 +358,8 @@ impl<'a> OwningPtr<'a, Unaligned> { /// # Safety /// - `T` must be the erased pointee type for this [`OwningPtr`]. pub unsafe fn read_unaligned(self) -> T { - self.as_ptr().cast::().read_unaligned() + // SAFETY: It is the responsibility of the caller to ensure T is the erased pointee type for this. + unsafe { self.as_ptr().cast::().read_unaligned() } } } @@ -367,7 +381,10 @@ impl<'a, T> ThinSlicePtr<'a, T> { #[cfg(debug_assertions)] debug_assert!(index < self.len); - &*self.ptr.as_ptr().add(index) + // SAFETY: The caller assures that index is within range, and interior + // mutability ensures we have permission to hand out a read-only + // reference. + unsafe { &*self.ptr.as_ptr().add(index) } } } @@ -434,11 +451,15 @@ pub trait UnsafeCellDeref<'a, T>: private::SealedUnsafeCell { impl<'a, T> UnsafeCellDeref<'a, T> for &'a UnsafeCell { #[inline] unsafe fn deref_mut(self) -> &'a mut T { - &mut *self.get() + // SAFETY: It is the responsibility of the caller to ensure it is safe + // to deref the contents of this cell and obtain a mutable reference. + unsafe { &mut *self.get() } } #[inline] unsafe fn deref(self) -> &'a T { - &*self.get() + // SAFETY: It is the responsibility of the caller to ensure it is safe + // to deref the contents of this cell and obtain a reference. + unsafe { &*self.get() } } #[inline] @@ -446,7 +467,9 @@ impl<'a, T> UnsafeCellDeref<'a, T> for &'a UnsafeCell { where T: Copy, { - self.get().read() + // SAFETY: It is the responsibility of the caller to ensure it is safe + // to read the contents of this cell. + unsafe { self.get().read() } } } diff --git a/tools/ci/src/main.rs b/tools/ci/src/main.rs index 703c1e062bd95..97830dda6de46 100644 --- a/tools/ci/src/main.rs +++ b/tools/ci/src/main.rs @@ -17,13 +17,14 @@ bitflags! { } } -const CLIPPY_FLAGS: [&str; 7] = [ +const CLIPPY_FLAGS: [&str; 8] = [ "-Aclippy::type_complexity", "-Wclippy::doc_markdown", "-Wclippy::redundant_else", "-Wclippy::match_same_arms", "-Wclippy::semicolon_if_nothing_returned", "-Wclippy::map_flatten", + "-Wunsafe_op_in_unsafe_fn", "-Dwarnings", ]; From a47cf6c5825cb1fb3de2d867868024a18d2369f4 Mon Sep 17 00:00:00 2001 From: Zac Harrold Date: Wed, 20 Sep 2023 17:33:44 +1000 Subject: [PATCH 2/2] Added lint to all crates Still WIP, currently 600+ compiler errors. 
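For reviewers, a minimal sketch of the transformation the lint demands (hypothetical function, not taken from this diff): under `unsafe_op_in_unsafe_fn`, the body of an `unsafe fn` no longer acts as an implicit `unsafe` block, so each unsafe operation must be wrapped explicitly and paired with the `// SAFETY:` comments used throughout this patch.

    /// # Safety
    /// `ptr` must be non-null, properly aligned, and valid for reads of `u32`.
    pub unsafe fn read_u32(ptr: *const u32) -> u32 {
        // Previously `*ptr` compiled as-is inside an `unsafe fn`.
        // SAFETY: the caller upholds the contract documented above.
        unsafe { *ptr }
    }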
Tentative Completion WIP Reverted Changes to `bevy_mikktspace` --- crates/bevy_animation/src/lib.rs | 1 + crates/bevy_app/src/lib.rs | 1 + crates/bevy_asset/src/lib.rs | 2 + crates/bevy_asset/src/reflect.rs | 2 +- crates/bevy_core/src/lib.rs | 1 + crates/bevy_core_pipeline/src/lib.rs | 1 + crates/bevy_derive/src/lib.rs | 1 + crates/bevy_diagnostic/src/lib.rs | 1 + crates/bevy_dylib/src/lib.rs | 1 + crates/bevy_dynamic_plugin/src/lib.rs | 1 + crates/bevy_dynamic_plugin/src/loader.rs | 15 +- crates/bevy_ecs/src/archetype.rs | 2 +- crates/bevy_ecs/src/bundle.rs | 231 ++- crates/bevy_ecs/src/change_detection.rs | 35 +- crates/bevy_ecs/src/component.rs | 18 +- crates/bevy_ecs/src/entity/mod.rs | 10 +- crates/bevy_ecs/src/lib.rs | 1 + crates/bevy_ecs/src/query/fetch.rs | 271 +-- crates/bevy_ecs/src/query/filter.rs | 38 +- crates/bevy_ecs/src/query/iter.rs | 221 ++- crates/bevy_ecs/src/query/state.rs | 402 +++-- crates/bevy_ecs/src/reflect/component.rs | 2 +- crates/bevy_ecs/src/reflect/resource.rs | 2 +- crates/bevy_ecs/src/removal_detection.rs | 4 +- .../src/schedule/executor/multi_threaded.rs | 12 +- crates/bevy_ecs/src/storage/blob_vec.rs | 112 +- crates/bevy_ecs/src/storage/resource.rs | 26 +- crates/bevy_ecs/src/storage/sparse_set.rs | 12 +- crates/bevy_ecs/src/storage/table.rs | 127 +- crates/bevy_ecs/src/system/adapter_system.rs | 5 +- crates/bevy_ecs/src/system/combinator.rs | 24 +- crates/bevy_ecs/src/system/function_system.rs | 22 +- crates/bevy_ecs/src/system/query.rs | 28 +- crates/bevy_ecs/src/system/system_param.rs | 236 +-- crates/bevy_ecs/src/world/entity_ref.rs | 115 +- crates/bevy_ecs/src/world/mod.rs | 23 +- .../bevy_ecs/src/world/unsafe_world_cell.rs | 54 +- crates/bevy_ecs_compile_fail_tests/src/lib.rs | 1 + crates/bevy_encase_derive/src/lib.rs | 1 + crates/bevy_gilrs/src/lib.rs | 1 + crates/bevy_gizmos/src/lib.rs | 1 + crates/bevy_gltf/src/lib.rs | 1 + crates/bevy_hierarchy/src/lib.rs | 1 + crates/bevy_input/src/lib.rs | 1 + crates/bevy_internal/src/lib.rs | 1 + crates/bevy_log/src/lib.rs | 1 + crates/bevy_macro_utils/src/lib.rs | 1 + .../bevy_macros_compile_fail_tests/src/lib.rs | 1 + crates/bevy_math/src/lib.rs | 1 + crates/bevy_mikktspace/src/generated.rs | 1565 ++++++++--------- crates/bevy_mikktspace/src/lib.rs | 1 + crates/bevy_pbr/src/lib.rs | 1 + crates/bevy_ptr/src/lib.rs | 1 + crates/bevy_reflect/src/lib.rs | 1 + crates/bevy_reflect/src/type_registry.rs | 6 +- .../src/lib.rs | 2 + crates/bevy_render/src/extract_param.rs | 14 +- crates/bevy_render/src/lib.rs | 1 + crates/bevy_scene/src/lib.rs | 1 + crates/bevy_sprite/src/lib.rs | 1 + crates/bevy_tasks/src/lib.rs | 1 + crates/bevy_text/src/lib.rs | 1 + crates/bevy_time/src/lib.rs | 1 + crates/bevy_transform/src/lib.rs | 1 + crates/bevy_ui/src/lib.rs | 1 + crates/bevy_utils/src/lib.rs | 1 + crates/bevy_window/src/lib.rs | 1 + crates/bevy_winit/src/lib.rs | 1 + src/lib.rs | 1 + 69 files changed, 2030 insertions(+), 1645 deletions(-) diff --git a/crates/bevy_animation/src/lib.rs b/crates/bevy_animation/src/lib.rs index d108782119cc5..669754c84c53e 100644 --- a/crates/bevy_animation/src/lib.rs +++ b/crates/bevy_animation/src/lib.rs @@ -2,6 +2,7 @@ #![warn(missing_docs)] #![allow(clippy::type_complexity)] +#![forbid(unsafe_op_in_unsafe_fn)] use std::ops::Deref; use std::time::Duration; diff --git a/crates/bevy_app/src/lib.rs b/crates/bevy_app/src/lib.rs index 3439620a46633..2db6dd7b50e67 100644 --- a/crates/bevy_app/src/lib.rs +++ b/crates/bevy_app/src/lib.rs @@ -2,6 +2,7 @@ #![warn(missing_docs)] 
#![allow(clippy::type_complexity)] +#![forbid(unsafe_op_in_unsafe_fn)] mod app; mod main_schedule; diff --git a/crates/bevy_asset/src/lib.rs b/crates/bevy_asset/src/lib.rs index fcf4b09f19ba3..bf581121a59db 100644 --- a/crates/bevy_asset/src/lib.rs +++ b/crates/bevy_asset/src/lib.rs @@ -1,3 +1,5 @@ +#![forbid(unsafe_op_in_unsafe_fn)] + pub mod io; pub mod meta; pub mod processor; diff --git a/crates/bevy_asset/src/reflect.rs b/crates/bevy_asset/src/reflect.rs index 6e00323826de1..c0e7f3c902fdc 100644 --- a/crates/bevy_asset/src/reflect.rs +++ b/crates/bevy_asset/src/reflect.rs @@ -88,7 +88,7 @@ impl ReflectAsset { handle: UntypedHandle, ) -> Option<&'w mut dyn Reflect> { // SAFETY: requirements are deferred to the caller - (self.get_unchecked_mut)(world, handle) + unsafe { (self.get_unchecked_mut)(world, handle) } } /// Equivalent of [`Assets::add`] diff --git a/crates/bevy_core/src/lib.rs b/crates/bevy_core/src/lib.rs index e5682d3a92262..5ecdec6106759 100644 --- a/crates/bevy_core/src/lib.rs +++ b/crates/bevy_core/src/lib.rs @@ -1,5 +1,6 @@ #![warn(missing_docs)] #![allow(clippy::type_complexity)] +#![forbid(unsafe_op_in_unsafe_fn)] //! This crate provides core functionality for Bevy Engine. mod name; diff --git a/crates/bevy_core_pipeline/src/lib.rs b/crates/bevy_core_pipeline/src/lib.rs index 51e5ae36278e9..b27ff00bbb651 100644 --- a/crates/bevy_core_pipeline/src/lib.rs +++ b/crates/bevy_core_pipeline/src/lib.rs @@ -1,4 +1,5 @@ #![allow(clippy::type_complexity)] +#![forbid(unsafe_op_in_unsafe_fn)] pub mod blit; pub mod bloom; diff --git a/crates/bevy_derive/src/lib.rs b/crates/bevy_derive/src/lib.rs index 9030d8791ba71..ef924a4ab50fd 100644 --- a/crates/bevy_derive/src/lib.rs +++ b/crates/bevy_derive/src/lib.rs @@ -1,4 +1,5 @@ #![allow(clippy::type_complexity)] +#![forbid(unsafe_op_in_unsafe_fn)] extern crate proc_macro; diff --git a/crates/bevy_diagnostic/src/lib.rs b/crates/bevy_diagnostic/src/lib.rs index 0422df0cb4380..b9d067e84a552 100644 --- a/crates/bevy_diagnostic/src/lib.rs +++ b/crates/bevy_diagnostic/src/lib.rs @@ -1,4 +1,5 @@ #![allow(clippy::type_complexity)] +#![forbid(unsafe_op_in_unsafe_fn)] mod diagnostic; mod entity_count_diagnostics_plugin; diff --git a/crates/bevy_dylib/src/lib.rs b/crates/bevy_dylib/src/lib.rs index 2c5d6429676b5..abb045ff01063 100644 --- a/crates/bevy_dylib/src/lib.rs +++ b/crates/bevy_dylib/src/lib.rs @@ -1,6 +1,7 @@ #![warn(missing_docs)] #![allow(clippy::type_complexity)] #![allow(clippy::single_component_path_imports)] +#![forbid(unsafe_op_in_unsafe_fn)] //! Forces dynamic linking of Bevy. //! 
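// Illustration only: a hypothetical crate root showing the attribute that each crate
// in this commit gains. `forbid` is stronger than `deny`, so the lint cannot be
// re-`allow`ed anywhere else in the crate, and every unsafe operation inside an
// `unsafe fn` must sit in its own explicit `unsafe` block.
#![forbid(unsafe_op_in_unsafe_fn)]

/// # Safety
/// `ptr` must point to an allocation of at least `len` bytes that outlives `'a`.
pub unsafe fn bytes<'a>(ptr: *const u8, len: usize) -> &'a [u8] {
    // SAFETY: the caller guarantees `ptr..ptr + len` is a valid, live allocation for `'a`.
    unsafe { core::slice::from_raw_parts(ptr, len) }
}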
diff --git a/crates/bevy_dynamic_plugin/src/lib.rs b/crates/bevy_dynamic_plugin/src/lib.rs index ee59ff2cb135a..cf3181b626273 100644 --- a/crates/bevy_dynamic_plugin/src/lib.rs +++ b/crates/bevy_dynamic_plugin/src/lib.rs @@ -1,4 +1,5 @@ #![allow(clippy::type_complexity)] +#![forbid(unsafe_op_in_unsafe_fn)] mod loader; diff --git a/crates/bevy_dynamic_plugin/src/loader.rs b/crates/bevy_dynamic_plugin/src/loader.rs index 50f2bb34cb072..6277ab7565ec4 100644 --- a/crates/bevy_dynamic_plugin/src/loader.rs +++ b/crates/bevy_dynamic_plugin/src/loader.rs @@ -24,11 +24,13 @@ pub enum DynamicPluginLoadError { pub unsafe fn dynamically_load_plugin>( path: P, ) -> Result<(Library, Box), DynamicPluginLoadError> { - let lib = Library::new(path).map_err(DynamicPluginLoadError::Library)?; - let func: Symbol = lib - .get(b"_bevy_create_plugin") - .map_err(DynamicPluginLoadError::Plugin)?; - let plugin = Box::from_raw(func()); + // SAFETY: the caller must uphold the safety contract for `new`. + let lib = unsafe { Library::new(path) }.map_err(DynamicPluginLoadError::Library)?; + // SAFETY: the caller must uphold the safety contract for `get`. + let func: Symbol = + unsafe { lib.get(b"_bevy_create_plugin") }.map_err(DynamicPluginLoadError::Plugin)?; + // SAFETY: the caller must uphold the safety contract for `from_raw`. + let plugin = unsafe { Box::from_raw(func()) }; Ok((lib, plugin)) } @@ -41,7 +43,8 @@ pub trait DynamicPluginExt { impl DynamicPluginExt for App { unsafe fn load_plugin>(&mut self, path: P) -> &mut Self { - let (lib, plugin) = dynamically_load_plugin(path).unwrap(); + // SAFETY: the caller must uphold the safety contract for `dynamically_load_plugin`. + let (lib, plugin) = unsafe { dynamically_load_plugin(path) }.unwrap(); std::mem::forget(lib); // Ensure that the library is not automatically unloaded plugin.build(self); self diff --git a/crates/bevy_ecs/src/archetype.rs b/crates/bevy_ecs/src/archetype.rs index 7fb308dadae7f..0163f5c99075f 100644 --- a/crates/bevy_ecs/src/archetype.rs +++ b/crates/bevy_ecs/src/archetype.rs @@ -121,7 +121,7 @@ impl BundleComponentStatus for AddBundle { #[inline] unsafe fn get_status(&self, index: usize) -> ComponentStatus { // SAFETY: caller has ensured index is a valid bundle index for this bundle - *self.bundle_status.get_unchecked(index) + unsafe { *self.bundle_status.get_unchecked(index) } } } diff --git a/crates/bevy_ecs/src/bundle.rs b/crates/bevy_ecs/src/bundle.rs index 79f3098146432..524475dee8a76 100644 --- a/crates/bevy_ecs/src/bundle.rs +++ b/crates/bevy_ecs/src/bundle.rs @@ -194,7 +194,7 @@ unsafe impl Bundle for C { Self: Sized, { // Safety: The id given in `component_ids` is for `Self` - func(ctx).read() + unsafe { func(ctx).read() } } } @@ -227,7 +227,10 @@ macro_rules! tuple_impl { { // Rust guarantees that tuple calls are evaluated 'left to right'. 
// https://doc.rust-lang.org/reference/expressions.html#evaluation-order-of-operands - ($(<$name as Bundle>::from_components(ctx, func),)*) + ($( + // SAFETY: It is the caller's responsibility to uphold the invariants of `from_components` + unsafe { <$name as Bundle>::from_components(ctx, func) }, + )*) } } @@ -459,7 +462,9 @@ impl BundleInfo { // bundle_info.component_ids are also in "bundle order" let mut bundle_component = 0; bundle.get_components(&mut |storage_type, component_ptr| { - let component_id = *self.component_ids.get_unchecked(bundle_component); + let component_id = + // SAFETY: It is the caller's responsibility to uphold the invariants of `get_unchecked` + unsafe { *self.component_ids.get_unchecked(bundle_component) }; match storage_type { StorageType::Table => { let column = @@ -467,12 +472,14 @@ impl BundleInfo { // the target table contains the component. unsafe { table.get_column_mut(component_id).debug_checked_unwrap() }; // SAFETY: bundle_component is a valid index for this bundle - match bundle_component_status.get_status(bundle_component) { - ComponentStatus::Added => { - column.initialize(table_row, component_ptr, change_tick); - } - ComponentStatus::Mutated => { - column.replace(table_row, component_ptr, change_tick); + unsafe { + match bundle_component_status.get_status(bundle_component) { + ComponentStatus::Added => { + column.initialize(table_row, component_ptr, change_tick); + } + ComponentStatus::Mutated => { + column.replace(table_row, component_ptr, change_tick); + } } } } @@ -481,7 +488,10 @@ impl BundleInfo { // SAFETY: If component_id is in self.component_ids, BundleInfo::new requires that // a sparse set exists for the component. unsafe { sparse_sets.get_mut(component_id).debug_checked_unwrap() }; - sparse_set.insert(entity, component_ptr, change_tick); + // SAFETY: bundle_component is a valid index for this bundle + unsafe { + sparse_set.insert(entity, component_ptr, change_tick); + } } } bundle_component += 1; @@ -615,35 +625,44 @@ impl<'a, 'b> BundleInserter<'a, 'b> { .get_add_bundle_internal(self.bundle_info.id) .debug_checked_unwrap() }; - self.bundle_info.write_components( - self.table, - self.sparse_sets, - add_bundle, - entity, - location.table_row, - self.change_tick, - bundle, - ); + // SAFETY: It is the caller's responsiblilty to uphold the invariants of `write_components` + unsafe { + self.bundle_info.write_components( + self.table, + self.sparse_sets, + add_bundle, + entity, + location.table_row, + self.change_tick, + bundle, + ); + } location } InsertBundleResult::NewArchetypeSameTable { new_archetype } => { let result = self.archetype.swap_remove(location.archetype_row); if let Some(swapped_entity) = result.swapped_entity { - let swapped_location = - // SAFETY: If the swap was successful, swapped_entity must be valid. - unsafe { self.entities.get(swapped_entity).debug_checked_unwrap() }; - self.entities.set( - swapped_entity.index(), - EntityLocation { - archetype_id: swapped_location.archetype_id, - archetype_row: location.archetype_row, - table_id: swapped_location.table_id, - table_row: swapped_location.table_row, - }, - ); + // SAFETY: If the swap was successful, swapped_entity must be valid. 
+ unsafe { + let swapped_location = + self.entities.get(swapped_entity).debug_checked_unwrap(); + self.entities.set( + swapped_entity.index(), + EntityLocation { + archetype_id: swapped_location.archetype_id, + archetype_row: location.archetype_row, + table_id: swapped_location.table_id, + table_row: swapped_location.table_row, + }, + ); + } } - let new_location = new_archetype.allocate(entity, result.table_row); - self.entities.set(entity.index(), new_location); + // SAFETY: The allocation is immediately written to with a valid entity + let new_location = unsafe { + let new_location = new_archetype.allocate(entity, result.table_row); + self.entities.set(entity.index(), new_location); + new_location + }; // PERF: this could be looked up during Inserter construction and stored (but borrowing makes this nasty) // SAFETY: The edge is assured to be initialized when creating the BundleInserter @@ -653,15 +672,18 @@ impl<'a, 'b> BundleInserter<'a, 'b> { .get_add_bundle_internal(self.bundle_info.id) .debug_checked_unwrap() }; - self.bundle_info.write_components( - self.table, - self.sparse_sets, - add_bundle, - entity, - result.table_row, - self.change_tick, - bundle, - ); + // SAFETY: The bundle component status is assured to be correct as from above. + unsafe { + self.bundle_info.write_components( + self.table, + self.sparse_sets, + add_bundle, + entity, + result.table_row, + self.change_tick, + bundle, + ); + } new_location } InsertBundleResult::NewArchetypeNewTable { @@ -670,26 +692,32 @@ impl<'a, 'b> BundleInserter<'a, 'b> { } => { let result = self.archetype.swap_remove(location.archetype_row); if let Some(swapped_entity) = result.swapped_entity { - let swapped_location = - // SAFETY: If the swap was successful, swapped_entity must be valid. - unsafe { self.entities.get(swapped_entity).debug_checked_unwrap() }; - self.entities.set( - swapped_entity.index(), - EntityLocation { - archetype_id: swapped_location.archetype_id, - archetype_row: location.archetype_row, - table_id: swapped_location.table_id, - table_row: swapped_location.table_row, - }, - ); + // SAFETY: If the swap was successful, swapped_entity must be valid. + unsafe { + let swapped_location = + self.entities.get(swapped_entity).debug_checked_unwrap(); + self.entities.set( + swapped_entity.index(), + EntityLocation { + archetype_id: swapped_location.archetype_id, + archetype_row: location.archetype_row, + table_id: swapped_location.table_id, + table_row: swapped_location.table_row, + }, + ); + } } // PERF: store "non bundle" components in edge, then just move those to avoid // redundant copies - let move_result = self - .table - .move_to_superset_unchecked(result.table_row, new_table); - let new_location = new_archetype.allocate(entity, move_result.new_row); - self.entities.set(entity.index(), new_location); + // SAFETY: Allocation is immediately initialized. 
+ let (move_result, new_location) = unsafe { + let move_result = self + .table + .move_to_superset_unchecked(result.table_row, new_table); + let new_location = new_archetype.allocate(entity, move_result.new_row); + self.entities.set(entity.index(), new_location); + (move_result, new_location) + }; // if an entity was moved into this entity's table spot, update its table row if let Some(swapped_entity) = move_result.swapped_entity { @@ -703,20 +731,25 @@ impl<'a, 'b> BundleInserter<'a, 'b> { new_archetype } else { // SAFETY: the only two borrowed archetypes are above and we just did collision checks - &mut *self - .archetypes_ptr - .add(swapped_location.archetype_id.index()) + unsafe { + &mut *self + .archetypes_ptr + .add(swapped_location.archetype_id.index()) + } }; - self.entities.set( - swapped_entity.index(), - EntityLocation { - archetype_id: swapped_location.archetype_id, - archetype_row: swapped_location.archetype_row, - table_id: swapped_location.table_id, - table_row: result.table_row, - }, - ); + // SAFETY: The above ensures the entity and location are valid + unsafe { + self.entities.set( + swapped_entity.index(), + EntityLocation { + archetype_id: swapped_location.archetype_id, + archetype_row: swapped_location.archetype_row, + table_id: swapped_location.table_id, + table_row: result.table_row, + }, + ); + } swapped_archetype .set_entity_table_row(swapped_location.archetype_row, result.table_row); } @@ -729,15 +762,18 @@ impl<'a, 'b> BundleInserter<'a, 'b> { .get_add_bundle_internal(self.bundle_info.id) .debug_checked_unwrap() }; - self.bundle_info.write_components( - new_table, - self.sparse_sets, - add_bundle, - entity, - move_result.new_row, - self.change_tick, - bundle, - ); + // SAFETY: The above ensures bundle_component_status is correct. + unsafe { + self.bundle_info.write_components( + new_table, + self.sparse_sets, + add_bundle, + entity, + move_result.new_row, + self.change_tick, + bundle, + ); + } new_location } } @@ -766,20 +802,23 @@ impl<'a, 'b> BundleSpawner<'a, 'b> { entity: Entity, bundle: T, ) -> EntityLocation { - let table_row = self.table.allocate(entity); - let location = self.archetype.allocate(entity, table_row); - self.bundle_info.write_components( - self.table, - self.sparse_sets, - &SpawnBundleStatus, - entity, - table_row, - self.change_tick, - bundle, - ); - self.entities.set(entity.index(), location); - - location + // SAFETY: It is the caller's responsibility to ensure that entity is allocated, + // non-existent, and that T matches this type. 
+ unsafe { + let table_row = self.table.allocate(entity); + let location = self.archetype.allocate(entity, table_row); + self.bundle_info.write_components( + self.table, + self.sparse_sets, + &SpawnBundleStatus, + entity, + table_row, + self.change_tick, + bundle, + ); + self.entities.set(entity.index(), location); + location + } } /// # Safety @@ -788,7 +827,9 @@ impl<'a, 'b> BundleSpawner<'a, 'b> { pub unsafe fn spawn(&mut self, bundle: T) -> Entity { let entity = self.entities.alloc(); // SAFETY: entity is allocated (but non-existent), `T` matches this BundleInfo's type - self.spawn_non_existent(entity, bundle); + unsafe { + self.spawn_non_existent(entity, bundle); + } entity } } diff --git a/crates/bevy_ecs/src/change_detection.rs b/crates/bevy_ecs/src/change_detection.rs index 0db970ba58831..578c12aef00ef 100644 --- a/crates/bevy_ecs/src/change_detection.rs +++ b/crates/bevy_ecs/src/change_detection.rs @@ -417,11 +417,14 @@ impl<'a> Ticks<'a> { last_run: Tick, this_run: Tick, ) -> Self { - Self { - added: cells.added.deref(), - changed: cells.changed.deref(), - last_run, - this_run, + // SAFETY: It is the caller's responsibility to uphold the invariants of `deref` + unsafe { + Self { + added: cells.added.deref(), + changed: cells.changed.deref(), + last_run, + this_run, + } } } } @@ -442,11 +445,14 @@ impl<'a> TicksMut<'a> { last_run: Tick, this_run: Tick, ) -> Self { - Self { - added: cells.added.deref_mut(), - changed: cells.changed.deref_mut(), - last_run, - this_run, + // SAFETY: It is the caller's responsibility to uphold the invariants of `deref_mut` + unsafe { + Self { + added: cells.added.deref_mut(), + changed: cells.changed.deref_mut(), + last_run, + this_run, + } } } } @@ -847,9 +853,12 @@ impl<'a> MutUntyped<'a> { /// # Safety /// - `T` must be the erased pointee type for this [`MutUntyped`]. pub unsafe fn with_type(self) -> Mut<'a, T> { - Mut { - value: self.value.deref_mut(), - ticks: self.ticks, + // SAFETY: It is the responsibility of the caller to uphold the invariants of `deref_mut` + unsafe { + Mut { + value: self.value.deref_mut(), + ticks: self.ticks, + } } } } diff --git a/crates/bevy_ecs/src/component.rs b/crates/bevy_ecs/src/component.rs index d3b4970f21f6a..0e9c6bfd714b9 100644 --- a/crates/bevy_ecs/src/component.rs +++ b/crates/bevy_ecs/src/component.rs @@ -349,9 +349,11 @@ impl std::fmt::Debug for ComponentDescriptor { } impl ComponentDescriptor { - // SAFETY: The pointer points to a valid value of type `T` and it is safe to drop this value. unsafe fn drop_ptr(x: OwningPtr<'_>) { - x.drop_as::(); + // SAFETY: The pointer points to a valid value of type `T` and it is safe to drop this value. + unsafe { + x.drop_as::(); + } } /// Create a new `ComponentDescriptor` for the type `T`. @@ -536,7 +538,8 @@ impl Components { #[inline] pub unsafe fn get_info_unchecked(&self, id: ComponentId) -> &ComponentInfo { debug_assert!(id.index() < self.components.len()); - self.components.get_unchecked(id.0) + // SAFETY: It is the caller's responsibility to ensure that `id` is a valid `ComponentId` + unsafe { self.components.get_unchecked(id.0) } } /// Type-erased equivalent of [`Components::component_id()`]. @@ -759,9 +762,12 @@ impl<'a> TickCells<'a> { /// All cells contained within must uphold the safety invariants of [`UnsafeCellDeref::read`]. 
#[inline] pub(crate) unsafe fn read(&self) -> ComponentTicks { - ComponentTicks { - added: self.added.read(), - changed: self.changed.read(), + // SAFETY: It is the caller's responsibility to uphold the invariants of `read` + unsafe { + ComponentTicks { + added: self.added.read(), + changed: self.changed.read(), + } } } } diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index 3a49c99aedcf6..a71a5078841e1 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -620,7 +620,7 @@ impl Entities { #[inline] pub(crate) unsafe fn set(&mut self, index: u32, location: EntityLocation) { // SAFETY: Caller guarantees that `index` a valid entity index - self.meta.get_unchecked_mut(index as usize).location = location; + unsafe { self.meta.get_unchecked_mut(index as usize) }.location = location; } /// Increments the `generation` of a freed [`Entity`]. The next entity ID allocated with this @@ -736,9 +736,11 @@ impl Entities { let free_cursor = self.free_cursor.get_mut(); *free_cursor = 0; self.meta.reserve(count); - // the EntityMeta struct only contains integers, and it is valid to have all bytes set to u8::MAX - self.meta.as_mut_ptr().write_bytes(u8::MAX, count); - self.meta.set_len(count); + // SAFETY: The EntityMeta struct only contains integers, and it is valid to have all bytes set to u8::MAX + unsafe { + self.meta.as_mut_ptr().write_bytes(u8::MAX, count); + self.meta.set_len(count); + } self.len = count as u32; } diff --git a/crates/bevy_ecs/src/lib.rs b/crates/bevy_ecs/src/lib.rs index 06001b584dbdc..56c7a788a6119 100644 --- a/crates/bevy_ecs/src/lib.rs +++ b/crates/bevy_ecs/src/lib.rs @@ -1,6 +1,7 @@ #![warn(clippy::undocumented_unsafe_blocks)] #![warn(missing_docs)] #![allow(clippy::type_complexity)] +#![forbid(unsafe_op_in_unsafe_fn)] #![doc = include_str!("../README.md")] #[cfg(target_pointer_width = "16")] diff --git a/crates/bevy_ecs/src/query/fetch.rs b/crates/bevy_ecs/src/query/fetch.rs index 1f7fe29ec729f..73c3c37a465c2 100644 --- a/crates/bevy_ecs/src/query/fetch.rs +++ b/crates/bevy_ecs/src/query/fetch.rs @@ -570,9 +570,9 @@ unsafe impl WorldQuery for EntityRef<'_> { _table_row: TableRow, ) -> Self::Item<'w> { // SAFETY: `fetch` must be called with an entity that exists in the world - let cell = world.get_entity(entity).debug_checked_unwrap(); + let cell = unsafe { world.get_entity(entity).debug_checked_unwrap() }; // SAFETY: Read-only access to every component has been registered. - EntityRef::new(cell) + unsafe { EntityRef::new(cell) } } fn update_component_access(_state: &Self::State, access: &mut FilteredAccess) { @@ -650,9 +650,9 @@ unsafe impl<'a> WorldQuery for EntityMut<'a> { _table_row: TableRow, ) -> Self::Item<'w> { // SAFETY: `fetch` must be called with an entity that exists in the world - let cell = world.get_entity(entity).debug_checked_unwrap(); + let cell = unsafe { world.get_entity(entity).debug_checked_unwrap() }; // SAFETY: mutable access to every component has been registered. - EntityMut::new(cell) + unsafe { EntityMut::new(cell) } } fn update_component_access(_state: &Self::State, access: &mut FilteredAccess) { @@ -728,15 +728,17 @@ unsafe impl WorldQuery for &T { ReadFetch { table_components: None, sparse_set: (T::Storage::STORAGE_TYPE == StorageType::SparseSet).then(|| { - world - // SAFETY: The underlying type associated with `component_id` is `T`, - // which we are allowed to access since we registered it in `update_archetype_component_access`. 
- // Note that we do not actually access any components in this function, we just get a shared - // reference to the sparse set, which is used to access the components in `Self::fetch`. - .storages() - .sparse_sets - .get(component_id) - .debug_checked_unwrap() + // SAFETY: The underlying type associated with `component_id` is `T`, + // which we are allowed to access since we registered it in `update_archetype_component_access`. + // Note that we do not actually access any components in this function, we just get a shared + // reference to the sparse set, which is used to access the components in `Self::fetch`. + unsafe { + world + .storages() + .sparse_sets + .get(component_id) + .debug_checked_unwrap() + } }), } } @@ -749,7 +751,10 @@ unsafe impl WorldQuery for &T { table: &'w Table, ) { if Self::IS_DENSE { - Self::set_table(fetch, component_id, table); + // SAFETY: It is the caller's responsibility to uphold the invariants of `set_table` + unsafe { + Self::set_table(fetch, component_id, table); + } } } @@ -760,11 +765,14 @@ unsafe impl WorldQuery for &T { table: &'w Table, ) { fetch.table_components = Some( - table - .get_column(component_id) - .debug_checked_unwrap() - .get_data_slice() - .into(), + // SAFETY: It is the caller's responsibility to uphold the invariants of `get_data_slice` + unsafe { + table + .get_column(component_id) + .debug_checked_unwrap() + .get_data_slice() + .into() + }, ); } @@ -774,18 +782,21 @@ unsafe impl WorldQuery for &T { entity: Entity, table_row: TableRow, ) -> Self::Item<'w> { - match T::Storage::STORAGE_TYPE { - StorageType::Table => fetch - .table_components - .debug_checked_unwrap() - .get(table_row.index()) - .deref(), - StorageType::SparseSet => fetch - .sparse_set - .debug_checked_unwrap() - .get(entity) - .debug_checked_unwrap() - .deref(), + // SAFETY: It is the caller's responsibility to uphold the invariants of `get` + unsafe { + match T::Storage::STORAGE_TYPE { + StorageType::Table => fetch + .table_components + .debug_checked_unwrap() + .get(table_row.index()) + .deref(), + StorageType::SparseSet => fetch + .sparse_set + .debug_checked_unwrap() + .get(entity) + .debug_checked_unwrap() + .deref(), + } } } @@ -878,12 +889,14 @@ unsafe impl<'__w, T: Component> WorldQuery for Ref<'__w, T> { RefFetch { table_data: None, sparse_set: (T::Storage::STORAGE_TYPE == StorageType::SparseSet).then(|| { - world - // SAFETY: See &T::init_fetch. - .storages() - .sparse_sets - .get(component_id) - .debug_checked_unwrap() + // SAFETY: See &T::init_fetch. 
+ unsafe { + world + .storages() + .sparse_sets + .get(component_id) + .debug_checked_unwrap() + } }), last_run, this_run, @@ -898,7 +911,10 @@ unsafe impl<'__w, T: Component> WorldQuery for Ref<'__w, T> { table: &'w Table, ) { if Self::IS_DENSE { - Self::set_table(fetch, component_id, table); + // SAFETY: It is the caller's responsibility to uphold the invariants of `set_table` + unsafe { + Self::set_table(fetch, component_id, table); + } } } @@ -908,9 +924,11 @@ unsafe impl<'__w, T: Component> WorldQuery for Ref<'__w, T> { &component_id: &ComponentId, table: &'w Table, ) { - let column = table.get_column(component_id).debug_checked_unwrap(); + // SAFETY: It is the caller's responsibility to uphold the invariants of `get_column` + let column = unsafe { table.get_column(component_id).debug_checked_unwrap() }; fetch.table_data = Some(( - column.get_data_slice().into(), + // SAFETY: It is the caller's responsibility to uphold the invariants of `get_data_slice` + unsafe { column.get_data_slice() }.into(), column.get_added_ticks_slice().into(), column.get_changed_ticks_slice().into(), )); @@ -925,26 +943,36 @@ unsafe impl<'__w, T: Component> WorldQuery for Ref<'__w, T> { match T::Storage::STORAGE_TYPE { StorageType::Table => { let (table_components, added_ticks, changed_ticks) = - fetch.table_data.debug_checked_unwrap(); - Ref { - value: table_components.get(table_row.index()).deref(), - ticks: Ticks { - added: added_ticks.get(table_row.index()).deref(), - changed: changed_ticks.get(table_row.index()).deref(), - this_run: fetch.this_run, - last_run: fetch.last_run, - }, + // SAFETY: It is the caller's responsibility to ensure `table_data` is valid + unsafe { fetch.table_data.debug_checked_unwrap() }; + // SAFETY: It is the caller's responsibility to ensure the `table_data` can be packed into a `Ref` + unsafe { + Ref { + value: table_components.get(table_row.index()).deref(), + ticks: Ticks { + added: added_ticks.get(table_row.index()).deref(), + changed: changed_ticks.get(table_row.index()).deref(), + this_run: fetch.this_run, + last_run: fetch.last_run, + }, + } } } StorageType::SparseSet => { - let (component, ticks) = fetch - .sparse_set - .debug_checked_unwrap() - .get_with_ticks(entity) - .debug_checked_unwrap(); - Ref { - value: component.deref(), - ticks: Ticks::from_tick_cells(ticks, fetch.last_run, fetch.this_run), + // SAFETY: It is the caller's responsibility to ensure `table_data` is valid + let (component, ticks) = unsafe { + fetch + .sparse_set + .debug_checked_unwrap() + .get_with_ticks(entity) + .debug_checked_unwrap() + }; + // SAFETY: It is the caller's responsibility to ensure the `sparse_set` can be packed into a `Ref` + unsafe { + Ref { + value: component.deref(), + ticks: Ticks::from_tick_cells(ticks, fetch.last_run, fetch.this_run), + } } } } @@ -1039,12 +1067,14 @@ unsafe impl<'__w, T: Component> WorldQuery for &'__w mut T { WriteFetch { table_data: None, sparse_set: (T::Storage::STORAGE_TYPE == StorageType::SparseSet).then(|| { - world - // SAFETY: See &T::init_fetch. - .storages() - .sparse_sets - .get(component_id) - .debug_checked_unwrap() + // SAFETY: See &T::init_fetch. 
+ unsafe { + world + .storages() + .sparse_sets + .get(component_id) + .debug_checked_unwrap() + } }), last_run, this_run, @@ -1059,7 +1089,10 @@ unsafe impl<'__w, T: Component> WorldQuery for &'__w mut T { table: &'w Table, ) { if Self::IS_DENSE { - Self::set_table(fetch, component_id, table); + // SAFETY: It is the caller's responsibility to uphold the invariants of `set_table` + unsafe { + Self::set_table(fetch, component_id, table); + } } } @@ -1069,9 +1102,11 @@ unsafe impl<'__w, T: Component> WorldQuery for &'__w mut T { &component_id: &ComponentId, table: &'w Table, ) { - let column = table.get_column(component_id).debug_checked_unwrap(); + // SAFETY: It is the caller's responsibility to uphold the invariantts of `get_column` + let column = unsafe { table.get_column(component_id).debug_checked_unwrap() }; fetch.table_data = Some(( - column.get_data_slice().into(), + // SAFETY: It is the caller's responsibility to uphold the invariants of `get_data_slice` + unsafe { column.get_data_slice() }.into(), column.get_added_ticks_slice().into(), column.get_changed_ticks_slice().into(), )); @@ -1086,26 +1121,36 @@ unsafe impl<'__w, T: Component> WorldQuery for &'__w mut T { match T::Storage::STORAGE_TYPE { StorageType::Table => { let (table_components, added_ticks, changed_ticks) = - fetch.table_data.debug_checked_unwrap(); - Mut { - value: table_components.get(table_row.index()).deref_mut(), - ticks: TicksMut { - added: added_ticks.get(table_row.index()).deref_mut(), - changed: changed_ticks.get(table_row.index()).deref_mut(), - this_run: fetch.this_run, - last_run: fetch.last_run, - }, + // SAFETY: It is the caller's responsibility to ensure `table_data` is valid + unsafe { fetch.table_data.debug_checked_unwrap() }; + // SAFETY: It is the caller's responsibility to ensure a `Mut` can be constructed from `fetch` + unsafe { + Mut { + value: table_components.get(table_row.index()).deref_mut(), + ticks: TicksMut { + added: added_ticks.get(table_row.index()).deref_mut(), + changed: changed_ticks.get(table_row.index()).deref_mut(), + this_run: fetch.this_run, + last_run: fetch.last_run, + }, + } } } StorageType::SparseSet => { - let (component, ticks) = fetch - .sparse_set - .debug_checked_unwrap() - .get_with_ticks(entity) - .debug_checked_unwrap(); - Mut { - value: component.assert_unique().deref_mut(), - ticks: TicksMut::from_tick_cells(ticks, fetch.last_run, fetch.this_run), + // SAFETY: It is the caller's responsibility to ensure `sparse_set` is valid + let (component, ticks) = unsafe { + fetch + .sparse_set + .debug_checked_unwrap() + .get_with_ticks(entity) + .debug_checked_unwrap() + }; + // SAFETY: It is the caller's responsibility to ensure a `Mut` can be constructed from `fetch` + unsafe { + Mut { + value: component.assert_unique().deref_mut(), + ticks: TicksMut::from_tick_cells(ticks, fetch.last_run, fetch.this_run), + } } } } @@ -1183,7 +1228,8 @@ unsafe impl WorldQuery for Option { this_run: Tick, ) -> OptionFetch<'w, T> { OptionFetch { - fetch: T::init_fetch(world, state, last_run, this_run), + // SAFETY: It is the caller's responsibility to uphold the invariants of `init_fetch` + fetch: unsafe { T::init_fetch(world, state, last_run, this_run) }, matches: false, } } @@ -1197,7 +1243,8 @@ unsafe impl WorldQuery for Option { ) { fetch.matches = T::matches_component_set(state, &|id| archetype.contains(id)); if fetch.matches { - T::set_archetype(&mut fetch.fetch, state, archetype, table); + // SAFETY: It is the caller's responsibility to uphold the invariants of `set_archetype` + 
unsafe { T::set_archetype(&mut fetch.fetch, state, archetype, table) }; } } @@ -1205,7 +1252,8 @@ unsafe impl WorldQuery for Option { unsafe fn set_table<'w>(fetch: &mut OptionFetch<'w, T>, state: &T::State, table: &'w Table) { fetch.matches = T::matches_component_set(state, &|id| table.has_column(id)); if fetch.matches { - T::set_table(&mut fetch.fetch, state, table); + // SAFETY: It is the caller's responsiblity to uphold the invariants of `set_table` + unsafe { T::set_table(&mut fetch.fetch, state, table) }; } } @@ -1215,9 +1263,10 @@ unsafe impl WorldQuery for Option { entity: Entity, table_row: TableRow, ) -> Self::Item<'w> { - fetch - .matches - .then(|| T::fetch(&mut fetch.fetch, entity, table_row)) + fetch.matches.then(|| { + // SAFETY: It is the caller's responsibility to uphold the invariants of `fetch` + unsafe { T::fetch(&mut fetch.fetch, entity, table_row) } + }) } fn update_component_access(state: &T::State, access: &mut FilteredAccess) { @@ -1418,7 +1467,10 @@ macro_rules! impl_tuple_fetch { #[allow(clippy::unused_unit)] unsafe fn init_fetch<'w>(_world: UnsafeWorldCell<'w>, state: &Self::State, _last_run: Tick, _this_run: Tick) -> Self::Fetch<'w> { let ($($name,)*) = state; - ($($name::init_fetch(_world, $name, _last_run, _this_run),)*) + ($( + // SAFETY: It is the caller's responsibility to uphold `init_fetch` + unsafe { $name::init_fetch(_world, $name, _last_run, _this_run) }, + )*) } const IS_DENSE: bool = true $(&& $name::IS_DENSE)*; @@ -1434,14 +1486,20 @@ macro_rules! impl_tuple_fetch { ) { let ($($name,)*) = _fetch; let ($($state,)*) = _state; - $($name::set_archetype($name, $state, _archetype, _table);)* + $( + // SAFETY: It is the caller's responsibility to uphold the invariants of `set_archetype` + unsafe { $name::set_archetype($name, $state, _archetype, _table); } + )* } #[inline] unsafe fn set_table<'w>(_fetch: &mut Self::Fetch<'w>, _state: &Self::State, _table: &'w Table) { let ($($name,)*) = _fetch; let ($($state,)*) = _state; - $($name::set_table($name, $state, _table);)* + $( + // SAFETY: It is the caller's responsibility to uphold the invariants of `set_table` + unsafe { $name::set_table($name, $state, _table); } + )* } #[inline(always)] @@ -1452,7 +1510,10 @@ macro_rules! impl_tuple_fetch { _table_row: TableRow ) -> Self::Item<'w> { let ($($name,)*) = _fetch; - ($($name::fetch($name, _entity, _table_row),)*) + ($( + // SAFETY: It is the caller's responsibility to uphold the invariants of `fetch` + unsafe { $name::fetch($name, _entity, _table_row) }, + )*) } #[inline(always)] @@ -1462,7 +1523,10 @@ macro_rules! impl_tuple_fetch { _table_row: TableRow ) -> bool { let ($($name,)*) = _fetch; - true $(&& $name::filter_fetch($name, _entity, _table_row))* + true $( + // SAFETY: It is the caller's responsibility to uphold the invariants of `filter_fetch` + && unsafe { $name::filter_fetch($name, _entity, _table_row) } + )* } fn update_component_access(state: &Self::State, _access: &mut FilteredAccess) { @@ -1521,7 +1585,13 @@ macro_rules! 
impl_anytuple_fetch { #[allow(clippy::unused_unit)] unsafe fn init_fetch<'w>(_world: UnsafeWorldCell<'w>, state: &Self::State, _last_run: Tick, _this_run: Tick) -> Self::Fetch<'w> { let ($($name,)*) = state; - ($(($name::init_fetch(_world, $name, _last_run, _this_run), false),)*) + ($( + ( + // SAFETY: It is the caller's responsibility to uphold the invariants of `init_fetch` + unsafe { $name::init_fetch(_world, $name, _last_run, _this_run) }, + false, + ), + )*) } const IS_DENSE: bool = true $(&& $name::IS_DENSE)*; @@ -1540,7 +1610,8 @@ macro_rules! impl_anytuple_fetch { $( $name.1 = $name::matches_component_set($state, &|id| _archetype.contains(id)); if $name.1 { - $name::set_archetype(&mut $name.0, $state, _archetype, _table); + // SAFETY: It is the caller's responsibility to uphold the invariants of `set_archetype` + unsafe { $name::set_archetype(&mut $name.0, $state, _archetype, _table); } } )* } @@ -1552,7 +1623,8 @@ macro_rules! impl_anytuple_fetch { $( $name.1 = $name::matches_component_set($state, &|id| _table.has_column(id)); if $name.1 { - $name::set_table(&mut $name.0, $state, _table); + // SAFETY: It is the caller's responsibility to uphold the invariants of `set_table` + unsafe { $name::set_table(&mut $name.0, $state, _table); } } )* } @@ -1566,7 +1638,10 @@ macro_rules! impl_anytuple_fetch { ) -> Self::Item<'w> { let ($($name,)*) = _fetch; ($( - $name.1.then(|| $name::fetch(&mut $name.0, _entity, _table_row)), + $name.1.then(|| { + // SAFETY: It is the caller's responsibility to uphold the invariants of `fetch` + unsafe { $name::fetch(&mut $name.0, _entity, _table_row) } + }), )*) } diff --git a/crates/bevy_ecs/src/query/filter.rs b/crates/bevy_ecs/src/query/filter.rs index f9a2738b066b7..34623fb88fb64 100644 --- a/crates/bevy_ecs/src/query/filter.rs +++ b/crates/bevy_ecs/src/query/filter.rs @@ -288,7 +288,8 @@ macro_rules! impl_query_filter_tuple { unsafe fn init_fetch<'w>(world: UnsafeWorldCell<'w>, state: &Self::State, last_run: Tick, this_run: Tick) -> Self::Fetch<'w> { let ($($filter,)*) = state; ($(OrFetch { - fetch: $filter::init_fetch(world, $filter, last_run, this_run), + // SAFETY: It is the caller's responsibility to uphold the invariants of `init_fetch` + fetch: unsafe { $filter::init_fetch(world, $filter, last_run, this_run) }, matches: false, },)*) } @@ -300,7 +301,8 @@ macro_rules! impl_query_filter_tuple { $( $filter.matches = $filter::matches_component_set($state, &|id| table.has_column(id)); if $filter.matches { - $filter::set_table(&mut $filter.fetch, $state, table); + // SAFETY: It is the caller's responsibility to uphold the invariants of `set_table` + unsafe { $filter::set_table(&mut $filter.fetch, $state, table); } } )* } @@ -317,7 +319,8 @@ macro_rules! impl_query_filter_tuple { $( $filter.matches = $filter::matches_component_set($state, &|id| archetype.contains(id)); if $filter.matches { - $filter::set_archetype(&mut $filter.fetch, $state, archetype, table); + // SAFETY: It is the caller's responsibility to uphold the invariants of `set_archetype` + unsafe { $filter::set_archetype(&mut $filter.fetch, $state, archetype, table); } } )* } @@ -329,7 +332,10 @@ macro_rules! 
impl_query_filter_tuple { _table_row: TableRow ) -> Self::Item<'w> { let ($($filter,)*) = fetch; - false $(|| ($filter.matches && $filter::filter_fetch(&mut $filter.fetch, _entity, _table_row)))* + false $(|| ( + // SAFETY: It is the caller's responsibility to uphold the invariants of `filter_fetch` + $filter.matches && unsafe { $filter::filter_fetch(&mut $filter.fetch, _entity, _table_row) } + ))* } #[inline(always)] @@ -338,7 +344,8 @@ macro_rules! impl_query_filter_tuple { entity: Entity, table_row: TableRow ) -> bool { - Self::fetch(fetch, entity, table_row) + // SAFETY: It is the caller's responsibility to uphold the invariants of `fetch` + unsafe { Self::fetch(fetch, entity, table_row) } } fn update_component_access(state: &Self::State, access: &mut FilteredAccess) { @@ -427,11 +434,12 @@ macro_rules! impl_tick_filter { table_ticks: None, sparse_set: (T::Storage::STORAGE_TYPE == StorageType::SparseSet) .then(|| { - world.unsafe_world() + // SAFETY: It is the caller's responsibility to uphold the invariants of `fetch` + unsafe { world.unsafe_world() .storages() .sparse_sets .get(id) - .debug_checked_unwrap() + .debug_checked_unwrap() } }), last_run, this_run, @@ -455,9 +463,10 @@ macro_rules! impl_tick_filter { ) { fetch.table_ticks = Some( $get_slice( - &table + // SAFETY: It is the caller's responsibility to uphold the invariants of `get_column` + unsafe { &table .get_column(component_id) - .debug_checked_unwrap() + .debug_checked_unwrap() } ).into(), ); } @@ -470,7 +479,8 @@ macro_rules! impl_tick_filter { table: &'w Table ) { if Self::IS_DENSE { - Self::set_table(fetch, component_id, table); + // SAFETY: It is the caller's responsibility to uphold the invariants of `set_table` + unsafe { Self::set_table(fetch, component_id, table); } } } @@ -480,7 +490,8 @@ macro_rules! impl_tick_filter { entity: Entity, table_row: TableRow ) -> Self::Item<'w> { - match T::Storage::STORAGE_TYPE { + // SAFETY: It is the caller's responsibility to ensure the `fetch` is valid in this context + unsafe { match T::Storage::STORAGE_TYPE { StorageType::Table => { fetch .table_ticks @@ -498,7 +509,7 @@ macro_rules! impl_tick_filter { .deref() .is_newer_than(fetch.last_run, fetch.this_run) } - } + } } } #[inline(always)] @@ -507,7 +518,8 @@ macro_rules! impl_tick_filter { entity: Entity, table_row: TableRow ) -> bool { - Self::fetch(fetch, entity, table_row) + // SAFETY: It is the caller's responsibility to uphold the invariants of `set_table` + unsafe { Self::fetch(fetch, entity, table_row) } } #[inline] diff --git a/crates/bevy_ecs/src/query/iter.rs b/crates/bevy_ecs/src/query/iter.rs index 142877efc59ca..c5156cb91e6af 100644 --- a/crates/bevy_ecs/src/query/iter.rs +++ b/crates/bevy_ecs/src/query/iter.rs @@ -34,9 +34,10 @@ impl<'w, 's, Q: WorldQuery, F: ReadOnlyWorldQuery> QueryIter<'w, 's, Q, F> { QueryIter { query_state, // SAFETY: We only access table data that has been registered in `query_state`. 
- tables: &world.storages().tables, + tables: unsafe { &world.storages().tables }, archetypes: world.archetypes(), - cursor: QueryIterationCursor::init(world, query_state, last_run, this_run), + // SAFETY: It is the caller's responsibility to uphold the invariants of `init` + cursor: unsafe { QueryIterationCursor::init(world, query_state, last_run, this_run) }, } } } @@ -99,15 +100,17 @@ where last_run: Tick, this_run: Tick, ) -> QueryManyIter<'w, 's, Q, F, I> { - let fetch = Q::init_fetch(world, &query_state.fetch_state, last_run, this_run); - let filter = F::init_fetch(world, &query_state.filter_state, last_run, this_run); + // SAFETY: It is the caller's responsibility to uphold the invariants of `init_fetch` + let fetch = unsafe { Q::init_fetch(world, &query_state.fetch_state, last_run, this_run) }; + // SAFETY: It is the caller's responsibility to uphold the invariants of `init_fetch` + let filter = unsafe { F::init_fetch(world, &query_state.filter_state, last_run, this_run) }; QueryManyIter { query_state, entities: world.entities(), archetypes: world.archetypes(), // SAFETY: We only access table data that has been registered in `query_state`. // This means `world` has permission to access the data we use. - tables: &world.storages().tables, + tables: unsafe { &world.storages().tables }, fetch, filter, entity_iter: entity_list.into_iter(), @@ -137,36 +140,43 @@ where continue; } - let archetype = self - .archetypes - .get(location.archetype_id) - .debug_checked_unwrap(); - let table = self.tables.get(location.table_id).debug_checked_unwrap(); + // SAFETY: The archetype was checked above + let archetype = unsafe { + self.archetypes + .get(location.archetype_id) + .debug_checked_unwrap() + }; + // SAFETY: The table is assumed to exist based on the archetype + let table = unsafe { self.tables.get(location.table_id).debug_checked_unwrap() }; // SAFETY: `archetype` is from the world that `fetch/filter` were created for, // `fetch_state`/`filter_state` are the states that `fetch/filter` were initialized with - Q::set_archetype( - &mut self.fetch, - &self.query_state.fetch_state, - archetype, - table, - ); + unsafe { + Q::set_archetype( + &mut self.fetch, + &self.query_state.fetch_state, + archetype, + table, + ); + } // SAFETY: `table` is from the world that `fetch/filter` were created for, // `fetch_state`/`filter_state` are the states that `fetch/filter` were initialized with - F::set_archetype( - &mut self.filter, - &self.query_state.filter_state, - archetype, - table, - ); + unsafe { + F::set_archetype( + &mut self.filter, + &self.query_state.filter_state, + archetype, + table, + ); + } // SAFETY: set_archetype was called prior. // `location.archetype_row` is an archetype index row in range of the current archetype, because if it was not, the match above would have `continue`d - if F::filter_fetch(&mut self.filter, entity, location.table_row) { + if unsafe { F::filter_fetch(&mut self.filter, entity, location.table_row) } { // SAFETY: // - set_archetype was called prior, `location.archetype_row` is an archetype index in range of the current archetype // - fetch is only called once for each entity. 
- return Some(Q::fetch(&mut self.fetch, entity, location.table_row)); + return unsafe { Some(Q::fetch(&mut self.fetch, entity, location.table_row)) }; } } None @@ -299,28 +309,35 @@ impl<'w, 's, Q: WorldQuery, F: ReadOnlyWorldQuery, const K: usize> .as_mut_ptr() .cast::>(); if K != 0 { - ptr.write(QueryIterationCursor::init( - world, - query_state, - last_run, - this_run, - )); + // SAFETY: It is the caller's responsibility to uphold the invariants of `write` and `init` + unsafe { + ptr.write(QueryIterationCursor::init( + world, + query_state, + last_run, + this_run, + )); + } } - for slot in (1..K).map(|offset| ptr.add(offset)) { - slot.write(QueryIterationCursor::init_empty( - world, - query_state, - last_run, - this_run, - )); + // SAFETY: The pointer is valid for all offsets between 1..K + unsafe { + for slot in (1..K).map(|offset| ptr.add(offset)) { + slot.write(QueryIterationCursor::init_empty( + world, + query_state, + last_run, + this_run, + )); + } } QueryCombinationIter { query_state, // SAFETY: We only access table data that has been registered in `query_state`. - tables: &world.storages().tables, + tables: unsafe { &world.storages().tables }, archetypes: world.archetypes(), - cursors: array.assume_init(), + // SAFETY: The array is initialized above + cursors: unsafe { array.assume_init() }, } } @@ -343,11 +360,15 @@ impl<'w, 's, Q: WorldQuery, F: ReadOnlyWorldQuery, const K: usize> // Make cursor in index `j` for all `j` in `[i, K)` a copy of `c` advanced `j-i+1` times. // If no such `c` exists, return `None` 'outer: for i in (0..K).rev() { - match self.cursors[i].next(self.tables, self.archetypes, self.query_state) { + // SAFETY: Tables, Archetypes, and Query State are all matched + match unsafe { self.cursors[i].next(self.tables, self.archetypes, self.query_state) } { Some(_) => { for j in (i + 1)..K { self.cursors[j] = self.cursors[j - 1].clone(); - match self.cursors[j].next(self.tables, self.archetypes, self.query_state) { + // SAFETY: Tables, Archetypes, and Query State are all matched + match unsafe { + self.cursors[j].next(self.tables, self.archetypes, self.query_state) + } { Some(_) => {} None if i > 0 => continue 'outer, None => return None, @@ -364,10 +385,14 @@ impl<'w, 's, Q: WorldQuery, F: ReadOnlyWorldQuery, const K: usize> let ptr = values.as_mut_ptr().cast::>(); for (offset, cursor) in self.cursors.iter_mut().enumerate() { - ptr.add(offset).write(cursor.peek_last().unwrap()); + // SAFETY: Internally assured offsets are valid + unsafe { + ptr.add(offset).write(cursor.peek_last().unwrap()); + } } - Some(values.assume_init()) + // SAFETY: The values are initialized above + unsafe { Some(values.assume_init()) } } /// Get next combination of queried components @@ -484,7 +509,8 @@ impl<'w, 's, Q: WorldQuery, F: ReadOnlyWorldQuery> QueryIterationCursor<'w, 's, QueryIterationCursor { table_id_iter: [].iter(), archetype_id_iter: [].iter(), - ..Self::init(world, query_state, last_run, this_run) + // SAFETY: It is the caller's responsibility to uphold the invariants of `init` + ..unsafe { Self::init(world, query_state, last_run, this_run) } } } @@ -497,8 +523,10 @@ impl<'w, 's, Q: WorldQuery, F: ReadOnlyWorldQuery> QueryIterationCursor<'w, 's, last_run: Tick, this_run: Tick, ) -> Self { - let fetch = Q::init_fetch(world, &query_state.fetch_state, last_run, this_run); - let filter = F::init_fetch(world, &query_state.filter_state, last_run, this_run); + // SAFETY: It is the caller's responsibility to uphold the invariants of `init_fetch` + let fetch = unsafe { 
Q::init_fetch(world, &query_state.fetch_state, last_run, this_run) }; + // SAFETY: It is the caller's responsibility to uphold the invariants of `init_fetch` + let filter = unsafe { F::init_fetch(world, &query_state.filter_state, last_run, this_run) }; QueryIterationCursor { fetch, filter, @@ -515,17 +543,20 @@ impl<'w, 's, Q: WorldQuery, F: ReadOnlyWorldQuery> QueryIterationCursor<'w, 's, #[inline] unsafe fn peek_last(&mut self) -> Option> { if self.current_row > 0 { - let index = self.current_row - 1; - if Self::IS_DENSE { - let entity = self.table_entities.get_unchecked(index); - Some(Q::fetch(&mut self.fetch, *entity, TableRow::new(index))) - } else { - let archetype_entity = self.archetype_entities.get_unchecked(index); - Some(Q::fetch( - &mut self.fetch, - archetype_entity.entity(), - archetype_entity.table_row(), - )) + // SAFETY: It is the caller's responsibility to uphold the invariants of `get_unchecked`, and `fetch` + unsafe { + let index = self.current_row - 1; + if Self::IS_DENSE { + let entity = self.table_entities.get_unchecked(index); + Some(Q::fetch(&mut self.fetch, *entity, TableRow::new(index))) + } else { + let archetype_entity = self.archetype_entities.get_unchecked(index); + Some(Q::fetch( + &mut self.fetch, + archetype_entity.entity(), + archetype_entity.table_row(), + )) + } } } else { None @@ -565,11 +596,14 @@ impl<'w, 's, Q: WorldQuery, F: ReadOnlyWorldQuery> QueryIterationCursor<'w, 's, // we are on the beginning of the query, or finished processing a table, so skip to the next if self.current_row == self.current_len { let table_id = self.table_id_iter.next()?; - let table = tables.get(*table_id).debug_checked_unwrap(); + // SAFETY: The caller ensures the tables are valid in this context + let table = unsafe { tables.get(*table_id).debug_checked_unwrap() }; // SAFETY: `table` is from the world that `fetch/filter` were created for, // `fetch_state`/`filter_state` are the states that `fetch/filter` were initialized with - Q::set_table(&mut self.fetch, &query_state.fetch_state, table); - F::set_table(&mut self.filter, &query_state.filter_state, table); + unsafe { + Q::set_table(&mut self.fetch, &query_state.fetch_state, table); + F::set_table(&mut self.filter, &query_state.filter_state, table); + } self.table_entities = table.entities(); self.current_len = table.entity_count(); self.current_row = 0; @@ -578,9 +612,10 @@ impl<'w, 's, Q: WorldQuery, F: ReadOnlyWorldQuery> QueryIterationCursor<'w, 's, // SAFETY: set_table was called prior. // `current_row` is a table row in range of the current table, because if it was not, then the if above would have been executed. - let entity = self.table_entities.get_unchecked(self.current_row); + let entity = unsafe { self.table_entities.get_unchecked(self.current_row) }; let row = TableRow::new(self.current_row); - if !F::filter_fetch(&mut self.filter, *entity, row) { + // SAFETY: It is the caller's responsibility to ensure the invariants for `filter_fetch` are upheld + if !unsafe { F::filter_fetch(&mut self.filter, *entity, row) } { self.current_row += 1; continue; } @@ -590,7 +625,7 @@ impl<'w, 's, Q: WorldQuery, F: ReadOnlyWorldQuery> QueryIterationCursor<'w, 's, // - `current_row` must be a table row in range of the current table, // because if it was not, then the if above would have been executed. // - fetch is only called once for each `entity`. 
- let item = Q::fetch(&mut self.fetch, *entity, row); + let item = unsafe { Q::fetch(&mut self.fetch, *entity, row) }; self.current_row += 1; return Some(item); @@ -599,31 +634,45 @@ impl<'w, 's, Q: WorldQuery, F: ReadOnlyWorldQuery> QueryIterationCursor<'w, 's, loop { if self.current_row == self.current_len { let archetype_id = self.archetype_id_iter.next()?; - let archetype = archetypes.get(*archetype_id).debug_checked_unwrap(); + // SAFETY: The caller ensures archetypes is valid in this context + let archetype = unsafe { archetypes.get(*archetype_id).debug_checked_unwrap() }; // SAFETY: `archetype` and `tables` are from the world that `fetch/filter` were created for, // `fetch_state`/`filter_state` are the states that `fetch/filter` were initialized with - let table = tables.get(archetype.table_id()).debug_checked_unwrap(); - Q::set_archetype(&mut self.fetch, &query_state.fetch_state, archetype, table); - F::set_archetype( - &mut self.filter, - &query_state.filter_state, - archetype, - table, - ); + unsafe { + let table = tables.get(archetype.table_id()).debug_checked_unwrap(); + Q::set_archetype( + &mut self.fetch, + &query_state.fetch_state, + archetype, + table, + ); + F::set_archetype( + &mut self.filter, + &query_state.filter_state, + archetype, + table, + ); + } self.archetype_entities = archetype.entities(); self.current_len = archetype.len(); self.current_row = 0; continue; } - // SAFETY: set_archetype was called prior. - // `current_row` is an archetype index row in range of the current archetype, because if it was not, then the if above would have been executed. - let archetype_entity = self.archetype_entities.get_unchecked(self.current_row); - if !F::filter_fetch( - &mut self.filter, - archetype_entity.entity(), - archetype_entity.table_row(), - ) { + let archetype_entity = { + // SAFETY: set_archetype was called prior. + // `current_row` is an archetype index row in range of the current archetype, because if it was not, then the if above would have been executed. + unsafe { self.archetype_entities.get_unchecked(self.current_row) } + }; + + // SAFETY: It is the caller's responsibility to uphold the invariants of `filter_fetch` + if !unsafe { + F::filter_fetch( + &mut self.filter, + archetype_entity.entity(), + archetype_entity.table_row(), + ) + } { self.current_row += 1; continue; } @@ -633,11 +682,13 @@ impl<'w, 's, Q: WorldQuery, F: ReadOnlyWorldQuery> QueryIterationCursor<'w, 's, // - `current_row` must be an archetype index row in range of the current archetype, // because if it was not, then the if above would have been executed. // - fetch is only called once for each `archetype_entity`. 
- let item = Q::fetch( - &mut self.fetch, - archetype_entity.entity(), - archetype_entity.table_row(), - ); + let item = unsafe { + Q::fetch( + &mut self.fetch, + archetype_entity.entity(), + archetype_entity.table_row(), + ) + }; self.current_row += 1; return Some(item); } diff --git a/crates/bevy_ecs/src/query/state.rs b/crates/bevy_ecs/src/query/state.rs index 45115c2d540f5..154b7e8ad25d3 100644 --- a/crates/bevy_ecs/src/query/state.rs +++ b/crates/bevy_ecs/src/query/state.rs @@ -91,7 +91,8 @@ impl QueryState { >( &self, ) -> &QueryState { - &*(self as *const QueryState as *const QueryState) + // SAFETY: See [`# Safety`] + unsafe { &*(self as *const QueryState as *const QueryState) } } } @@ -458,7 +459,10 @@ impl QueryState { entity: Entity, ) -> Result, QueryEntityError> { self.update_archetypes_unsafe_world_cell(world); - self.get_unchecked_manual(world, entity, world.last_change_tick(), world.change_tick()) + // SAFETY: It is the caller's responsibility to uphold the invariants of `get_unchecked_manual` + unsafe { + self.get_unchecked_manual(world, entity, world.last_change_tick(), world.change_tick()) + } } /// Gets the query result for the given [`World`] and [`Entity`], where the last change and @@ -488,25 +492,29 @@ impl QueryState { { return Err(QueryEntityError::QueryDoesNotMatch(entity)); } - let archetype = world - .archetypes() - .get(location.archetype_id) - .debug_checked_unwrap(); - let mut fetch = Q::init_fetch(world, &self.fetch_state, last_run, this_run); - let mut filter = F::init_fetch(world, &self.filter_state, last_run, this_run); - - let table = world - .storages() - .tables - .get(location.table_id) - .debug_checked_unwrap(); - Q::set_archetype(&mut fetch, &self.fetch_state, archetype, table); - F::set_archetype(&mut filter, &self.filter_state, archetype, table); - - if F::filter_fetch(&mut filter, entity, location.table_row) { - Ok(Q::fetch(&mut fetch, entity, location.table_row)) - } else { - Err(QueryEntityError::QueryDoesNotMatch(entity)) + + // SAFETY: See [`# Safety`] + unsafe { + let archetype = world + .archetypes() + .get(location.archetype_id) + .debug_checked_unwrap(); + let mut fetch = Q::init_fetch(world, &self.fetch_state, last_run, this_run); + let mut filter = F::init_fetch(world, &self.filter_state, last_run, this_run); + + let table = world + .storages() + .tables + .get(location.table_id) + .debug_checked_unwrap(); + Q::set_archetype(&mut fetch, &self.fetch_state, archetype, table); + F::set_archetype(&mut filter, &self.filter_state, archetype, table); + + if F::filter_fetch(&mut filter, entity, location.table_row) { + Ok(Q::fetch(&mut fetch, entity, location.table_row)) + } else { + Err(QueryEntityError::QueryDoesNotMatch(entity)) + } } } @@ -633,17 +641,19 @@ impl QueryState { for (value, entity) in std::iter::zip(&mut values, entities) { // SAFETY: fetch is read-only // and world must be validated - let item = self.as_readonly().get_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - entity, - last_run, - this_run, - )?; - *value = MaybeUninit::new(item); + unsafe { + let item = self.as_readonly().get_unchecked_manual( + world.as_unsafe_world_cell_readonly(), + entity, + last_run, + this_run, + )?; + *value = MaybeUninit::new(item); + } } // SAFETY: Each value has been fully initialized. 
- Ok(values.map(|x| x.assume_init())) + unsafe { Ok(values.map(|x| x.assume_init())) } } /// Gets the query results for the given [`World`] and array of [`Entity`], where the last change and @@ -675,12 +685,15 @@ impl QueryState { let mut values = [(); N].map(|_| MaybeUninit::uninit()); for (value, entity) in std::iter::zip(&mut values, entities) { - let item = self.get_unchecked_manual(world, entity, last_run, this_run)?; - *value = MaybeUninit::new(item); + // SAFETY: It is the caller's responsibility to uphold the invariants of `get_unchecked_manual` + unsafe { + let item = self.get_unchecked_manual(world, entity, last_run, this_run)?; + *value = MaybeUninit::new(item); + } } // SAFETY: Each value has been fully initialized. - Ok(values.map(|x| x.assume_init())) + unsafe { Ok(values.map(|x| x.assume_init())) } } /// Returns an [`Iterator`] over the query results for the given [`World`]. @@ -909,7 +922,8 @@ impl QueryState { world: UnsafeWorldCell<'w>, ) -> QueryIter<'w, 's, Q, F> { self.update_archetypes_unsafe_world_cell(world); - self.iter_unchecked_manual(world, world.last_change_tick(), world.change_tick()) + // SAFETY: It is the caller's responsibility to uphold the invariants of `iter_unchecked_manual` + unsafe { self.iter_unchecked_manual(world, world.last_change_tick(), world.change_tick()) } } /// Returns an [`Iterator`] over all possible combinations of `K` query results for the @@ -926,11 +940,14 @@ impl QueryState { world: UnsafeWorldCell<'w>, ) -> QueryCombinationIter<'w, 's, Q, F, K> { self.update_archetypes_unsafe_world_cell(world); - self.iter_combinations_unchecked_manual( - world, - world.last_change_tick(), - world.change_tick(), - ) + // SAFETY: It is the caller's responsibility to uphold the invariants of `iter_combinations_unchecked_manual` + unsafe { + self.iter_combinations_unchecked_manual( + world, + world.last_change_tick(), + world.change_tick(), + ) + } } /// Returns an [`Iterator`] for the given [`World`], where the last change and @@ -949,7 +966,8 @@ impl QueryState { last_run: Tick, this_run: Tick, ) -> QueryIter<'w, 's, Q, F> { - QueryIter::new(world, self, last_run, this_run) + // SAFETY: It is the caller's responsibility to uphold the invariants of `new` + unsafe { QueryIter::new(world, self, last_run, this_run) } } /// Returns an [`Iterator`] for the given [`World`] and list of [`Entity`]'s, where the last change and @@ -973,7 +991,8 @@ impl QueryState { where EntityList::Item: Borrow, { - QueryManyIter::new(world, self, entities, last_run, this_run) + // SAFETY: It is the caller's responsibility to uphold the invariants of `new` + unsafe { QueryManyIter::new(world, self, entities, last_run, this_run) } } /// Returns an [`Iterator`] over all possible combinations of `K` query results for the @@ -993,7 +1012,8 @@ impl QueryState { last_run: Tick, this_run: Tick, ) -> QueryCombinationIter<'w, 's, Q, F, K> { - QueryCombinationIter::new(world, self, last_run, this_run) + // SAFETY: It is the caller's responsibility to uphold the invariants of `new` + unsafe { QueryCombinationIter::new(world, self, last_run, this_run) } } /// Runs `func` on each query result for the given [`World`]. 
This is faster than the equivalent @@ -1046,7 +1066,15 @@ impl QueryState { func: FN, ) { self.update_archetypes_unsafe_world_cell(world); - self.for_each_unchecked_manual(world, func, world.last_change_tick(), world.change_tick()); + // SAFETY: It is the caller's responsibility to uphold the invariants of `for_each_unchecked_manual` + unsafe { + self.for_each_unchecked_manual( + world, + func, + world.last_change_tick(), + world.change_tick(), + ); + } } /// Returns a parallel iterator over the query results for the given [`World`]. @@ -1105,51 +1133,54 @@ impl QueryState { last_run: Tick, this_run: Tick, ) { - // NOTE: If you are changing query iteration code, remember to update the following places, where relevant: - // QueryIter, QueryIterationCursor, QueryManyIter, QueryCombinationIter, QueryState::for_each_unchecked_manual, QueryState::par_for_each_unchecked_manual - let mut fetch = Q::init_fetch(world, &self.fetch_state, last_run, this_run); - let mut filter = F::init_fetch(world, &self.filter_state, last_run, this_run); - - let tables = &world.storages().tables; - if Q::IS_DENSE && F::IS_DENSE { - for table_id in &self.matched_table_ids { - let table = tables.get(*table_id).debug_checked_unwrap(); - Q::set_table(&mut fetch, &self.fetch_state, table); - F::set_table(&mut filter, &self.filter_state, table); - - let entities = table.entities(); - for row in 0..table.entity_count() { - let entity = entities.get_unchecked(row); - let row = TableRow::new(row); - if !F::filter_fetch(&mut filter, *entity, row) { - continue; + // SAFETY: See [`# Safety`] + unsafe { + // NOTE: If you are changing query iteration code, remember to update the following places, where relevant: + // QueryIter, QueryIterationCursor, QueryManyIter, QueryCombinationIter, QueryState::for_each_unchecked_manual, QueryState::par_for_each_unchecked_manual + let mut fetch = Q::init_fetch(world, &self.fetch_state, last_run, this_run); + let mut filter = F::init_fetch(world, &self.filter_state, last_run, this_run); + + let tables = &world.storages().tables; + if Q::IS_DENSE && F::IS_DENSE { + for table_id in &self.matched_table_ids { + let table = tables.get(*table_id).debug_checked_unwrap(); + Q::set_table(&mut fetch, &self.fetch_state, table); + F::set_table(&mut filter, &self.filter_state, table); + + let entities = table.entities(); + for row in 0..table.entity_count() { + let entity = entities.get_unchecked(row); + let row = TableRow::new(row); + if !F::filter_fetch(&mut filter, *entity, row) { + continue; + } + func(Q::fetch(&mut fetch, *entity, row)); } - func(Q::fetch(&mut fetch, *entity, row)); } - } - } else { - let archetypes = world.archetypes(); - for archetype_id in &self.matched_archetype_ids { - let archetype = archetypes.get(*archetype_id).debug_checked_unwrap(); - let table = tables.get(archetype.table_id()).debug_checked_unwrap(); - Q::set_archetype(&mut fetch, &self.fetch_state, archetype, table); - F::set_archetype(&mut filter, &self.filter_state, archetype, table); - - let entities = archetype.entities(); - for idx in 0..archetype.len() { - let archetype_entity = entities.get_unchecked(idx); - if !F::filter_fetch( - &mut filter, - archetype_entity.entity(), - archetype_entity.table_row(), - ) { - continue; + } else { + let archetypes = world.archetypes(); + for archetype_id in &self.matched_archetype_ids { + let archetype = archetypes.get(*archetype_id).debug_checked_unwrap(); + let table = tables.get(archetype.table_id()).debug_checked_unwrap(); + Q::set_archetype(&mut fetch, &self.fetch_state, 
archetype, table); + F::set_archetype(&mut filter, &self.filter_state, archetype, table); + + let entities = archetype.entities(); + for idx in 0..archetype.len() { + let archetype_entity = entities.get_unchecked(idx); + if !F::filter_fetch( + &mut filter, + archetype_entity.entity(), + archetype_entity.table_row(), + ) { + continue; + } + func(Q::fetch( + &mut fetch, + archetype_entity.entity(), + archetype_entity.table_row(), + )); } - func(Q::fetch( - &mut fetch, - archetype_entity.entity(), - archetype_entity.table_row(), - )); } } } @@ -1186,105 +1217,108 @@ impl QueryState { // NOTE: If you are changing query iteration code, remember to update the following places, where relevant: // QueryIter, QueryIterationCursor, QueryManyIter, QueryCombinationIter, QueryState::for_each_unchecked_manual, QueryState::par_for_each_unchecked_manual bevy_tasks::ComputeTaskPool::get().scope(|scope| { - if Q::IS_DENSE && F::IS_DENSE { - // SAFETY: We only access table data that has been registered in `self.archetype_component_access`. - let tables = &world.storages().tables; - for table_id in &self.matched_table_ids { - let table = &tables[*table_id]; - if table.is_empty() { - continue; - } - - let mut offset = 0; - while offset < table.entity_count() { - let func = func.clone(); - let len = batch_size.min(table.entity_count() - offset); - let task = async move { - let mut fetch = - Q::init_fetch(world, &self.fetch_state, last_run, this_run); - let mut filter = - F::init_fetch(world, &self.filter_state, last_run, this_run); - let tables = &world.storages().tables; - let table = tables.get(*table_id).debug_checked_unwrap(); - let entities = table.entities(); - Q::set_table(&mut fetch, &self.fetch_state, table); - F::set_table(&mut filter, &self.filter_state, table); - for row in offset..offset + len { - let entity = entities.get_unchecked(row); - let row = TableRow::new(row); - if !F::filter_fetch(&mut filter, *entity, row) { - continue; + // SAFETY: See [`# Safety`] + unsafe { + if Q::IS_DENSE && F::IS_DENSE { + // SAFETY: We only access table data that has been registered in `self.archetype_component_access`. 
+ let tables = &world.storages().tables; + for table_id in &self.matched_table_ids { + let table = &tables[*table_id]; + if table.is_empty() { + continue; + } + + let mut offset = 0; + while offset < table.entity_count() { + let func = func.clone(); + let len = batch_size.min(table.entity_count() - offset); + let task = async move { + let mut fetch = + Q::init_fetch(world, &self.fetch_state, last_run, this_run); + let mut filter = + F::init_fetch(world, &self.filter_state, last_run, this_run); + let tables = &world.storages().tables; + let table = tables.get(*table_id).debug_checked_unwrap(); + let entities = table.entities(); + Q::set_table(&mut fetch, &self.fetch_state, table); + F::set_table(&mut filter, &self.filter_state, table); + for row in offset..offset + len { + let entity = entities.get_unchecked(row); + let row = TableRow::new(row); + if !F::filter_fetch(&mut filter, *entity, row) { + continue; + } + func(Q::fetch(&mut fetch, *entity, row)); } - func(Q::fetch(&mut fetch, *entity, row)); - } - }; - #[cfg(feature = "trace")] - let span = bevy_utils::tracing::info_span!( - "par_for_each", - query = std::any::type_name::(), - filter = std::any::type_name::(), - count = len, - ); - #[cfg(feature = "trace")] - let task = task.instrument(span); - scope.spawn(task); - offset += batch_size; + }; + #[cfg(feature = "trace")] + let span = bevy_utils::tracing::info_span!( + "par_for_each", + query = std::any::type_name::(), + filter = std::any::type_name::(), + count = len, + ); + #[cfg(feature = "trace")] + let task = task.instrument(span); + scope.spawn(task); + offset += batch_size; + } } - } - } else { - let archetypes = world.archetypes(); - for archetype_id in &self.matched_archetype_ids { - let mut offset = 0; - let archetype = &archetypes[*archetype_id]; - if archetype.is_empty() { - continue; - } - - while offset < archetype.len() { - let func = func.clone(); - let len = batch_size.min(archetype.len() - offset); - let task = async move { - let mut fetch = - Q::init_fetch(world, &self.fetch_state, last_run, this_run); - let mut filter = - F::init_fetch(world, &self.filter_state, last_run, this_run); - let tables = &world.storages().tables; - let archetype = - world.archetypes().get(*archetype_id).debug_checked_unwrap(); - let table = tables.get(archetype.table_id()).debug_checked_unwrap(); - Q::set_archetype(&mut fetch, &self.fetch_state, archetype, table); - F::set_archetype(&mut filter, &self.filter_state, archetype, table); - - let entities = archetype.entities(); - for archetype_row in offset..offset + len { - let archetype_entity = entities.get_unchecked(archetype_row); - if !F::filter_fetch( - &mut filter, - archetype_entity.entity(), - archetype_entity.table_row(), - ) { - continue; + } else { + let archetypes = world.archetypes(); + for archetype_id in &self.matched_archetype_ids { + let mut offset = 0; + let archetype = &archetypes[*archetype_id]; + if archetype.is_empty() { + continue; + } + + while offset < archetype.len() { + let func = func.clone(); + let len = batch_size.min(archetype.len() - offset); + let task = async move { + let mut fetch = + Q::init_fetch(world, &self.fetch_state, last_run, this_run); + let mut filter = + F::init_fetch(world, &self.filter_state, last_run, this_run); + let tables = &world.storages().tables; + let archetype = + world.archetypes().get(*archetype_id).debug_checked_unwrap(); + let table = tables.get(archetype.table_id()).debug_checked_unwrap(); + Q::set_archetype(&mut fetch, &self.fetch_state, archetype, table); + F::set_archetype(&mut 
filter, &self.filter_state, archetype, table); + + let entities = archetype.entities(); + for archetype_row in offset..offset + len { + let archetype_entity = entities.get_unchecked(archetype_row); + if !F::filter_fetch( + &mut filter, + archetype_entity.entity(), + archetype_entity.table_row(), + ) { + continue; + } + func(Q::fetch( + &mut fetch, + archetype_entity.entity(), + archetype_entity.table_row(), + )); } - func(Q::fetch( - &mut fetch, - archetype_entity.entity(), - archetype_entity.table_row(), - )); - } - }; - - #[cfg(feature = "trace")] - let span = bevy_utils::tracing::info_span!( - "par_for_each", - query = std::any::type_name::(), - filter = std::any::type_name::(), - count = len, - ); - #[cfg(feature = "trace")] - let task = task.instrument(span); - - scope.spawn(task); - offset += batch_size; + }; + + #[cfg(feature = "trace")] + let span = bevy_utils::tracing::info_span!( + "par_for_each", + query = std::any::type_name::(), + filter = std::any::type_name::(), + count = len, + ); + #[cfg(feature = "trace")] + let task = task.instrument(span); + + scope.spawn(task); + offset += batch_size; + } } } } @@ -1391,7 +1425,10 @@ impl QueryState { world: UnsafeWorldCell<'w>, ) -> Result, QuerySingleError> { self.update_archetypes_unsafe_world_cell(world); - self.get_single_unchecked_manual(world, world.last_change_tick(), world.change_tick()) + // SAFETY: It is the caller's responsibility to uphold the invariants of `get_single_unchecked_manual` + unsafe { + self.get_single_unchecked_manual(world, world.last_change_tick(), world.change_tick()) + } } /// Returns a query result when there is exactly one entity matching the query, @@ -1411,7 +1448,8 @@ impl QueryState { last_run: Tick, this_run: Tick, ) -> Result, QuerySingleError> { - let mut query = self.iter_unchecked_manual(world, last_run, this_run); + // SAFETY: It is the caller's responsibility to uphold the invariants of `iter_unchecked_manual` + let mut query = unsafe { self.iter_unchecked_manual(world, last_run, this_run) }; let first = query.next(); let extra = query.next().is_some(); diff --git a/crates/bevy_ecs/src/reflect/component.rs b/crates/bevy_ecs/src/reflect/component.rs index f91ac8e900225..6f98b0bdfab06 100644 --- a/crates/bevy_ecs/src/reflect/component.rs +++ b/crates/bevy_ecs/src/reflect/component.rs @@ -177,7 +177,7 @@ impl ReflectComponent { entity: UnsafeEntityCell<'a>, ) -> Option> { // SAFETY: safety requirements deferred to caller - (self.0.reflect_unchecked_mut)(entity) + unsafe { (self.0.reflect_unchecked_mut)(entity) } } /// Gets the value of this [`Component`] type from entity from `source_world` and [applies](Self::apply()) it to the value of this [`Component`] type in entity in `destination_world`. diff --git a/crates/bevy_ecs/src/reflect/resource.rs b/crates/bevy_ecs/src/reflect/resource.rs index 924eb54b09c8e..a8804fbd2c98d 100644 --- a/crates/bevy_ecs/src/reflect/resource.rs +++ b/crates/bevy_ecs/src/reflect/resource.rs @@ -116,7 +116,7 @@ impl ReflectResource { world: UnsafeWorldCell<'w>, ) -> Option> { // SAFETY: caller promises to uphold uniqueness guarantees - (self.0.reflect_unchecked_mut)(world) + unsafe { (self.0.reflect_unchecked_mut)(world) } } /// Gets the value of this [`Resource`] type from `source_world` and [applies](Self::apply()) it to the value of this [`Resource`] type in `destination_world`. 
diff --git a/crates/bevy_ecs/src/removal_detection.rs b/crates/bevy_ecs/src/removal_detection.rs index c645fe017a30d..d921e9b2f0c4e 100644 --- a/crates/bevy_ecs/src/removal_detection.rs +++ b/crates/bevy_ecs/src/removal_detection.rs @@ -274,6 +274,8 @@ unsafe impl<'a> SystemParam for &'a RemovedComponentEvents { world: UnsafeWorldCell<'w>, _change_tick: Tick, ) -> Self::Item<'w, 's> { - world.world_metadata().removed_components() + // SAFETY: no component value access, removed component events can be read in parallel and are + // never mutably borrowed during system execution + unsafe { world.world_metadata() }.removed_components() } } diff --git a/crates/bevy_ecs/src/schedule/executor/multi_threaded.rs b/crates/bevy_ecs/src/schedule/executor/multi_threaded.rs index dad3fa4459d20..7e3d0f18b7b6a 100644 --- a/crates/bevy_ecs/src/schedule/executor/multi_threaded.rs +++ b/crates/bevy_ecs/src/schedule/executor/multi_threaded.rs @@ -333,7 +333,7 @@ impl MultiThreadedExecutor { // SAFETY: `can_run` returned true, which means that: // - It must have called `update_archetype_component_access` for each run condition. // - There can be no systems running whose accesses would conflict with any conditions. - if !self.should_run(system_index, system, conditions, world_cell) { + if !unsafe { self.should_run(system_index, system, conditions, world_cell) } { self.skip_system_and_signal_dependents(system_index); continue; } @@ -450,8 +450,9 @@ impl MultiThreadedExecutor { // - The caller ensures that `world` has permission to read any data // required by the conditions. // - `update_archetype_component_access` has been called for each run condition. - let set_conditions_met = - evaluate_and_fold_conditions(&mut conditions.set_conditions[set_idx], world); + let set_conditions_met = unsafe { + evaluate_and_fold_conditions(&mut conditions.set_conditions[set_idx], world) + }; if !set_conditions_met { self.skipped_systems @@ -467,8 +468,9 @@ impl MultiThreadedExecutor { // - The caller ensures that `world` has permission to read any data // required by the conditions. // - `update_archetype_component_access` has been called for each run condition. - let system_conditions_met = - evaluate_and_fold_conditions(&mut conditions.system_conditions[system_index], world); + let system_conditions_met = unsafe { + evaluate_and_fold_conditions(&mut conditions.system_conditions[system_index], world) + }; if !system_conditions_met { self.skipped_systems.insert(system_index); diff --git a/crates/bevy_ecs/src/storage/blob_vec.rs b/crates/bevy_ecs/src/storage/blob_vec.rs index 6875b14cc76bf..a87bf9439404e 100644 --- a/crates/bevy_ecs/src/storage/blob_vec.rs +++ b/crates/bevy_ecs/src/storage/blob_vec.rs @@ -121,7 +121,6 @@ impl BlobVec { } // SAFETY: must not be called for a ZST item layout - #[warn(unsafe_op_in_unsafe_fn)] // to allow unsafe blocks in unsafe fn unsafe fn grow_exact(&mut self, increment: NonZeroUsize) { debug_assert!(self.item_layout.size() != 0); @@ -163,8 +162,16 @@ impl BlobVec { #[inline] pub unsafe fn initialize_unchecked(&mut self, index: usize, value: OwningPtr<'_>) { debug_assert!(index < self.len()); - let ptr = self.get_unchecked_mut(index); - std::ptr::copy_nonoverlapping::(value.as_ptr(), ptr.as_ptr(), self.item_layout.size()); + // SAFETY: It is the caller's responsibility to ensure the index is in bounds and the + // OwningPtr is initialized properly. 
+ unsafe { + let ptr = self.get_unchecked_mut(index); + std::ptr::copy_nonoverlapping::( + value.as_ptr(), + ptr.as_ptr(), + self.item_layout.size(), + ); + } } /// Replaces the value at `index` with `value`. This function does not do any bounds checking. @@ -180,8 +187,10 @@ impl BlobVec { debug_assert!(index < self.len()); // Pointer to the value in the vector that will get replaced. - // SAFETY: The caller ensures that `index` fits in this vector. - let destination = NonNull::from(self.get_unchecked_mut(index)); + let destination = NonNull::from( + // SAFETY: The caller ensures that `index` fits in this vector. + unsafe { self.get_unchecked_mut(index) }, + ); let source = value.as_ptr(); if let Some(drop) = self.drop { @@ -199,13 +208,16 @@ impl BlobVec { // that the element will not get observed or double dropped later. // - If a panic occurs, `self.len` will remain `0`, which ensures a double-drop // does not occur. Instead, all elements will be forgotten. - let old_value = OwningPtr::new(destination); + let old_value = unsafe { OwningPtr::new(destination) }; - // This closure will run in case `drop()` panics, + // SAFETY: This closure will run in case `drop()` panics, // which ensures that `value` does not get forgotten. - let on_unwind = OnDrop::new(|| drop(value)); + let on_unwind = OnDrop::new(|| unsafe { drop(value) }); - drop(old_value); + // SAFETY: This location will be initialized again shortly + unsafe { + drop(old_value); + } // If the above code does not panic, make sure that `value` doesn't get dropped. core::mem::forget(on_unwind); @@ -222,7 +234,13 @@ impl BlobVec { // so it must still be initialized and it is safe to transfer ownership into the vector. // - `source` and `destination` were obtained from different memory locations, // both of which we have exclusive access to, so they are guaranteed not to overlap. - std::ptr::copy_nonoverlapping::(source, destination.as_ptr(), self.item_layout.size()); + unsafe { + std::ptr::copy_nonoverlapping::( + source, + destination.as_ptr(), + self.item_layout.size(), + ); + } } /// Appends an element to the back of the vector. @@ -234,7 +252,10 @@ impl BlobVec { self.reserve_exact(1); let index = self.len; self.len += 1; - self.initialize_unchecked(index, value); + // SAFETY: It is the caller's responsibility to ensure value has a matching layout. + unsafe { + self.initialize_unchecked(index, value); + } } /// Forces the length of the vector to `len`. @@ -265,11 +286,14 @@ impl BlobVec { let new_len = self.len - 1; let size = self.item_layout.size(); if index != new_len { - std::ptr::swap_nonoverlapping::( - self.get_unchecked_mut(index).as_ptr(), - self.get_unchecked_mut(new_len).as_ptr(), - size, - ); + // SAFETY: The caller ensures index is valid + unsafe { + std::ptr::swap_nonoverlapping::( + self.get_unchecked_mut(index).as_ptr(), + self.get_unchecked_mut(new_len).as_ptr(), + size, + ); + } } self.len = new_len; // Cannot use get_unchecked here as this is technically out of bounds after changing len. @@ -277,7 +301,7 @@ impl BlobVec { // - `new_len` is less than the old len, so it must fit in this vector's allocation. // - `size` is a multiple of the erased type's alignment, // so adding a multiple of `size` will preserve alignment. - self.get_ptr_mut().byte_add(new_len * size).promote() + unsafe { self.get_ptr_mut().byte_add(new_len * size).promote() } } /// Removes the value at `index` and copies the value stored into `ptr`. 
@@ -290,14 +314,17 @@ impl BlobVec { #[inline] pub unsafe fn swap_remove_unchecked(&mut self, index: usize, ptr: PtrMut<'_>) { debug_assert!(index < self.len()); - let last = self.get_unchecked_mut(self.len - 1).as_ptr(); - let target = self.get_unchecked_mut(index).as_ptr(); - // Copy the item at the index into the provided ptr - std::ptr::copy_nonoverlapping::(target, ptr.as_ptr(), self.item_layout.size()); - // Recompress the storage by moving the previous last element into the - // now-free row overwriting the previous data. The removed row may be the last - // one so a non-overlapping copy must not be used here. - std::ptr::copy::(last, target, self.item_layout.size()); + // SAFETY: It is the caller's responsibility to ensure the index points to proper memory + unsafe { + let last = self.get_unchecked_mut(self.len - 1).as_ptr(); + let target = self.get_unchecked_mut(index).as_ptr(); + // Copy the item at the index into the provided ptr + std::ptr::copy_nonoverlapping::(target, ptr.as_ptr(), self.item_layout.size()); + // Recompress the storage by moving the previous last element into the + // now-free row overwriting the previous data. The removed row may be the last + // one so a non-overlapping copy must not be used here. + std::ptr::copy::(last, target, self.item_layout.size()); + } // Invalidate the data stored in the last row, as it has been moved self.len -= 1; } @@ -312,9 +339,12 @@ impl BlobVec { pub unsafe fn swap_remove_and_drop_unchecked(&mut self, index: usize) { debug_assert!(index < self.len()); let drop = self.drop; - let value = self.swap_remove_and_forget_unchecked(index); - if let Some(drop) = drop { - (drop)(value); + // SAFETY: It is the caller's responsibility to ensure index is valid + unsafe { + let value = self.swap_remove_and_forget_unchecked(index); + if let Some(drop) = drop { + (drop)(value); + } } } @@ -331,7 +361,7 @@ impl BlobVec { // so this operation will not overflow the original allocation. // - `size` is a multiple of the erased type's alignment, // so adding a multiple of `size` will preserve alignment. - self.get_ptr().byte_add(index * size) + unsafe { self.get_ptr().byte_add(index * size) } } /// Returns a mutable reference to the element at `index`, without doing bounds checking. @@ -347,7 +377,7 @@ impl BlobVec { // so this operation will not overflow the original allocation. // - `size` is a multiple of the erased type's alignment, // so adding a multiple of `size` will preserve alignment. - self.get_ptr_mut().byte_add(index * size) + unsafe { self.get_ptr_mut().byte_add(index * size) } } /// Gets a [`Ptr`] to the start of the vec @@ -370,7 +400,7 @@ impl BlobVec { /// The type `T` must be the type of the items in this [`BlobVec`]. pub unsafe fn get_slice(&self) -> &[UnsafeCell] { // SAFETY: the inner data will remain valid for as long as 'self. - std::slice::from_raw_parts(self.data.as_ptr() as *const UnsafeCell, self.len) + unsafe { std::slice::from_raw_parts(self.data.as_ptr() as *const UnsafeCell, self.len) } } /// Clears the vector, removing (and dropping) all values. @@ -477,7 +507,10 @@ mod tests { // SAFETY: The pointer points to a valid value of type `T` and it is safe to drop this value. unsafe fn drop_ptr(x: OwningPtr<'_>) { - x.drop_as::(); + // SAFETY: the caller must uphold the safety contract for `drop_as`. 
+ unsafe { + x.drop_as::(); + } } /// # Safety @@ -485,7 +518,10 @@ mod tests { /// `blob_vec` must have a layout that matches `Layout::new::()` unsafe fn push(blob_vec: &mut BlobVec, value: T) { OwningPtr::make(value, |ptr| { - blob_vec.push(ptr); + // SAFETY: the caller must uphold the safety contract for `push`. + unsafe { + blob_vec.push(ptr); + } }); } @@ -494,8 +530,11 @@ mod tests { /// `blob_vec` must have a layout that matches `Layout::new::()` unsafe fn swap_remove(blob_vec: &mut BlobVec, index: usize) -> T { assert!(index < blob_vec.len()); - let value = blob_vec.swap_remove_and_forget_unchecked(index); - value.read::() + // SAFETY: the caller must uphold the safety contract for `swap_remove_and_forget_unchecked` and `read`. + unsafe { + let value = blob_vec.swap_remove_and_forget_unchecked(index); + value.read::() + } } /// # Safety @@ -504,7 +543,8 @@ mod tests { /// value at the given `index` unsafe fn get_mut(blob_vec: &mut BlobVec, index: usize) -> &mut T { assert!(index < blob_vec.len()); - blob_vec.get_unchecked_mut(index).deref_mut::() + // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut` and `deref_mut`. + unsafe { blob_vec.get_unchecked_mut(index).deref_mut::() } } #[test] diff --git a/crates/bevy_ecs/src/storage/resource.rs b/crates/bevy_ecs/src/storage/resource.rs index c9e771e2aac06..98b9f073d916f 100644 --- a/crates/bevy_ecs/src/storage/resource.rs +++ b/crates/bevy_ecs/src/storage/resource.rs @@ -134,12 +134,14 @@ impl ResourceData { pub(crate) unsafe fn insert(&mut self, value: OwningPtr<'_>, change_tick: Tick) { if self.is_present() { self.validate_access(); - self.column.replace(Self::ROW, value, change_tick); + // SAFETY: It is the caller's responsibility to ensure `value` is valid for `replace` + unsafe { self.column.replace(Self::ROW, value, change_tick) }; } else { if !SEND { self.origin_thread_id = Some(std::thread::current().id()); } - self.column.push(value, ComponentTicks::new(change_tick)); + // SAFETY: It is the caller's responsibility to ensure `value` is valid for `push` + unsafe { self.column.push(value, ComponentTicks::new(change_tick)) }; } } @@ -160,17 +162,23 @@ impl ResourceData { ) { if self.is_present() { self.validate_access(); - self.column.replace_untracked(Self::ROW, value); - *self.column.get_added_tick_unchecked(Self::ROW).deref_mut() = change_ticks.added; - *self - .column - .get_changed_tick_unchecked(Self::ROW) - .deref_mut() = change_ticks.changed; + // SAFETY: It is the caller's responsibility to ensure `value` is valid + unsafe { + self.column.replace_untracked(Self::ROW, value); + *self.column.get_added_tick_unchecked(Self::ROW).deref_mut() = change_ticks.added; + *self + .column + .get_changed_tick_unchecked(Self::ROW) + .deref_mut() = change_ticks.changed; + } } else { if !SEND { self.origin_thread_id = Some(std::thread::current().id()); } - self.column.push(value, change_ticks); + // SAFETY: It is the caller's responsibility to ensure `value` is valid for `push` + unsafe { + self.column.push(value, change_ticks); + } } } diff --git a/crates/bevy_ecs/src/storage/sparse_set.rs b/crates/bevy_ecs/src/storage/sparse_set.rs index 9779f38d9185b..3b8360ada132a 100644 --- a/crates/bevy_ecs/src/storage/sparse_set.rs +++ b/crates/bevy_ecs/src/storage/sparse_set.rs @@ -172,11 +172,17 @@ impl ComponentSparseSet { if let Some(&dense_index) = self.sparse.get(entity.index()) { #[cfg(debug_assertions)] assert_eq!(entity, self.entities[dense_index as usize]); - self.dense - .replace(TableRow::new(dense_index as 
usize), value, change_tick); + // SAFETY: It is the caller's responsibility to uphold the invariants of `replace` + unsafe { + self.dense + .replace(TableRow::new(dense_index as usize), value, change_tick); + } } else { let dense_index = self.dense.len(); - self.dense.push(value, ComponentTicks::new(change_tick)); + // SAFETY: It is the caller's responsibility to uphold the invariants of `push` + unsafe { + self.dense.push(value, ComponentTicks::new(change_tick)); + } self.sparse.insert(entity.index(), dense_index as u32); #[cfg(debug_assertions)] assert_eq!(self.entities.len(), dense_index); diff --git a/crates/bevy_ecs/src/storage/table.rs b/crates/bevy_ecs/src/storage/table.rs index 48276ce3b3344..b6151a3698c9e 100644 --- a/crates/bevy_ecs/src/storage/table.rs +++ b/crates/bevy_ecs/src/storage/table.rs @@ -141,9 +141,12 @@ impl Column { #[inline] pub(crate) unsafe fn initialize(&mut self, row: TableRow, data: OwningPtr<'_>, tick: Tick) { debug_assert!(row.index() < self.len()); - self.data.initialize_unchecked(row.index(), data); - *self.added_ticks.get_unchecked_mut(row.index()).get_mut() = tick; - *self.changed_ticks.get_unchecked_mut(row.index()).get_mut() = tick; + // SAFETY: It is the caller's responsibility to ensure this row is properly allocated + unsafe { + self.data.initialize_unchecked(row.index(), data); + *self.added_ticks.get_unchecked_mut(row.index()).get_mut() = tick; + *self.changed_ticks.get_unchecked_mut(row.index()).get_mut() = tick; + } } /// Writes component data to the column at given row. @@ -154,8 +157,11 @@ impl Column { #[inline] pub(crate) unsafe fn replace(&mut self, row: TableRow, data: OwningPtr<'_>, change_tick: Tick) { debug_assert!(row.index() < self.len()); - self.data.replace_unchecked(row.index(), data); - *self.changed_ticks.get_unchecked_mut(row.index()).get_mut() = change_tick; + // SAFETY: It is the caller's responsibility to ensure this row is properly allocated + unsafe { + self.data.replace_unchecked(row.index(), data); + *self.changed_ticks.get_unchecked_mut(row.index()).get_mut() = change_tick; + } } /// Writes component data to the column at given row. @@ -167,7 +173,10 @@ impl Column { #[inline] pub(crate) unsafe fn replace_untracked(&mut self, row: TableRow, data: OwningPtr<'_>) { debug_assert!(row.index() < self.len()); - self.data.replace_unchecked(row.index(), data); + // SAFETY: It is the caller's responsibility to ensure this row is properly allocated + unsafe { + self.data.replace_unchecked(row.index(), data); + } } /// Gets the current number of elements stored in the column. 
@@ -195,7 +204,10 @@ impl Column {
     /// [`Drop`]: std::ops::Drop
     #[inline]
     pub(crate) unsafe fn swap_remove_unchecked(&mut self, row: TableRow) {
-        self.data.swap_remove_and_drop_unchecked(row.index());
+        // SAFETY: It is the caller's responsibility to ensure the row is valid
+        unsafe {
+            self.data.swap_remove_and_drop_unchecked(row.index());
+        }
         self.added_ticks.swap_remove(row.index());
         self.changed_ticks.swap_remove(row.index());
     }
@@ -243,7 +255,8 @@ impl Column {
         &mut self,
         row: TableRow,
     ) -> (OwningPtr<'_>, ComponentTicks) {
-        let data = self.data.swap_remove_and_forget_unchecked(row.index());
+        // SAFETY: It is the caller's responsibility to ensure the row is valid
+        let data = unsafe { self.data.swap_remove_and_forget_unchecked(row.index()) };
         let added = self.added_ticks.swap_remove(row.index()).into_inner();
         let changed = self.changed_ticks.swap_remove(row.index()).into_inner();
         (data, ComponentTicks { added, changed })
@@ -268,12 +281,15 @@ impl Column {
         dst_row: TableRow,
     ) {
         debug_assert!(self.data.layout() == other.data.layout());
-        let ptr = self.data.get_unchecked_mut(dst_row.index());
-        other.data.swap_remove_unchecked(src_row.index(), ptr);
-        *self.added_ticks.get_unchecked_mut(dst_row.index()) =
-            other.added_ticks.swap_remove(src_row.index());
-        *self.changed_ticks.get_unchecked_mut(dst_row.index()) =
-            other.changed_ticks.swap_remove(src_row.index());
+        // SAFETY: See [`# Safety`]
+        unsafe {
+            let ptr = self.data.get_unchecked_mut(dst_row.index());
+            other.data.swap_remove_unchecked(src_row.index(), ptr);
+            *self.added_ticks.get_unchecked_mut(dst_row.index()) =
+                other.added_ticks.swap_remove(src_row.index());
+            *self.changed_ticks.get_unchecked_mut(dst_row.index()) =
+                other.changed_ticks.swap_remove(src_row.index());
+        }
     }
 
     /// Pushes a new value onto the end of the [`Column`].
@@ -281,7 +297,10 @@ impl Column {
     /// # Safety
     /// `ptr` must point to valid data of this column's component type
     pub(crate) unsafe fn push(&mut self, ptr: OwningPtr<'_>, ticks: ComponentTicks) {
-        self.data.push(ptr);
+        // SAFETY: It is the caller's responsibility to ensure the pointer is valid.
+        unsafe {
+            self.data.push(ptr);
+        }
         self.added_ticks.push(UnsafeCell::new(ticks.added));
         self.changed_ticks.push(UnsafeCell::new(ticks.changed));
     }
@@ -314,7 +333,8 @@ impl Column {
     ///
     /// [`UnsafeCell`]: std::cell::UnsafeCell
     pub unsafe fn get_data_slice(&self) -> &[UnsafeCell] {
-        self.data.get_slice()
+        // SAFETY: It is the caller's responsibility to ensure T is an appropriate type
+        unsafe { self.data.get_slice() }
     }
 
     /// Fetches the slice to the [`Column`]'s "added" change detection ticks.
@@ -379,7 +399,8 @@ impl Column {
     #[inline]
     pub unsafe fn get_data_unchecked(&self, row: TableRow) -> Ptr<'_> {
         debug_assert!(row.index() < self.data.len());
-        self.data.get_unchecked(row.index())
+        // SAFETY: It is the caller's responsibility to ensure row is valid and uniquely accessed
+        unsafe { self.data.get_unchecked(row.index()) }
     }
 
     /// Fetches a mutable reference to the data at `row`.
@@ -401,7 +422,8 @@ impl Column {
     #[inline]
     pub(crate) unsafe fn get_data_unchecked_mut(&mut self, row: TableRow) -> PtrMut<'_> {
         debug_assert!(row.index() < self.data.len());
-        self.data.get_unchecked_mut(row.index())
+        // SAFETY: The caller ensures the index is in bounds and no other references exist
+        unsafe { self.data.get_unchecked_mut(row.index()) }
     }
 
     /// Fetches the "added" change detection tick for the value at `row`.
@@ -453,7 +475,8 @@ impl Column {
     #[inline]
     pub unsafe fn get_added_tick_unchecked(&self, row: TableRow) -> &UnsafeCell {
         debug_assert!(row.index() < self.added_ticks.len());
-        self.added_ticks.get_unchecked(row.index())
+        // SAFETY: The caller ensures row is valid
+        unsafe { self.added_ticks.get_unchecked(row.index()) }
     }
 
     /// Fetches the "changed" change detection tick for the value at `row`. Unlike [`Column::get_changed_tick`]
@@ -464,7 +487,8 @@ impl Column {
     #[inline]
     pub unsafe fn get_changed_tick_unchecked(&self, row: TableRow) -> &UnsafeCell {
         debug_assert!(row.index() < self.changed_ticks.len());
-        self.changed_ticks.get_unchecked(row.index())
+        // SAFETY: The caller ensures row is valid
+        unsafe { self.changed_ticks.get_unchecked(row.index()) }
     }
 
     /// Fetches the change detection ticks for the value at `row`. Unlike [`Column::get_ticks`]
@@ -476,9 +500,12 @@ impl Column {
     pub unsafe fn get_ticks_unchecked(&self, row: TableRow) -> ComponentTicks {
         debug_assert!(row.index() < self.added_ticks.len());
         debug_assert!(row.index() < self.changed_ticks.len());
-        ComponentTicks {
-            added: self.added_ticks.get_unchecked(row.index()).read(),
-            changed: self.changed_ticks.get_unchecked(row.index()).read(),
+        // SAFETY: The caller ensures row is valid
+        unsafe {
+            ComponentTicks {
+                added: self.added_ticks.get_unchecked(row.index()).read(),
+                changed: self.changed_ticks.get_unchecked(row.index()).read(),
+            }
         }
     }
 
@@ -572,7 +599,10 @@ impl Table {
     /// `row` must be in-bounds
    pub(crate) unsafe fn swap_remove_unchecked(&mut self, row: TableRow) -> Option {
         for column in self.columns.values_mut() {
-            column.swap_remove_unchecked(row);
+            // SAFETY: The caller ensures row is valid
+            unsafe {
+                column.swap_remove_unchecked(row);
+            }
         }
         let is_last = row.index() == self.entities.len() - 1;
         self.entities.swap_remove(row.index());
@@ -598,13 +628,19 @@ impl Table {
     ) -> TableMoveResult {
         debug_assert!(row.index() < self.entity_count());
         let is_last = row.index() == self.entities.len() - 1;
-        let new_row = new_table.allocate(self.entities.swap_remove(row.index()));
+        // SAFETY: The caller ensures the row is valid
+        let new_row = unsafe { new_table.allocate(self.entities.swap_remove(row.index())) };
         for (component_id, column) in self.columns.iter_mut() {
             if let Some(new_column) = new_table.get_column_mut(*component_id) {
-                new_column.initialize_from_unchecked(column, row, new_row);
+                // SAFETY: The location is valid for this row and column
+                unsafe {
+                    new_column.initialize_from_unchecked(column, row, new_row);
+                }
             } else {
-                // It's the caller's responsibility to drop these cases.
-                let (_, _) = column.swap_remove_and_forget_unchecked(row);
+                // SAFETY: It's the caller's responsibility to drop these cases.
+                unsafe {
+                    let (_, _) = column.swap_remove_and_forget_unchecked(row);
+                }
             }
         }
         TableMoveResult {
@@ -630,12 +666,17 @@ impl Table {
     ) -> TableMoveResult {
         debug_assert!(row.index() < self.entity_count());
         let is_last = row.index() == self.entities.len() - 1;
-        let new_row = new_table.allocate(self.entities.swap_remove(row.index()));
+        // SAFETY: The caller ensures the row is valid
+        let new_row = unsafe { new_table.allocate(self.entities.swap_remove(row.index())) };
         for (component_id, column) in self.columns.iter_mut() {
             if let Some(new_column) = new_table.get_column_mut(*component_id) {
-                new_column.initialize_from_unchecked(column, row, new_row);
+                // SAFETY: The location is valid for this row and column
+                unsafe {
+                    new_column.initialize_from_unchecked(column, row, new_row);
+                }
             } else {
-                column.swap_remove_unchecked(row);
+                // SAFETY: The caller ensures the row is valid
+                unsafe { column.swap_remove_unchecked(row) };
             }
         }
         TableMoveResult {
@@ -661,12 +702,16 @@ impl Table {
     ) -> TableMoveResult {
         debug_assert!(row.index() < self.entity_count());
         let is_last = row.index() == self.entities.len() - 1;
-        let new_row = new_table.allocate(self.entities.swap_remove(row.index()));
+        // SAFETY: The caller ensures the row is valid
+        let new_row = unsafe { new_table.allocate(self.entities.swap_remove(row.index())) };
         for (component_id, column) in self.columns.iter_mut() {
-            new_table
-                .get_column_mut(*component_id)
-                .debug_checked_unwrap()
-                .initialize_from_unchecked(column, row, new_row);
+            // SAFETY: The location is valid for this row and column
+            unsafe {
+                new_table
+                    .get_column_mut(*component_id)
+                    .debug_checked_unwrap()
+                    .initialize_from_unchecked(column, row, new_row);
+            }
         }
         TableMoveResult {
             new_row,
@@ -733,7 +778,10 @@ impl Table {
         let index = self.entities.len();
         self.entities.push(entity);
         for column in self.columns.values_mut() {
-            column.data.set_len(self.entities.len());
+            // SAFETY: The caller ensures they will initialize this memory
+            unsafe {
+                column.data.set_len(self.entities.len());
+            }
             column.added_ticks.push(UnsafeCell::new(Tick::new(0)));
             column.changed_ticks.push(UnsafeCell::new(Tick::new(0)));
         }
@@ -865,7 +913,10 @@ impl Tables {
            .or_insert_with(|| {
                 let mut table = TableBuilder::with_capacity(0, component_ids.len());
                 for component_id in component_ids {
-                    table.add_column(components.get_info_unchecked(*component_id));
+                    table.add_column(
+                        // SAFETY: The caller ensures the component ID is valid
+                        unsafe { components.get_info_unchecked(*component_id) },
+                    );
                 }
                 tables.push(table.build());
                 (component_ids.to_vec(), TableId::new(tables.len() - 1))
diff --git a/crates/bevy_ecs/src/system/adapter_system.rs b/crates/bevy_ecs/src/system/adapter_system.rs
index 288e91138868b..34c0be5fce003 100644
--- a/crates/bevy_ecs/src/system/adapter_system.rs
+++ b/crates/bevy_ecs/src/system/adapter_system.rs
@@ -107,8 +107,9 @@ where
     #[inline]
     unsafe fn run_unsafe(&mut self, input: Self::In, world: UnsafeWorldCell) -> Self::Out {
         // SAFETY: `system.run_unsafe` has the same invariants as `self.run_unsafe`.
- self.func - .adapt(input, |input| self.system.run_unsafe(input, world)) + self.func.adapt(input, |input| unsafe { + self.system.run_unsafe(input, world) + }) } #[inline] diff --git a/crates/bevy_ecs/src/system/combinator.rs b/crates/bevy_ecs/src/system/combinator.rs index 1d20927ef995b..2f317738b5267 100644 --- a/crates/bevy_ecs/src/system/combinator.rs +++ b/crates/bevy_ecs/src/system/combinator.rs @@ -162,17 +162,19 @@ where } unsafe fn run_unsafe(&mut self, input: Self::In, world: UnsafeWorldCell) -> Self::Out { - Func::combine( - input, - // SAFETY: The world accesses for both underlying systems have been registered, - // so the caller will guarantee that no other systems will conflict with `a` or `b`. - // Since these closures are `!Send + !Sync + !'static`, they can never be called - // in parallel, so their world accesses will not conflict with each other. - // Additionally, `update_archetype_component_access` has been called, - // which forwards to the implementations for `self.a` and `self.b`. - |input| self.a.run_unsafe(input, world), - |input| self.b.run_unsafe(input, world), - ) + // SAFETY: The world accesses for both underlying systems have been registered, + // so the caller will guarantee that no other systems will conflict with `a` or `b`. + // Since these closures are `!Send + !Sync + !'static`, they can never be called + // in parallel, so their world accesses will not conflict with each other. + // Additionally, `update_archetype_component_access` has been called, + // which forwards to the implementations for `self.a` and `self.b`. + unsafe { + Func::combine( + input, + |input| self.a.run_unsafe(input, world), + |input| self.b.run_unsafe(input, world), + ) + } } fn run<'w>(&mut self, input: Self::In, world: &'w mut World) -> Self::Out { diff --git a/crates/bevy_ecs/src/system/function_system.rs b/crates/bevy_ecs/src/system/function_system.rs index aa11b1a03f3cb..9560f11217434 100644 --- a/crates/bevy_ecs/src/system/function_system.rs +++ b/crates/bevy_ecs/src/system/function_system.rs @@ -323,7 +323,8 @@ impl SystemState { world: UnsafeWorldCell<'w>, ) -> SystemParamItem<'w, 's, Param> { let change_tick = world.increment_change_tick(); - self.fetch(world, change_tick) + // SAFETY: It is the caller's responsibility to uphold the invariants of `fetch` + unsafe { self.fetch(world, change_tick) } } /// # Safety @@ -336,7 +337,10 @@ impl SystemState { world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> SystemParamItem<'w, 's, Param> { - let param = Param::get_param(&mut self.param_state, &self.meta, world, change_tick); + let param = { + // SAFETY: It is the caller's responsibility to uphold the invariants of `get_param` + unsafe { Param::get_param(&mut self.param_state, &self.meta, world, change_tick) } + }; self.meta.last_run = change_tick; param } @@ -470,12 +474,14 @@ where // if the world does not match. // - All world accesses used by `F::Param` have been registered, so the caller // will ensure that there are no data access conflicts. 
- let params = F::Param::get_param( - self.param_state.as_mut().expect(Self::PARAM_MESSAGE), - &self.system_meta, - world, - change_tick, - ); + let params = unsafe { + F::Param::get_param( + self.param_state.as_mut().expect(Self::PARAM_MESSAGE), + &self.system_meta, + world, + change_tick, + ) + }; let out = self.func.run(input, params); self.system_meta.last_run = change_tick; out diff --git a/crates/bevy_ecs/src/system/query.rs b/crates/bevy_ecs/src/system/query.rs index e3b1f16a2b314..3318d0fe0bd43 100644 --- a/crates/bevy_ecs/src/system/query.rs +++ b/crates/bevy_ecs/src/system/query.rs @@ -658,8 +658,10 @@ impl<'w, 's, Q: WorldQuery, F: ReadOnlyWorldQuery> Query<'w, 's, Q, F> { // SAFETY: // - `self.world` has permission to access the required components. // - The caller ensures that this operation will not result in any aliased mutable accesses. - self.state - .iter_unchecked_manual(self.world, self.last_run, self.this_run) + unsafe { + self.state + .iter_unchecked_manual(self.world, self.last_run, self.this_run) + } } /// Iterates over all possible combinations of `K` query items without repetition. @@ -679,8 +681,10 @@ impl<'w, 's, Q: WorldQuery, F: ReadOnlyWorldQuery> Query<'w, 's, Q, F> { // SAFETY: // - `self.world` has permission to access the required components. // - The caller ensures that this operation will not result in any aliased mutable accesses. - self.state - .iter_combinations_unchecked_manual(self.world, self.last_run, self.this_run) + unsafe { + self.state + .iter_combinations_unchecked_manual(self.world, self.last_run, self.this_run) + } } /// Returns an [`Iterator`] over the query items generated from an [`Entity`] list. @@ -704,8 +708,14 @@ impl<'w, 's, Q: WorldQuery, F: ReadOnlyWorldQuery> Query<'w, 's, Q, F> { // SAFETY: // - `self.world` has permission to access the required components. // - The caller ensures that this operation will not result in any aliased mutable accesses. - self.state - .iter_many_unchecked_manual(entities, self.world, self.last_run, self.this_run) + unsafe { + self.state.iter_many_unchecked_manual( + entities, + self.world, + self.last_run, + self.this_run, + ) + } } /// Runs `f` on each read-only query item. @@ -1064,8 +1074,10 @@ impl<'w, 's, Q: WorldQuery, F: ReadOnlyWorldQuery> Query<'w, 's, Q, F> { pub unsafe fn get_unchecked(&self, entity: Entity) -> Result, QueryEntityError> { // SEMI-SAFETY: system runs without conflicts with other systems. // same-system queries have runtime borrow checks when they conflict - self.state - .get_unchecked_manual(self.world, entity, self.last_run, self.this_run) + unsafe { + self.state + .get_unchecked_manual(self.world, entity, self.last_run, self.this_run) + } } /// Returns a shared reference to the component `T` of the given [`Entity`]. diff --git a/crates/bevy_ecs/src/system/system_param.rs b/crates/bevy_ecs/src/system/system_param.rs index e5343002df95a..67e2db291c47d 100644 --- a/crates/bevy_ecs/src/system/system_param.rs +++ b/crates/bevy_ecs/src/system/system_param.rs @@ -201,7 +201,7 @@ unsafe impl SystemPara // SAFETY: We have registered all of the query's world accesses, // so the caller ensures that `world` has permission to access any // world data that the query needs. 
- Query::new(world, state, system_meta.last_run, change_tick, false) + unsafe { Query::new(world, state, system_meta.last_run, change_tick, false) } } } @@ -445,23 +445,28 @@ unsafe impl<'a, T: Resource> SystemParam for Res<'a, T> { world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> Self::Item<'w, 's> { - let (ptr, ticks) = world - .get_resource_with_ticks(component_id) - .unwrap_or_else(|| { - panic!( - "Resource requested by {} does not exist: {}", - system_meta.name, - std::any::type_name::() - ) - }); - Res { - value: ptr.deref(), - ticks: Ticks { - added: ticks.added.deref(), - changed: ticks.changed.deref(), - last_run: system_meta.last_run, - this_run: change_tick, - }, + let (ptr, ticks) = { + // SAFETY: The caller ensures we can access this world + unsafe { world.get_resource_with_ticks(component_id) } + } + .unwrap_or_else(|| { + panic!( + "Resource requested by {} does not exist: {}", + system_meta.name, + std::any::type_name::() + ) + }); + // SAFETY: The caller ensures it is sound to deref this information + unsafe { + Res { + value: ptr.deref(), + ticks: Ticks { + added: ticks.added.deref(), + changed: ticks.changed.deref(), + last_run: system_meta.last_run, + this_run: change_tick, + }, + } } } } @@ -485,17 +490,20 @@ unsafe impl<'a, T: Resource> SystemParam for Option> { world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> Self::Item<'w, 's> { - world - .get_resource_with_ticks(component_id) - .map(|(ptr, ticks)| Res { - value: ptr.deref(), - ticks: Ticks { - added: ticks.added.deref(), - changed: ticks.changed.deref(), - last_run: system_meta.last_run, - this_run: change_tick, - }, - }) + // SAFETY: The caller ensures it is sound to access this world + unsafe { + world + .get_resource_with_ticks(component_id) + .map(|(ptr, ticks)| Res { + value: ptr.deref(), + ticks: Ticks { + added: ticks.added.deref(), + changed: ticks.changed.deref(), + last_run: system_meta.last_run, + this_run: change_tick, + }, + }) + } } } @@ -538,23 +546,25 @@ unsafe impl<'a, T: Resource> SystemParam for ResMut<'a, T> { world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> Self::Item<'w, 's> { - let value = world - .get_resource_mut_by_id(component_id) - .unwrap_or_else(|| { - panic!( - "Resource requested by {} does not exist: {}", - system_meta.name, - std::any::type_name::() - ) - }); - ResMut { - value: value.value.deref_mut::(), - ticks: TicksMut { - added: value.ticks.added, - changed: value.ticks.changed, - last_run: system_meta.last_run, - this_run: change_tick, - }, + // SAFETY: The caller ensures we can access this world + let value = unsafe { world.get_resource_mut_by_id(component_id) }.unwrap_or_else(|| { + panic!( + "Resource requested by {} does not exist: {}", + system_meta.name, + std::any::type_name::() + ) + }); + // SAFETY: The caller ensures it is sound to deref_mut this information + unsafe { + ResMut { + value: value.value.deref_mut::(), + ticks: TicksMut { + added: value.ticks.added, + changed: value.ticks.changed, + last_run: system_meta.last_run, + this_run: change_tick, + }, + } } } } @@ -575,17 +585,20 @@ unsafe impl<'a, T: Resource> SystemParam for Option> { world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> Self::Item<'w, 's> { - world - .get_resource_mut_by_id(component_id) - .map(|value| ResMut { - value: value.value.deref_mut::(), - ticks: TicksMut { - added: value.ticks.added, - changed: value.ticks.changed, - last_run: system_meta.last_run, - this_run: change_tick, - }, - }) + // SAFETY: The caller ensures it is sound to access this world + unsafe { + world + 
.get_resource_mut_by_id(component_id) + .map(|value| ResMut { + value: value.value.deref_mut::(), + ticks: TicksMut { + added: value.ticks.added, + changed: value.ticks.changed, + last_run: system_meta.last_run, + this_run: change_tick, + }, + }) + } } } @@ -628,7 +641,7 @@ unsafe impl SystemParam for &'_ World { _change_tick: Tick, ) -> Self::Item<'w, 's> { // SAFETY: Read-only access to the entire world was registered in `init_state`. - world.world() + unsafe { world.world() } } } @@ -1021,21 +1034,26 @@ unsafe impl<'a, T: 'static> SystemParam for NonSend<'a, T> { world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> Self::Item<'w, 's> { - let (ptr, ticks) = world - .get_non_send_with_ticks(component_id) - .unwrap_or_else(|| { - panic!( - "Non-send resource requested by {} does not exist: {}", - system_meta.name, - std::any::type_name::() - ) - }); - - NonSend { - value: ptr.deref(), - ticks: ticks.read(), - last_run: system_meta.last_run, - this_run: change_tick, + let (ptr, ticks) = { + // SAFETY: It is the caller's responsibility to uphold the invariants of `get_non_send_with_ticks` + unsafe { world.get_non_send_with_ticks(component_id) } + } + .unwrap_or_else(|| { + panic!( + "Non-send resource requested by {} does not exist: {}", + system_meta.name, + std::any::type_name::() + ) + }); + + // SAFETY: It is the caller's responsibility to ensure a `NonSend` can be built + unsafe { + NonSend { + value: ptr.deref(), + ticks: ticks.read(), + last_run: system_meta.last_run, + this_run: change_tick, + } } } } @@ -1059,14 +1077,18 @@ unsafe impl SystemParam for Option> { world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> Self::Item<'w, 's> { - world - .get_non_send_with_ticks(component_id) - .map(|(ptr, ticks)| NonSend { - value: ptr.deref(), - ticks: ticks.read(), - last_run: system_meta.last_run, - this_run: change_tick, - }) + // SAFETY: It is the caller's responsibility to uphold the invariants of `get_non_send_with_ticks` + unsafe { world.get_non_send_with_ticks(component_id) }.map(|(ptr, ticks)| { + // SAFETY: It is the caller's responsibility to ensure a `NonSend` can be built + unsafe { + NonSend { + value: ptr.deref(), + ticks: ticks.read(), + last_run: system_meta.last_run, + this_run: change_tick, + } + } + }) } } @@ -1111,18 +1133,23 @@ unsafe impl<'a, T: 'static> SystemParam for NonSendMut<'a, T> { world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> Self::Item<'w, 's> { - let (ptr, ticks) = world - .get_non_send_with_ticks(component_id) - .unwrap_or_else(|| { - panic!( - "Non-send resource requested by {} does not exist: {}", - system_meta.name, - std::any::type_name::() - ) - }); - NonSendMut { - value: ptr.assert_unique().deref_mut(), - ticks: TicksMut::from_tick_cells(ticks, system_meta.last_run, change_tick), + let (ptr, ticks) = { + // SAFETY: It is the caller's responsibility to uphold the invariants of `get_non_send_with_ticks` + unsafe { world.get_non_send_with_ticks(component_id) } + } + .unwrap_or_else(|| { + panic!( + "Non-send resource requested by {} does not exist: {}", + system_meta.name, + std::any::type_name::() + ) + }); + // SAFETY: It is the caller's responsibility to ensure a `NonSendMut` can be built + unsafe { + NonSendMut { + value: ptr.assert_unique().deref_mut(), + ticks: TicksMut::from_tick_cells(ticks, system_meta.last_run, change_tick), + } } } } @@ -1143,12 +1170,16 @@ unsafe impl<'a, T: 'static> SystemParam for Option> { world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> Self::Item<'w, 's> { - world - .get_non_send_with_ticks(component_id) - 
.map(|(ptr, ticks)| NonSendMut { - value: ptr.assert_unique().deref_mut(), - ticks: TicksMut::from_tick_cells(ticks, system_meta.last_run, change_tick), - }) + // SAFETY: It is the caller's responsibility to uphold the invariants of `get_non_send_with_ticks` + unsafe { world.get_non_send_with_ticks(component_id) }.map(|(ptr, ticks)| { + // SAFETY: It is the caller's responsibility to ensure a `NonSendMut` can be built + unsafe { + NonSendMut { + value: ptr.assert_unique().deref_mut(), + ticks: TicksMut::from_tick_cells(ticks, system_meta.last_run, change_tick), + } + } + }) } } @@ -1388,7 +1419,10 @@ macro_rules! impl_system_param_tuple { ) -> Self::Item<'w, 's> { let ($($param,)*) = state; - ($($param::get_param($param, _system_meta, _world, _change_tick),)*) + ($( + // SAFETY: It is the caller's responsibility to uphold the invariants of `get_param` + unsafe { $param::get_param($param, _system_meta, _world, _change_tick) }, + )*) } } }; @@ -1528,7 +1562,7 @@ unsafe impl SystemParam for StaticSystemParam<'_, '_, change_tick: Tick, ) -> Self::Item<'world, 'state> { // SAFETY: Defer to the safety of P::SystemParam - StaticSystemParam(P::get_param(state, system_meta, world, change_tick)) + unsafe { StaticSystemParam(P::get_param(state, system_meta, world, change_tick)) } } } diff --git a/crates/bevy_ecs/src/world/entity_ref.rs b/crates/bevy_ecs/src/world/entity_ref.rs index 3a497db57aa02..9a2aaf57c19e0 100644 --- a/crates/bevy_ecs/src/world/entity_ref.rs +++ b/crates/bevy_ecs/src/world/entity_ref.rs @@ -619,13 +619,16 @@ impl<'w> EntityWorldMut<'w> { change_tick, ); - self.location = insert_dynamic_bundle( - bundle_inserter, - self.entity, - self.location, - Some(component).into_iter(), - Some(storage_type).into_iter(), - ); + // SAFETY: The entity, storage, and bundle are all appropriate. + self.location = unsafe { + insert_dynamic_bundle( + bundle_inserter, + self.entity, + self.location, + Some(component).into_iter(), + Some(storage_type).into_iter(), + ) + }; self } @@ -662,13 +665,16 @@ impl<'w> EntityWorldMut<'w> { change_tick, ); - self.location = insert_dynamic_bundle( - bundle_inserter, - self.entity, - self.location, - iter_components, - storage_types.iter().cloned(), - ); + // SAFETY: The entity, storage, and bundle are all appropriate. + self.location = unsafe { + insert_dynamic_bundle( + bundle_inserter, + self.entity, + self.location, + iter_components, + storage_types.iter().cloned(), + ) + }; self } @@ -769,50 +775,59 @@ impl<'w> EntityWorldMut<'w> { if let Some(swapped_entity) = remove_result.swapped_entity { let swapped_location = entities.get(swapped_entity).unwrap(); - entities.set( - swapped_entity.index(), - EntityLocation { - archetype_id: swapped_location.archetype_id, - archetype_row: old_location.archetype_row, - table_id: swapped_location.table_id, - table_row: swapped_location.table_row, - }, - ); + // SAFETY: Index and location are both valid. + unsafe { + entities.set( + swapped_entity.index(), + EntityLocation { + archetype_id: swapped_location.archetype_id, + archetype_row: old_location.archetype_row, + table_id: swapped_location.table_id, + table_row: swapped_location.table_row, + }, + ); + } } let old_table_row = remove_result.table_row; let old_table_id = old_archetype.table_id(); let new_archetype = &mut archetypes[new_archetype_id]; let new_location = if old_table_id == new_archetype.table_id() { - new_archetype.allocate(entity, old_table_row) + // SAFETY: The allocation will be initialized below. 
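Note: every hunk in this stretch follows the same mechanical shape, sketched below. This is an illustrative example only, not code from the patch, and the function and names in it are hypothetical. With `unsafe_op_in_unsafe_fn` active, the body of an `unsafe fn` no longer acts as an implicit unsafe context, so each individual unsafe operation gets its own `unsafe {}` block with a SAFETY comment justifying it:

    #![deny(unsafe_op_in_unsafe_fn)]

    /// # Safety
    /// `ptr` must be non-null, properly aligned, and valid for reads of `len` `u32`s.
    unsafe fn sum(ptr: *const u32, len: usize) -> u32 {
        let mut total = 0;
        for i in 0..len {
            // SAFETY: the caller guarantees `ptr..ptr + len` is valid for reads,
            // so both the pointer offset and the dereference stay in bounds.
            total += unsafe { *ptr.add(i) };
        }
        total
    }

Without the inner `unsafe {}` block the dereference compiles today but fails once the lint is denied, which is exactly the class of change applied throughout these hunks.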
+ unsafe { new_archetype.allocate(entity, old_table_row) } } else { let (old_table, new_table) = storages .tables .get_2_mut(old_table_id, new_archetype.table_id()); // SAFETY: old_table_row exists - let move_result = if DROP { - old_table.move_to_and_drop_missing_unchecked(old_table_row, new_table) - } else { - old_table.move_to_and_forget_missing_unchecked(old_table_row, new_table) + let move_result = unsafe { + if DROP { + old_table.move_to_and_drop_missing_unchecked(old_table_row, new_table) + } else { + old_table.move_to_and_forget_missing_unchecked(old_table_row, new_table) + } }; // SAFETY: move_result.new_row is a valid position in new_archetype's table - let new_location = new_archetype.allocate(entity, move_result.new_row); + let new_location = unsafe { new_archetype.allocate(entity, move_result.new_row) }; // if an entity was moved into this entity's table row, update its table row if let Some(swapped_entity) = move_result.swapped_entity { let swapped_location = entities.get(swapped_entity).unwrap(); - entities.set( - swapped_entity.index(), - EntityLocation { - archetype_id: swapped_location.archetype_id, - archetype_row: swapped_location.archetype_row, - table_id: swapped_location.table_id, - table_row: old_location.table_row, - }, - ); + // SAFETY: The above ensures location and index are valid + unsafe { + entities.set( + swapped_entity.index(), + EntityLocation { + archetype_id: swapped_location.archetype_id, + archetype_row: swapped_location.archetype_row, + table_id: swapped_location.table_id, + table_row: old_location.table_row, + }, + ); + } archetypes[swapped_location.archetype_id] .set_entity_table_row(swapped_location.archetype_row, old_table_row); } @@ -822,7 +837,9 @@ impl<'w> EntityWorldMut<'w> { *self_location = new_location; // SAFETY: The entity is valid and has been moved to the new location already. - entities.set(entity.index(), new_location); + unsafe { + entities.set(entity.index(), new_location); + } } /// Removes any components in the [`Bundle`] from the entity. 
@@ -1113,7 +1130,7 @@ unsafe fn remove_bundle_from_archetype( for component_id in bundle_info.components().iter().cloned() { if current_archetype.contains(component_id) { // SAFETY: bundle components were already initialized by bundles.get_info - let component_info = components.get_info_unchecked(component_id); + let component_info = unsafe { components.get_info_unchecked(component_id) }; match component_info.storage_type() { StorageType::Table => removed_table_components.push(component_id), StorageType::SparseSet => removed_sparse_set_components.push(component_id), @@ -1145,9 +1162,11 @@ unsafe fn remove_bundle_from_archetype( current_archetype.table_id() } else { // SAFETY: all components in next_table_components exist - storages - .tables - .get_id_or_insert(&next_table_components, components) + unsafe { + storages + .tables + .get_id_or_insert(&next_table_components, components) + } }; } @@ -1208,7 +1227,7 @@ pub(crate) unsafe fn take_component<'a>( location: EntityLocation, ) -> OwningPtr<'a> { // SAFETY: caller promises component_id to be valid - let component_info = components.get_info_unchecked(component_id); + let component_info = unsafe { components.get_info_unchecked(component_id) }; removed_components.send(component_id, entity); match component_info.storage_type() { StorageType::Table => { @@ -1218,9 +1237,11 @@ pub(crate) unsafe fn take_component<'a>( // - archetypes only store valid table_rows // - index is in bounds as promised by caller // - promote is safe because the caller promises to remove the table row without dropping it immediately afterwards - components - .get_data_unchecked_mut(location.table_row) - .promote() + unsafe { + components + .get_data_unchecked_mut(location.table_row) + .promote() + } } StorageType::SparseSet => storages .sparse_sets diff --git a/crates/bevy_ecs/src/world/mod.rs b/crates/bevy_ecs/src/world/mod.rs index be43fcce3d770..654d054a9d248 100644 --- a/crates/bevy_ecs/src/world/mod.rs +++ b/crates/bevy_ecs/src/world/mod.rs @@ -763,14 +763,17 @@ impl World { /// must be called on an entity that was just allocated unsafe fn spawn_at_empty_internal(&mut self, entity: Entity) -> EntityWorldMut { let archetype = self.archetypes.empty_mut(); + // SAFETY: This memory is immediately written to. // PERF: consider avoiding allocating entities in the empty archetype unless needed - let table_row = self.storages.tables[archetype.table_id()].allocate(entity); + let table_row = unsafe { self.storages.tables[archetype.table_id()].allocate(entity) }; // SAFETY: no components are allocated by archetype.allocate() because the archetype is // empty - let location = archetype.allocate(entity, table_row); + let location = unsafe { archetype.allocate(entity, table_row) }; // SAFETY: entity index was just allocated - self.entities.set(entity.index(), location); - EntityWorldMut::new(self, entity, location) + unsafe { + self.entities.set(entity.index(), location); + EntityWorldMut::new(self, entity, location) + } } /// Spawns a batch of entities with the same component [`Bundle`] type. Takes a given @@ -1633,8 +1636,10 @@ impl World { let change_tick = self.change_tick(); // SAFETY: value is valid for component_id, ensured by caller - self.initialize_resource_internal(component_id) - .insert(value, change_tick); + unsafe { + self.initialize_resource_internal(component_id) + .insert(value, change_tick); + } } /// Inserts a new `!Send` resource with the given `value`. 
Will replace the value if it already @@ -1658,8 +1663,10 @@ impl World { let change_tick = self.change_tick(); // SAFETY: value is valid for component_id, ensured by caller - self.initialize_non_send_internal(component_id) - .insert(value, change_tick); + unsafe { + self.initialize_non_send_internal(component_id) + .insert(value, change_tick); + } } /// # Panics diff --git a/crates/bevy_ecs/src/world/unsafe_world_cell.rs b/crates/bevy_ecs/src/world/unsafe_world_cell.rs index 85dcbc5554cdf..fc90168bb9e1f 100644 --- a/crates/bevy_ecs/src/world/unsafe_world_cell.rs +++ b/crates/bevy_ecs/src/world/unsafe_world_cell.rs @@ -1,7 +1,5 @@ //! Contains types that allow disjoint mutable access to a [`World`]. -#![warn(unsafe_op_in_unsafe_fn)] - use super::{Mut, Ref, World, WorldId}; use crate::{ archetype::{Archetype, ArchetypeComponentId, Archetypes}, @@ -910,7 +908,6 @@ impl<'w> UnsafeWorldCell<'w> { /// - `storage_type` must accurately reflect where the components for `component_id` are stored. /// - the caller must ensure that no aliasing rules are violated #[inline] -#[allow(unsafe_op_in_unsafe_fn)] unsafe fn get_component( world: UnsafeWorldCell<'_>, component_id: ComponentId, @@ -919,13 +916,15 @@ unsafe fn get_component( location: EntityLocation, ) -> Option> { // SAFETY: component_id exists and is therefore valid - match storage_type { - StorageType::Table => { - let components = world.fetch_table(location, component_id)?; - // SAFETY: archetypes only store valid table_rows and caller ensure aliasing rules - Some(components.get_data_unchecked(location.table_row)) + unsafe { + match storage_type { + StorageType::Table => { + let components = world.fetch_table(location, component_id)?; + // SAFETY: archetypes only store valid table_rows and caller ensure aliasing rules + Some(components.get_data_unchecked(location.table_row)) + } + StorageType::SparseSet => world.fetch_sparse_set(component_id)?.get(entity), } - StorageType::SparseSet => world.fetch_sparse_set(component_id)?.get(entity), } } @@ -937,7 +936,6 @@ unsafe fn get_component( /// - `storage_type` must accurately reflect where the components for `component_id` are stored. 
/// - the caller must ensure that no aliasing rules are violated #[inline] -#[allow(unsafe_op_in_unsafe_fn)] unsafe fn get_component_and_ticks( world: UnsafeWorldCell<'_>, component_id: ComponentId, @@ -947,18 +945,23 @@ unsafe fn get_component_and_ticks( ) -> Option<(Ptr<'_>, TickCells<'_>)> { match storage_type { StorageType::Table => { - let components = world.fetch_table(location, component_id)?; - // SAFETY: archetypes only store valid table_rows and caller ensure aliasing rules - Some(( - components.get_data_unchecked(location.table_row), - TickCells { - added: components.get_added_tick_unchecked(location.table_row), - changed: components.get_changed_tick_unchecked(location.table_row), - }, - )) + unsafe { + let components = world.fetch_table(location, component_id)?; + + Some(( + components.get_data_unchecked(location.table_row), + TickCells { + added: components.get_added_tick_unchecked(location.table_row), + changed: components.get_changed_tick_unchecked(location.table_row), + }, + )) + } + } + StorageType::SparseSet => { + // SAFETY: archetypes only store valid table_rows and caller ensure aliasing rules + unsafe { world.fetch_sparse_set(component_id) }?.get_with_ticks(entity) } - StorageType::SparseSet => world.fetch_sparse_set(component_id)?.get_with_ticks(entity), } } @@ -971,7 +974,6 @@ unsafe fn get_component_and_ticks( /// - `storage_type` must accurately reflect where the components for `component_id` are stored. /// - the caller must ensure that no aliasing rules are violated #[inline] -#[allow(unsafe_op_in_unsafe_fn)] unsafe fn get_ticks( world: UnsafeWorldCell<'_>, component_id: ComponentId, @@ -981,10 +983,14 @@ unsafe fn get_ticks( ) -> Option { match storage_type { StorageType::Table => { - let components = world.fetch_table(location, component_id)?; + // SAFETY: It is the caller's responsibility to uphold the invariants of `fetch_table` + let components = unsafe { world.fetch_table(location, component_id) }?; // SAFETY: archetypes only store valid table_rows and caller ensure aliasing rules - Some(components.get_ticks_unchecked(location.table_row)) + unsafe { Some(components.get_ticks_unchecked(location.table_row)) } + } + StorageType::SparseSet => { + // SAFETY: It is the caller's responsibility to uphold the invariants of `fetch_sparse_set` + unsafe { world.fetch_sparse_set(component_id) }?.get_ticks(entity) } - StorageType::SparseSet => world.fetch_sparse_set(component_id)?.get_ticks(entity), } } diff --git a/crates/bevy_ecs_compile_fail_tests/src/lib.rs b/crates/bevy_ecs_compile_fail_tests/src/lib.rs index e12684f7648c9..40fa27fd9203d 100644 --- a/crates/bevy_ecs_compile_fail_tests/src/lib.rs +++ b/crates/bevy_ecs_compile_fail_tests/src/lib.rs @@ -1,3 +1,4 @@ #![allow(clippy::type_complexity)] +#![forbid(unsafe_op_in_unsafe_fn)] // Nothing here, check out the integration tests diff --git a/crates/bevy_encase_derive/src/lib.rs b/crates/bevy_encase_derive/src/lib.rs index cc81b6edd52d0..6e19b30574e44 100644 --- a/crates/bevy_encase_derive/src/lib.rs +++ b/crates/bevy_encase_derive/src/lib.rs @@ -1,4 +1,5 @@ #![allow(clippy::type_complexity)] +#![forbid(unsafe_op_in_unsafe_fn)] use bevy_macro_utils::BevyManifest; use encase_derive_impl::{implement, syn}; diff --git a/crates/bevy_gilrs/src/lib.rs b/crates/bevy_gilrs/src/lib.rs index 1afcf515f6613..e0da61fd27657 100644 --- a/crates/bevy_gilrs/src/lib.rs +++ b/crates/bevy_gilrs/src/lib.rs @@ -1,4 +1,5 @@ #![allow(clippy::type_complexity)] +#![forbid(unsafe_op_in_unsafe_fn)] mod converter; mod gilrs_system; diff --git 
a/crates/bevy_gizmos/src/lib.rs b/crates/bevy_gizmos/src/lib.rs index 79f696e1f063e..3e3dde3e8c9d3 100644 --- a/crates/bevy_gizmos/src/lib.rs +++ b/crates/bevy_gizmos/src/lib.rs @@ -1,5 +1,6 @@ #![allow(clippy::type_complexity)] #![warn(missing_docs)] +#![forbid(unsafe_op_in_unsafe_fn)] //! This crate adds an immediate mode drawing api to Bevy for visual debugging. //! diff --git a/crates/bevy_gltf/src/lib.rs b/crates/bevy_gltf/src/lib.rs index 8474ab66e77c7..383f04567bcd5 100644 --- a/crates/bevy_gltf/src/lib.rs +++ b/crates/bevy_gltf/src/lib.rs @@ -1,4 +1,5 @@ #![allow(clippy::type_complexity)] +#![forbid(unsafe_op_in_unsafe_fn)] #[cfg(feature = "bevy_animation")] use bevy_animation::AnimationClip; diff --git a/crates/bevy_hierarchy/src/lib.rs b/crates/bevy_hierarchy/src/lib.rs index d616e5384ae13..a2f886b889cf6 100644 --- a/crates/bevy_hierarchy/src/lib.rs +++ b/crates/bevy_hierarchy/src/lib.rs @@ -1,5 +1,6 @@ #![allow(clippy::type_complexity)] #![warn(missing_docs)] +#![forbid(unsafe_op_in_unsafe_fn)] //! `bevy_hierarchy` can be used to define hierarchies of entities. //! //! Most commonly, these hierarchies are used for inheriting `Transform` values diff --git a/crates/bevy_input/src/lib.rs b/crates/bevy_input/src/lib.rs index 7af8fec03add2..1cce2d2a119f2 100644 --- a/crates/bevy_input/src/lib.rs +++ b/crates/bevy_input/src/lib.rs @@ -1,5 +1,6 @@ #![allow(clippy::type_complexity)] #![warn(missing_docs)] +#![forbid(unsafe_op_in_unsafe_fn)] //! Input functionality for the [Bevy game engine](https://bevyengine.org/). //! diff --git a/crates/bevy_internal/src/lib.rs b/crates/bevy_internal/src/lib.rs index 8e6bf57e52909..f7b3794800676 100644 --- a/crates/bevy_internal/src/lib.rs +++ b/crates/bevy_internal/src/lib.rs @@ -1,5 +1,6 @@ #![allow(clippy::type_complexity)] #![warn(missing_docs)] +#![forbid(unsafe_op_in_unsafe_fn)] //! This module is separated into its own crate to enable simple dynamic linking for Bevy, and should not be used directly /// `use bevy::prelude::*;` to import common components, bundles, and plugins. diff --git a/crates/bevy_log/src/lib.rs b/crates/bevy_log/src/lib.rs index a912d7d984b5c..376326e773c57 100644 --- a/crates/bevy_log/src/lib.rs +++ b/crates/bevy_log/src/lib.rs @@ -1,5 +1,6 @@ #![allow(clippy::type_complexity)] #![warn(missing_docs)] +#![forbid(unsafe_op_in_unsafe_fn)] //! This crate provides logging functions and configuration for [Bevy](https://bevyengine.org) //! apps, and automatically configures platform specific log handlers (i.e. WASM or Android). //! 
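The crate-level changes in this stretch all add the same attribute to each crate root. As a minimal sketch (hypothetical crate, not one of the files in this patch), forbidding the lint at the crate root forces every unsafe operation inside an `unsafe fn` to be wrapped explicitly:

    #![forbid(unsafe_op_in_unsafe_fn)]

    use core::cell::UnsafeCell;

    /// # Safety
    /// The caller must guarantee that `cell` is not being mutated concurrently.
    pub unsafe fn read(cell: &UnsafeCell<u32>) -> u32 {
        // SAFETY: `UnsafeCell::get` itself is safe; only dereferencing the raw
        // pointer is unsafe, and the caller upholds the aliasing requirement.
        unsafe { *cell.get() }
    }

Unlike `deny`, `forbid` cannot be re-`allow`ed further down the module tree.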
diff --git a/crates/bevy_macro_utils/src/lib.rs b/crates/bevy_macro_utils/src/lib.rs index 59e224d51194d..c97e7331b72c0 100644 --- a/crates/bevy_macro_utils/src/lib.rs +++ b/crates/bevy_macro_utils/src/lib.rs @@ -1,4 +1,5 @@ #![allow(clippy::type_complexity)] +#![forbid(unsafe_op_in_unsafe_fn)] extern crate proc_macro; diff --git a/crates/bevy_macros_compile_fail_tests/src/lib.rs b/crates/bevy_macros_compile_fail_tests/src/lib.rs index d0d1683dd6b97..5fd6ba537d44b 100644 --- a/crates/bevy_macros_compile_fail_tests/src/lib.rs +++ b/crates/bevy_macros_compile_fail_tests/src/lib.rs @@ -1 +1,2 @@ // Nothing here, check out the integration tests +#![forbid(unsafe_op_in_unsafe_fn)] diff --git a/crates/bevy_math/src/lib.rs b/crates/bevy_math/src/lib.rs index b88de2e3a6dc9..73834be18ed72 100644 --- a/crates/bevy_math/src/lib.rs +++ b/crates/bevy_math/src/lib.rs @@ -6,6 +6,7 @@ #![allow(clippy::type_complexity)] #![warn(missing_docs)] +#![forbid(unsafe_op_in_unsafe_fn)] mod affine3; pub mod cubic_splines; diff --git a/crates/bevy_mikktspace/src/generated.rs b/crates/bevy_mikktspace/src/generated.rs index a7022e2d786c6..c8f112f8b6a58 100644 --- a/crates/bevy_mikktspace/src/generated.rs +++ b/crates/bevy_mikktspace/src/generated.rs @@ -42,7 +42,8 @@ non_upper_case_globals, unused_mut, unused_assignments, - unused_variables + unused_variables, + unsafe_op_in_unsafe_fn )] use std::ptr::null_mut; @@ -230,113 +231,110 @@ pub unsafe fn genTangSpace(geometry: &mut I, fAngularThreshold: f32 let mut piTriListIn = vec![0i32; 3 * iNrTrianglesIn]; let mut pTriInfos = vec![STriInfo::zero(); iNrTrianglesIn]; - unsafe { - iNrTSPaces = GenerateInitialVerticesIndexList( - &mut pTriInfos, - &mut piTriListIn, - geometry, - iNrTrianglesIn, - ); - GenerateSharedVerticesIndexList(piTriListIn.as_mut_ptr(), geometry, iNrTrianglesIn); - iTotTris = iNrTrianglesIn; - iDegenTriangles = 0; - t = 0; - while t < iTotTris as usize { - let i0 = piTriListIn[t * 3 + 0]; - let i1 = piTriListIn[t * 3 + 1]; - let i2 = piTriListIn[t * 3 + 2]; - let p0 = get_position(geometry, i0 as usize); - let p1 = get_position(geometry, i1 as usize); - let p2 = get_position(geometry, i2 as usize); - if p0 == p1 || p0 == p2 || p1 == p2 { - pTriInfos[t].iFlag |= 1i32; - iDegenTriangles += 1 - } - t += 1 + iNrTSPaces = GenerateInitialVerticesIndexList( + &mut pTriInfos, + &mut piTriListIn, + geometry, + iNrTrianglesIn, + ); + GenerateSharedVerticesIndexList(piTriListIn.as_mut_ptr(), geometry, iNrTrianglesIn); + iTotTris = iNrTrianglesIn; + iDegenTriangles = 0; + t = 0; + while t < iTotTris as usize { + let i0 = piTriListIn[t * 3 + 0]; + let i1 = piTriListIn[t * 3 + 1]; + let i2 = piTriListIn[t * 3 + 2]; + let p0 = get_position(geometry, i0 as usize); + let p1 = get_position(geometry, i1 as usize); + let p2 = get_position(geometry, i2 as usize); + if p0 == p1 || p0 == p2 || p1 == p2 { + pTriInfos[t].iFlag |= 1i32; + iDegenTriangles += 1 } - iNrTrianglesIn = iTotTris - iDegenTriangles; - DegenPrologue( - pTriInfos.as_mut_ptr(), - piTriListIn.as_mut_ptr(), - iNrTrianglesIn as i32, - iTotTris as i32, - ); - InitTriInfo( - pTriInfos.as_mut_ptr(), - piTriListIn.as_ptr(), - geometry, - iNrTrianglesIn, - ); - iNrMaxGroups = iNrTrianglesIn * 3; + t += 1 + } + iNrTrianglesIn = iTotTris - iDegenTriangles; + DegenPrologue( + pTriInfos.as_mut_ptr(), + piTriListIn.as_mut_ptr(), + iNrTrianglesIn as i32, + iTotTris as i32, + ); + InitTriInfo( + pTriInfos.as_mut_ptr(), + piTriListIn.as_ptr(), + geometry, + iNrTrianglesIn, + ); + iNrMaxGroups = iNrTrianglesIn * 3; - let 
mut pGroups = vec![SGroup::zero(); iNrMaxGroups]; - let mut piGroupTrianglesBuffer = vec![0; iNrTrianglesIn * 3]; + let mut pGroups = vec![SGroup::zero(); iNrMaxGroups]; + let mut piGroupTrianglesBuffer = vec![0; iNrTrianglesIn * 3]; - iNrActiveGroups = Build4RuleGroups( - pTriInfos.as_mut_ptr(), - pGroups.as_mut_ptr(), - piGroupTrianglesBuffer.as_mut_ptr(), - piTriListIn.as_ptr(), - iNrTrianglesIn as i32, - ); + iNrActiveGroups = Build4RuleGroups( + pTriInfos.as_mut_ptr(), + pGroups.as_mut_ptr(), + piGroupTrianglesBuffer.as_mut_ptr(), + piTriListIn.as_ptr(), + iNrTrianglesIn as i32, + ); - let mut psTspace = vec![ - STSpace { - vOs: Vec3::new(1.0, 0.0, 0.0), - fMagS: 1.0, - vOt: Vec3::new(0.0, 1.0, 0.0), - fMagT: 1.0, - ..STSpace::zero() - }; - iNrTSPaces - ]; + let mut psTspace = vec![ + STSpace { + vOs: Vec3::new(1.0, 0.0, 0.0), + fMagS: 1.0, + vOt: Vec3::new(0.0, 1.0, 0.0), + fMagT: 1.0, + ..STSpace::zero() + }; + iNrTSPaces + ]; - bRes = GenerateTSpaces( - &mut psTspace, - pTriInfos.as_ptr(), - pGroups.as_ptr(), - iNrActiveGroups, - piTriListIn.as_ptr(), - fThresCos, - geometry, - ); - if !bRes { - return false; - } - DegenEpilogue( - psTspace.as_mut_ptr(), - pTriInfos.as_mut_ptr(), - piTriListIn.as_mut_ptr(), - geometry, - iNrTrianglesIn as i32, - iTotTris as i32, - ); - index = 0; - f = 0; - while f < iNrFaces { - let verts_0 = geometry.num_vertices_of_face(f); - if !(verts_0 != 3 && verts_0 != 4) { - i = 0; - while i < verts_0 { - let mut pTSpace: *const STSpace = &mut psTspace[index] as *mut STSpace; - let mut tang = Vec3::new((*pTSpace).vOs.x, (*pTSpace).vOs.y, (*pTSpace).vOs.z); - let mut bitang = - Vec3::new((*pTSpace).vOt.x, (*pTSpace).vOt.y, (*pTSpace).vOt.z); - geometry.set_tangent( - tang.into(), - bitang.into(), - (*pTSpace).fMagS, - (*pTSpace).fMagT, - (*pTSpace).bOrient, - f, - i, - ); - index += 1; - i += 1 - } + bRes = GenerateTSpaces( + &mut psTspace, + pTriInfos.as_ptr(), + pGroups.as_ptr(), + iNrActiveGroups, + piTriListIn.as_ptr(), + fThresCos, + geometry, + ); + if !bRes { + return false; + } + DegenEpilogue( + psTspace.as_mut_ptr(), + pTriInfos.as_mut_ptr(), + piTriListIn.as_mut_ptr(), + geometry, + iNrTrianglesIn as i32, + iTotTris as i32, + ); + index = 0; + f = 0; + while f < iNrFaces { + let verts_0 = geometry.num_vertices_of_face(f); + if !(verts_0 != 3 && verts_0 != 4) { + i = 0; + while i < verts_0 { + let mut pTSpace: *const STSpace = &mut psTspace[index] as *mut STSpace; + let mut tang = Vec3::new((*pTSpace).vOs.x, (*pTSpace).vOs.y, (*pTSpace).vOs.z); + let mut bitang = Vec3::new((*pTSpace).vOt.x, (*pTSpace).vOt.y, (*pTSpace).vOt.z); + geometry.set_tangent( + tang.into(), + bitang.into(), + (*pTSpace).fMagS, + (*pTSpace).fMagT, + (*pTSpace).bOrient, + f, + i, + ); + index += 1; + i += 1 } - f += 1 } + f += 1 } return true; @@ -352,88 +350,85 @@ unsafe fn DegenEpilogue( let mut t: i32 = 0i32; let mut i: i32 = 0i32; t = iNrTrianglesIn; - unsafe { - while t < iTotTris { - let bSkip: bool = if (*pTriInfos.offset(t as isize)).iFlag & 2i32 != 0i32 { - true - } else { - false - }; - if !bSkip { - i = 0i32; - while i < 3i32 { - let index1: i32 = *piTriListIn.offset((t * 3i32 + i) as isize); - let mut bNotFound: bool = true; - let mut j: i32 = 0i32; - while bNotFound && j < 3i32 * iNrTrianglesIn { - let index2: i32 = *piTriListIn.offset(j as isize); - if index1 == index2 { - bNotFound = false - } else { - j += 1 - } - } - if !bNotFound { - let iTri: i32 = j / 3i32; - let iVert: i32 = j % 3i32; - let iSrcVert: i32 = - (*pTriInfos.offset(iTri as 
isize)).vert_num[iVert as usize] as i32; - let iSrcOffs: i32 = (*pTriInfos.offset(iTri as isize)).iTSpacesOffs; - let iDstVert: i32 = - (*pTriInfos.offset(t as isize)).vert_num[i as usize] as i32; - let iDstOffs: i32 = (*pTriInfos.offset(t as isize)).iTSpacesOffs; - *psTspace.offset((iDstOffs + iDstVert) as isize) = - *psTspace.offset((iSrcOffs + iSrcVert) as isize) + while t < iTotTris { + let bSkip: bool = if (*pTriInfos.offset(t as isize)).iFlag & 2i32 != 0i32 { + true + } else { + false + }; + if !bSkip { + i = 0i32; + while i < 3i32 { + let index1: i32 = *piTriListIn.offset((t * 3i32 + i) as isize); + let mut bNotFound: bool = true; + let mut j: i32 = 0i32; + while bNotFound && j < 3i32 * iNrTrianglesIn { + let index2: i32 = *piTriListIn.offset(j as isize); + if index1 == index2 { + bNotFound = false + } else { + j += 1 } - i += 1 } + if !bNotFound { + let iTri: i32 = j / 3i32; + let iVert: i32 = j % 3i32; + let iSrcVert: i32 = + (*pTriInfos.offset(iTri as isize)).vert_num[iVert as usize] as i32; + let iSrcOffs: i32 = (*pTriInfos.offset(iTri as isize)).iTSpacesOffs; + let iDstVert: i32 = (*pTriInfos.offset(t as isize)).vert_num[i as usize] as i32; + let iDstOffs: i32 = (*pTriInfos.offset(t as isize)).iTSpacesOffs; + *psTspace.offset((iDstOffs + iDstVert) as isize) = + *psTspace.offset((iSrcOffs + iSrcVert) as isize) + } + i += 1 } - t += 1 } - t = 0i32; - while t < iNrTrianglesIn { - if (*pTriInfos.offset(t as isize)).iFlag & 2i32 != 0i32 { - let mut vDstP = Vec3::new(0.0, 0.0, 0.0); - let mut iOrgF: i32 = -1i32; - let mut i_0: i32 = 0i32; - let mut bNotFound_0: bool = false; - let mut pV: *mut u8 = (*pTriInfos.offset(t as isize)).vert_num.as_mut_ptr(); - let mut iFlag: i32 = 1i32 << *pV.offset(0isize) as i32 - | 1i32 << *pV.offset(1isize) as i32 - | 1i32 << *pV.offset(2isize) as i32; - let mut iMissingIndex: i32 = 0i32; - if iFlag & 2i32 == 0i32 { - iMissingIndex = 1i32 - } else if iFlag & 4i32 == 0i32 { - iMissingIndex = 2i32 - } else if iFlag & 8i32 == 0i32 { - iMissingIndex = 3i32 - } - iOrgF = (*pTriInfos.offset(t as isize)).iOrgFaceNumber; - vDstP = get_position( + t += 1 + } + t = 0i32; + while t < iNrTrianglesIn { + if (*pTriInfos.offset(t as isize)).iFlag & 2i32 != 0i32 { + let mut vDstP = Vec3::new(0.0, 0.0, 0.0); + let mut iOrgF: i32 = -1i32; + let mut i_0: i32 = 0i32; + let mut bNotFound_0: bool = false; + let mut pV: *mut u8 = (*pTriInfos.offset(t as isize)).vert_num.as_mut_ptr(); + let mut iFlag: i32 = 1i32 << *pV.offset(0isize) as i32 + | 1i32 << *pV.offset(1isize) as i32 + | 1i32 << *pV.offset(2isize) as i32; + let mut iMissingIndex: i32 = 0i32; + if iFlag & 2i32 == 0i32 { + iMissingIndex = 1i32 + } else if iFlag & 4i32 == 0i32 { + iMissingIndex = 2i32 + } else if iFlag & 8i32 == 0i32 { + iMissingIndex = 3i32 + } + iOrgF = (*pTriInfos.offset(t as isize)).iOrgFaceNumber; + vDstP = get_position( + geometry, + face_vert_to_index(iOrgF as usize, iMissingIndex as usize), + ); + bNotFound_0 = true; + i_0 = 0i32; + while bNotFound_0 && i_0 < 3i32 { + let iVert_0: i32 = *pV.offset(i_0 as isize) as i32; + let vSrcP = get_position( geometry, - face_vert_to_index(iOrgF as usize, iMissingIndex as usize), + face_vert_to_index(iOrgF as usize, iVert_0 as usize), ); - bNotFound_0 = true; - i_0 = 0i32; - while bNotFound_0 && i_0 < 3i32 { - let iVert_0: i32 = *pV.offset(i_0 as isize) as i32; - let vSrcP = get_position( - geometry, - face_vert_to_index(iOrgF as usize, iVert_0 as usize), - ); - if vSrcP == vDstP { - let iOffs: i32 = (*pTriInfos.offset(t as isize)).iTSpacesOffs; - 
*psTspace.offset((iOffs + iMissingIndex) as isize) = - *psTspace.offset((iOffs + iVert_0) as isize); - bNotFound_0 = false - } else { - i_0 += 1 - } + if vSrcP == vDstP { + let iOffs: i32 = (*pTriInfos.offset(t as isize)).iTSpacesOffs; + *psTspace.offset((iOffs + iMissingIndex) as isize) = + *psTspace.offset((iOffs + iVert_0) as isize); + bNotFound_0 = false + } else { + i_0 += 1 } } - t += 1 } + t += 1 } } @@ -451,194 +446,188 @@ unsafe fn GenerateTSpaces( let mut g: i32 = 0i32; let mut i: i32 = 0i32; g = 0i32; - unsafe { - while g < iNrActiveGroups { - if iMaxNrFaces < (*pGroups.offset(g as isize)).iNrFaces as usize { - iMaxNrFaces = (*pGroups.offset(g as isize)).iNrFaces as usize - } - g += 1 - } - if iMaxNrFaces == 0 { - return true; + while g < iNrActiveGroups { + if iMaxNrFaces < (*pGroups.offset(g as isize)).iNrFaces as usize { + iMaxNrFaces = (*pGroups.offset(g as isize)).iNrFaces as usize } + g += 1 + } + if iMaxNrFaces == 0 { + return true; + } - let mut pSubGroupTspace = vec![STSpace::zero(); iMaxNrFaces]; - let mut pUniSubGroups = vec![SSubGroup::zero(); iMaxNrFaces]; - let mut pTmpMembers = vec![0i32; iMaxNrFaces]; + let mut pSubGroupTspace = vec![STSpace::zero(); iMaxNrFaces]; + let mut pUniSubGroups = vec![SSubGroup::zero(); iMaxNrFaces]; + let mut pTmpMembers = vec![0i32; iMaxNrFaces]; - iUniqueTspaces = 0; - g = 0i32; - while g < iNrActiveGroups { - let mut pGroup: *const SGroup = &*pGroups.offset(g as isize) as *const SGroup; - let mut iUniqueSubGroups = 0; - let mut s = 0; - i = 0i32; - while i < (*pGroup).iNrFaces { - let f: i32 = *(*pGroup).pFaceIndices.offset(i as isize); - let mut index: i32 = -1i32; - let mut iVertIndex: i32 = -1i32; - let mut iOF_1: i32 = -1i32; - let mut iMembers: usize = 0; - let mut j: i32 = 0i32; - let mut l: usize = 0; - let mut tmp_group: SSubGroup = SSubGroup { - iNrFaces: 0, - pTriMembers: Vec::new(), - }; - let mut bFound: bool = false; - let mut n = Vec3::new(0.0, 0.0, 0.0); - let mut vOs = Vec3::new(0.0, 0.0, 0.0); - let mut vOt = Vec3::new(0.0, 0.0, 0.0); - if (*pTriInfos.offset(f as isize)).AssignedGroup[0usize] == pGroup as *mut SGroup { - index = 0i32 - } else if (*pTriInfos.offset(f as isize)).AssignedGroup[1usize] - == pGroup as *mut SGroup - { - index = 1i32 - } else if (*pTriInfos.offset(f as isize)).AssignedGroup[2usize] - == pGroup as *mut SGroup - { - index = 2i32 - } - iVertIndex = *piTriListIn.offset((f * 3i32 + index) as isize); - n = get_normal(geometry, iVertIndex as usize); - let mut vOs = (*pTriInfos.offset(f as isize)).vOs - - (n.dot((*pTriInfos.offset(f as isize)).vOs) * n); - let mut vOt = (*pTriInfos.offset(f as isize)).vOt - - (n.dot((*pTriInfos.offset(f as isize)).vOt) * n); - if VNotZero(vOs) { - vOs = Normalize(vOs) - } - if VNotZero(vOt) { - vOt = Normalize(vOt) - } - iOF_1 = (*pTriInfos.offset(f as isize)).iOrgFaceNumber; - iMembers = 0; - j = 0i32; - while j < (*pGroup).iNrFaces { - let t: i32 = *(*pGroup).pFaceIndices.offset(j as isize); - let iOF_2: i32 = (*pTriInfos.offset(t as isize)).iOrgFaceNumber; - let mut vOs2 = (*pTriInfos.offset(t as isize)).vOs - - (n.dot((*pTriInfos.offset(t as isize)).vOs) * n); - let mut vOt2 = (*pTriInfos.offset(t as isize)).vOt - - (n.dot((*pTriInfos.offset(t as isize)).vOt) * n); - if VNotZero(vOs2) { - vOs2 = Normalize(vOs2) - } - if VNotZero(vOt2) { - vOt2 = Normalize(vOt2) - } - let bAny: bool = if ((*pTriInfos.offset(f as isize)).iFlag - | (*pTriInfos.offset(t as isize)).iFlag) - & 4i32 - != 0i32 - { - true - } else { - false - }; - let bSameOrgFace: bool = iOF_1 == 
iOF_2; - let fCosS: f32 = vOs.dot(vOs2); - let fCosT: f32 = vOt.dot(vOt2); - if bAny || bSameOrgFace || fCosS > fThresCos && fCosT > fThresCos { - let fresh0 = iMembers; - iMembers = iMembers + 1; - pTmpMembers[fresh0] = t - } - j += 1 + iUniqueTspaces = 0; + g = 0i32; + while g < iNrActiveGroups { + let mut pGroup: *const SGroup = &*pGroups.offset(g as isize) as *const SGroup; + let mut iUniqueSubGroups = 0; + let mut s = 0; + i = 0i32; + while i < (*pGroup).iNrFaces { + let f: i32 = *(*pGroup).pFaceIndices.offset(i as isize); + let mut index: i32 = -1i32; + let mut iVertIndex: i32 = -1i32; + let mut iOF_1: i32 = -1i32; + let mut iMembers: usize = 0; + let mut j: i32 = 0i32; + let mut l: usize = 0; + let mut tmp_group: SSubGroup = SSubGroup { + iNrFaces: 0, + pTriMembers: Vec::new(), + }; + let mut bFound: bool = false; + let mut n = Vec3::new(0.0, 0.0, 0.0); + let mut vOs = Vec3::new(0.0, 0.0, 0.0); + let mut vOt = Vec3::new(0.0, 0.0, 0.0); + if (*pTriInfos.offset(f as isize)).AssignedGroup[0usize] == pGroup as *mut SGroup { + index = 0i32 + } else if (*pTriInfos.offset(f as isize)).AssignedGroup[1usize] == pGroup as *mut SGroup + { + index = 1i32 + } else if (*pTriInfos.offset(f as isize)).AssignedGroup[2usize] == pGroup as *mut SGroup + { + index = 2i32 + } + iVertIndex = *piTriListIn.offset((f * 3i32 + index) as isize); + n = get_normal(geometry, iVertIndex as usize); + let mut vOs = (*pTriInfos.offset(f as isize)).vOs + - (n.dot((*pTriInfos.offset(f as isize)).vOs) * n); + let mut vOt = (*pTriInfos.offset(f as isize)).vOt + - (n.dot((*pTriInfos.offset(f as isize)).vOt) * n); + if VNotZero(vOs) { + vOs = Normalize(vOs) + } + if VNotZero(vOt) { + vOt = Normalize(vOt) + } + iOF_1 = (*pTriInfos.offset(f as isize)).iOrgFaceNumber; + iMembers = 0; + j = 0i32; + while j < (*pGroup).iNrFaces { + let t: i32 = *(*pGroup).pFaceIndices.offset(j as isize); + let iOF_2: i32 = (*pTriInfos.offset(t as isize)).iOrgFaceNumber; + let mut vOs2 = (*pTriInfos.offset(t as isize)).vOs + - (n.dot((*pTriInfos.offset(t as isize)).vOs) * n); + let mut vOt2 = (*pTriInfos.offset(t as isize)).vOt + - (n.dot((*pTriInfos.offset(t as isize)).vOt) * n); + if VNotZero(vOs2) { + vOs2 = Normalize(vOs2) } - if iMembers > 1 { - let mut uSeed: u32 = 39871946i32 as u32; - QuickSort(pTmpMembers.as_mut_ptr(), 0i32, (iMembers - 1) as i32, uSeed); + if VNotZero(vOt2) { + vOt2 = Normalize(vOt2) } - tmp_group.iNrFaces = iMembers as i32; - tmp_group.pTriMembers = pTmpMembers.clone(); - bFound = false; - l = 0; - while l < iUniqueSubGroups && !bFound { - bFound = CompareSubGroups(&mut tmp_group, &mut pUniSubGroups[l]); - if !bFound { - l += 1 - } + let bAny: bool = if ((*pTriInfos.offset(f as isize)).iFlag + | (*pTriInfos.offset(t as isize)).iFlag) + & 4i32 + != 0i32 + { + true + } else { + false + }; + let bSameOrgFace: bool = iOF_1 == iOF_2; + let fCosS: f32 = vOs.dot(vOs2); + let fCosT: f32 = vOt.dot(vOt2); + if bAny || bSameOrgFace || fCosS > fThresCos && fCosT > fThresCos { + let fresh0 = iMembers; + iMembers = iMembers + 1; + pTmpMembers[fresh0] = t } + j += 1 + } + if iMembers > 1 { + let mut uSeed: u32 = 39871946i32 as u32; + QuickSort(pTmpMembers.as_mut_ptr(), 0i32, (iMembers - 1) as i32, uSeed); + } + tmp_group.iNrFaces = iMembers as i32; + tmp_group.pTriMembers = pTmpMembers.clone(); + bFound = false; + l = 0; + while l < iUniqueSubGroups && !bFound { + bFound = CompareSubGroups(&mut tmp_group, &mut pUniSubGroups[l]); if !bFound { - pUniSubGroups[iUniqueSubGroups].iNrFaces = iMembers as i32; - 
pUniSubGroups[iUniqueSubGroups].pTriMembers = tmp_group.pTriMembers.clone(); - - pSubGroupTspace[iUniqueSubGroups] = EvalTspace( - tmp_group.pTriMembers.as_mut_ptr(), - iMembers as i32, - piTriListIn, - pTriInfos, - geometry, - (*pGroup).iVertexRepresentitive, - ); - iUniqueSubGroups += 1 - } - let iOffs = (*pTriInfos.offset(f as isize)).iTSpacesOffs as usize; - let iVert = (*pTriInfos.offset(f as isize)).vert_num[index as usize] as usize; - let mut pTS_out: *mut STSpace = &mut psTspace[iOffs + iVert] as *mut STSpace; - if (*pTS_out).iCounter == 1i32 { - *pTS_out = AvgTSpace(pTS_out, &mut pSubGroupTspace[l]); - (*pTS_out).iCounter = 2i32; - (*pTS_out).bOrient = (*pGroup).bOrientPreservering - } else { - *pTS_out = pSubGroupTspace[l]; - (*pTS_out).iCounter = 1i32; - (*pTS_out).bOrient = (*pGroup).bOrientPreservering + l += 1 } - i += 1 } - iUniqueTspaces += iUniqueSubGroups; - g += 1 + if !bFound { + pUniSubGroups[iUniqueSubGroups].iNrFaces = iMembers as i32; + pUniSubGroups[iUniqueSubGroups].pTriMembers = tmp_group.pTriMembers.clone(); + + pSubGroupTspace[iUniqueSubGroups] = EvalTspace( + tmp_group.pTriMembers.as_mut_ptr(), + iMembers as i32, + piTriListIn, + pTriInfos, + geometry, + (*pGroup).iVertexRepresentitive, + ); + iUniqueSubGroups += 1 + } + let iOffs = (*pTriInfos.offset(f as isize)).iTSpacesOffs as usize; + let iVert = (*pTriInfos.offset(f as isize)).vert_num[index as usize] as usize; + let mut pTS_out: *mut STSpace = &mut psTspace[iOffs + iVert] as *mut STSpace; + if (*pTS_out).iCounter == 1i32 { + *pTS_out = AvgTSpace(pTS_out, &mut pSubGroupTspace[l]); + (*pTS_out).iCounter = 2i32; + (*pTS_out).bOrient = (*pGroup).bOrientPreservering + } else { + *pTS_out = pSubGroupTspace[l]; + (*pTS_out).iCounter = 1i32; + (*pTS_out).bOrient = (*pGroup).bOrientPreservering + } + i += 1 } + iUniqueTspaces += iUniqueSubGroups; + g += 1 } return true; } unsafe fn AvgTSpace(mut pTS0: *const STSpace, mut pTS1: *const STSpace) -> STSpace { - unsafe { - let mut ts_res: STSpace = STSpace { - vOs: Vec3::new(0.0, 0.0, 0.0), - fMagS: 0., - vOt: Vec3::new(0.0, 0.0, 0.0), - fMagT: 0., - iCounter: 0, - bOrient: false, - }; - if (*pTS0).fMagS == (*pTS1).fMagS - && (*pTS0).fMagT == (*pTS1).fMagT - && (*pTS0).vOs == (*pTS1).vOs - && (*pTS0).vOt == (*pTS1).vOt - { - ts_res.fMagS = (*pTS0).fMagS; - ts_res.fMagT = (*pTS0).fMagT; - ts_res.vOs = (*pTS0).vOs; - ts_res.vOt = (*pTS0).vOt - } else { - ts_res.fMagS = 0.5f32 * ((*pTS0).fMagS + (*pTS1).fMagS); - ts_res.fMagT = 0.5f32 * ((*pTS0).fMagT + (*pTS1).fMagT); - ts_res.vOs = (*pTS0).vOs + (*pTS1).vOs; - ts_res.vOt = (*pTS0).vOt + (*pTS1).vOt; - if VNotZero(ts_res.vOs) { - ts_res.vOs = Normalize(ts_res.vOs) - } - if VNotZero(ts_res.vOt) { - ts_res.vOt = Normalize(ts_res.vOt) - } + let mut ts_res: STSpace = STSpace { + vOs: Vec3::new(0.0, 0.0, 0.0), + fMagS: 0., + vOt: Vec3::new(0.0, 0.0, 0.0), + fMagT: 0., + iCounter: 0, + bOrient: false, + }; + if (*pTS0).fMagS == (*pTS1).fMagS + && (*pTS0).fMagT == (*pTS1).fMagT + && (*pTS0).vOs == (*pTS1).vOs + && (*pTS0).vOt == (*pTS1).vOt + { + ts_res.fMagS = (*pTS0).fMagS; + ts_res.fMagT = (*pTS0).fMagT; + ts_res.vOs = (*pTS0).vOs; + ts_res.vOt = (*pTS0).vOt + } else { + ts_res.fMagS = 0.5f32 * ((*pTS0).fMagS + (*pTS1).fMagS); + ts_res.fMagT = 0.5f32 * ((*pTS0).fMagT + (*pTS1).fMagT); + ts_res.vOs = (*pTS0).vOs + (*pTS1).vOs; + ts_res.vOt = (*pTS0).vOt + (*pTS1).vOt; + if VNotZero(ts_res.vOs) { + ts_res.vOs = Normalize(ts_res.vOs) + } + if VNotZero(ts_res.vOt) { + ts_res.vOt = Normalize(ts_res.vOt) } - return ts_res; } + 
return ts_res; } -fn Normalize(v: Vec3) -> Vec3 { +unsafe fn Normalize(v: Vec3) -> Vec3 { return (1.0 / v.length()) * v; } -fn VNotZero(v: Vec3) -> bool { +unsafe fn VNotZero(v: Vec3) -> bool { NotZero(v.x) || NotZero(v.y) || NotZero(v.z) } -fn NotZero(fX: f32) -> bool { +unsafe fn NotZero(fX: f32) -> bool { fX.abs() > 1.17549435e-38f32 } @@ -670,81 +659,77 @@ unsafe fn EvalTspace( res.fMagT = 0i32 as f32; face = 0i32; while face < iFaces { - unsafe { - let f: i32 = *face_indices.offset(face as isize); - if (*pTriInfos.offset(f as isize)).iFlag & 4i32 == 0i32 { - let mut n = Vec3::new(0.0, 0.0, 0.0); - let mut vOs = Vec3::new(0.0, 0.0, 0.0); - let mut vOt = Vec3::new(0.0, 0.0, 0.0); - let mut p0 = Vec3::new(0.0, 0.0, 0.0); - let mut p1 = Vec3::new(0.0, 0.0, 0.0); - let mut p2 = Vec3::new(0.0, 0.0, 0.0); - let mut v1 = Vec3::new(0.0, 0.0, 0.0); - let mut v2 = Vec3::new(0.0, 0.0, 0.0); - let mut fCos: f32 = 0.; - let mut fAngle: f32 = 0.; - let mut fMagS: f32 = 0.; - let mut fMagT: f32 = 0.; - let mut i: i32 = -1i32; - let mut index: i32 = -1i32; - let mut i0: i32 = -1i32; - let mut i1: i32 = -1i32; - let mut i2: i32 = -1i32; - if *piTriListIn.offset((3i32 * f + 0i32) as isize) == iVertexRepresentitive { - i = 0i32 - } else if *piTriListIn.offset((3i32 * f + 1i32) as isize) == iVertexRepresentitive { - i = 1i32 - } else if *piTriListIn.offset((3i32 * f + 2i32) as isize) == iVertexRepresentitive { - i = 2i32 - } - index = *piTriListIn.offset((3i32 * f + i) as isize); - n = get_normal(geometry, index as usize); - let mut vOs = (*pTriInfos.offset(f as isize)).vOs - - (n.dot((*pTriInfos.offset(f as isize)).vOs) * n); - let mut vOt = (*pTriInfos.offset(f as isize)).vOt - - (n.dot((*pTriInfos.offset(f as isize)).vOt) * n); - if VNotZero(vOs) { - vOs = Normalize(vOs) - } - if VNotZero(vOt) { - vOt = Normalize(vOt) - } - i2 = *piTriListIn - .offset((3i32 * f + if i < 2i32 { i + 1i32 } else { 0i32 }) as isize); - i1 = *piTriListIn.offset((3i32 * f + i) as isize); - i0 = *piTriListIn - .offset((3i32 * f + if i > 0i32 { i - 1i32 } else { 2i32 }) as isize); - p0 = get_position(geometry, i0 as usize); - p1 = get_position(geometry, i1 as usize); - p2 = get_position(geometry, i2 as usize); - v1 = p0 - p1; - v2 = p2 - p1; - let mut v1 = v1 - (n.dot(v1) * n); - if VNotZero(v1) { - v1 = Normalize(v1) - } - let mut v2 = v2 - (n.dot(v2) * n); - if VNotZero(v2) { - v2 = Normalize(v2) - } - let fCos = v1.dot(v2); - - let fCos = if fCos > 1i32 as f32 { - 1i32 as f32 - } else if fCos < -1i32 as f32 { - -1i32 as f32 - } else { - fCos - }; - fAngle = (fCos as f64).acos() as f32; - fMagS = (*pTriInfos.offset(f as isize)).fMagS; - fMagT = (*pTriInfos.offset(f as isize)).fMagT; - res.vOs = res.vOs + (fAngle * vOs); - res.vOt = res.vOt + (fAngle * vOt); - res.fMagS += fAngle * fMagS; - res.fMagT += fAngle * fMagT; - fAngleSum += fAngle + let f: i32 = *face_indices.offset(face as isize); + if (*pTriInfos.offset(f as isize)).iFlag & 4i32 == 0i32 { + let mut n = Vec3::new(0.0, 0.0, 0.0); + let mut vOs = Vec3::new(0.0, 0.0, 0.0); + let mut vOt = Vec3::new(0.0, 0.0, 0.0); + let mut p0 = Vec3::new(0.0, 0.0, 0.0); + let mut p1 = Vec3::new(0.0, 0.0, 0.0); + let mut p2 = Vec3::new(0.0, 0.0, 0.0); + let mut v1 = Vec3::new(0.0, 0.0, 0.0); + let mut v2 = Vec3::new(0.0, 0.0, 0.0); + let mut fCos: f32 = 0.; + let mut fAngle: f32 = 0.; + let mut fMagS: f32 = 0.; + let mut fMagT: f32 = 0.; + let mut i: i32 = -1i32; + let mut index: i32 = -1i32; + let mut i0: i32 = -1i32; + let mut i1: i32 = -1i32; + let mut i2: i32 = -1i32; + if 
*piTriListIn.offset((3i32 * f + 0i32) as isize) == iVertexRepresentitive { + i = 0i32 + } else if *piTriListIn.offset((3i32 * f + 1i32) as isize) == iVertexRepresentitive { + i = 1i32 + } else if *piTriListIn.offset((3i32 * f + 2i32) as isize) == iVertexRepresentitive { + i = 2i32 + } + index = *piTriListIn.offset((3i32 * f + i) as isize); + n = get_normal(geometry, index as usize); + let mut vOs = (*pTriInfos.offset(f as isize)).vOs + - (n.dot((*pTriInfos.offset(f as isize)).vOs) * n); + let mut vOt = (*pTriInfos.offset(f as isize)).vOt + - (n.dot((*pTriInfos.offset(f as isize)).vOt) * n); + if VNotZero(vOs) { + vOs = Normalize(vOs) + } + if VNotZero(vOt) { + vOt = Normalize(vOt) } + i2 = *piTriListIn.offset((3i32 * f + if i < 2i32 { i + 1i32 } else { 0i32 }) as isize); + i1 = *piTriListIn.offset((3i32 * f + i) as isize); + i0 = *piTriListIn.offset((3i32 * f + if i > 0i32 { i - 1i32 } else { 2i32 }) as isize); + p0 = get_position(geometry, i0 as usize); + p1 = get_position(geometry, i1 as usize); + p2 = get_position(geometry, i2 as usize); + v1 = p0 - p1; + v2 = p2 - p1; + let mut v1 = v1 - (n.dot(v1) * n); + if VNotZero(v1) { + v1 = Normalize(v1) + } + let mut v2 = v2 - (n.dot(v2) * n); + if VNotZero(v2) { + v2 = Normalize(v2) + } + let fCos = v1.dot(v2); + + let fCos = if fCos > 1i32 as f32 { + 1i32 as f32 + } else if fCos < -1i32 as f32 { + -1i32 as f32 + } else { + fCos + }; + fAngle = (fCos as f64).acos() as f32; + fMagS = (*pTriInfos.offset(f as isize)).fMagS; + fMagT = (*pTriInfos.offset(f as isize)).fMagT; + res.vOs = res.vOs + (fAngle * vOs); + res.vOt = res.vOt + (fAngle * vOt); + res.fMagS += fAngle * fMagS; + res.fMagT += fAngle * fMagT; + fAngleSum += fAngle } face += 1 } @@ -764,19 +749,17 @@ unsafe fn EvalTspace( unsafe fn CompareSubGroups(mut pg1: *const SSubGroup, mut pg2: *const SSubGroup) -> bool { let mut bStillSame: bool = true; let mut i = 0; - unsafe { - if (*pg1).iNrFaces != (*pg2).iNrFaces { - return false; - } - while i < (*pg1).iNrFaces as usize && bStillSame { - bStillSame = if (*pg1).pTriMembers[i] == (*pg2).pTriMembers[i] { - true - } else { - false - }; - if bStillSame { - i += 1 - } + if (*pg1).iNrFaces != (*pg2).iNrFaces { + return false; + } + while i < (*pg1).iNrFaces as usize && bStillSame { + bStillSame = if (*pg1).pTriMembers[i] == (*pg2).pTriMembers[i] { + true + } else { + false + }; + if bStillSame { + i += 1 } } return bStillSame; @@ -799,33 +782,31 @@ unsafe fn QuickSort(mut pSortBuffer: *mut i32, mut iLeft: i32, mut iRight: i32, iR = iRight; n = iR - iL + 1i32; index = uSeed.wrapping_rem(n as u32) as i32; - unsafe { - iMid = *pSortBuffer.offset((index + iL) as isize); - loop { - while *pSortBuffer.offset(iL as isize) < iMid { - iL += 1 - } - while *pSortBuffer.offset(iR as isize) > iMid { - iR -= 1 - } - if iL <= iR { - iTmp = *pSortBuffer.offset(iL as isize); - *pSortBuffer.offset(iL as isize) = *pSortBuffer.offset(iR as isize); - *pSortBuffer.offset(iR as isize) = iTmp; - iL += 1; - iR -= 1 - } - if !(iL <= iR) { - break; - } + iMid = *pSortBuffer.offset((index + iL) as isize); + loop { + while *pSortBuffer.offset(iL as isize) < iMid { + iL += 1 } - if iLeft < iR { - QuickSort(pSortBuffer, iLeft, iR, uSeed); + while *pSortBuffer.offset(iR as isize) > iMid { + iR -= 1 + } + if iL <= iR { + iTmp = *pSortBuffer.offset(iL as isize); + *pSortBuffer.offset(iL as isize) = *pSortBuffer.offset(iR as isize); + *pSortBuffer.offset(iR as isize) = iTmp; + iL += 1; + iR -= 1 + } + if !(iL <= iR) { + break; } - if iL < iRight { - QuickSort(pSortBuffer, iL, 
iRight, uSeed); - }; } + if iLeft < iR { + QuickSort(pSortBuffer, iLeft, iR, uSeed); + } + if iL < iRight { + QuickSort(pSortBuffer, iL, iRight, uSeed); + }; } unsafe fn Build4RuleGroups( mut pTriInfos: *mut STriInfo, @@ -843,66 +824,64 @@ unsafe fn Build4RuleGroups( while f < iNrTrianglesIn { i = 0i32; while i < 3i32 { - unsafe { - if (*pTriInfos.offset(f as isize)).iFlag & 4i32 == 0i32 - && (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize].is_null() - { - let mut bOrPre: bool = false; - let mut neigh_indexL: i32 = 0; - let mut neigh_indexR: i32 = 0; - let vert_index: i32 = *piTriListIn.offset((f * 3i32 + i) as isize); - let ref mut fresh2 = (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]; - *fresh2 = &mut *pGroups.offset(iNrActiveGroups as isize) as *mut SGroup; - (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]) - .iVertexRepresentitive = vert_index; - (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]) - .bOrientPreservering = (*pTriInfos.offset(f as isize)).iFlag & 8i32 != 0i32; - (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).iNrFaces = 0i32; - let ref mut fresh3 = - (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).pFaceIndices; - *fresh3 = &mut *piGroupTrianglesBuffer.offset(iOffset as isize) as *mut i32; - iNrActiveGroups += 1; - AddTriToGroup((*pTriInfos.offset(f as isize)).AssignedGroup[i as usize], f); - bOrPre = if (*pTriInfos.offset(f as isize)).iFlag & 8i32 != 0i32 { - true - } else { - false - }; - neigh_indexL = (*pTriInfos.offset(f as isize)).FaceNeighbors[i as usize]; - neigh_indexR = (*pTriInfos.offset(f as isize)).FaceNeighbors - [(if i > 0i32 { i - 1i32 } else { 2i32 }) as usize]; - if neigh_indexL >= 0i32 { - let bAnswer: bool = AssignRecur( - piTriListIn, - pTriInfos, - neigh_indexL, - (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize], - ); - let bOrPre2: bool = - if (*pTriInfos.offset(neigh_indexL as isize)).iFlag & 8i32 != 0i32 { - true - } else { - false - }; - let bDiff: bool = if bOrPre != bOrPre2 { true } else { false }; - } - if neigh_indexR >= 0i32 { - let bAnswer_0: bool = AssignRecur( - piTriListIn, - pTriInfos, - neigh_indexR, - (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize], - ); - let bOrPre2_0: bool = - if (*pTriInfos.offset(neigh_indexR as isize)).iFlag & 8i32 != 0i32 { - true - } else { - false - }; - let bDiff_0: bool = if bOrPre != bOrPre2_0 { true } else { false }; - } - iOffset += (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).iNrFaces + if (*pTriInfos.offset(f as isize)).iFlag & 4i32 == 0i32 + && (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize].is_null() + { + let mut bOrPre: bool = false; + let mut neigh_indexL: i32 = 0; + let mut neigh_indexR: i32 = 0; + let vert_index: i32 = *piTriListIn.offset((f * 3i32 + i) as isize); + let ref mut fresh2 = (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]; + *fresh2 = &mut *pGroups.offset(iNrActiveGroups as isize) as *mut SGroup; + (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]) + .iVertexRepresentitive = vert_index; + (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).bOrientPreservering = + (*pTriInfos.offset(f as isize)).iFlag & 8i32 != 0i32; + (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).iNrFaces = 0i32; + let ref mut fresh3 = + (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).pFaceIndices; + *fresh3 = &mut *piGroupTrianglesBuffer.offset(iOffset as isize) as *mut i32; + iNrActiveGroups += 1; + AddTriToGroup((*pTriInfos.offset(f as 
isize)).AssignedGroup[i as usize], f); + bOrPre = if (*pTriInfos.offset(f as isize)).iFlag & 8i32 != 0i32 { + true + } else { + false + }; + neigh_indexL = (*pTriInfos.offset(f as isize)).FaceNeighbors[i as usize]; + neigh_indexR = (*pTriInfos.offset(f as isize)).FaceNeighbors + [(if i > 0i32 { i - 1i32 } else { 2i32 }) as usize]; + if neigh_indexL >= 0i32 { + let bAnswer: bool = AssignRecur( + piTriListIn, + pTriInfos, + neigh_indexL, + (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize], + ); + let bOrPre2: bool = + if (*pTriInfos.offset(neigh_indexL as isize)).iFlag & 8i32 != 0i32 { + true + } else { + false + }; + let bDiff: bool = if bOrPre != bOrPre2 { true } else { false }; + } + if neigh_indexR >= 0i32 { + let bAnswer_0: bool = AssignRecur( + piTriListIn, + pTriInfos, + neigh_indexR, + (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize], + ); + let bOrPre2_0: bool = + if (*pTriInfos.offset(neigh_indexR as isize)).iFlag & 8i32 != 0i32 { + true + } else { + false + }; + let bDiff_0: bool = if bOrPre != bOrPre2_0 { true } else { false }; } + iOffset += (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).iNrFaces } i += 1 } @@ -918,68 +897,64 @@ unsafe fn AssignRecur( iMyTriIndex: i32, mut pGroup: *mut SGroup, ) -> bool { - unsafe { - let mut pMyTriInfo: *mut STriInfo = - &mut *psTriInfos.offset(iMyTriIndex as isize) as *mut STriInfo; - // track down vertex - let iVertRep: i32 = (*pGroup).iVertexRepresentitive; - let mut pVerts: *const i32 = - &*piTriListIn.offset((3i32 * iMyTriIndex + 0i32) as isize) as *const i32; - let mut i: i32 = -1i32; - if *pVerts.offset(0isize) == iVertRep { - i = 0i32 - } else if *pVerts.offset(1isize) == iVertRep { - i = 1i32 - } else if *pVerts.offset(2isize) == iVertRep { - i = 2i32 - } - if (*pMyTriInfo).AssignedGroup[i as usize] == pGroup { - return true; - } else { - if !(*pMyTriInfo).AssignedGroup[i as usize].is_null() { - return false; - } - } - if (*pMyTriInfo).iFlag & 4i32 != 0i32 { - if (*pMyTriInfo).AssignedGroup[0usize].is_null() - && (*pMyTriInfo).AssignedGroup[1usize].is_null() - && (*pMyTriInfo).AssignedGroup[2usize].is_null() - { - (*pMyTriInfo).iFlag &= !8i32; - (*pMyTriInfo).iFlag |= if (*pGroup).bOrientPreservering { - 8i32 - } else { - 0i32 - } - } - } - let bOrient: bool = if (*pMyTriInfo).iFlag & 8i32 != 0i32 { - true - } else { - false - }; - if bOrient != (*pGroup).bOrientPreservering { + let mut pMyTriInfo: *mut STriInfo = + &mut *psTriInfos.offset(iMyTriIndex as isize) as *mut STriInfo; + // track down vertex + let iVertRep: i32 = (*pGroup).iVertexRepresentitive; + let mut pVerts: *const i32 = + &*piTriListIn.offset((3i32 * iMyTriIndex + 0i32) as isize) as *const i32; + let mut i: i32 = -1i32; + if *pVerts.offset(0isize) == iVertRep { + i = 0i32 + } else if *pVerts.offset(1isize) == iVertRep { + i = 1i32 + } else if *pVerts.offset(2isize) == iVertRep { + i = 2i32 + } + if (*pMyTriInfo).AssignedGroup[i as usize] == pGroup { + return true; + } else { + if !(*pMyTriInfo).AssignedGroup[i as usize].is_null() { return false; } - AddTriToGroup(pGroup, iMyTriIndex); - (*pMyTriInfo).AssignedGroup[i as usize] = pGroup; - let neigh_indexL: i32 = (*pMyTriInfo).FaceNeighbors[i as usize]; - let neigh_indexR: i32 = - (*pMyTriInfo).FaceNeighbors[(if i > 0i32 { i - 1i32 } else { 2i32 }) as usize]; - if neigh_indexL >= 0i32 { - AssignRecur(piTriListIn, psTriInfos, neigh_indexL, pGroup); - } - if neigh_indexR >= 0i32 { - AssignRecur(piTriListIn, psTriInfos, neigh_indexR, pGroup); + } + if (*pMyTriInfo).iFlag & 4i32 != 0i32 { + if 
(*pMyTriInfo).AssignedGroup[0usize].is_null() + && (*pMyTriInfo).AssignedGroup[1usize].is_null() + && (*pMyTriInfo).AssignedGroup[2usize].is_null() + { + (*pMyTriInfo).iFlag &= !8i32; + (*pMyTriInfo).iFlag |= if (*pGroup).bOrientPreservering { + 8i32 + } else { + 0i32 + } } } + let bOrient: bool = if (*pMyTriInfo).iFlag & 8i32 != 0i32 { + true + } else { + false + }; + if bOrient != (*pGroup).bOrientPreservering { + return false; + } + AddTriToGroup(pGroup, iMyTriIndex); + (*pMyTriInfo).AssignedGroup[i as usize] = pGroup; + let neigh_indexL: i32 = (*pMyTriInfo).FaceNeighbors[i as usize]; + let neigh_indexR: i32 = + (*pMyTriInfo).FaceNeighbors[(if i > 0i32 { i - 1i32 } else { 2i32 }) as usize]; + if neigh_indexL >= 0i32 { + AssignRecur(piTriListIn, psTriInfos, neigh_indexL, pGroup); + } + if neigh_indexR >= 0i32 { + AssignRecur(piTriListIn, psTriInfos, neigh_indexR, pGroup); + } return true; } unsafe fn AddTriToGroup(mut pGroup: *mut SGroup, iTriIndex: i32) { - unsafe { - *(*pGroup).pFaceIndices.offset((*pGroup).iNrFaces as isize) = iTriIndex; - (*pGroup).iNrFaces += 1; - } + *(*pGroup).pFaceIndices.offset((*pGroup).iNrFaces as isize) = iTriIndex; + (*pGroup).iNrFaces += 1; } unsafe fn InitTriInfo( mut pTriInfos: *mut STriInfo, @@ -994,36 +969,30 @@ unsafe fn InitTriInfo( while f < iNrTrianglesIn { i = 0i32; while i < 3i32 { - unsafe { - (*pTriInfos.offset(f as isize)).FaceNeighbors[i as usize] = -1i32; - let ref mut fresh4 = (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]; - *fresh4 = 0 as *mut SGroup; - (*pTriInfos.offset(f as isize)).vOs.x = 0.0f32; - (*pTriInfos.offset(f as isize)).vOs.y = 0.0f32; - (*pTriInfos.offset(f as isize)).vOs.z = 0.0f32; - (*pTriInfos.offset(f as isize)).vOt.x = 0.0f32; - (*pTriInfos.offset(f as isize)).vOt.y = 0.0f32; - (*pTriInfos.offset(f as isize)).vOt.z = 0.0f32; - (*pTriInfos.offset(f as isize)).fMagS = 0i32 as f32; - (*pTriInfos.offset(f as isize)).fMagT = 0i32 as f32; - (*pTriInfos.offset(f as isize)).iFlag |= 4i32; - } + (*pTriInfos.offset(f as isize)).FaceNeighbors[i as usize] = -1i32; + let ref mut fresh4 = (*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]; + *fresh4 = 0 as *mut SGroup; + (*pTriInfos.offset(f as isize)).vOs.x = 0.0f32; + (*pTriInfos.offset(f as isize)).vOs.y = 0.0f32; + (*pTriInfos.offset(f as isize)).vOs.z = 0.0f32; + (*pTriInfos.offset(f as isize)).vOt.x = 0.0f32; + (*pTriInfos.offset(f as isize)).vOt.y = 0.0f32; + (*pTriInfos.offset(f as isize)).vOt.z = 0.0f32; + (*pTriInfos.offset(f as isize)).fMagS = 0i32 as f32; + (*pTriInfos.offset(f as isize)).fMagT = 0i32 as f32; + (*pTriInfos.offset(f as isize)).iFlag |= 4i32; i += 1 } f += 1 } f = 0; while f < iNrTrianglesIn { - let a = unsafe { *piTriListIn.offset((f * 3 + 0) as isize) } as usize; - let b = unsafe { *piTriListIn.offset((f * 3 + 1) as isize) } as usize; - let c = unsafe { *piTriListIn.offset((f * 3 + 2) as isize) } as usize; - - let v1 = get_position(geometry, a); - let v2 = get_position(geometry, b); - let v3 = get_position(geometry, c); - let t1 = get_tex_coord(geometry, a); - let t2 = get_tex_coord(geometry, b); - let t3 = get_tex_coord(geometry, c); + let v1 = get_position(geometry, *piTriListIn.offset((f * 3 + 0) as isize) as usize); + let v2 = get_position(geometry, *piTriListIn.offset((f * 3 + 1) as isize) as usize); + let v3 = get_position(geometry, *piTriListIn.offset((f * 3 + 2) as isize) as usize); + let t1 = get_tex_coord(geometry, *piTriListIn.offset((f * 3 + 0) as isize) as usize); + let t2 = get_tex_coord(geometry, *piTriListIn.offset((f * 
3 + 1) as isize) as usize); + let t3 = get_tex_coord(geometry, *piTriListIn.offset((f * 3 + 2) as isize) as usize); let t21x: f32 = t2.x - t1.x; let t21y: f32 = t2.y - t1.y; let t31x: f32 = t3.x - t1.x; @@ -1033,7 +1002,7 @@ unsafe fn InitTriInfo( let fSignedAreaSTx2: f32 = t21x * t31y - t21y * t31x; let mut vOs = (t31y * d1) - (t21y * d2); let mut vOt = (-t31x * d1) + (t21x * d2); - unsafe { *pTriInfos.offset(f as isize) }.iFlag |= if fSignedAreaSTx2 > 0i32 as f32 { + (*pTriInfos.offset(f as isize)).iFlag |= if fSignedAreaSTx2 > 0i32 as f32 { 8i32 } else { 0i32 @@ -1042,77 +1011,66 @@ unsafe fn InitTriInfo( let fAbsArea: f32 = fSignedAreaSTx2.abs(); let fLenOs: f32 = vOs.length(); let fLenOt: f32 = vOt.length(); - let fS: f32 = if unsafe { *pTriInfos.offset(f as isize) }.iFlag & 8i32 == 0i32 { + let fS: f32 = if (*pTriInfos.offset(f as isize)).iFlag & 8i32 == 0i32 { -1.0f32 } else { 1.0f32 }; - unsafe { - if NotZero(fLenOs) { - (*pTriInfos.offset(f as isize)).vOs = (fS / fLenOs) * vOs - } + if NotZero(fLenOs) { + (*pTriInfos.offset(f as isize)).vOs = (fS / fLenOs) * vOs } - unsafe { - if NotZero(fLenOt) { - (*pTriInfos.offset(f as isize)).vOt = (fS / fLenOt) * vOt - } + if NotZero(fLenOt) { + (*pTriInfos.offset(f as isize)).vOt = (fS / fLenOt) * vOt } - unsafe { *pTriInfos.offset(f as isize) }.fMagS = fLenOs / fAbsArea; - unsafe { *pTriInfos.offset(f as isize) }.fMagT = fLenOt / fAbsArea; - if unsafe { NotZero((*pTriInfos.offset(f as isize)).fMagS) } - && unsafe { NotZero((*pTriInfos.offset(f as isize)).fMagT) } + (*pTriInfos.offset(f as isize)).fMagS = fLenOs / fAbsArea; + (*pTriInfos.offset(f as isize)).fMagT = fLenOt / fAbsArea; + if NotZero((*pTriInfos.offset(f as isize)).fMagS) + && NotZero((*pTriInfos.offset(f as isize)).fMagT) { - unsafe { *pTriInfos.offset(f as isize) }.iFlag &= !4i32 + (*pTriInfos.offset(f as isize)).iFlag &= !4i32 } } f += 1 } while t < iNrTrianglesIn - 1 { - let iFO_a: i32 = unsafe { *pTriInfos.offset(t as isize) }.iOrgFaceNumber; - let iFO_b: i32 = unsafe { *pTriInfos.offset((t + 1) as isize) }.iOrgFaceNumber; + let iFO_a: i32 = (*pTriInfos.offset(t as isize)).iOrgFaceNumber; + let iFO_b: i32 = (*pTriInfos.offset((t + 1) as isize)).iOrgFaceNumber; if iFO_a == iFO_b { - let bIsDeg_a: bool = if unsafe { *pTriInfos.offset(t as isize) }.iFlag & 1i32 != 0i32 { + let bIsDeg_a: bool = if (*pTriInfos.offset(t as isize)).iFlag & 1i32 != 0i32 { + true + } else { + false + }; + let bIsDeg_b: bool = if (*pTriInfos.offset((t + 1) as isize)).iFlag & 1i32 != 0i32 { true } else { false }; - let bIsDeg_b: bool = - if unsafe { *pTriInfos.offset((t + 1) as isize) }.iFlag & 1i32 != 0i32 { + if !(bIsDeg_a || bIsDeg_b) { + let bOrientA: bool = if (*pTriInfos.offset(t as isize)).iFlag & 8i32 != 0i32 { + true + } else { + false + }; + let bOrientB: bool = if (*pTriInfos.offset((t + 1) as isize)).iFlag & 8i32 != 0i32 { true } else { false }; - if !(bIsDeg_a || bIsDeg_b) { - let bOrientA: bool = - if unsafe { *pTriInfos.offset(t as isize) }.iFlag & 8i32 != 0i32 { - true - } else { - false - }; - let bOrientB: bool = - if unsafe { *pTriInfos.offset((t + 1) as isize) }.iFlag & 8i32 != 0i32 { - true - } else { - false - }; if bOrientA != bOrientB { let mut bChooseOrientFirstTri: bool = false; - if unsafe { *pTriInfos.offset((t + 1) as isize) }.iFlag & 4i32 != 0i32 { + if (*pTriInfos.offset((t + 1) as isize)).iFlag & 4i32 != 0i32 { bChooseOrientFirstTri = true - } else if unsafe { - CalcTexArea(geometry, piTriListIn.offset((t * 3 + 0) as isize)) - } >= unsafe { - CalcTexArea(geometry, 
piTriListIn.offset(((t + 1) * 3 + 0) as isize)) - } { + } else if CalcTexArea(geometry, piTriListIn.offset((t * 3 + 0) as isize)) + >= CalcTexArea(geometry, piTriListIn.offset(((t + 1) * 3 + 0) as isize)) + { bChooseOrientFirstTri = true } let t0 = if bChooseOrientFirstTri { t } else { t + 1 }; let t1_0 = if bChooseOrientFirstTri { t + 1 } else { t }; - unsafe { - (*pTriInfos.offset(t1_0 as isize)).iFlag &= !8i32; - (*pTriInfos.offset(t1_0 as isize)).iFlag |= - (*pTriInfos.offset(t0 as isize)).iFlag & 8i32 - } + (*pTriInfos.offset(t1_0 as isize)).iFlag &= !8i32; + (*pTriInfos.offset(t1_0 as isize)).iFlag |= + (*pTriInfos.offset(t0 as isize)).iFlag & 8i32 } } t += 2 @@ -1122,14 +1080,12 @@ unsafe fn InitTriInfo( } let mut pEdges = vec![SEdge::zero(); iNrTrianglesIn * 3]; - unsafe { - BuildNeighborsFast( - pTriInfos, - pEdges.as_mut_ptr(), - piTriListIn, - iNrTrianglesIn as i32, - ); - } + BuildNeighborsFast( + pTriInfos, + pEdges.as_mut_ptr(), + piTriListIn, + iNrTrianglesIn as i32, + ); } unsafe fn BuildNeighborsFast( @@ -1149,115 +1105,97 @@ unsafe fn BuildNeighborsFast( while f < iNrTrianglesIn { i = 0i32; while i < 3i32 { - unsafe { - let i0: i32 = *piTriListIn.offset((f * 3i32 + i) as isize); - let i1: i32 = *piTriListIn - .offset((f * 3i32 + if i < 2i32 { i + 1i32 } else { 0i32 }) as isize); - (*pEdges.offset((f * 3i32 + i) as isize)).unnamed.i0 = - if i0 < i1 { i0 } else { i1 }; - (*pEdges.offset((f * 3i32 + i) as isize)).unnamed.i1 = - if !(i0 < i1) { i0 } else { i1 }; - (*pEdges.offset((f * 3i32 + i) as isize)).unnamed.f = f; - } + let i0: i32 = *piTriListIn.offset((f * 3i32 + i) as isize); + let i1: i32 = + *piTriListIn.offset((f * 3i32 + if i < 2i32 { i + 1i32 } else { 0i32 }) as isize); + (*pEdges.offset((f * 3i32 + i) as isize)).unnamed.i0 = if i0 < i1 { i0 } else { i1 }; + (*pEdges.offset((f * 3i32 + i) as isize)).unnamed.i1 = if !(i0 < i1) { i0 } else { i1 }; + (*pEdges.offset((f * 3i32 + i) as isize)).unnamed.f = f; i += 1 } f += 1 } - unsafe { - QuickSortEdges(pEdges, 0i32, iNrTrianglesIn * 3i32 - 1i32, 0i32, uSeed); - } + QuickSortEdges(pEdges, 0i32, iNrTrianglesIn * 3i32 - 1i32, 0i32, uSeed); iEntries = iNrTrianglesIn * 3i32; iCurStartIndex = 0i32; i = 1i32; while i < iEntries { - if unsafe { (*pEdges.offset(iCurStartIndex as isize)).unnamed }.i0 - != unsafe { (*pEdges.offset(i as isize)).unnamed }.i0 + if (*pEdges.offset(iCurStartIndex as isize)).unnamed.i0 + != (*pEdges.offset(i as isize)).unnamed.i0 { let iL: i32 = iCurStartIndex; let iR: i32 = i - 1i32; iCurStartIndex = i; - unsafe { - QuickSortEdges(pEdges, iL, iR, 1i32, uSeed); - } + QuickSortEdges(pEdges, iL, iR, 1i32, uSeed); } i += 1 } iCurStartIndex = 0i32; i = 1i32; while i < iEntries { - if unsafe { (*pEdges.offset(iCurStartIndex as isize)).unnamed }.i0 - != unsafe { (*pEdges.offset(i as isize)).unnamed }.i0 - || unsafe { (*pEdges.offset(iCurStartIndex as isize)).unnamed }.i1 - != unsafe { (*pEdges.offset(i as isize)).unnamed }.i1 + if (*pEdges.offset(iCurStartIndex as isize)).unnamed.i0 + != (*pEdges.offset(i as isize)).unnamed.i0 + || (*pEdges.offset(iCurStartIndex as isize)).unnamed.i1 + != (*pEdges.offset(i as isize)).unnamed.i1 { let iL_0: i32 = iCurStartIndex; let iR_0: i32 = i - 1i32; iCurStartIndex = i; - unsafe { - QuickSortEdges(pEdges, iL_0, iR_0, 2i32, uSeed); - } + QuickSortEdges(pEdges, iL_0, iR_0, 2i32, uSeed); } i += 1 } i = 0i32; while i < iEntries { - let i0_0: i32 = unsafe { (*pEdges.offset(i as isize)).unnamed }.i0; - let i1_0: i32 = unsafe { (*pEdges.offset(i as isize)).unnamed }.i1; - let 
f_0: i32 = unsafe { (*pEdges.offset(i as isize)).unnamed }.f; + let i0_0: i32 = (*pEdges.offset(i as isize)).unnamed.i0; + let i1_0: i32 = (*pEdges.offset(i as isize)).unnamed.i1; + let f_0: i32 = (*pEdges.offset(i as isize)).unnamed.f; let mut bUnassigned_A: bool = false; let mut i0_A: i32 = 0; let mut i1_A: i32 = 0; let mut edgenum_A: i32 = 0; let mut edgenum_B: i32 = 0i32; - unsafe { - GetEdge( - &mut i0_A, - &mut i1_A, - &mut edgenum_A, - &*piTriListIn.offset((f_0 * 3i32) as isize), - i0_0, - i1_0, - ); - } - bUnassigned_A = if unsafe { *pTriInfos.offset(f_0 as isize) }.FaceNeighbors - [edgenum_A as usize] - == -1i32 - { - true - } else { - false - }; + GetEdge( + &mut i0_A, + &mut i1_A, + &mut edgenum_A, + &*piTriListIn.offset((f_0 * 3i32) as isize), + i0_0, + i1_0, + ); + bUnassigned_A = + if (*pTriInfos.offset(f_0 as isize)).FaceNeighbors[edgenum_A as usize] == -1i32 { + true + } else { + false + }; if bUnassigned_A { let mut j: i32 = i + 1i32; let mut t: i32 = 0; let mut bNotFound: bool = true; while j < iEntries - && i0_0 == unsafe { (*pEdges.offset(j as isize)).unnamed }.i0 - && i1_0 == unsafe { (*pEdges.offset(j as isize)).unnamed }.i1 + && i0_0 == (*pEdges.offset(j as isize)).unnamed.i0 + && i1_0 == (*pEdges.offset(j as isize)).unnamed.i1 && bNotFound { let mut bUnassigned_B: bool = false; let mut i0_B: i32 = 0; let mut i1_B: i32 = 0; - t = unsafe { (*pEdges.offset(j as isize)).unnamed }.f; - unsafe { - GetEdge( - &mut i1_B, - &mut i0_B, - &mut edgenum_B, - &*piTriListIn.offset((t * 3i32) as isize), - (*pEdges.offset(j as isize)).unnamed.i0, - (*pEdges.offset(j as isize)).unnamed.i1, - ); - } - bUnassigned_B = if unsafe { *pTriInfos.offset(t as isize) }.FaceNeighbors - [edgenum_B as usize] - == -1i32 - { - true - } else { - false - }; + t = (*pEdges.offset(j as isize)).unnamed.f; + GetEdge( + &mut i1_B, + &mut i0_B, + &mut edgenum_B, + &*piTriListIn.offset((t * 3i32) as isize), + (*pEdges.offset(j as isize)).unnamed.i0, + (*pEdges.offset(j as isize)).unnamed.i1, + ); + bUnassigned_B = + if (*pTriInfos.offset(t as isize)).FaceNeighbors[edgenum_B as usize] == -1i32 { + true + } else { + false + }; if i0_A == i0_B && i1_A == i1_B && bUnassigned_B { bNotFound = false } else { @@ -1265,11 +1203,9 @@ unsafe fn BuildNeighborsFast( } } if !bNotFound { - unsafe { - let mut t_0: i32 = (*pEdges.offset(j as isize)).unnamed.f; - (*pTriInfos.offset(f_0 as isize)).FaceNeighbors[edgenum_A as usize] = t_0; - (*pTriInfos.offset(t_0 as isize)).FaceNeighbors[edgenum_B as usize] = f_0 - } + let mut t_0: i32 = (*pEdges.offset(j as isize)).unnamed.f; + (*pTriInfos.offset(f_0 as isize)).FaceNeighbors[edgenum_A as usize] = t_0; + (*pTriInfos.offset(t_0 as isize)).FaceNeighbors[edgenum_B as usize] = f_0 } } i += 1 @@ -1283,24 +1219,22 @@ unsafe fn GetEdge( i0_in: i32, i1_in: i32, ) { - unsafe { - *edgenum_out = -1i32; - if *indices.offset(0isize) == i0_in || *indices.offset(0isize) == i1_in { - if *indices.offset(1isize) == i0_in || *indices.offset(1isize) == i1_in { - *edgenum_out.offset(0isize) = 0i32; - *i0_out.offset(0isize) = *indices.offset(0isize); - *i1_out.offset(0isize) = *indices.offset(1isize) - } else { - *edgenum_out.offset(0isize) = 2i32; - *i0_out.offset(0isize) = *indices.offset(2isize); - *i1_out.offset(0isize) = *indices.offset(0isize) - } + *edgenum_out = -1i32; + if *indices.offset(0isize) == i0_in || *indices.offset(0isize) == i1_in { + if *indices.offset(1isize) == i0_in || *indices.offset(1isize) == i1_in { + *edgenum_out.offset(0isize) = 0i32; + *i0_out.offset(0isize) = 
*indices.offset(0isize); + *i1_out.offset(0isize) = *indices.offset(1isize) } else { - *edgenum_out.offset(0isize) = 1i32; - *i0_out.offset(0isize) = *indices.offset(1isize); - *i1_out.offset(0isize) = *indices.offset(2isize) - }; - } + *edgenum_out.offset(0isize) = 2i32; + *i0_out.offset(0isize) = *indices.offset(2isize); + *i1_out.offset(0isize) = *indices.offset(0isize) + } + } else { + *edgenum_out.offset(0isize) = 1i32; + *i0_out.offset(0isize) = *indices.offset(1isize); + *i1_out.offset(0isize) = *indices.offset(2isize) + }; } // /////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////// @@ -1326,14 +1260,12 @@ unsafe fn QuickSortEdges( return; } else { if iElems == 2i32 { - unsafe { - if (*pSortBuffer.offset(iLeft as isize)).array[channel as usize] - > (*pSortBuffer.offset(iRight as isize)).array[channel as usize] - { - sTmp = *pSortBuffer.offset(iLeft as isize); - *pSortBuffer.offset(iLeft as isize) = *pSortBuffer.offset(iRight as isize); - *pSortBuffer.offset(iRight as isize) = sTmp - } + if (*pSortBuffer.offset(iLeft as isize)).array[channel as usize] + > (*pSortBuffer.offset(iRight as isize)).array[channel as usize] + { + sTmp = *pSortBuffer.offset(iLeft as isize); + *pSortBuffer.offset(iLeft as isize) = *pSortBuffer.offset(iRight as isize); + *pSortBuffer.offset(iRight as isize) = sTmp } return; } @@ -1349,20 +1281,18 @@ unsafe fn QuickSortEdges( iR = iRight; n = iR - iL + 1i32; index = uSeed.wrapping_rem(n as u32) as i32; - iMid = unsafe { (*pSortBuffer.offset((index + iL) as isize)).array[channel as usize] }; + iMid = (*pSortBuffer.offset((index + iL) as isize)).array[channel as usize]; loop { - while unsafe { (*pSortBuffer.offset(iL as isize)).array[channel as usize] } < iMid { + while (*pSortBuffer.offset(iL as isize)).array[channel as usize] < iMid { iL += 1 } - while unsafe { (*pSortBuffer.offset(iR as isize)).array[channel as usize] } > iMid { + while (*pSortBuffer.offset(iR as isize)).array[channel as usize] > iMid { iR -= 1 } if iL <= iR { - unsafe { - sTmp = *pSortBuffer.offset(iL as isize); - *pSortBuffer.offset(iL as isize) = *pSortBuffer.offset(iR as isize); - *pSortBuffer.offset(iR as isize) = sTmp; - } + sTmp = *pSortBuffer.offset(iL as isize); + *pSortBuffer.offset(iL as isize) = *pSortBuffer.offset(iR as isize); + *pSortBuffer.offset(iR as isize) = sTmp; iL += 1; iR -= 1 } @@ -1371,22 +1301,18 @@ unsafe fn QuickSortEdges( } } if iLeft < iR { - unsafe { - QuickSortEdges(pSortBuffer, iLeft, iR, channel, uSeed); - } + QuickSortEdges(pSortBuffer, iLeft, iR, channel, uSeed); } if iL < iRight { - unsafe { - QuickSortEdges(pSortBuffer, iL, iRight, channel, uSeed); - } + QuickSortEdges(pSortBuffer, iL, iRight, channel, uSeed); }; } // returns the texture area times 2 unsafe fn CalcTexArea(geometry: &mut I, mut indices: *const i32) -> f32 { - let t1 = get_tex_coord(geometry, unsafe { *indices.offset(0isize) } as usize); - let t2 = get_tex_coord(geometry, unsafe { *indices.offset(1isize) } as usize); - let t3 = get_tex_coord(geometry, unsafe { *indices.offset(2isize) } as usize); + let t1 = get_tex_coord(geometry, *indices.offset(0isize) as usize); + let t2 = get_tex_coord(geometry, *indices.offset(1isize) as usize); + let t3 = get_tex_coord(geometry, *indices.offset(2isize) as usize); let t21x: f32 = t2.x - t1.x; let t21y: f32 = t2.y - t1.y; let t31x: f32 = t3.x - t1.x; @@ -1411,25 +1337,22 @@ unsafe fn DegenPrologue( // locate quads with only 
one good triangle let mut t: i32 = 0i32; while t < iTotTris - 1i32 { - let iFO_a: i32 = unsafe { *pTriInfos.offset(t as isize) }.iOrgFaceNumber; - let iFO_b: i32 = unsafe { *pTriInfos.offset((t + 1i32) as isize) }.iOrgFaceNumber; + let iFO_a: i32 = (*pTriInfos.offset(t as isize)).iOrgFaceNumber; + let iFO_b: i32 = (*pTriInfos.offset((t + 1i32) as isize)).iOrgFaceNumber; if iFO_a == iFO_b { - let bIsDeg_a: bool = if unsafe { *pTriInfos.offset(t as isize) }.iFlag & 1i32 != 0i32 { + let bIsDeg_a: bool = if (*pTriInfos.offset(t as isize)).iFlag & 1i32 != 0i32 { + true + } else { + false + }; + let bIsDeg_b: bool = if (*pTriInfos.offset((t + 1i32) as isize)).iFlag & 1i32 != 0i32 { true } else { false }; - let bIsDeg_b: bool = - if unsafe { *pTriInfos.offset((t + 1i32) as isize) }.iFlag & 1i32 != 0i32 { - true - } else { - false - }; if bIsDeg_a ^ bIsDeg_b != false { - unsafe { - (*pTriInfos.offset(t as isize)).iFlag |= 2i32; - (*pTriInfos.offset((t + 1i32) as isize)).iFlag |= 2i32 - } + (*pTriInfos.offset(t as isize)).iFlag |= 2i32; + (*pTriInfos.offset((t + 1i32) as isize)).iFlag |= 2i32 } t += 2i32 } else { @@ -1440,7 +1363,7 @@ unsafe fn DegenPrologue( t = 0i32; bStillFindingGoodOnes = true; while t < iNrTrianglesIn && bStillFindingGoodOnes { - let bIsGood: bool = if unsafe { *pTriInfos.offset(t as isize) }.iFlag & 1i32 == 0i32 { + let bIsGood: bool = if (*pTriInfos.offset(t as isize)).iFlag & 1i32 == 0i32 { true } else { false @@ -1455,8 +1378,7 @@ unsafe fn DegenPrologue( let mut bJustADegenerate: bool = true; while bJustADegenerate && iNextGoodTriangleSearchIndex < iTotTris { let bIsGood_0: bool = - if unsafe { *pTriInfos.offset(iNextGoodTriangleSearchIndex as isize) }.iFlag - & 1i32 + if (*pTriInfos.offset(iNextGoodTriangleSearchIndex as isize)).iFlag & 1i32 == 0i32 { true @@ -1476,19 +1398,15 @@ unsafe fn DegenPrologue( let mut i: i32 = 0i32; i = 0i32; while i < 3i32 { - unsafe { - let index: i32 = *piTriList_out.offset((t0 * 3i32 + i) as isize); - *piTriList_out.offset((t0 * 3i32 + i) as isize) = - *piTriList_out.offset((t1 * 3i32 + i) as isize); - *piTriList_out.offset((t1 * 3i32 + i) as isize) = index; - } + let index: i32 = *piTriList_out.offset((t0 * 3i32 + i) as isize); + *piTriList_out.offset((t0 * 3i32 + i) as isize) = + *piTriList_out.offset((t1 * 3i32 + i) as isize); + *piTriList_out.offset((t1 * 3i32 + i) as isize) = index; i += 1 } - unsafe { - let tri_info: STriInfo = *pTriInfos.offset(t0 as isize); - *pTriInfos.offset(t0 as isize) = *pTriInfos.offset(t1 as isize); - *pTriInfos.offset(t1 as isize) = tri_info - } + let tri_info: STriInfo = *pTriInfos.offset(t0 as isize); + *pTriInfos.offset(t0 as isize) = *pTriInfos.offset(t1 as isize); + *pTriInfos.offset(t1 as isize) = tri_info } else { bStillFindingGoodOnes = false } @@ -1515,7 +1433,7 @@ unsafe fn GenerateSharedVerticesIndexList( let mut fMax: f32 = 0.; i = 1; while i < iNrTrianglesIn * 3 { - let index: i32 = unsafe { *piTriList_in_and_out.offset(i as isize) }; + let index: i32 = *piTriList_in_and_out.offset(i as isize); let vP = get_position(geometry, index as usize); if vMin.x > vP.x { vMin.x = vP.x @@ -1555,7 +1473,7 @@ unsafe fn GenerateSharedVerticesIndexList( i = 0; while i < iNrTrianglesIn * 3 { - let index_0: i32 = unsafe { *piTriList_in_and_out.offset(i as isize) }; + let index_0: i32 = *piTriList_in_and_out.offset(i as isize); let vP_0 = get_position(geometry, index_0 as usize); let fVal: f32 = if iChannel == 0i32 { vP_0.x @@ -1564,7 +1482,7 @@ unsafe fn GenerateSharedVerticesIndexList( } else { vP_0.z }; - let 
iCell = unsafe { FindGridCell(fMin, fMax, fVal) }; + let iCell = FindGridCell(fMin, fMax, fVal); piHashCount[iCell] += 1; i += 1 } @@ -1576,7 +1494,7 @@ unsafe fn GenerateSharedVerticesIndexList( } i = 0; while i < iNrTrianglesIn * 3 { - let index_1: i32 = unsafe { *piTriList_in_and_out.offset(i as isize) }; + let index_1: i32 = *piTriList_in_and_out.offset(i as isize); let vP_1 = get_position(geometry, index_1 as usize); let fVal_0: f32 = if iChannel == 0i32 { vP_1.x @@ -1585,7 +1503,7 @@ unsafe fn GenerateSharedVerticesIndexList( } else { vP_1.z }; - let iCell_0 = unsafe { FindGridCell(fMin, fMax, fVal_0) }; + let iCell_0 = FindGridCell(fMin, fMax, fVal_0); piHashTable[(piHashOffsets[iCell_0] + piHashCount2[iCell_0]) as usize] = i as i32; piHashCount2[iCell_0] += 1; i += 1 @@ -1606,32 +1524,29 @@ unsafe fn GenerateSharedVerticesIndexList( k = 0; while k < g_iCells { // extract table of cell k and amount of entries in it - let pTable_0 = unsafe { piHashTable.as_mut_ptr().offset(piHashOffsets[k] as isize) }; + let pTable_0 = piHashTable.as_mut_ptr().offset(piHashOffsets[k] as isize); let iEntries = piHashCount[k] as usize; if !(iEntries < 2) { e = 0; while e < iEntries { - let mut i_0: i32 = unsafe { *pTable_0.offset(e as isize) }; - let vP_2 = - get_position( - geometry, - unsafe { *piTriList_in_and_out.offset(i_0 as isize) } as usize, - ); + let mut i_0: i32 = *pTable_0.offset(e as isize); + let vP_2 = get_position( + geometry, + *piTriList_in_and_out.offset(i_0 as isize) as usize, + ); pTmpVert[e].vert[0usize] = vP_2.x; pTmpVert[e].vert[1usize] = vP_2.y; pTmpVert[e].vert[2usize] = vP_2.z; pTmpVert[e].index = i_0; e += 1 } - unsafe { - MergeVertsFast( - piTriList_in_and_out, - pTmpVert.as_mut_ptr(), - geometry, - 0i32, - (iEntries - 1) as i32, - ); - } + MergeVertsFast( + piTriList_in_and_out, + pTmpVert.as_mut_ptr(), + geometry, + 0i32, + (iEntries - 1) as i32, + ); } k += 1 } @@ -1656,7 +1571,7 @@ unsafe fn MergeVertsFast( let mut fSep: f32 = 0i32 as f32; c = 0i32; while c < 3i32 { - fvMin[c as usize] = unsafe { *pTmpVert.offset(iL_in as isize) }.vert[c as usize]; + fvMin[c as usize] = (*pTmpVert.offset(iL_in as isize)).vert[c as usize]; fvMax[c as usize] = fvMin[c as usize]; c += 1 } @@ -1664,10 +1579,10 @@ unsafe fn MergeVertsFast( while l <= iR_in { c = 0i32; while c < 3i32 { - if fvMin[c as usize] > unsafe { *pTmpVert.offset(l as isize) }.vert[c as usize] { - fvMin[c as usize] = unsafe { *pTmpVert.offset(l as isize) }.vert[c as usize] - } else if fvMax[c as usize] < unsafe { *pTmpVert.offset(l as isize) }.vert[c as usize] { - fvMax[c as usize] = unsafe { *pTmpVert.offset(l as isize) }.vert[c as usize] + if fvMin[c as usize] > (*pTmpVert.offset(l as isize)).vert[c as usize] { + fvMin[c as usize] = (*pTmpVert.offset(l as isize)).vert[c as usize] + } else if fvMax[c as usize] < (*pTmpVert.offset(l as isize)).vert[c as usize] { + fvMax[c as usize] = (*pTmpVert.offset(l as isize)).vert[c as usize] } c += 1 } @@ -1686,8 +1601,8 @@ unsafe fn MergeVertsFast( if fSep >= fvMax[channel as usize] || fSep <= fvMin[channel as usize] { l = iL_in; while l <= iR_in { - let mut i: i32 = unsafe { *pTmpVert.offset(l as isize) }.index; - let index: i32 = unsafe { *piTriList_in_and_out.offset(i as isize) }; + let mut i: i32 = (*pTmpVert.offset(l as isize)).index; + let index: i32 = *piTriList_in_and_out.offset(i as isize); let vP = get_position(geometry, index as usize); let vN = get_normal(geometry, index as usize); let vT = get_tex_coord(geometry, index as usize); @@ -1695,8 +1610,8 @@ unsafe fn 
MergeVertsFast( let mut l2: i32 = iL_in; let mut i2rec: i32 = -1i32; while l2 < l && bNotFound { - let i2: i32 = unsafe { *pTmpVert.offset(l2 as isize) }.index; - let index2: i32 = unsafe { *piTriList_in_and_out.offset(i2 as isize) }; + let i2: i32 = (*pTmpVert.offset(l2 as isize)).index; + let index2: i32 = *piTriList_in_and_out.offset(i2 as isize); let vP2 = get_position(geometry, index2 as usize); let vN2 = get_normal(geometry, index2 as usize); let vT2 = get_tex_coord(geometry, index2 as usize); @@ -1717,10 +1632,8 @@ unsafe fn MergeVertsFast( } } if !bNotFound { - unsafe { - *piTriList_in_and_out.offset(i as isize) = - *piTriList_in_and_out.offset(i2rec as isize) - } + *piTriList_in_and_out.offset(i as isize) = + *piTriList_in_and_out.offset(i2rec as isize) } l += 1 } @@ -1731,32 +1644,28 @@ unsafe fn MergeVertsFast( let mut bReadyLeftSwap: bool = false; let mut bReadyRightSwap: bool = false; while !bReadyLeftSwap && iL < iR { - bReadyLeftSwap = - !(unsafe { *pTmpVert.offset(iL as isize) }.vert[channel as usize] < fSep); + bReadyLeftSwap = !((*pTmpVert.offset(iL as isize)).vert[channel as usize] < fSep); if !bReadyLeftSwap { iL += 1 } } while !bReadyRightSwap && iL < iR { - bReadyRightSwap = - unsafe { *pTmpVert.offset(iR as isize) }.vert[channel as usize] < fSep; + bReadyRightSwap = (*pTmpVert.offset(iR as isize)).vert[channel as usize] < fSep; if !bReadyRightSwap { iR -= 1 } } if bReadyLeftSwap && bReadyRightSwap { - unsafe { - let sTmp: STmpVert = *pTmpVert.offset(iL as isize); - *pTmpVert.offset(iL as isize) = *pTmpVert.offset(iR as isize); - *pTmpVert.offset(iR as isize) = sTmp; - } + let sTmp: STmpVert = *pTmpVert.offset(iL as isize); + *pTmpVert.offset(iL as isize) = *pTmpVert.offset(iR as isize); + *pTmpVert.offset(iR as isize) = sTmp; iL += 1; iR -= 1 } } if iL == iR { let bReadyRightSwap_0: bool = - unsafe { *pTmpVert.offset(iR as isize) }.vert[channel as usize] < fSep; + (*pTmpVert.offset(iR as isize)).vert[channel as usize] < fSep; if bReadyRightSwap_0 { iL += 1 } else { @@ -1764,14 +1673,10 @@ unsafe fn MergeVertsFast( } } if iL_in < iR { - unsafe { - MergeVertsFast(piTriList_in_and_out, pTmpVert, geometry, iL_in, iR); - } + MergeVertsFast(piTriList_in_and_out, pTmpVert, geometry, iL_in, iR); } if iL < iR_in { - unsafe { - MergeVertsFast(piTriList_in_and_out, pTmpVert, geometry, iL, iR_in); - } + MergeVertsFast(piTriList_in_and_out, pTmpVert, geometry, iL, iR_in); } }; } diff --git a/crates/bevy_mikktspace/src/lib.rs b/crates/bevy_mikktspace/src/lib.rs index 365c2734800db..9316a5a7be306 100644 --- a/crates/bevy_mikktspace/src/lib.rs +++ b/crates/bevy_mikktspace/src/lib.rs @@ -1,5 +1,6 @@ #![allow(clippy::type_complexity)] #![allow(clippy::all)] +#![deny(unsafe_op_in_unsafe_fn)] use glam::{Vec2, Vec3}; diff --git a/crates/bevy_pbr/src/lib.rs b/crates/bevy_pbr/src/lib.rs index 228de223caa98..d3087baa4f525 100644 --- a/crates/bevy_pbr/src/lib.rs +++ b/crates/bevy_pbr/src/lib.rs @@ -1,4 +1,5 @@ #![allow(clippy::type_complexity)] +#![forbid(unsafe_op_in_unsafe_fn)] pub mod wireframe; diff --git a/crates/bevy_ptr/src/lib.rs b/crates/bevy_ptr/src/lib.rs index 7cdcfaf6dc7ba..ff738e13f2b11 100644 --- a/crates/bevy_ptr/src/lib.rs +++ b/crates/bevy_ptr/src/lib.rs @@ -2,6 +2,7 @@ #![no_std] #![warn(missing_docs)] #![allow(clippy::type_complexity)] +#![forbid(unsafe_op_in_unsafe_fn)] use core::fmt::{self, Formatter, Pointer}; use core::{ diff --git a/crates/bevy_reflect/src/lib.rs b/crates/bevy_reflect/src/lib.rs index 6977b0bb71e9e..b65c3238f1c26 100644 --- 
a/crates/bevy_reflect/src/lib.rs +++ b/crates/bevy_reflect/src/lib.rs @@ -465,6 +465,7 @@ //! [`bevy_reflect_derive/documentation`]: bevy_reflect_derive //! [derive `Reflect`]: derive@crate::Reflect #![allow(clippy::type_complexity)] +#![forbid(unsafe_op_in_unsafe_fn)] mod array; mod fields; diff --git a/crates/bevy_reflect/src/type_registry.rs b/crates/bevy_reflect/src/type_registry.rs index b7ce14d8301c6..24d889e1b9916 100644 --- a/crates/bevy_reflect/src/type_registry.rs +++ b/crates/bevy_reflect/src/type_registry.rs @@ -545,7 +545,8 @@ impl ReflectFromPtr { /// `val` must be a pointer to value of the type that the [`ReflectFromPtr`] was constructed for. /// This can be verified by checking that the type id returned by [`ReflectFromPtr::type_id`] is the expected one. pub unsafe fn as_reflect<'a>(&self, val: Ptr<'a>) -> &'a dyn Reflect { - (self.from_ptr)(val) + // SAFETY: the caller must uphold the safety contract for `from_ptr`. + unsafe { (self.from_ptr)(val) } } /// Convert `PtrMut` into `&mut dyn Reflect`. @@ -555,7 +556,8 @@ impl ReflectFromPtr { /// `val` must be a pointer to a value of the type that the [`ReflectFromPtr`] was constructed for /// This can be verified by checking that the type id returned by [`ReflectFromPtr::type_id`] is the expected one. pub unsafe fn as_reflect_mut<'a>(&self, val: PtrMut<'a>) -> &'a mut dyn Reflect { - (self.from_ptr_mut)(val) + // SAFETY: the caller must uphold the safety contract for `from_ptr_mut`. + unsafe { (self.from_ptr_mut)(val) } } /// Get a function pointer to turn a `Ptr` into `&dyn Reflect` for /// the type this [`ReflectFromPtr`] was constructed for. diff --git a/crates/bevy_reflect_compile_fail_tests/src/lib.rs b/crates/bevy_reflect_compile_fail_tests/src/lib.rs index d0d1683dd6b97..8ba475734e2ca 100644 --- a/crates/bevy_reflect_compile_fail_tests/src/lib.rs +++ b/crates/bevy_reflect_compile_fail_tests/src/lib.rs @@ -1 +1,3 @@ // Nothing here, check out the integration tests + +#![forbid(unsafe_op_in_unsafe_fn)] \ No newline at end of file diff --git a/crates/bevy_render/src/extract_param.rs b/crates/bevy_render/src/extract_param.rs index e6b1ea1802fd3..60f9fed5e5e0a 100644 --- a/crates/bevy_render/src/extract_param.rs +++ b/crates/bevy_render/src/extract_param.rs @@ -83,12 +83,14 @@ where // SAFETY: // - The caller ensures that `world` is the same one that `init_state` was called with. // - The caller ensures that no other `SystemParam`s will conflict with the accesses we have registered. 
-        let main_world = Res::<MainWorld>::get_param(
-            &mut state.main_world_state,
-            system_meta,
-            world,
-            change_tick,
-        );
+        let main_world = unsafe {
+            Res::<MainWorld>::get_param(
+                &mut state.main_world_state,
+                system_meta,
+                world,
+                change_tick,
+            )
+        };
         let item = state.state.get(main_world.into_inner());
         Extract { item }
     }
 }
diff --git a/crates/bevy_render/src/lib.rs b/crates/bevy_render/src/lib.rs
index cc62f3fcead84..e61a438d861a2 100644
--- a/crates/bevy_render/src/lib.rs
+++ b/crates/bevy_render/src/lib.rs
@@ -1,4 +1,5 @@
 #![allow(clippy::type_complexity)]
+#![forbid(unsafe_op_in_unsafe_fn)]
 
 #[cfg(target_pointer_width = "16")]
 compile_error!("bevy_render cannot compile for a 16-bit platform.");
diff --git a/crates/bevy_scene/src/lib.rs b/crates/bevy_scene/src/lib.rs
index 66d9d02043c0e..452fab5fb9744 100644
--- a/crates/bevy_scene/src/lib.rs
+++ b/crates/bevy_scene/src/lib.rs
@@ -1,4 +1,5 @@
 #![allow(clippy::type_complexity)]
+#![forbid(unsafe_op_in_unsafe_fn)]
 
 mod bundle;
 mod dynamic_scene;
diff --git a/crates/bevy_sprite/src/lib.rs b/crates/bevy_sprite/src/lib.rs
index 1eb3b1a5cc64c..9b959643f6fec 100644
--- a/crates/bevy_sprite/src/lib.rs
+++ b/crates/bevy_sprite/src/lib.rs
@@ -1,4 +1,5 @@
 #![allow(clippy::type_complexity)]
+#![forbid(unsafe_op_in_unsafe_fn)]
 
 mod bundle;
 mod dynamic_texture_atlas_builder;
diff --git a/crates/bevy_tasks/src/lib.rs b/crates/bevy_tasks/src/lib.rs
index b97d2e9df466b..cb9a4fc0841db 100644
--- a/crates/bevy_tasks/src/lib.rs
+++ b/crates/bevy_tasks/src/lib.rs
@@ -1,6 +1,7 @@
 #![warn(missing_docs)]
 #![allow(clippy::type_complexity)]
 #![doc = include_str!("../README.md")]
+#![forbid(unsafe_op_in_unsafe_fn)]
 
 mod slice;
 pub use slice::{ParallelSlice, ParallelSliceMut};
diff --git a/crates/bevy_text/src/lib.rs b/crates/bevy_text/src/lib.rs
index f5498d26ef1fb..bb3b2c951af3e 100644
--- a/crates/bevy_text/src/lib.rs
+++ b/crates/bevy_text/src/lib.rs
@@ -1,4 +1,5 @@
 #![allow(clippy::type_complexity)]
+#![forbid(unsafe_op_in_unsafe_fn)]
 
 mod error;
 mod font;
diff --git a/crates/bevy_time/src/lib.rs b/crates/bevy_time/src/lib.rs
index f9b26d480fd2a..8139af23a564d 100644
--- a/crates/bevy_time/src/lib.rs
+++ b/crates/bevy_time/src/lib.rs
@@ -1,6 +1,7 @@
 #![allow(clippy::type_complexity)]
 #![warn(missing_docs)]
 #![doc = include_str!("../README.md")]
+#![forbid(unsafe_op_in_unsafe_fn)]
 
 /// Common run conditions
 pub mod common_conditions;
diff --git a/crates/bevy_transform/src/lib.rs b/crates/bevy_transform/src/lib.rs
index d4cec7b8fcf96..4b0beaeae9d53 100644
--- a/crates/bevy_transform/src/lib.rs
+++ b/crates/bevy_transform/src/lib.rs
@@ -2,6 +2,7 @@
 #![warn(missing_docs)]
 #![warn(clippy::undocumented_unsafe_blocks)]
 #![doc = include_str!("../README.md")]
+#![forbid(unsafe_op_in_unsafe_fn)]
 
 pub mod commands;
 /// The basic components of the transform crate
diff --git a/crates/bevy_ui/src/lib.rs b/crates/bevy_ui/src/lib.rs
index ae9f9f41885d9..ea6bd30a2689c 100644
--- a/crates/bevy_ui/src/lib.rs
+++ b/crates/bevy_ui/src/lib.rs
@@ -1,4 +1,5 @@
 #![allow(clippy::type_complexity)]
+#![forbid(unsafe_op_in_unsafe_fn)]
 
 //! This crate contains Bevy's UI system, which can be used to create UI for both 2D and 3D games
 //! # Basic usage
diff --git a/crates/bevy_utils/src/lib.rs b/crates/bevy_utils/src/lib.rs
index 7916caf769476..bb7dc01730f6b 100644
--- a/crates/bevy_utils/src/lib.rs
+++ b/crates/bevy_utils/src/lib.rs
@@ -5,6 +5,7 @@
 #![allow(clippy::type_complexity)]
 #![warn(missing_docs)]
 #![warn(clippy::undocumented_unsafe_blocks)]
+#![forbid(unsafe_op_in_unsafe_fn)]
 
 #[allow(missing_docs)]
 pub mod prelude {
diff --git a/crates/bevy_window/src/lib.rs b/crates/bevy_window/src/lib.rs
index 174f8c11be0a8..d58cbbe167673 100644
--- a/crates/bevy_window/src/lib.rs
+++ b/crates/bevy_window/src/lib.rs
@@ -1,4 +1,5 @@
 #![allow(clippy::type_complexity)]
+#![forbid(unsafe_op_in_unsafe_fn)]
 
 #[warn(missing_docs)]
 mod cursor;
diff --git a/crates/bevy_winit/src/lib.rs b/crates/bevy_winit/src/lib.rs
index d6ea22e368388..d23bdcd70991f 100644
--- a/crates/bevy_winit/src/lib.rs
+++ b/crates/bevy_winit/src/lib.rs
@@ -1,5 +1,6 @@
 #![allow(clippy::type_complexity)]
 #![warn(missing_docs)]
+#![forbid(unsafe_op_in_unsafe_fn)]
 //! `bevy_winit` provides utilities to handle window creation and the eventloop through [`winit`]
 //!
 //! Most commonly, the [`WinitPlugin`] is used as part of
diff --git a/src/lib.rs b/src/lib.rs
index cf9b57938ad08..948d233828300 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,4 +1,5 @@
 #![allow(clippy::single_component_path_imports)]
+#![forbid(unsafe_op_in_unsafe_fn)]
 //! [![](https://bevyengine.org/assets/bevy_logo_docs.svg)](https://bevyengine.org)
 //!