diff --git a/cmd/api/src/analysis/ad/adcs_integration_test.go b/cmd/api/src/analysis/ad/adcs_integration_test.go index a484ed278c0..14bb1876b30 100644 --- a/cmd/api/src/analysis/ad/adcs_integration_test.go +++ b/cmd/api/src/analysis/ad/adcs_integration_test.go @@ -24,6 +24,7 @@ import ( "github.com/specterops/bloodhound/packages/go/analysis" ad2 "github.com/specterops/bloodhound/packages/go/analysis/ad" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/graphschema" "github.com/specterops/dawgs/ops" @@ -77,7 +78,7 @@ func TestADCSESC1(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC1(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC1.String(), err) } @@ -191,7 +192,7 @@ func TestADCSESC1(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC1(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC1.String(), err) } @@ -242,7 +243,7 @@ func TestGoldenCert(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostGoldenCert(ctx, tx, outC, innerEnterpriseCA, targetDomains); 
err != nil { t.Logf("failed post processing for %s: %v", ad.GoldenCert.String(), err) } @@ -499,19 +500,19 @@ func TestEnrollOnBehalfOf(t *testing.T) { require.Len(t, results, 3) - require.Contains(t, results, analysis.CreatePostRelationshipJob{ + require.Contains(t, results, post.EnsureRelationshipJob{ FromID: harness.EnrollOnBehalfOfHarness1.CertTemplate11.ID, ToID: harness.EnrollOnBehalfOfHarness1.CertTemplate12.ID, Kind: ad.EnrollOnBehalfOf, }) - require.Contains(t, results, analysis.CreatePostRelationshipJob{ + require.Contains(t, results, post.EnsureRelationshipJob{ FromID: harness.EnrollOnBehalfOfHarness1.CertTemplate13.ID, ToID: harness.EnrollOnBehalfOfHarness1.CertTemplate12.ID, Kind: ad.EnrollOnBehalfOf, }) - require.Contains(t, results, analysis.CreatePostRelationshipJob{ + require.Contains(t, results, post.EnsureRelationshipJob{ FromID: harness.EnrollOnBehalfOfHarness1.CertTemplate12.ID, ToID: harness.EnrollOnBehalfOfHarness1.CertTemplate12.ID, Kind: ad.EnrollOnBehalfOf, @@ -566,7 +567,7 @@ func TestEnrollOnBehalfOf(t *testing.T) { require.Nil(t, err) require.Len(t, results, 1) - require.Contains(t, results, analysis.CreatePostRelationshipJob{ + require.Contains(t, results, post.EnsureRelationshipJob{ FromID: harness.EnrollOnBehalfOfHarness2.CertTemplate21.ID, ToID: harness.EnrollOnBehalfOfHarness2.CertTemplate23.ID, Kind: ad.EnrollOnBehalfOf, @@ -641,7 +642,7 @@ func TestADCSESC3(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC3(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC3.String(), err) } @@ -692,7 +693,7 @@ func TestADCSESC3(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx 
context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC3(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC3.String(), err) } @@ -755,7 +756,7 @@ func TestADCSESC3(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC3(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC3.String(), err) } @@ -817,7 +818,7 @@ func TestADCSESC4(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC4(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC4.String(), err) } @@ -882,7 +883,7 @@ func TestADCSESC4(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC4(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC4.String(), err) } @@ -952,7 +953,7 @@ func TestADCSESC4(t 
*testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC4(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC4.String(), err) } @@ -1003,7 +1004,7 @@ func TestADCSESC4(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC4(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC4.String(), err) } @@ -1058,7 +1059,7 @@ func TestADCSESC4Composition(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC4(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC4.String(), err) } @@ -1283,7 +1284,7 @@ func TestADCSESC9a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for 
%s: %v", ad.ADCSESC9a.String(), err) } @@ -1335,7 +1336,7 @@ func TestADCSESC9a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } @@ -1384,7 +1385,7 @@ func TestADCSESC9a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } @@ -1434,7 +1435,7 @@ func TestADCSESC9a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } @@ -1484,7 +1485,7 @@ func TestADCSESC9a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, 
innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } @@ -1532,7 +1533,7 @@ func TestADCSESC9a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } @@ -1607,7 +1608,7 @@ func TestADCSESC9a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } @@ -1657,7 +1658,7 @@ func TestADCSESC9a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } @@ -1704,7 +1705,7 @@ func TestADCSESC9a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- 
post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC9a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9a.String(), err) } @@ -1758,7 +1759,7 @@ func TestADCSESC9b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC9b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9b.String(), err) } @@ -1810,7 +1811,7 @@ func TestADCSESC9b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC9b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9b.String(), err) } @@ -1858,7 +1859,7 @@ func TestADCSESC9b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC9b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9b.String(), err) } @@ -1907,7 +1908,7 @@ func TestADCSESC9b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + 
operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC9b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9b.String(), err) } @@ -1956,7 +1957,7 @@ func TestADCSESC9b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC9b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9b.String(), err) } @@ -2035,7 +2036,7 @@ func TestADCSESC9b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC9b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9b.String(), err) } @@ -2085,7 +2086,7 @@ func TestADCSESC9b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC9b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC9b.String(), err) } @@ -2134,7 +2135,7 @@ func TestADCSESC6a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx 
graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC6a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6a.String(), err) } @@ -2183,7 +2184,7 @@ func TestADCSESC6a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC6a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6a.String(), err) } @@ -2231,7 +2232,7 @@ func TestADCSESC6a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC6a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6a.String(), err) } @@ -2333,7 +2334,7 @@ func TestADCSESC6a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC6a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6a.String(), err) } @@ -2387,7 +2388,7 @@ func TestADCSESC6b(t 
*testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC6b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6b.String(), err) } @@ -2491,7 +2492,7 @@ func TestADCSESC6b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC6b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6b.String(), err) } @@ -2540,7 +2541,7 @@ func TestADCSESC6b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC6b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6b.String(), err) } @@ -2589,7 +2590,7 @@ func TestADCSESC6b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC6b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: 
%v", ad.ADCSESC6b.String(), err) } @@ -2644,7 +2645,7 @@ func TestADCSESC6b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC6b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6b.String(), err) } @@ -2693,7 +2694,7 @@ func TestADCSESC6b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC6b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC6b.String(), err) } @@ -2742,7 +2743,7 @@ func TestADCSESC10a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC10a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10a.String(), err) } @@ -2795,7 +2796,7 @@ func TestADCSESC10a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC10a(ctx, tx, outC, localGroupData, 
innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10a.String(), err) } @@ -2845,7 +2846,7 @@ func TestADCSESC10a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC10a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10a.String(), err) } @@ -2896,7 +2897,7 @@ func TestADCSESC10a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC10a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10a.String(), err) } @@ -2976,7 +2977,7 @@ func TestADCSESC10a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC10a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10a.String(), err) } @@ -3025,7 +3026,7 @@ func TestADCSESC10a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- 
post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC10a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10a.String(), err) } @@ -3074,7 +3075,7 @@ func TestADCSESC10a(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC10a(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10a.String(), err) } @@ -3123,7 +3124,7 @@ func TestADCSESC13(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC13(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC13.String(), err) } else { @@ -3190,7 +3191,7 @@ func TestADCSESC13(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC13(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC13.String(), err) } else { @@ -3262,7 +3263,7 @@ func TestADCSESC13(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { 
+ operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC13(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC13.String(), err) } else { @@ -3356,7 +3357,7 @@ func TestADCSESC10b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC10b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10b.String(), err) } @@ -3407,7 +3408,7 @@ func TestADCSESC10b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC10b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10b.String(), err) } @@ -3456,7 +3457,7 @@ func TestADCSESC10b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC10b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10b.String(), err) } @@ -3505,7 +3506,7 @@ func TestADCSESC10b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx 
context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC10b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10b.String(), err) } @@ -3586,7 +3587,7 @@ func TestADCSESC10b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC10b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10b.String(), err) } @@ -3635,7 +3636,7 @@ func TestADCSESC10b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC10b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10b.String(), err) } @@ -3684,7 +3685,7 @@ func TestADCSESC10b(t *testing.T) { } } - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := ad2.PostADCSESC10b(ctx, tx, outC, localGroupData, innerEnterpriseCA, targetDomains, cache); err != nil { t.Logf("failed post processing for %s: %v", ad.ADCSESC10b.String(), err) } diff --git 
a/cmd/api/src/analysis/ad/ntlm_integration_test.go b/cmd/api/src/analysis/ad/ntlm_integration_test.go index 3b72e7a9e3d..544ec1eee00 100644 --- a/cmd/api/src/analysis/ad/ntlm_integration_test.go +++ b/cmd/api/src/analysis/ad/ntlm_integration_test.go @@ -27,6 +27,7 @@ import ( "github.com/specterops/bloodhound/cmd/api/src/test/integration" "github.com/specterops/bloodhound/packages/go/analysis" ad2 "github.com/specterops/bloodhound/packages/go/analysis/ad" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/graphschema" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/bloodhound/packages/go/graphschema/common" @@ -156,7 +157,7 @@ func TestPostNTLMRelaySMB(t *testing.T) { ntlmCache, err := ad2.NewNTLMCache(context.Background(), db, grouplocalGroupData) require.NoError(t, err) - err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, computer := range computers { innerComputer := computer domainSid, _ := innerComputer.Properties.Get(ad.DomainSID.String()).String() @@ -232,7 +233,7 @@ func TestPostNTLMRelaySMB(t *testing.T) { ntlmCache, err := ad2.NewNTLMCache(context.Background(), db, grouplocalGroupData) require.NoError(t, err) - err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, computer := range computers { innerComputer := computer @@ -284,7 +285,7 @@ func TestNTLMRelayToSMBComposition(t *testing.T) { ntlmCache, err := ad2.NewNTLMCache(context.Background(), db, grouplocalGroupData) require.NoError(t, err) - err = 
operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, computer := range computers { innerComputer := computer domainSid, _ := innerComputer.Properties.Get(ad.DomainSID.String()).String() @@ -360,7 +361,7 @@ func TestPostCoerceAndRelayNTLMToLDAP(t *testing.T) { protectedUsersCache, err := ad2.FetchProtectedUsersMappedToDomains(testContext.Context(), db, grouplocalGroupData) require.NoError(t, err) - err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, computer := range computers { innerComputer := computer domainSid, err := innerComputer.Properties.Get(ad.DomainSID.String()).String() @@ -440,7 +441,7 @@ func TestPostCoerceAndRelayNTLMToLDAP(t *testing.T) { protectedUsersCache, err := ad2.FetchProtectedUsersMappedToDomains(testContext.Context(), db, grouplocalGroupData) require.NoError(t, err) - err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, computer := range computers { innerComputer := computer domainSid, err := innerComputer.Properties.Get(ad.DomainSID.String()).String() @@ -516,7 +517,7 @@ func TestPostCoerceAndRelayNTLMToLDAP(t *testing.T) { protectedUsersCache, err := ad2.FetchProtectedUsersMappedToDomains(testContext.Context(), db, grouplocalGroupData) require.NoError(t, err) - err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC 
chan<- analysis.CreatePostRelationshipJob) error { + err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, computer := range computers { innerComputer := computer domainSid, err := innerComputer.Properties.Get(ad.DomainSID.String()).String() @@ -571,7 +572,7 @@ func TestPostCoerceAndRelayNTLMToLDAP(t *testing.T) { protectedUsersCache, err := ad2.FetchProtectedUsersMappedToDomains(testContext.Context(), db, grouplocalGroupData) require.NoError(t, err) - err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, computer := range computers { innerComputer := computer domainSid, err := innerComputer.Properties.Get(ad.DomainSID.String()).String() diff --git a/cmd/api/src/analysis/ad/post.go b/cmd/api/src/analysis/ad/post.go index c6b7a6773fb..76b6839b041 100644 --- a/cmd/api/src/analysis/ad/post.go +++ b/cmd/api/src/analysis/ad/post.go @@ -1,4 +1,4 @@ -// Copyright 2023 Specter Ops, Inc. +// Copyright 2026 Specter Ops, Inc. // // Licensed under the Apache License, Version 2.0 // you may not use this file except in compliance with the License. @@ -13,6 +13,21 @@ // limitations under the License. // // SPDX-License-Identifier: Apache-2.0 +// Copyright 2023 Specter Ops, Inc. +// +// Licensed under the Apache License, Version 2.0 +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 package ad @@ -22,6 +37,7 @@ import ( "github.com/specterops/bloodhound/packages/go/analysis" adAnalysis "github.com/specterops/bloodhound/packages/go/analysis/ad" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/bhlog/attr" "github.com/specterops/bloodhound/packages/go/bhlog/measure" "github.com/specterops/bloodhound/packages/go/graphschema/ad" @@ -29,7 +45,7 @@ import ( "github.com/specterops/dawgs/graph" ) -func Post(ctx context.Context, db graph.Database, adcsEnabled, citrixEnabled, ntlmEnabled bool, compositionCounter *analysis.CompositionCounter) (*analysis.AtomicPostProcessingStats, error) { +func Post(ctx context.Context, db graph.Database, adcsEnabled, citrixEnabled, ntlmEnabled bool, compositionCounter *analysis.CompositionCounter) (*post.AtomicPostProcessingStats, error) { defer measure.ContextLogAndMeasure( ctx, slog.LevelInfo, @@ -39,7 +55,7 @@ func Post(ctx context.Context, db graph.Database, adcsEnabled, citrixEnabled, nt attr.Scope("step"), )() - aggregateStats := analysis.NewAtomicPostProcessingStats() + aggregateStats := post.NewAtomicPostProcessingStats() if err := adAnalysis.FixWellKnownNodeTypes(ctx, db); err != nil { return &aggregateStats, err diff --git a/cmd/api/src/analysis/azure/post.go b/cmd/api/src/analysis/azure/post.go index 3c1b2ccfafb..4264c718a23 100644 --- a/cmd/api/src/analysis/azure/post.go +++ b/cmd/api/src/analysis/azure/post.go @@ -23,6 +23,7 @@ 
import ( "github.com/specterops/bloodhound/packages/go/analysis" azureAnalysis "github.com/specterops/bloodhound/packages/go/analysis/azure" "github.com/specterops/bloodhound/packages/go/analysis/hybrid" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/bhlog/attr" "github.com/specterops/bloodhound/packages/go/bhlog/measure" "github.com/specterops/bloodhound/packages/go/graphschema/ad" @@ -30,7 +31,7 @@ import ( "github.com/specterops/dawgs/graph" ) -func Post(ctx context.Context, db graph.Database) (*analysis.AtomicPostProcessingStats, error) { +func Post(ctx context.Context, db graph.Database) (*post.AtomicPostProcessingStats, error) { defer measure.ContextLogAndMeasure( ctx, slog.LevelInfo, @@ -40,7 +41,7 @@ func Post(ctx context.Context, db graph.Database) (*analysis.AtomicPostProcessin attr.Scope("step"), )() - aggregateStats := analysis.NewAtomicPostProcessingStats() + aggregateStats := post.NewAtomicPostProcessingStats() if err := azureAnalysis.FixManagementGroupNames(ctx, db); err != nil { slog.WarnContext(ctx, "Error fixing management group names", attr.Error(err)) } diff --git a/packages/cue/bh/azure/azure.cue b/packages/cue/bh/azure/azure.cue index efe534b52cd..5a910ff86e0 100644 --- a/packages/cue/bh/azure/azure.cue +++ b/packages/cue/bh/azure/azure.cue @@ -952,11 +952,6 @@ PathfindingRelationships: list.Concat([InboundOutboundRelationshipKinds]) PostProcessedRelationships: [ AddSecret, ExecuteCommand, - ResetPassword, - AddMembers, - GlobalAdmin, - PrivilegedRoleAdmin, - PrivilegedAuthAdmin, AZMGAddMember, AZMGAddOwner, AZMGAddSecret, diff --git a/packages/go/analysis/ad/adcs.go b/packages/go/analysis/ad/adcs.go index 2f5c4987ecf..284da4bb1e1 100644 --- a/packages/go/analysis/ad/adcs.go +++ b/packages/go/analysis/ad/adcs.go @@ -23,6 +23,7 @@ import ( "log/slog" "github.com/specterops/bloodhound/packages/go/analysis" + "github.com/specterops/bloodhound/packages/go/analysis/post" 
"github.com/specterops/bloodhound/packages/go/bhlog/attr" "github.com/specterops/bloodhound/packages/go/bhlog/measure" "github.com/specterops/bloodhound/packages/go/graphschema/ad" @@ -35,7 +36,7 @@ var ( EkuCertRequestAgent = "1.3.6.1.4.1.311.20.2.1" ) -func PostADCS(ctx context.Context, db graph.Database, localGroupData *LocalGroupData, adcsEnabled bool) (*analysis.AtomicPostProcessingStats, ADCSCache, error) { +func PostADCS(ctx context.Context, db graph.Database, localGroupData *LocalGroupData, adcsEnabled bool) (*post.AtomicPostProcessingStats, ADCSCache, error) { defer measure.ContextLogAndMeasure( ctx, slog.LevelInfo, @@ -47,19 +48,19 @@ func PostADCS(ctx context.Context, db graph.Database, localGroupData *LocalGroup var cache = NewADCSCache() if enterpriseCertAuthorities, err := FetchNodesByKind(ctx, db, ad.EnterpriseCA); err != nil { - return &analysis.AtomicPostProcessingStats{}, cache, fmt.Errorf("failed fetching enterpriseCA nodes: %w", err) + return &post.AtomicPostProcessingStats{}, cache, fmt.Errorf("failed fetching enterpriseCA nodes: %w", err) } else if rootCertAuthorities, err := FetchNodesByKind(ctx, db, ad.RootCA); err != nil { - return &analysis.AtomicPostProcessingStats{}, cache, fmt.Errorf("failed fetching rootCA nodes: %w", err) + return &post.AtomicPostProcessingStats{}, cache, fmt.Errorf("failed fetching rootCA nodes: %w", err) } else if aiaCertAuthorities, err := FetchNodesByKind(ctx, db, ad.AIACA); err != nil { - return &analysis.AtomicPostProcessingStats{}, cache, fmt.Errorf("failed fetching AIACA nodes: %w", err) + return &post.AtomicPostProcessingStats{}, cache, fmt.Errorf("failed fetching AIACA nodes: %w", err) } else if certTemplates, err := FetchNodesByKind(ctx, db, ad.CertTemplate); err != nil { - return &analysis.AtomicPostProcessingStats{}, cache, fmt.Errorf("failed fetching cert template nodes: %w", err) + return &post.AtomicPostProcessingStats{}, cache, fmt.Errorf("failed fetching cert template nodes: %w", err) } else if 
step1Stats, err := postADCSPreProcessStep1(ctx, db, enterpriseCertAuthorities, rootCertAuthorities, aiaCertAuthorities, certTemplates); err != nil { - return &analysis.AtomicPostProcessingStats{}, cache, fmt.Errorf("failed adcs pre-processing step 1: %w", err) + return &post.AtomicPostProcessingStats{}, cache, fmt.Errorf("failed adcs pre-processing step 1: %w", err) } else if err := cache.BuildCache(ctx, db, enterpriseCertAuthorities, certTemplates); err != nil { - return &analysis.AtomicPostProcessingStats{}, cache, fmt.Errorf("failed building ADCS cache: %w", err) + return &post.AtomicPostProcessingStats{}, cache, fmt.Errorf("failed building ADCS cache: %w", err) } else if step2Stats, err := postADCSPreProcessStep2(ctx, db, cache); err != nil { - return &analysis.AtomicPostProcessingStats{}, cache, fmt.Errorf("failed adcs pre-processing step 2: %w", err) + return &post.AtomicPostProcessingStats{}, cache, fmt.Errorf("failed adcs pre-processing step 2: %w", err) } else { operation := analysis.NewPostRelationshipOperation(ctx, db, "ADCS Post Processing") @@ -84,41 +85,41 @@ func PostADCS(ctx context.Context, db graph.Database, localGroupData *LocalGroup } // postADCSPreProcessStep1 processes the edges that are not dependent on any other post-processed edges -func postADCSPreProcessStep1(ctx context.Context, db graph.Database, enterpriseCertAuthorities, rootCertAuthorities, aiaCertAuthorities, certTemplates []*graph.Node) (*analysis.AtomicPostProcessingStats, error) { +func postADCSPreProcessStep1(ctx context.Context, db graph.Database, enterpriseCertAuthorities, rootCertAuthorities, aiaCertAuthorities, certTemplates []*graph.Node) (*post.AtomicPostProcessingStats, error) { operation := analysis.NewPostRelationshipOperation(ctx, db, "ADCS Post Processing Step 1") // TODO clean up the operation.Done() calls below if err := PostTrustedForNTAuth(ctx, db, operation); err != nil { operation.Done() - return &analysis.AtomicPostProcessingStats{}, fmt.Errorf("failed post 
processing for %s: %w", ad.TrustedForNTAuth.String(), err) + return &post.AtomicPostProcessingStats{}, fmt.Errorf("failed post processing for %s: %w", ad.TrustedForNTAuth.String(), err) } else if err := PostIssuedSignedBy(operation, enterpriseCertAuthorities, rootCertAuthorities, aiaCertAuthorities); err != nil { operation.Done() - return &analysis.AtomicPostProcessingStats{}, fmt.Errorf("failed post processing for %s: %w", ad.IssuedSignedBy.String(), err) + return &post.AtomicPostProcessingStats{}, fmt.Errorf("failed post processing for %s: %w", ad.IssuedSignedBy.String(), err) } else if err := PostEnterpriseCAFor(operation, enterpriseCertAuthorities); err != nil { operation.Done() - return &analysis.AtomicPostProcessingStats{}, fmt.Errorf("failed post processing for %s: %w", ad.EnterpriseCAFor.String(), err) + return &post.AtomicPostProcessingStats{}, fmt.Errorf("failed post processing for %s: %w", ad.EnterpriseCAFor.String(), err) } else if err = PostExtendedByPolicyBinding(operation, certTemplates); err != nil { operation.Done() - return &analysis.AtomicPostProcessingStats{}, fmt.Errorf("failed post processing for %s: %w", ad.ExtendedByPolicy.String(), err) + return &post.AtomicPostProcessingStats{}, fmt.Errorf("failed post processing for %s: %w", ad.ExtendedByPolicy.String(), err) } else { return &operation.Stats, operation.Done() } } // postADCSPreProcessStep2 Processes the edges that are dependent on those processed in postADCSPreProcessStep1 -func postADCSPreProcessStep2(ctx context.Context, db graph.Database, cache ADCSCache) (*analysis.AtomicPostProcessingStats, error) { +func postADCSPreProcessStep2(ctx context.Context, db graph.Database, cache ADCSCache) (*post.AtomicPostProcessingStats, error) { operation := analysis.NewPostRelationshipOperation(ctx, db, "ADCS Post Processing Step 2") if err := PostEnrollOnBehalfOf(cache, operation); err != nil { operation.Done() - return &analysis.AtomicPostProcessingStats{}, fmt.Errorf("failed post processing for %s: 
%w", ad.EnrollOnBehalfOf.String(), err) + return &post.AtomicPostProcessingStats{}, fmt.Errorf("failed post processing for %s: %w", ad.EnrollOnBehalfOf.String(), err) } else { return &operation.Stats, operation.Done() } } -func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, targetDomains *graph.NodeSet, localGroupData *LocalGroupData, cache ADCSCache, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob]) { - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { +func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, targetDomains *graph.NodeSet, localGroupData *LocalGroupData, cache ADCSCache, operation analysis.StatTrackedOperation[post.EnsureRelationshipJob]) { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := PostGoldenCert(ctx, tx, outC, enterpriseCA, targetDomains); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.GoldenCert.String(), err)) } else if err != nil { @@ -127,7 +128,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar return nil }) - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := PostADCSESC1(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC1.String(), err)) } else if err != nil { @@ -136,7 +137,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar return nil }) - operation.Operation.SubmitReader(func(ctx context.Context, 
tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := PostADCSESC3(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC3.String(), err)) } else if err != nil { @@ -145,7 +146,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar return nil }) - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := PostADCSESC4(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC4.String(), err)) } else if err != nil { @@ -154,7 +155,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar return nil }) - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := PostADCSESC6a(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC6a.String(), err)) } else if err != nil { @@ -163,7 +164,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar return nil }) - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + 
operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := PostADCSESC6b(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC6b.String(), err)) } else if err != nil { @@ -172,7 +173,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar return nil }) - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := PostADCSESC9a(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC9a.String(), err)) } else if err != nil { @@ -181,7 +182,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar return nil }) - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := PostADCSESC9b(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC9b.String(), err)) } else if err != nil { @@ -190,7 +191,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar return nil }) - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC 
chan<- post.EnsureRelationshipJob) error { if err := PostADCSESC10a(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC10a.String(), err)) } else if err != nil { @@ -199,7 +200,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar return nil }) - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := PostADCSESC10b(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC10b.String(), err)) } else if err != nil { @@ -208,7 +209,7 @@ func processEnterpriseCAWithValidCertChainToDomain(enterpriseCA *graph.Node, tar return nil }) - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if err := PostADCSESC13(ctx, tx, outC, localGroupData, enterpriseCA, targetDomains, cache); errors.Is(err, graph.ErrPropertyNotFound) { slog.WarnContext(ctx, fmt.Sprintf("Post processing for %s: %v", ad.ADCSESC13.String(), err)) } else if err != nil { diff --git a/packages/go/analysis/ad/esc1.go b/packages/go/analysis/ad/esc1.go index f1df6a23c82..c11ba0de13e 100644 --- a/packages/go/analysis/ad/esc1.go +++ b/packages/go/analysis/ad/esc1.go @@ -23,6 +23,7 @@ import ( "sync" "github.com/specterops/bloodhound/packages/go/analysis" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/graphschema/ad" 
"github.com/specterops/dawgs/cardinality" "github.com/specterops/dawgs/graph" @@ -32,7 +33,7 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostADCSESC1(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC1(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob, localGroupData *LocalGroupData, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { results := cardinality.NewBitmap64() if publishedCertTemplates := cache.GetPublishedTemplateCache(enterpriseCA.ID); len(publishedCertTemplates) == 0 { return nil @@ -52,7 +53,7 @@ func PostADCSESC1(ctx context.Context, tx graph.Transaction, outC chan<- analysi results.Each(func(value uint64) bool { for _, domain := range targetDomains.Slice() { - channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: graph.ID(value), ToID: domain.ID, Kind: ad.ADCSESC1, diff --git a/packages/go/analysis/ad/esc10.go b/packages/go/analysis/ad/esc10.go index 858d4d94a5f..9ea03b3ab5e 100644 --- a/packages/go/analysis/ad/esc10.go +++ b/packages/go/analysis/ad/esc10.go @@ -23,6 +23,7 @@ import ( "sync" "github.com/specterops/bloodhound/packages/go/analysis" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/ein" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/dawgs/cardinality" @@ -33,7 +34,7 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostADCSESC10a(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, eca *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC10a(ctx context.Context, tx graph.Transaction, outC chan<- 
post.EnsureRelationshipJob, localGroupData *LocalGroupData, eca *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { if publishedCertTemplates := cache.GetPublishedTemplateCache(eca.ID); len(publishedCertTemplates) == 0 { return nil } else if ecaEnrollers := cache.GetEnterpriseCAEnrollers(eca.ID); len(ecaEnrollers) == 0 { @@ -68,7 +69,7 @@ func PostADCSESC10a(ctx context.Context, tx graph.Transaction, outC chan<- analy results.Each(func(value uint64) bool { for _, domain := range targetDomains.Slice() { if cache.HasUPNCertMappingInForest(domain.ID.Uint64()) { - channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: graph.ID(value), ToID: domain.ID, Kind: ad.ADCSESC10a, @@ -81,7 +82,7 @@ func PostADCSESC10a(ctx context.Context, tx graph.Transaction, outC chan<- analy return nil } -func PostADCSESC10b(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC10b(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob, localGroupData *LocalGroupData, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { if publishedCertTemplates := cache.GetPublishedTemplateCache(enterpriseCA.ID); len(publishedCertTemplates) == 0 { return nil } else if ecaEnrollers := cache.GetEnterpriseCAEnrollers(enterpriseCA.ID); len(ecaEnrollers) == 0 { @@ -113,7 +114,7 @@ func PostADCSESC10b(ctx context.Context, tx graph.Transaction, outC chan<- analy results.Each(func(value uint64) bool { for _, domain := range targetDomains.Slice() { if cache.HasUPNCertMappingInForest(domain.ID.Uint64()) { - channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: graph.ID(value), ToID: domain.ID, Kind: ad.ADCSESC10b, diff --git 
a/packages/go/analysis/ad/esc13.go b/packages/go/analysis/ad/esc13.go index 3d70f510dbb..ba77c8c6c41 100644 --- a/packages/go/analysis/ad/esc13.go +++ b/packages/go/analysis/ad/esc13.go @@ -24,6 +24,7 @@ import ( "sync" "github.com/specterops/bloodhound/packages/go/analysis" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/dawgs/cardinality" "github.com/specterops/dawgs/graph" @@ -33,7 +34,7 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostADCSESC13(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, eca *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC13(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob, localGroupData *LocalGroupData, eca *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { if publishedCertTemplates := cache.GetPublishedTemplateCache(eca.ID); len(publishedCertTemplates) == 0 { return nil } else { @@ -59,7 +60,7 @@ func PostADCSESC13(ctx context.Context, tx graph.Transaction, outC chan<- analys for _, domain := range targetDomains.Slice() { if groupIsContainedOrTrusted(tx, group, domain) { filtered.Each(func(value uint64) bool { - channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: graph.ID(value), ToID: group.ID, Kind: ad.ADCSESC13, diff --git a/packages/go/analysis/ad/esc3.go b/packages/go/analysis/ad/esc3.go index 182671e75a7..dcce66fa948 100644 --- a/packages/go/analysis/ad/esc3.go +++ b/packages/go/analysis/ad/esc3.go @@ -25,6 +25,7 @@ import ( "sync" "github.com/specterops/bloodhound/packages/go/analysis" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/bhlog/attr" "github.com/specterops/bloodhound/packages/go/graphschema/ad" 
"github.com/specterops/dawgs/cardinality" @@ -35,7 +36,7 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, eca2 *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob, localGroupData *LocalGroupData, eca2 *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { results := cardinality.NewBitmap64() if publishedCertTemplates := cache.GetPublishedTemplateCache(eca2.ID); len(publishedCertTemplates) == 0 { return nil @@ -127,7 +128,7 @@ func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysi results.Each(func(value uint64) bool { for _, domain := range targetDomains.Slice() { - channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: graph.ID(value), ToID: domain.ID, Kind: ad.ADCSESC3, @@ -139,7 +140,7 @@ func PostADCSESC3(ctx context.Context, tx graph.Transaction, outC chan<- analysi return nil } -func PostEnrollOnBehalfOf(cache ADCSCache, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob]) error { +func PostEnrollOnBehalfOf(cache ADCSCache, operation analysis.StatTrackedOperation[post.EnsureRelationshipJob]) error { versionOneTemplates := make([]*graph.Node, 0) versionTwoTemplates := make([]*graph.Node, 0) for _, node := range cache.GetCertTemplates() { @@ -166,7 +167,7 @@ func PostEnrollOnBehalfOf(cache ADCSCache, operation analysis.StatTrackedOperati if publishedCertTemplates := cache.GetPublishedTemplateCache(enterpriseCA.ID); len(publishedCertTemplates) == 0 { return nil } else { - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, 
tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if results, err := EnrollOnBehalfOfVersionTwo(tx, versionTwoTemplates, publishedCertTemplates, innerDomain); err != nil { return err } else { @@ -180,7 +181,7 @@ func PostEnrollOnBehalfOf(cache ADCSCache, operation analysis.StatTrackedOperati } }) - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if results, err := EnrollOnBehalfOfVersionOne(tx, versionOneTemplates, publishedCertTemplates, innerDomain); err != nil { return err } else { @@ -201,8 +202,8 @@ func PostEnrollOnBehalfOf(cache ADCSCache, operation analysis.StatTrackedOperati return nil } -func EnrollOnBehalfOfVersionTwo(tx graph.Transaction, versionTwoCertTemplates, publishedTemplates []*graph.Node, domainNode *graph.Node) ([]analysis.CreatePostRelationshipJob, error) { - results := make([]analysis.CreatePostRelationshipJob, 0) +func EnrollOnBehalfOfVersionTwo(tx graph.Transaction, versionTwoCertTemplates, publishedTemplates []*graph.Node, domainNode *graph.Node) ([]post.EnsureRelationshipJob, error) { + results := make([]post.EnsureRelationshipJob, 0) for _, certTemplateOne := range publishedTemplates { if hasBadEku, err := certTemplateHasEku(certTemplateOne, EkuAnyPurpose); errors.Is(err, graph.ErrPropertyNotFound) { slog.Warn(fmt.Sprintf("Did not get EffectiveEKUs for cert template %d: %v", certTemplateOne.ID, err)) @@ -233,7 +234,7 @@ func EnrollOnBehalfOfVersionTwo(tx graph.Transaction, versionTwoCertTemplates, p } else if !isLinked { continue } else { - results = append(results, analysis.CreatePostRelationshipJob{ + results = append(results, post.EnsureRelationshipJob{ FromID: certTemplateOne.ID, ToID: certTemplateTwo.ID, Kind: ad.EnrollOnBehalfOf, @@ -262,8 +263,8 @@ func certTemplateHasEku(certTemplate 
*graph.Node, targetEkus ...string) (bool, e } } -func EnrollOnBehalfOfVersionOne(tx graph.Transaction, versionOneCertTemplates []*graph.Node, publishedTemplates []*graph.Node, domainNode *graph.Node) ([]analysis.CreatePostRelationshipJob, error) { - results := make([]analysis.CreatePostRelationshipJob, 0) +func EnrollOnBehalfOfVersionOne(tx graph.Transaction, versionOneCertTemplates []*graph.Node, publishedTemplates []*graph.Node, domainNode *graph.Node) ([]post.EnsureRelationshipJob, error) { + results := make([]post.EnsureRelationshipJob, 0) for _, certTemplateOne := range publishedTemplates { //prefilter as much as we can first @@ -280,7 +281,7 @@ func EnrollOnBehalfOfVersionOne(tx graph.Transaction, versionOneCertTemplates [] } else if !hasPath { continue } else { - results = append(results, analysis.CreatePostRelationshipJob{ + results = append(results, post.EnsureRelationshipJob{ FromID: certTemplateOne.ID, ToID: certTemplateTwo.ID, Kind: ad.EnrollOnBehalfOf, diff --git a/packages/go/analysis/ad/esc4.go b/packages/go/analysis/ad/esc4.go index 2ddd03b9b54..cd0e697668f 100644 --- a/packages/go/analysis/ad/esc4.go +++ b/packages/go/analysis/ad/esc4.go @@ -23,6 +23,7 @@ import ( "sync" "github.com/specterops/bloodhound/packages/go/analysis" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/dawgs/cardinality" "github.com/specterops/dawgs/graph" @@ -32,7 +33,7 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostADCSESC4(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC4(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob, localGroupData *LocalGroupData, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { // 1. 
principals := cardinality.NewBitmap64() publishedTemplates := cache.GetPublishedTemplateCache(enterpriseCA.ID) @@ -121,7 +122,7 @@ func PostADCSESC4(ctx context.Context, tx graph.Transaction, outC chan<- analysi principals.Each(func(value uint64) bool { for _, domain := range targetDomains.Slice() { - channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: graph.ID(value), ToID: domain.ID, Kind: ad.ADCSESC4, diff --git a/packages/go/analysis/ad/esc6.go b/packages/go/analysis/ad/esc6.go index 9198bec5f3e..416b80d6bd3 100644 --- a/packages/go/analysis/ad/esc6.go +++ b/packages/go/analysis/ad/esc6.go @@ -22,6 +22,7 @@ import ( "log/slog" "sync" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/ein" "github.com/specterops/bloodhound/packages/go/analysis" @@ -34,7 +35,7 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostADCSESC6a(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC6a(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob, localGroupData *LocalGroupData, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { if isUserSpecifiesSanEnabledCollected, err := enterpriseCA.Properties.Get(ad.IsUserSpecifiesSanEnabledCollected.String()).Bool(); err != nil { return err } else if !isUserSpecifiesSanEnabledCollected { @@ -64,7 +65,7 @@ func PostADCSESC6a(ctx context.Context, tx graph.Transaction, outC chan<- analys } else { filteredEnrollers.Each(func(value uint64) bool { for _, domain := range targetDomains.Slice() { - channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: graph.ID(value), ToID: domain.ID, Kind: ad.ADCSESC6a, @@ 
-79,7 +80,7 @@ func PostADCSESC6a(ctx context.Context, tx graph.Transaction, outC chan<- analys return nil } -func PostADCSESC6b(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC6b(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob, localGroupData *LocalGroupData, enterpriseCA *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { if isUserSpecifiesSanEnabledCollected, err := enterpriseCA.Properties.Get(ad.IsUserSpecifiesSanEnabledCollected.String()).Bool(); err != nil { return err } else if !isUserSpecifiesSanEnabledCollected { @@ -110,7 +111,7 @@ func PostADCSESC6b(ctx context.Context, tx graph.Transaction, outC chan<- analys filteredEnrollers.Each(func(value uint64) bool { for _, domain := range targetDomains.Slice() { if cache.HasUPNCertMappingInForest(domain.ID.Uint64()) { - channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: graph.ID(value), ToID: domain.ID, Kind: ad.ADCSESC6b, diff --git a/packages/go/analysis/ad/esc9.go b/packages/go/analysis/ad/esc9.go index 91f2bdf90ff..b2f03762787 100644 --- a/packages/go/analysis/ad/esc9.go +++ b/packages/go/analysis/ad/esc9.go @@ -23,6 +23,7 @@ import ( "sync" "github.com/specterops/bloodhound/packages/go/analysis" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/dawgs/cardinality" "github.com/specterops/dawgs/graph" @@ -32,7 +33,7 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostADCSESC9a(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, eca *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC9a(ctx 
context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob, localGroupData *LocalGroupData, eca *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { results := cardinality.NewBitmap64() if publishedCertTemplates := cache.GetPublishedTemplateCache(eca.ID); len(publishedCertTemplates) == 0 { @@ -67,7 +68,7 @@ func PostADCSESC9a(ctx context.Context, tx graph.Transaction, outC chan<- analys results.Each(func(value uint64) bool { for _, domain := range targetDomains.Slice() { if cache.HasWeakCertBindingInForest(domain.ID.Uint64()) { - channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: graph.ID(value), ToID: domain.ID, Kind: ad.ADCSESC9a, @@ -81,7 +82,7 @@ func PostADCSESC9a(ctx context.Context, tx graph.Transaction, outC chan<- analys } } -func PostADCSESC9b(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, localGroupData *LocalGroupData, eca *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { +func PostADCSESC9b(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob, localGroupData *LocalGroupData, eca *graph.Node, targetDomains *graph.NodeSet, cache ADCSCache) error { results := cardinality.NewBitmap64() if publishedCertTemplates := cache.GetPublishedTemplateCache(eca.ID); len(publishedCertTemplates) == 0 { @@ -113,7 +114,7 @@ func PostADCSESC9b(ctx context.Context, tx graph.Transaction, outC chan<- analys results.Each(func(value uint64) bool { for _, domain := range targetDomains.Slice() { if cache.HasWeakCertBindingInForest(domain.ID.Uint64()) { - channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: graph.ID(value), ToID: domain.ID, Kind: ad.ADCSESC9b, diff --git a/packages/go/analysis/ad/esc_shared.go b/packages/go/analysis/ad/esc_shared.go index 7df941965db..92ef4559cc8 100644 --- 
a/packages/go/analysis/ad/esc_shared.go +++ b/packages/go/analysis/ad/esc_shared.go @@ -25,6 +25,7 @@ import ( "strings" "github.com/specterops/bloodhound/packages/go/analysis" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/graphschema/ad" "github.com/specterops/bloodhound/packages/go/slicesext" "github.com/specterops/dawgs/cardinality" @@ -34,14 +35,14 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostTrustedForNTAuth(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob]) error { +func PostTrustedForNTAuth(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[post.EnsureRelationshipJob]) error { if ntAuthStoreNodes, err := FetchNodesByKind(ctx, db, ad.NTAuthStore); err != nil { return err } else { for _, node := range ntAuthStoreNodes { innerNode := node - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if thumbprints, err := innerNode.Properties.Get(ad.CertThumbprints.String()).StringSlice(); err != nil { if strings.Contains(err.Error(), graph.ErrPropertyNotFound.Error()) { slog.WarnContext(ctx, fmt.Sprintf("Unable to post-process TrustedForNTAuth edge for NTAuthStore node %d due to missing adcs data: %v", innerNode.ID, err)) @@ -55,7 +56,7 @@ func PostTrustedForNTAuth(ctx context.Context, db graph.Database, operation anal return err } else { for _, sourceNodeID := range sourceNodeIDs { - if !channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + if !channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: sourceNodeID, ToID: innerNode.ID, Kind: ad.TrustedForNTAuth, @@ -75,8 +76,8 @@ func PostTrustedForNTAuth(ctx context.Context, db graph.Database, operation anal 
return nil } -func PostIssuedSignedBy(operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], enterpriseCertAuthorities []*graph.Node, rootCertAuthorities []*graph.Node, aiaCertAuthorities []*graph.Node) error { - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { +func PostIssuedSignedBy(operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], enterpriseCertAuthorities []*graph.Node, rootCertAuthorities []*graph.Node, aiaCertAuthorities []*graph.Node) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, node := range enterpriseCertAuthorities { if postRels, err := processCertChainParent(node, tx); err != nil && !errors.Is(err, ErrNoCertParent) { return err @@ -94,7 +95,7 @@ func PostIssuedSignedBy(operation analysis.StatTrackedOperation[analysis.CreateP return nil }) - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, node := range rootCertAuthorities { if postRels, err := processCertChainParent(node, tx); err != nil && !errors.Is(err, ErrNoCertParent) { return err @@ -112,7 +113,7 @@ func PostIssuedSignedBy(operation analysis.StatTrackedOperation[analysis.CreateP return nil }) - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, node := range aiaCertAuthorities { if postRels, err := processCertChainParent(node, tx); err != nil && !errors.Is(err, ErrNoCertParent) { return err @@ -133,8 +134,8 @@ func 
PostIssuedSignedBy(operation analysis.StatTrackedOperation[analysis.CreateP return nil } -func PostEnterpriseCAFor(operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], enterpriseCertAuthorities []*graph.Node) error { - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { +func PostEnterpriseCAFor(operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], enterpriseCertAuthorities []*graph.Node) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, ecaNode := range enterpriseCertAuthorities { if thumbprint, err := ecaNode.Properties.Get(ad.CertThumbprint.String()).String(); err != nil { if graph.IsErrPropertyNotFound(err) { @@ -146,7 +147,7 @@ func PostEnterpriseCAFor(operation analysis.StatTrackedOperation[analysis.Create return err } else { for _, rootCANodeID := range rootCAIDs { - if !channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + if !channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: ecaNode.ID, ToID: rootCANodeID, Kind: ad.EnterpriseCAFor, @@ -159,7 +160,7 @@ func PostEnterpriseCAFor(operation analysis.StatTrackedOperation[analysis.Create return err } else { for _, aiaCANodeID := range aiaCAIDs { - if !channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + if !channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: ecaNode.ID, ToID: aiaCANodeID, Kind: ad.EnterpriseCAFor, @@ -175,13 +176,13 @@ func PostEnterpriseCAFor(operation analysis.StatTrackedOperation[analysis.Create return nil } -func PostGoldenCert(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, enterpriseCA *graph.Node, targetDomains *graph.NodeSet) error { +func PostGoldenCert(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob, enterpriseCA *graph.Node, targetDomains 
*graph.NodeSet) error { if hostCAServiceComputers, err := FetchHostsCAServiceComputers(tx, enterpriseCA); err != nil { slog.ErrorContext(ctx, fmt.Sprintf("Error fetching host ca computer for enterprise ca %d: %v", enterpriseCA.ID, err)) } else { for _, computer := range hostCAServiceComputers { for _, domain := range targetDomains.Slice() { - channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: computer.ID, ToID: domain.ID, Kind: ad.GoldenCert, @@ -192,8 +193,8 @@ func PostGoldenCert(ctx context.Context, tx graph.Transaction, outC chan<- analy return nil } -func PostExtendedByPolicyBinding(operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], certTemplates []*graph.Node) error { - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { +func PostExtendedByPolicyBinding(operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], certTemplates []*graph.Node) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if allIssuancePolicies, err := fetchAllIssuancePolicies(tx); err != nil { return err } else { @@ -215,7 +216,7 @@ func PostExtendedByPolicyBinding(operation analysis.StatTrackedOperation[analysi continue } else if certTemplateDomain != "" && certTemplateDomain == issuancePolicyDomain { // Create ExtendedByPolicy edge - if !channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + if !channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: certTemplate.ID, ToID: issuancePolicy.ID, Kind: ad.ExtendedByPolicy, @@ -263,19 +264,19 @@ func getIssuancePolicyCertOIDMap(issuancePolicies graph.NodeSet) map[string][]gr return oidMap } -func processCertChainParent(node *graph.Node, tx graph.Transaction) ([]analysis.CreatePostRelationshipJob, error) { +func processCertChainParent(node 
*graph.Node, tx graph.Transaction) ([]post.EnsureRelationshipJob, error) { if certChain, err := node.Properties.Get(ad.CertChain.String()).StringSlice(); err != nil { if errors.Is(err, graph.ErrPropertyNotFound) { - return []analysis.CreatePostRelationshipJob{}, nil + return []post.EnsureRelationshipJob{}, nil } - return []analysis.CreatePostRelationshipJob{}, err + return []post.EnsureRelationshipJob{}, err } else if len(certChain) > 1 { parentCert := certChain[1] if targetNodes, err := findNodesByCertThumbprint(parentCert, tx, ad.EnterpriseCA, ad.RootCA, ad.AIACA); err != nil { - return []analysis.CreatePostRelationshipJob{}, err + return []post.EnsureRelationshipJob{}, err } else { - return slicesext.Map(targetNodes, func(nodeId graph.ID) analysis.CreatePostRelationshipJob { - return analysis.CreatePostRelationshipJob{ + return slicesext.Map(targetNodes, func(nodeId graph.ID) post.EnsureRelationshipJob { + return post.EnsureRelationshipJob{ FromID: node.ID, ToID: nodeId, Kind: ad.IssuedSignedBy, @@ -283,7 +284,7 @@ func processCertChainParent(node *graph.Node, tx graph.Transaction) ([]analysis. 
}), nil } } else { - return []analysis.CreatePostRelationshipJob{}, ErrNoCertParent + return []post.EnsureRelationshipJob{}, ErrNoCertParent } } diff --git a/packages/go/analysis/ad/local_groups.go b/packages/go/analysis/ad/local_groups.go index ad4dbaba258..0df3b6e726f 100644 --- a/packages/go/analysis/ad/local_groups.go +++ b/packages/go/analysis/ad/local_groups.go @@ -23,6 +23,7 @@ import ( "sync/atomic" "github.com/specterops/bloodhound/packages/go/analysis" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/bhlog/attr" "github.com/specterops/bloodhound/packages/go/bhlog/measure" "github.com/specterops/bloodhound/packages/go/graphschema/ad" @@ -31,16 +32,16 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostCanRDP(parentCtx context.Context, graphDB graph.Database, localGroupData *LocalGroupData, enforceURA bool, citrixEnabled bool) (*analysis.AtomicPostProcessingStats, error) { +func PostCanRDP(parentCtx context.Context, graphDB graph.Database, localGroupData *LocalGroupData, enforceURA bool, citrixEnabled bool) (*post.AtomicPostProcessingStats, error) { var ( ctx, done = context.WithCancel(parentCtx) - stats = analysis.NewAtomicPostProcessingStats() + stats = post.NewAtomicPostProcessingStats() numComputersProcessed = &atomic.Uint64{} workC = make(chan uint64) workerWG sync.WaitGroup computerC = make(chan *CanRDPComputerData) computerWG sync.WaitGroup - postC = make(chan analysis.CreatePostRelationshipJob, 4096) + postC = make(chan post.EnsureRelationshipJob, 4096) postWG sync.WaitGroup submitStatusf = util.SLogSampleRepeated("PostCanRDP") @@ -118,7 +119,7 @@ func PostCanRDP(parentCtx context.Context, graphDB graph.Database, localGroupDat done() } else { rdpEntities.Each(func(fromID uint64) bool { - return channels.Submit(ctx, postC, analysis.CreatePostRelationshipJob{ + return channels.Submit(ctx, postC, post.EnsureRelationshipJob{ FromID: graph.ID(fromID), ToID: 
nextComputerRDPJob.Computer, Kind: ad.CanRDP, @@ -190,7 +191,7 @@ func PostCanRDP(parentCtx context.Context, graphDB graph.Database, localGroupDat return &stats, nil } -func PostLocalGroups(parentCtx context.Context, graphDB graph.Database, localGroupData *LocalGroupData) (*analysis.AtomicPostProcessingStats, error) { +func PostLocalGroups(parentCtx context.Context, graphDB graph.Database, localGroupData *LocalGroupData) (*post.AtomicPostProcessingStats, error) { const ( adminGroupSuffix = "-544" psRemoteGroupSuffix = "-580" @@ -205,10 +206,10 @@ func PostLocalGroups(parentCtx context.Context, graphDB graph.Database, localGro var ( ctx, done = context.WithCancel(parentCtx) - stats = analysis.NewAtomicPostProcessingStats() + stats = post.NewAtomicPostProcessingStats() computerC = make(chan uint64) reachC = make(chan reachJob, 4096) - postC = make(chan analysis.CreatePostRelationshipJob, 4096) + postC = make(chan post.EnsureRelationshipJob, 4096) numGroupsProcessed = &atomic.Uint64{} numComputersProcessed = &atomic.Uint64{} submitStatusf = util.SLogSampleRepeated("PostLocalGroups") @@ -288,7 +289,7 @@ func PostLocalGroups(parentCtx context.Context, graphDB graph.Database, localGro } localGroupData.LocalGroupMembershipDigraph.EachAdjacentNode(nextJob.targetGroup, graph.DirectionInbound, func(fromID uint64) bool { - return channels.Submit(ctx, postC, analysis.CreatePostRelationshipJob{ + return channels.Submit(ctx, postC, post.EnsureRelationshipJob{ FromID: graph.ID(fromID), ToID: graph.ID(nextJob.targetComputer), Kind: edgeKind, diff --git a/packages/go/analysis/ad/ntlm.go b/packages/go/analysis/ad/ntlm.go index ceadcc4b399..6ae0171a93f 100644 --- a/packages/go/analysis/ad/ntlm.go +++ b/packages/go/analysis/ad/ntlm.go @@ -28,6 +28,7 @@ import ( "github.com/specterops/bloodhound/packages/go/analysis" "github.com/specterops/bloodhound/packages/go/analysis/ad/wellknown" + "github.com/specterops/bloodhound/packages/go/analysis/post" 
"github.com/specterops/bloodhound/packages/go/bhlog/attr" "github.com/specterops/bloodhound/packages/go/bhlog/measure" "github.com/specterops/bloodhound/packages/go/graphschema/ad" @@ -125,7 +126,7 @@ func NewNTLMCache(ctx context.Context, db graph.Database, localGroupData *LocalG } // PostNTLM is the initial function used to execute our NTLM analysis -func PostNTLM(ctx context.Context, db graph.Database, localGroupData *LocalGroupData, adcsCache ADCSCache, ntlmEnabled bool, compositionCounter *analysis.CompositionCounter) (*analysis.AtomicPostProcessingStats, error) { +func PostNTLM(ctx context.Context, db graph.Database, localGroupData *LocalGroupData, adcsCache ADCSCache, ntlmEnabled bool, compositionCounter *analysis.CompositionCounter) (*post.AtomicPostProcessingStats, error) { defer measure.ContextLogAndMeasure( ctx, slog.LevelInfo, @@ -185,7 +186,7 @@ func PostNTLM(ctx context.Context, db graph.Database, localGroupData *LocalGroup } else if authenticatedUserGroupID, ok := ntlmCache.GetAuthenticatedUserGroupForDomain(domainSid); !ok { continue } else { - if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { return PostCoerceAndRelayNTLMToSMB(tx, outC, ntlmCache, innerComputer, authenticatedUserGroupID) }); err != nil { slog.WarnContext(ctx, fmt.Sprintf("Post processing failed for %s: %v", ad.CoerceAndRelayNTLMToSMB, err)) @@ -198,7 +199,7 @@ func PostNTLM(ctx context.Context, db graph.Database, localGroupData *LocalGroup continue } - if err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + if err = operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { return 
PostCoerceAndRelayNTLMToLDAP(outC, innerComputer, authenticatedUserGroupID, ntlmCache.LdapCache) }); err != nil { slog.WarnContext(ctx, fmt.Sprintf("Post processing failed for %s: %v", ad.CoerceAndRelayNTLMToLDAP, err)) @@ -378,12 +379,12 @@ func coerceAndRelayNTLMtoADCSPath2Pattern(domainID graph.ID, enterpriseCAs cardi )) } -func PostCoerceAndRelayNTLMToADCS(adcsCache ADCSCache, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], ntlmCache NTLMCache) error { +func PostCoerceAndRelayNTLMToADCS(adcsCache ADCSCache, operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], ntlmCache NTLMCache) error { for _, outerDomain := range adcsCache.GetDomains() { for _, outerEnterpriseCA := range adcsCache.GetEnterpriseCertAuthorities() { domain := outerDomain enterpriseCA := outerEnterpriseCA - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if publishedCertTemplates := adcsCache.GetPublishedTemplateCache(enterpriseCA.ID); len(publishedCertTemplates) == 0 { // If this enterprise CA has no published templates, then there's no reason to check further return nil @@ -479,7 +480,7 @@ func PostCoerceAndRelayNTLMToADCS(adcsCache ADCSCache, operation analysis.StatTr } results.Each(func(value uint64) bool { - outC <- analysis.CreatePostRelationshipJob{ + outC <- post.EnsureRelationshipJob{ FromID: authUsersGroup, ToID: graph.ID(value), Kind: ad.CoerceAndRelayNTLMToADCS, @@ -582,7 +583,7 @@ func GetCoerceAndRelayNTLMtoSMBEdgeComposition(ctx context.Context, db graph.Dat // PostCoerceAndRelayNTLMToSMB creates edges that allow a computer with unrolled admin access to one or more computers where SMB signing is disabled. 
// Comprised solely of adminTo and memberOf edges -func PostCoerceAndRelayNTLMToSMB(tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob, ntlmCache NTLMCache, computer *graph.Node, authenticatedUserID graph.ID) error { +func PostCoerceAndRelayNTLMToSMB(tx graph.Transaction, outC chan<- post.EnsureRelationshipJob, ntlmCache NTLMCache, computer *graph.Node, authenticatedUserID graph.ID) error { if smbSigningEnabled, err := computer.Properties.Get(ad.SMBSigning.String()).Bool(); errors.Is(err, graph.ErrPropertyNotFound) { return nil } else if err != nil { @@ -608,7 +609,7 @@ func PostCoerceAndRelayNTLMToSMB(tx graph.Transaction, outC chan<- analysis.Crea allAdminPrincipals.Remove(computer.ID.Uint64()) if allAdminPrincipals.Cardinality() > 0 { - outC <- analysis.CreatePostRelationshipJob{ + outC <- post.EnsureRelationshipJob{ FromID: authenticatedUserID, ToID: computer.ID, Kind: ad.CoerceAndRelayNTLMToSMB, @@ -783,7 +784,7 @@ func GetCoercionTargetsForCoerceAndRelayNTLMtoSMB(ctx context.Context, db graph. // PostCoerceAndRelayNTLMToLDAP creates edges where an authenticated user group, for a given domain, is able to target the provided computer. 
// This will create either a CoerceAndRelayNTLMToLDAP or CoerceAndRelayNTLMToLDAPS edges, depending on the ldapSigning property of the domain -func PostCoerceAndRelayNTLMToLDAP(outC chan<- analysis.CreatePostRelationshipJob, computer *graph.Node, authenticatedUserGroupID graph.ID, ldapSigningCache map[string]LDAPSigningCache) error { +func PostCoerceAndRelayNTLMToLDAP(outC chan<- post.EnsureRelationshipJob, computer *graph.Node, authenticatedUserGroupID graph.ID, ldapSigningCache map[string]LDAPSigningCache) error { // webclientrunning must be set to true for the computer's properties in order for this attack path to be viable // If the property is not found, we will assume false if webClientRunning, err := computer.Properties.Get(ad.WebClientRunning.String()).Bool(); err != nil && !errors.Is(err, graph.ErrPropertyNotFound) { @@ -804,13 +805,13 @@ func PostCoerceAndRelayNTLMToLDAP(outC chan<- analysis.CreatePostRelationshipJob // for both LDAP and LDAPS scenarios, assuming the passed in signingCache has any vulnerable paths // We also ignore instances where the computer is relaying to itself if len(signingCache.relayableToDCLDAP) == 1 && signingCache.relayableToDCLDAP[0] != computer.ID { - outC <- analysis.CreatePostRelationshipJob{ + outC <- post.EnsureRelationshipJob{ FromID: authenticatedUserGroupID, ToID: computer.ID, Kind: ad.CoerceAndRelayNTLMToLDAP, } } else if len(signingCache.relayableToDCLDAP) > 1 { - outC <- analysis.CreatePostRelationshipJob{ + outC <- post.EnsureRelationshipJob{ FromID: authenticatedUserGroupID, ToID: computer.ID, Kind: ad.CoerceAndRelayNTLMToLDAP, @@ -818,13 +819,13 @@ func PostCoerceAndRelayNTLMToLDAP(outC chan<- analysis.CreatePostRelationshipJob } if len(signingCache.relayableToDCLDAPS) == 1 && signingCache.relayableToDCLDAPS[0] != computer.ID { - outC <- analysis.CreatePostRelationshipJob{ + outC <- post.EnsureRelationshipJob{ FromID: authenticatedUserGroupID, ToID: computer.ID, Kind: ad.CoerceAndRelayNTLMToLDAPS, } } else if 
len(signingCache.relayableToDCLDAPS) > 1 { - outC <- analysis.CreatePostRelationshipJob{ + outC <- post.EnsureRelationshipJob{ FromID: authenticatedUserGroupID, ToID: computer.ID, Kind: ad.CoerceAndRelayNTLMToLDAPS, diff --git a/packages/go/analysis/ad/owns.go b/packages/go/analysis/ad/owns.go index 5b10ac7588b..ffbd11d17c2 100644 --- a/packages/go/analysis/ad/owns.go +++ b/packages/go/analysis/ad/owns.go @@ -26,6 +26,7 @@ import ( "github.com/specterops/bloodhound/packages/go/analysis" "github.com/specterops/bloodhound/packages/go/analysis/ad/wellknown" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/bhlog/attr" "github.com/specterops/bloodhound/packages/go/bhlog/measure" "github.com/specterops/bloodhound/packages/go/graphschema/ad" @@ -36,7 +37,7 @@ import ( "github.com/specterops/dawgs/query" ) -func PostOwnsAndWriteOwner(ctx context.Context, db graph.Database, localGroupData *LocalGroupData) (*analysis.AtomicPostProcessingStats, error) { +func PostOwnsAndWriteOwner(ctx context.Context, db graph.Database, localGroupData *LocalGroupData) (*post.AtomicPostProcessingStats, error) { defer measure.ContextLogAndMeasure( ctx, slog.LevelInfo, @@ -58,7 +59,7 @@ func PostOwnsAndWriteOwner(ctx context.Context, db graph.Database, localGroupDat } else { // Get all source nodes of Owns ACEs (i.e., owning principals) where the target node has no ACEs granting abusable explicit permissions to OWNER RIGHTS - if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if relationships, err := ops.FetchRelationships(tx.Relationships().Filterf(func() graph.Criteria { return query.And( query.Kind(query.Relationship(), ad.OwnsRaw), @@ -114,7 +115,7 @@ func PostOwnsAndWriteOwner(ctx context.Context, 
db graph.Database, localGroupDat } // Get all source nodes of WriteOwner ACEs where the target node has no ACEs granting explicit abusable permissions to OWNER RIGHTS - if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if relationships, err := ops.FetchRelationships(tx.Relationships().Filterf(func() graph.Criteria { return query.And( @@ -173,11 +174,11 @@ func PostOwnsAndWriteOwner(ctx context.Context, db graph.Database, localGroupDat return &operation.Stats, operation.Done() } -func createPostRelFromRaw(rel *graph.Relationship, kind graph.Kind) analysis.CreatePostRelationshipJob { +func createPostRelFromRaw(rel *graph.Relationship, kind graph.Kind) post.EnsureRelationshipJob { isInherited, _ := rel.Properties.GetOrDefault(common.IsInherited.String(), false).Bool() inheritanceHash, _ := rel.Properties.GetOrDefault(ad.InheritanceHash.String(), "").String() - return analysis.CreatePostRelationshipJob{ + return post.EnsureRelationshipJob{ FromID: rel.StartID, ToID: rel.EndID, Kind: kind, diff --git a/packages/go/analysis/ad/post.go b/packages/go/analysis/ad/post.go index a7fc2129e38..95d021913b1 100644 --- a/packages/go/analysis/ad/post.go +++ b/packages/go/analysis/ad/post.go @@ -23,6 +23,7 @@ import ( "github.com/specterops/bloodhound/packages/go/analysis" "github.com/specterops/bloodhound/packages/go/analysis/ad/wellknown" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/bhlog/attr" "github.com/specterops/bloodhound/packages/go/bhlog/measure" "github.com/specterops/bloodhound/packages/go/graphschema/ad" @@ -36,7 +37,7 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostSyncLAPSPassword(ctx context.Context, db graph.Database, localGroupData *LocalGroupData) 
(*analysis.AtomicPostProcessingStats, error) { +func PostSyncLAPSPassword(ctx context.Context, db graph.Database, localGroupData *LocalGroupData) (*post.AtomicPostProcessingStats, error) { defer measure.ContextLogAndMeasure( ctx, slog.LevelInfo, @@ -47,12 +48,12 @@ func PostSyncLAPSPassword(ctx context.Context, db graph.Database, localGroupData )() if domainNodes, err := fetchCollectedDomainNodes(ctx, db); err != nil { - return &analysis.AtomicPostProcessingStats{}, err + return &post.AtomicPostProcessingStats{}, err } else { operation := analysis.NewPostRelationshipOperation(ctx, db, "SyncLAPSPassword Post Processing") for _, domain := range domainNodes { innerDomain := domain - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if lapsSyncers, err := getLAPSSyncers(tx, innerDomain, localGroupData); err != nil { return err } else if lapsSyncers.Cardinality() == 0 { @@ -62,7 +63,7 @@ func PostSyncLAPSPassword(ctx context.Context, db graph.Database, localGroupData } else { for _, computer := range computers { lapsSyncers.Each(func(value uint64) bool { - channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: graph.ID(value), ToID: computer, Kind: ad.SyncLAPSPassword, @@ -80,7 +81,7 @@ func PostSyncLAPSPassword(ctx context.Context, db graph.Database, localGroupData } } -func PostDCSync(ctx context.Context, db graph.Database, localGroupData *LocalGroupData) (*analysis.AtomicPostProcessingStats, error) { +func PostDCSync(ctx context.Context, db graph.Database, localGroupData *LocalGroupData) (*post.AtomicPostProcessingStats, error) { defer measure.ContextLogAndMeasure( ctx, slog.LevelInfo, @@ -91,20 +92,20 @@ func PostDCSync(ctx context.Context, db graph.Database, localGroupData 
*LocalGro )() if domainNodes, err := fetchCollectedDomainNodes(ctx, db); err != nil { - return &analysis.AtomicPostProcessingStats{}, err + return &post.AtomicPostProcessingStats{}, err } else { operation := analysis.NewPostRelationshipOperation(ctx, db, "DCSync Post Processing") for _, domain := range domainNodes { innerDomain := domain - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if dcSyncers, err := getDCSyncers(tx, innerDomain, localGroupData); err != nil { return err } else if dcSyncers.Cardinality() == 0 { return nil } else { dcSyncers.Each(func(value uint64) bool { - channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: graph.ID(value), ToID: innerDomain.ID, Kind: ad.DCSync, @@ -121,7 +122,7 @@ func PostDCSync(ctx context.Context, db graph.Database, localGroupData *LocalGro } } -func PostProtectAdminGroups(ctx context.Context, db graph.Database) (*analysis.AtomicPostProcessingStats, error) { +func PostProtectAdminGroups(ctx context.Context, db graph.Database) (*post.AtomicPostProcessingStats, error) { defer measure.ContextLogAndMeasure( ctx, slog.LevelInfo, @@ -133,14 +134,14 @@ func PostProtectAdminGroups(ctx context.Context, db graph.Database) (*analysis.A domainNodes, err := fetchCollectedDomainNodes(ctx, db) if err != nil { - return &analysis.AtomicPostProcessingStats{}, err + return &post.AtomicPostProcessingStats{}, err } operation := analysis.NewPostRelationshipOperation(ctx, db, "ProtectAdminGroups Post Processing") for _, domain := range domainNodes { - operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + operation.Operation.SubmitReader(func(ctx context.Context, tx 
graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if adminSDHolderIDs, err := getAdminSDHolder(tx, domain); graph.IsErrNotFound(err) { // No AdminSDHolder IDs found for this domain return nil @@ -154,7 +155,7 @@ func PostProtectAdminGroups(ctx context.Context, db graph.Database) (*analysis.A } else { fromID := adminSDHolderIDs[0] // AdminSDHolder should be unique per domain for _, toID := range protectedObjectIDs { - channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: fromID, ToID: toID, Kind: ad.ProtectAdminGroups, @@ -168,7 +169,7 @@ func PostProtectAdminGroups(ctx context.Context, db graph.Database) (*analysis.A return &operation.Stats, operation.Done() } -func PostHasTrustKeys(ctx context.Context, db graph.Database) (*analysis.AtomicPostProcessingStats, error) { +func PostHasTrustKeys(ctx context.Context, db graph.Database) (*post.AtomicPostProcessingStats, error) { defer measure.ContextLogAndMeasure( ctx, slog.LevelInfo, @@ -179,10 +180,10 @@ func PostHasTrustKeys(ctx context.Context, db graph.Database) (*analysis.AtomicP )() if domainNodes, err := fetchCollectedDomainNodes(ctx, db); err != nil { - return &analysis.AtomicPostProcessingStats{}, err + return &post.AtomicPostProcessingStats{}, err } else { operation := analysis.NewPostRelationshipOperation(ctx, db, "HasTrustKeys Post Processing") - if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, domain := range domainNodes { if netbios, err := domain.Properties.Get(ad.NetBIOS.String()).String(); err != nil { // The property is new and may therefore not exist @@ -202,7 +203,7 @@ func PostHasTrustKeys(ctx context.Context, db graph.Database) (*analysis.AtomicP slog.DebugContext(ctx, 
fmt.Sprintf("Trust account not found for domain SID %s and NetBIOS %s", trustingDomainSid, netbios)) continue } else { - channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: domain.ID, ToID: trustAccount.ID, Kind: ad.HasTrustKeys, @@ -213,7 +214,7 @@ func PostHasTrustKeys(ctx context.Context, db graph.Database) (*analysis.AtomicP } return nil }); err != nil { - return &analysis.AtomicPostProcessingStats{}, fmt.Errorf("error creating HasTrustKeys edges: %w", err) + return &post.AtomicPostProcessingStats{}, fmt.Errorf("error creating HasTrustKeys edges: %w", err) } return &operation.Stats, operation.Done() diff --git a/packages/go/analysis/azure/post.go b/packages/go/analysis/azure/post.go index d69eddf124d..d6e8326917e 100644 --- a/packages/go/analysis/azure/post.go +++ b/packages/go/analysis/azure/post.go @@ -23,10 +23,13 @@ import ( "strings" "github.com/specterops/bloodhound/packages/go/analysis" + "github.com/specterops/bloodhound/packages/go/analysis/delta" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/bhlog/attr" "github.com/specterops/bloodhound/packages/go/bhlog/measure" "github.com/specterops/bloodhound/packages/go/graphschema/azure" "github.com/specterops/bloodhound/packages/go/graphschema/common" + "github.com/specterops/bloodhound/packages/go/trace" "github.com/specterops/dawgs/cardinality" "github.com/specterops/dawgs/graph" "github.com/specterops/dawgs/ops" @@ -185,7 +188,7 @@ func aggregateSourceReadWriteServicePrincipals(tx graph.Transaction, tenantConta return sourceNodes, nil } -func AppRoleAssignments(ctx context.Context, db graph.Database) (*analysis.AtomicPostProcessingStats, error) { +func AppRoleAssignments(ctx context.Context, db graph.Database) (*post.AtomicPostProcessingStats, error) { defer measure.ContextLogAndMeasure( ctx, slog.LevelInfo, @@ -196,7 +199,7 @@ func AppRoleAssignments(ctx context.Context, 
db graph.Database) (*analysis.Atomi )() if tenants, err := FetchTenants(ctx, db); err != nil { - return &analysis.AtomicPostProcessingStats{}, err + return &post.AtomicPostProcessingStats{}, err } else { operation := analysis.NewPostRelationshipOperation(ctx, db, "Azure App Role Assignments Post Processing") @@ -244,17 +247,17 @@ func AppRoleAssignments(ctx context.Context, db graph.Database) (*analysis.Atomi } } -func createAZMGApplicationReadWriteAllEdges(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], tenant *graph.Node, tenantContainsServicePrincipalRelationships []*graph.Relationship) error { +func createAZMGApplicationReadWriteAllEdges(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], tenant *graph.Node, tenantContainsServicePrincipalRelationships []*graph.Relationship) error { if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if tenantContainsAppRelationships, err := fetchTenantContainsRelationships(tx, tenant, azure.App); err != nil { return err } else if sourceNodes, err := aggregateSourceReadWriteServicePrincipals(tx, tenantContainsServicePrincipalRelationships, azure.ApplicationReadWriteAll); err != nil { return err } else { - return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, targetRelationship := range append(tenantContainsServicePrincipalRelationships, tenantContainsAppRelationships...) 
{ for _, sourceNode := range sourceNodes { - AZMGAddSecretRelationship := analysis.CreatePostRelationshipJob{ + AZMGAddSecretRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, ToID: targetRelationship.EndID, Kind: azure.AZMGAddSecret, @@ -264,7 +267,7 @@ func createAZMGApplicationReadWriteAllEdges(ctx context.Context, db graph.Databa return nil } - AZMGAddOwnerRelationship := analysis.CreatePostRelationshipJob{ + AZMGAddOwnerRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, ToID: targetRelationship.EndID, Kind: azure.AZMGAddOwner, @@ -286,15 +289,15 @@ func createAZMGApplicationReadWriteAllEdges(ctx context.Context, db graph.Databa } } -func createAZMGAppRoleAssignmentReadWriteAllEdges(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], tenantContainsServicePrincipalRelationships []*graph.Relationship) error { +func createAZMGAppRoleAssignmentReadWriteAllEdges(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], tenantContainsServicePrincipalRelationships []*graph.Relationship) error { if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if sourceNodes, err := aggregateSourceReadWriteServicePrincipals(tx, tenantContainsServicePrincipalRelationships, azure.AppRoleAssignmentReadWriteAll); err != nil { return err } else { - return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, tenantContainsServicePrincipalRelationship := range tenantContainsServicePrincipalRelationships { for _, sourceNode := range sourceNodes { - AZMGGrantAppRolesRelationship := analysis.CreatePostRelationshipJob{ + AZMGGrantAppRolesRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, 
ToID: tenantContainsServicePrincipalRelationship.StartID, // the tenant Kind: azure.AZMGGrantAppRoles, @@ -316,17 +319,17 @@ func createAZMGAppRoleAssignmentReadWriteAllEdges(ctx context.Context, db graph. } } -func createAZMGDirectoryReadWriteAllEdges(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], tenant *graph.Node, tenantContainsServicePrincipalRelationships []*graph.Relationship) error { +func createAZMGDirectoryReadWriteAllEdges(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], tenant *graph.Node, tenantContainsServicePrincipalRelationships []*graph.Relationship) error { if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if sourceNodes, err := aggregateSourceReadWriteServicePrincipals(tx, tenantContainsServicePrincipalRelationships, azure.DirectoryReadWriteAll); err != nil { return err } else if tenantContainsGroupRelationships, err := fetchTenantContainsReadWriteAllGroupRelationships(tx, tenant); err != nil { return err } else { - return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, tenantContainsGroupRelationship := range tenantContainsGroupRelationships { for _, sourceNode := range sourceNodes { - AZMGAddMemberRelationship := analysis.CreatePostRelationshipJob{ + AZMGAddMemberRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, ToID: tenantContainsGroupRelationship.EndID, Kind: azure.AZMGAddMember, @@ -336,7 +339,7 @@ func createAZMGDirectoryReadWriteAllEdges(ctx context.Context, db graph.Database return nil } - AZMGAddOwnerRelationship := analysis.CreatePostRelationshipJob{ + AZMGAddOwnerRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, ToID: 
tenantContainsGroupRelationship.EndID, Kind: azure.AZMGAddOwner, @@ -357,17 +360,17 @@ func createAZMGDirectoryReadWriteAllEdges(ctx context.Context, db graph.Database } } -func createAZMGGroupReadWriteAllEdges(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], tenant *graph.Node, tenantContainsServicePrincipalRelationships []*graph.Relationship) error { +func createAZMGGroupReadWriteAllEdges(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], tenant *graph.Node, tenantContainsServicePrincipalRelationships []*graph.Relationship) error { if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if sourceNodes, err := aggregateSourceReadWriteServicePrincipals(tx, tenantContainsServicePrincipalRelationships, azure.GroupReadWriteAll); err != nil { return err } else if tenantContainsGroupRelationships, err := fetchTenantContainsReadWriteAllGroupRelationships(tx, tenant); err != nil { return err } else { - return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, tenantContainsGroupRelationship := range tenantContainsGroupRelationships { for _, sourceNode := range sourceNodes { - AZMGAddMemberRelationship := analysis.CreatePostRelationshipJob{ + AZMGAddMemberRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, ToID: tenantContainsGroupRelationship.EndID, Kind: azure.AZMGAddMember, @@ -377,7 +380,7 @@ func createAZMGGroupReadWriteAllEdges(ctx context.Context, db graph.Database, op return nil } - AZMGAddOwnerRelationship := analysis.CreatePostRelationshipJob{ + AZMGAddOwnerRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, ToID: tenantContainsGroupRelationship.EndID, Kind: 
azure.AZMGAddOwner, @@ -398,17 +401,17 @@ func createAZMGGroupReadWriteAllEdges(ctx context.Context, db graph.Database, op } } -func createAZMGGroupMemberReadWriteAllEdges(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], tenant *graph.Node, tenantContainsServicePrincipalRelationships []*graph.Relationship) error { +func createAZMGGroupMemberReadWriteAllEdges(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], tenant *graph.Node, tenantContainsServicePrincipalRelationships []*graph.Relationship) error { if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if sourceNodes, err := aggregateSourceReadWriteServicePrincipals(tx, tenantContainsServicePrincipalRelationships, azure.GroupMemberReadWriteAll); err != nil { return err } else if tenantContainsGroupRelationships, err := fetchTenantContainsReadWriteAllGroupRelationships(tx, tenant); err != nil { return err } else { - return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, tenantContainsGroupRelationship := range tenantContainsGroupRelationships { for _, sourceNode := range sourceNodes { - AZMGAddMemberRelationship := analysis.CreatePostRelationshipJob{ + AZMGAddMemberRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, ToID: tenantContainsGroupRelationship.EndID, Kind: azure.AZMGAddMember, @@ -429,17 +432,17 @@ func createAZMGGroupMemberReadWriteAllEdges(ctx context.Context, db graph.Databa } } -func createAZMGRoleManagementReadWriteDirectoryEdgesPart1(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], tenant *graph.Node, tenantContainsServicePrincipalRelationships 
[]*graph.Relationship) error { +func createAZMGRoleManagementReadWriteDirectoryEdgesPart1(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], tenant *graph.Node, tenantContainsServicePrincipalRelationships []*graph.Relationship) error { if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if sourceNodes, err := aggregateSourceReadWriteServicePrincipals(tx, tenantContainsServicePrincipalRelationships, azure.RoleManagementReadWriteDirectory); err != nil { return err } else if tenantContainsRoleRelationships, err := fetchTenantContainsRelationships(tx, tenant, azure.Role); err != nil { return err } else { - return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, tenantContainsRoleRelationship := range tenantContainsRoleRelationships { for _, sourceNode := range sourceNodes { - AZMGGrantAppRolesRelationship := analysis.CreatePostRelationshipJob{ + AZMGGrantAppRolesRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, ToID: tenantContainsRoleRelationship.StartID, Kind: azure.AZMGGrantAppRoles, @@ -460,17 +463,17 @@ func createAZMGRoleManagementReadWriteDirectoryEdgesPart1(ctx context.Context, d } } -func createAZMGRoleManagementReadWriteDirectoryEdgesPart2(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], tenant *graph.Node, tenantContainsServicePrincipalRelationships []*graph.Relationship) error { +func createAZMGRoleManagementReadWriteDirectoryEdgesPart2(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], tenant *graph.Node, tenantContainsServicePrincipalRelationships []*graph.Relationship) error { if err := db.ReadTransaction(ctx, 
func(tx graph.Transaction) error { if sourceNodes, err := aggregateSourceReadWriteServicePrincipals(tx, tenantContainsServicePrincipalRelationships, azure.RoleManagementReadWriteDirectory); err != nil { return err } else if tenantContainsRoleRelationships, err := fetchTenantContainsRelationships(tx, tenant, azure.Role); err != nil { return err } else { - return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, tenantContainsRoleRelationship := range tenantContainsRoleRelationships { for _, sourceNode := range sourceNodes { - AZMGGrantRoleRelationship := analysis.CreatePostRelationshipJob{ + AZMGGrantRoleRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, ToID: tenantContainsRoleRelationship.EndID, Kind: azure.AZMGGrantRole, @@ -491,15 +494,15 @@ func createAZMGRoleManagementReadWriteDirectoryEdgesPart2(ctx context.Context, d } } -func createAZMGRoleManagementReadWriteDirectoryEdgesPart3(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], tenantContainsServicePrincipalRelationships []*graph.Relationship) error { +func createAZMGRoleManagementReadWriteDirectoryEdgesPart3(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], tenantContainsServicePrincipalRelationships []*graph.Relationship) error { if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if sourceNodes, err := aggregateSourceReadWriteServicePrincipals(tx, tenantContainsServicePrincipalRelationships, azure.RoleManagementReadWriteDirectory); err != nil { return err } else { - return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + return 
operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, tenantContainsServicePrincipalRelationship := range tenantContainsServicePrincipalRelationships { for _, sourceNode := range sourceNodes { - AZMGAddSecretRelationship := analysis.CreatePostRelationshipJob{ + AZMGAddSecretRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, ToID: tenantContainsServicePrincipalRelationship.EndID, Kind: azure.AZMGAddSecret, @@ -509,7 +512,7 @@ func createAZMGRoleManagementReadWriteDirectoryEdgesPart3(ctx context.Context, d return nil } - AZMGAddOwnerRelationship := analysis.CreatePostRelationshipJob{ + AZMGAddOwnerRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, ToID: tenantContainsServicePrincipalRelationship.EndID, Kind: azure.AZMGAddOwner, @@ -531,17 +534,17 @@ func createAZMGRoleManagementReadWriteDirectoryEdgesPart3(ctx context.Context, d } } -func createAZMGRoleManagementReadWriteDirectoryEdgesPart4(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], tenant *graph.Node, tenantContainsServicePrincipalRelationships []*graph.Relationship) error { +func createAZMGRoleManagementReadWriteDirectoryEdgesPart4(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], tenant *graph.Node, tenantContainsServicePrincipalRelationships []*graph.Relationship) error { if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if sourceNodes, err := aggregateSourceReadWriteServicePrincipals(tx, tenantContainsServicePrincipalRelationships, azure.RoleManagementReadWriteDirectory); err != nil { return err } else if tenantContainsAppRelationships, err := fetchTenantContainsRelationships(tx, tenant, azure.App); err != nil { return err } else { - return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- 
analysis.CreatePostRelationshipJob) error { + return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, tenantContainsAppRelationship := range tenantContainsAppRelationships { for _, sourceNode := range sourceNodes { - AZMGAddSecretRelationship := analysis.CreatePostRelationshipJob{ + AZMGAddSecretRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, ToID: tenantContainsAppRelationship.EndID, Kind: azure.AZMGAddSecret, @@ -551,7 +554,7 @@ func createAZMGRoleManagementReadWriteDirectoryEdgesPart4(ctx context.Context, d return nil } - AZMGAddOwnerRelationship := analysis.CreatePostRelationshipJob{ + AZMGAddOwnerRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, ToID: tenantContainsAppRelationship.EndID, Kind: azure.AZMGAddOwner, @@ -573,17 +576,17 @@ func createAZMGRoleManagementReadWriteDirectoryEdgesPart4(ctx context.Context, d } } -func createAZMGRoleManagementReadWriteDirectoryEdgesPart5(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], tenant *graph.Node, tenantContainsServicePrincipalRelationships []*graph.Relationship) error { +func createAZMGRoleManagementReadWriteDirectoryEdgesPart5(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], tenant *graph.Node, tenantContainsServicePrincipalRelationships []*graph.Relationship) error { if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if sourceNodes, err := aggregateSourceReadWriteServicePrincipals(tx, tenantContainsServicePrincipalRelationships, azure.RoleManagementReadWriteDirectory); err != nil { return err } else if tenantContainsGroupRelationships, err := fetchTenantContainsRelationships(tx, tenant, azure.Group); err != nil { return err } else { - return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- 
analysis.CreatePostRelationshipJob) error { + return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for _, tenantContainsGroupRelationship := range tenantContainsGroupRelationships { for _, sourceNode := range sourceNodes { - AZMGAddMemberRelationship := analysis.CreatePostRelationshipJob{ + AZMGAddMemberRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, ToID: tenantContainsGroupRelationship.EndID, Kind: azure.AZMGAddMember, @@ -593,7 +596,7 @@ func createAZMGRoleManagementReadWriteDirectoryEdgesPart5(ctx context.Context, d return nil } - AZMGAddOwnerRelationship := analysis.CreatePostRelationshipJob{ + AZMGAddOwnerRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, ToID: tenantContainsGroupRelationship.EndID, Kind: azure.AZMGAddOwner, @@ -614,15 +617,15 @@ func createAZMGRoleManagementReadWriteDirectoryEdgesPart5(ctx context.Context, d } } -func createAZMGServicePrincipalEndpointReadWriteAllEdges(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], tenantContainsServicePrincipalRelationships []*graph.Relationship) error { +func createAZMGServicePrincipalEndpointReadWriteAllEdges(ctx context.Context, db graph.Database, operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], tenantContainsServicePrincipalRelationships []*graph.Relationship) error { if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { if sourceNodes, err := aggregateSourceReadWriteServicePrincipals(tx, tenantContainsServicePrincipalRelationships, azure.ServicePrincipalEndpointReadWriteAll); err != nil { return err } else { - return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) 
error { for _, tenantContainsServicePrincipalRelationship := range tenantContainsServicePrincipalRelationships { for _, sourceNode := range sourceNodes { - AZMGAddOwnerRelationship := analysis.CreatePostRelationshipJob{ + AZMGAddOwnerRelationship := post.EnsureRelationshipJob{ FromID: sourceNode.ID, ToID: tenantContainsServicePrincipalRelationship.EndID, Kind: azure.AZMGAddOwner, @@ -644,8 +647,8 @@ func createAZMGServicePrincipalEndpointReadWriteAllEdges(ctx context.Context, db } } -func addSecret(operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], tenant *graph.Node) error { - return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { +func addSecret(operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], tenant *graph.Node) error { + return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if addSecretRoles, err := TenantRoles(tx, tenant, AddSecretRoleIDs()...); err != nil { return err } else if tenantAppsAndSPs, err := TenantApplicationsAndServicePrincipals(tx, tenant); err != nil { @@ -660,7 +663,7 @@ func addSecret(operation analysis.StatTrackedOperation[analysis.CreatePostRelati slog.String("target_kinds", strings.Join(target.Kinds.Strings(), ",")), slog.Any("target_id", target.ID), ) - nextJob := analysis.CreatePostRelationshipJob{ + nextJob := post.EnsureRelationshipJob{ FromID: role.ID, ToID: target.ID, Kind: azure.AddSecret, @@ -677,7 +680,7 @@ func addSecret(operation analysis.StatTrackedOperation[analysis.CreatePostRelati }) } -func ExecuteCommand(ctx context.Context, db graph.Database) (*analysis.AtomicPostProcessingStats, error) { +func ExecuteCommand(ctx context.Context, db graph.Database) (*post.AtomicPostProcessingStats, error) { defer measure.ContextLogAndMeasure( ctx, slog.LevelInfo, @@ -688,7 +691,7 @@ func ExecuteCommand(ctx context.Context, db 
graph.Database) (*analysis.AtomicPos )() if tenants, err := FetchTenants(ctx, db); err != nil { - return &analysis.AtomicPostProcessingStats{}, err + return &post.AtomicPostProcessingStats{}, err } else { operation := analysis.NewPostRelationshipOperation(ctx, db, "AZExecuteCommand Post Processing") if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { @@ -703,12 +706,12 @@ func ExecuteCommand(ctx context.Context, db graph.Database) (*analysis.AtomicPos for _, tenantDevice := range tenantDevices { innerTenantDevice := tenantDevice - if err := operation.Operation.SubmitReader(func(ctx context.Context, _ graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + if err := operation.Operation.SubmitReader(func(ctx context.Context, _ graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { if isWindowsDevice, err := IsWindowsDevice(innerTenantDevice); err != nil { return err } else if isWindowsDevice { for _, intuneAdmin := range intuneAdmins { - nextJob := analysis.CreatePostRelationshipJob{ + nextJob := post.EnsureRelationshipJob{ FromID: intuneAdmin.ID, ToID: innerTenantDevice.ID, Kind: azure.ExecuteCommand, @@ -741,16 +744,10 @@ func ExecuteCommand(ctx context.Context, db graph.Database) (*analysis.AtomicPos } } -func resetPassword(operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], tenant *graph.Node, roleAssignments RoleAssignments) error { - defer measure.LogAndMeasure( - slog.LevelInfo, - "AZResetPassword Post Processing", - attr.Namespace("analysis"), - attr.Function("resetPassword"), - attr.Scope("routine"), - )() +func postAzureResetPassword(ctx context.Context, db graph.Database, sink *post.FilteredRelationshipSink, tenant *graph.Node, roleAssignments RoleAssignments) error { + defer trace.Function(ctx, "postAzureResetPassword", attr.Operation("Reset Password Post Processing"))() - return operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- 
analysis.CreatePostRelationshipJob) error { + return db.ReadTransaction(ctx, func(tx graph.Transaction) error { if pwResetRoles, err := TenantRoles(tx, tenant, ResetPasswordRoleIDs()...); err != nil { return err } else { @@ -759,17 +756,18 @@ func resetPassword(operation analysis.StatTrackedOperation[analysis.CreatePostRe return fmt.Errorf("unable to continue processing azresetpassword for tenant node %d: %w", tenant.ID, err) } else { targets.Each(func(nextID uint64) bool { - nextJob := analysis.CreatePostRelationshipJob{ + nextJob := post.EnsureRelationshipJob{ FromID: role.ID, ToID: graph.ID(nextID), Kind: azure.ResetPassword, } - return channels.Submit(ctx, outC, nextJob) + return sink.Submit(ctx, nextJob) }) } } } + return nil }) } @@ -811,230 +809,163 @@ func resetPasswordEndNodeBitmapForRole(role *graph.Node, roleAssignments RoleAss } } -func globalAdmins(roleAssignments RoleAssignments, tenant *graph.Node, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob]) { - defer measure.LogAndMeasure( - slog.LevelInfo, - "Global Admins Post Processing", - attr.Namespace("analysis"), - attr.Function("globalAdmins"), - attr.Scope("routine"), - )() +func postAzureGlobalAdmins(ctx context.Context, sink *post.FilteredRelationshipSink, roleAssignments RoleAssignments, tenant *graph.Node) error { + defer trace.Function(ctx, "postAzureGlobalAdmins", attr.Operation("Global Admins Post Processing"))() - if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - roleAssignments.PrincipalsWithRole(azure.CompanyAdministratorRole).Each(func(nextID uint64) bool { - nextJob := analysis.CreatePostRelationshipJob{ - FromID: graph.ID(nextID), - ToID: tenant.ID, - Kind: azure.GlobalAdmin, - } + roleAssignments.PrincipalsWithRole(azure.CompanyAdministratorRole).Each(func(nextID uint64) bool { + nextJob := post.EnsureRelationshipJob{ + FromID: graph.ID(nextID), + ToID: 
tenant.ID, + Kind: azure.GlobalAdmin, + } - return channels.Submit(ctx, outC, nextJob) - }) + return sink.Submit(ctx, nextJob) + }) - return nil - }); err != nil { - slog.Error("Failed to submit azure global admins post processing job", attr.Error(err)) - } + return nil } -func privilegedRoleAdmins(roleAssignments RoleAssignments, tenant *graph.Node, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob]) { - defer measure.LogAndMeasure( - slog.LevelInfo, - "Privileged Role Admins Post Processing", - attr.Namespace("analysis"), - attr.Function("privilegedRoleAdmins"), - attr.Scope("routine"), - )() +func postAzurePrivilegedRoleAdmins(ctx context.Context, sink *post.FilteredRelationshipSink, roleAssignments RoleAssignments, tenant *graph.Node) { + defer trace.Function(ctx, "postAzurePrivilegedRoleAdmins", attr.Operation("Privileged Role Admins Post Processing"))() - if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - roleAssignments.PrincipalsWithRole(azure.PrivilegedRoleAdministratorRole).Each(func(nextID uint64) bool { - nextJob := analysis.CreatePostRelationshipJob{ - FromID: graph.ID(nextID), - ToID: tenant.ID, - Kind: azure.PrivilegedRoleAdmin, - } + roleAssignments.PrincipalsWithRole(azure.PrivilegedRoleAdministratorRole).Each(func(nextID uint64) bool { + nextJob := post.EnsureRelationshipJob{ + FromID: graph.ID(nextID), + ToID: tenant.ID, + Kind: azure.PrivilegedRoleAdmin, + } - return channels.Submit(ctx, outC, nextJob) - }) + return sink.Submit(ctx, nextJob) + }) +} - return nil - }); err != nil { - slog.Error("Failed to submit privileged role admins post processing job", attr.Error(err)) - } +func postAzurePrivilegedAuthAdmins(ctx context.Context, sink *post.FilteredRelationshipSink, roleAssignments RoleAssignments, tenant *graph.Node) { + defer trace.Function(ctx, "postAzurePrivilegedAuthAdmins", attr.Operation("Privileged Auth Admins Post 
Processing"))() + + roleAssignments.PrincipalsWithRole(azure.PrivilegedAuthenticationAdministratorRole).Each(func(nextID uint64) bool { + nextJob := post.EnsureRelationshipJob{ + FromID: graph.ID(nextID), + ToID: tenant.ID, + Kind: azure.PrivilegedAuthAdmin, + } + + return sink.Submit(ctx, nextJob) + }) } -func privilegedAuthAdmins(roleAssignments RoleAssignments, tenant *graph.Node, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob]) { - defer measure.LogAndMeasure( - slog.LevelInfo, - "Privileged Auth Admins Post Processing", - attr.Namespace("analysis"), - attr.Function("privilegedAuthAdmins"), - attr.Scope("routine"), - )() +func postAzureAddMembers(ctx context.Context, sink *post.FilteredRelationshipSink, roleAssignments RoleAssignments) error { + defer trace.Function(ctx, "postAzureAddMembers", attr.Operation("Azure Add Members Post Processing"))() - if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - roleAssignments.PrincipalsWithRole(azure.PrivilegedAuthenticationAdministratorRole).Each(func(nextID uint64) bool { - nextJob := analysis.CreatePostRelationshipJob{ + for tenantGroupID, tenantGroup := range roleAssignments.TenantPrincipals.Get(azure.Group) { + roleAssignments.UsersWithRole(AddMemberAllGroupsTargetRoles()...).Each(func(nextID uint64) bool { + nextJob := post.EnsureRelationshipJob{ FromID: graph.ID(nextID), - ToID: tenant.ID, - Kind: azure.PrivilegedAuthAdmin, + ToID: tenantGroupID, + Kind: azure.AddMembers, } - return channels.Submit(ctx, outC, nextJob) + return sink.Submit(ctx, nextJob) }) - return nil - }); err != nil { - slog.Error("Failed to submit azure privileged auth admins post processing job", attr.Error(err)) - } -} - -func addMembers(roleAssignments RoleAssignments, operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob]) { - defer measure.LogAndMeasure( - slog.LevelInfo, - "AZ Add Members Post 
Processing", - attr.Namespace("analysis"), - attr.Function("addMembers"), - attr.Scope("routine"), - )() + roleAssignments.ServicePrincipalsWithRole(AddMemberAllGroupsTargetRoles()...).Each(func(nextID uint64) bool { + nextJob := post.EnsureRelationshipJob{ + FromID: graph.ID(nextID), + ToID: tenantGroupID, + Kind: azure.AddMembers, + } - for tenantGroupID, tenantGroup := range roleAssignments.Principals.Get(azure.Group) { - var ( - innerGroupID = tenantGroupID - innerGroup = tenantGroup - ) + return sink.Submit(ctx, nextJob) + }) - if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - roleAssignments.UsersWithRole(AddMemberAllGroupsTargetRoles()...).Each(func(nextID uint64) bool { - nextJob := analysis.CreatePostRelationshipJob{ + if isRoleAssignable, err := tenantGroup.Properties.Get(azure.IsAssignableToRole.String()).Bool(); err != nil { + if graph.IsErrPropertyNotFound(err) { + slog.WarnContext( + ctx, + "Node is missing property", + slog.Uint64("node_id", tenantGroup.ID.Uint64()), + slog.String("property", azure.IsAssignableToRole.String()), + ) + } else { + return err + } + } else if !isRoleAssignable { + roleAssignments.UsersWithRole(AddMemberGroupNotRoleAssignableTargetRoles()...).Each(func(nextID uint64) bool { + nextJob := post.EnsureRelationshipJob{ FromID: graph.ID(nextID), - ToID: innerGroupID, + ToID: tenantGroupID, Kind: azure.AddMembers, } - return channels.Submit(ctx, outC, nextJob) + return sink.Submit(ctx, nextJob) }) - - return nil - }); err != nil { - slog.Error("Failed to submit post processing job for users with role allowing AZAddMembers edge", attr.Error(err)) } - if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - roleAssignments.ServicePrincipalsWithRole(AddMemberAllGroupsTargetRoles()...).Each(func(nextID uint64) bool { - nextJob := 
analysis.CreatePostRelationshipJob{ + if isRoleAssignable, err := tenantGroup.Properties.Get(azure.IsAssignableToRole.String()).Bool(); err != nil { + if graph.IsErrPropertyNotFound(err) { + slog.WarnContext( + ctx, + "Node is missing property", + slog.Uint64("node_id", tenantGroup.ID.Uint64()), + slog.String("property", azure.IsAssignableToRole.String()), + ) + } else { + return err + } + } else if !isRoleAssignable { + roleAssignments.ServicePrincipalsWithRole(AddMemberGroupNotRoleAssignableTargetRoles()...).Each(func(nextID uint64) bool { + nextJob := post.EnsureRelationshipJob{ FromID: graph.ID(nextID), - ToID: innerGroupID, + ToID: tenantGroupID, Kind: azure.AddMembers, } - return channels.Submit(ctx, outC, nextJob) + return sink.Submit(ctx, nextJob) }) - - return nil - }); err != nil { - slog.Error("Failed to submit post processing job for service principals with role allowing AZAddMembers edge", attr.Error(err)) - } - - if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if isRoleAssignable, err := innerGroup.Properties.Get(azure.IsAssignableToRole.String()).Bool(); err != nil { - if graph.IsErrPropertyNotFound(err) { - slog.WarnContext( - ctx, - "Node is missing property", - slog.Uint64("node_id", innerGroup.ID.Uint64()), - slog.String("property", azure.IsAssignableToRole.String()), - ) - } else { - return err - } - } else if !isRoleAssignable { - roleAssignments.UsersWithRole(AddMemberGroupNotRoleAssignableTargetRoles()...).Each(func(nextID uint64) bool { - nextJob := analysis.CreatePostRelationshipJob{ - FromID: graph.ID(nextID), - ToID: innerGroupID, - Kind: azure.AddMembers, - } - - return channels.Submit(ctx, outC, nextJob) - }) - } - - return nil - }); err != nil { - slog.Error("Failed to submit post processing job for users with role allowing AZAddMembers edge", attr.Error(err)) } + } - if err := operation.Operation.SubmitReader(func(ctx context.Context, 
tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { - if isRoleAssignable, err := innerGroup.Properties.Get(azure.IsAssignableToRole.String()).Bool(); err != nil { - if graph.IsErrPropertyNotFound(err) { - slog.WarnContext( - ctx, - "Node is missing property", - slog.Uint64("node_id", innerGroup.ID.Uint64()), - slog.String("property", azure.IsAssignableToRole.String()), - ) - } else { - return err - } - } else if !isRoleAssignable { - roleAssignments.ServicePrincipalsWithRole(AddMemberGroupNotRoleAssignableTargetRoles()...).Each(func(nextID uint64) bool { - nextJob := analysis.CreatePostRelationshipJob{ - FromID: graph.ID(nextID), - ToID: innerGroupID, - Kind: azure.AddMembers, - } - - return channels.Submit(ctx, outC, nextJob) - }) - } + return nil +} - return nil - }); err != nil { - slog.Error("Failed to submit post processing job for service principals with role allowing AZAddMembers edge", attr.Error(err)) - } - } +var userRoleAssignmentPostProcessedEdges = graph.Kinds{ + azure.ResetPassword, + azure.GlobalAdmin, + azure.PrivilegedRoleAdmin, + azure.PrivilegedAuthAdmin, + azure.AddMembers, } -func UserRoleAssignments(ctx context.Context, db graph.Database) (*analysis.AtomicPostProcessingStats, error) { - defer measure.ContextLogAndMeasure( - ctx, - slog.LevelInfo, - "Post-processing User Role Assignments", - attr.Namespace("analysis"), - attr.Function("UserRoleAssignments"), - attr.Scope("process"), - )() +func UserRoleAssignments(ctx context.Context, db graph.Database) (*post.AtomicPostProcessingStats, error) { + ctx = trace.Context(ctx, slog.LevelInfo, "analysis", "Azure User Role Assignment Post-processing") + defer trace.Function(ctx, "UserRoleAssignments")() - if tenantNodes, err := FetchTenants(ctx, db); err != nil { - return &analysis.AtomicPostProcessingStats{}, err + if userRoleAssignmentTracker, err := delta.FetchTracker(ctx, db, userRoleAssignmentPostProcessedEdges); err != nil { + return &post.AtomicPostProcessingStats{}, 
err + } else if tenantNodes, err := FetchTenants(ctx, db); err != nil { + return &post.AtomicPostProcessingStats{}, err } else { - operation := analysis.NewPostRelationshipOperation(ctx, db, "Azure User Role Assignments Post Processing") + sink := post.NewFilteredRelationshipSink(ctx, "Azure User Role Assignments Post Processing", db, userRoleAssignmentTracker) + defer sink.Done() for _, tenant := range tenantNodes { - if roleAssignments, err := TenantRoleAssignments(ctx, db, tenant); err != nil { - if err := operation.Done(); err != nil { - slog.ErrorContext(ctx, "Error caught during azure UserRoleAssignments.TenantRoleAssignments teardown", attr.Error(err)) - } - - return &analysis.AtomicPostProcessingStats{}, err + if roleAssignments, err := FetchTenantRoleAssignments(ctx, db, tenant); err != nil { + return &post.AtomicPostProcessingStats{}, err } else { - if err := resetPassword(operation, tenant, roleAssignments); err != nil { - if err := operation.Done(); err != nil { - slog.ErrorContext(ctx, "Error caught during azure UserRoleAssignments.resetPassword teardown", attr.Error(err)) - } - - return &analysis.AtomicPostProcessingStats{}, err + if err := postAzureResetPassword(ctx, db, sink, tenant, roleAssignments); err != nil { + return &post.AtomicPostProcessingStats{}, err } else { - globalAdmins(roleAssignments, tenant, operation) - privilegedRoleAdmins(roleAssignments, tenant, operation) - privilegedAuthAdmins(roleAssignments, tenant, operation) - addMembers(roleAssignments, operation) + postAzureGlobalAdmins(ctx, sink, roleAssignments, tenant) + postAzurePrivilegedRoleAdmins(ctx, sink, roleAssignments, tenant) + postAzurePrivilegedAuthAdmins(ctx, sink, roleAssignments, tenant) + + if err := postAzureAddMembers(ctx, sink, roleAssignments); err != nil { + slog.Error("Azure AddMember Post-Processing Failure", attr.Error(err)) + } } } } - return &operation.Stats, operation.Done() + return sink.Stats(), nil } } @@ -1063,7 +994,7 @@ func CreateAZRoleApproverEdge( 
ctx context.Context, db graph.Database, ) ( - *analysis.AtomicPostProcessingStats, + *post.AtomicPostProcessingStats, error, ) { defer measure.ContextLogAndMeasure( diff --git a/packages/go/analysis/azure/post_test.go b/packages/go/analysis/azure/post_test.go index 4deef5170d7..127c189794a 100644 --- a/packages/go/analysis/azure/post_test.go +++ b/packages/go/analysis/azure/post_test.go @@ -55,8 +55,8 @@ func setupRoleAssignments() azure.RoleAssignments { return azure.RoleAssignments{ // user2 has no roles! this is intentional - Principals: graph.NewNodeSet(user, user2, group, app).KindSet(), - RoleMap: roleMap, + TenantPrincipals: graph.NewNodeSet(user, user2, group, app).KindSet(), + RoleMap: roleMap, } } diff --git a/packages/go/analysis/azure/role.go b/packages/go/analysis/azure/role.go index b905107e439..b4ba9a5e42e 100644 --- a/packages/go/analysis/azure/role.go +++ b/packages/go/analysis/azure/role.go @@ -24,6 +24,7 @@ import ( "github.com/specterops/bloodhound/packages/go/bhlog/measure" "github.com/specterops/bloodhound/packages/go/graphschema/azure" + "github.com/specterops/bloodhound/packages/go/trace" "github.com/specterops/dawgs/cardinality" "github.com/specterops/dawgs/graph" "github.com/specterops/dawgs/ops" @@ -100,7 +101,11 @@ func (s RoleAssignmentMap) HasRole(id graph.ID, roleTemplateIDs ...string) bool } type RoleAssignments struct { - Principals graph.NodeKindSet + TenantPrincipals graph.NodeKindSet + users cardinality.ImmutableDuplex[uint64] + usersWithAnyRole cardinality.ImmutableDuplex[uint64] + usersWithoutRoles cardinality.ImmutableDuplex[uint64] + servicePrincipals cardinality.ImmutableDuplex[uint64] RoleMap map[string]cardinality.Duplex[uint64] RoleAssignableGroupMembership cardinality.Duplex[uint64] } @@ -109,7 +114,7 @@ func (s RoleAssignments) GetNodeKindSet(bm cardinality.Duplex[uint64]) graph.Nod result := graph.NewNodeKindSet() bm.Each(func(nextID uint64) bool { - node := s.Principals.GetNode(graph.ID(nextID)) + node := 
s.TenantPrincipals.GetNode(graph.ID(nextID)) result.Add(node) return true @@ -122,29 +127,20 @@ func (s RoleAssignments) GetNodeSet(bm cardinality.Duplex[uint64]) graph.NodeSet return s.GetNodeKindSet(bm).AllNodes() } -func (s RoleAssignments) ServicePrincipals() cardinality.Duplex[uint64] { - return s.Principals.Get(azure.ServicePrincipal).IDBitmap() +func (s RoleAssignments) ServicePrincipals() cardinality.ImmutableDuplex[uint64] { + return s.servicePrincipals } -func (s RoleAssignments) Users() cardinality.Duplex[uint64] { - return s.Principals.Get(azure.User).IDBitmap() +func (s RoleAssignments) Users() cardinality.ImmutableDuplex[uint64] { + return s.users } -func (s RoleAssignments) UsersWithAnyRole() cardinality.Duplex[uint64] { - users := s.Users() - - principalsWithRoles := cardinality.NewBitmap64() - for _, bitmap := range s.RoleMap { - principalsWithRoles.Or(bitmap) - } - principalsWithRoles.And(users) - return principalsWithRoles +func (s RoleAssignments) UsersWithAnyRole() cardinality.ImmutableDuplex[uint64] { + return s.usersWithAnyRole } -func (s RoleAssignments) UsersWithoutRoles() cardinality.Duplex[uint64] { - result := s.Users() - result.AndNot(s.UsersWithAnyRole()) - return result +func (s RoleAssignments) UsersWithoutRoles() cardinality.ImmutableDuplex[uint64] { + return s.usersWithoutRoles } func (s RoleAssignments) UsersWithRole(roleTemplateIDs ...string) cardinality.Duplex[uint64] { @@ -215,38 +211,73 @@ func (s RoleAssignments) NodeHasRole(id graph.ID, roleTemplateIDs ...string) boo return false } -func initTenantRoleAssignments(tx graph.Transaction, tenant *graph.Node) (RoleAssignments, error) { - if !IsTenantNode(tenant) { - return RoleAssignments{}, fmt.Errorf("cannot initialize tenant role assignments - node %d must be of kind %s", tenant.ID, azure.Tenant) - } else if roleMembers, err := TenantPrincipals(tx, tenant); err != nil && !graph.IsErrNotFound(err) { - return RoleAssignments{}, err - } else { - return RoleAssignments{ - 
Principals: roleMembers.KindSet(), - RoleMap: make(map[string]cardinality.Duplex[uint64]), - RoleAssignableGroupMembership: cardinality.NewBitmap64(), - }, nil +func NewTenantRoleAssignments(tenant *graph.Node, tenantPrincipals graph.NodeKindSet, roleAssignableGroupMembership cardinality.Duplex[uint64], roleMap map[string]cardinality.Duplex[uint64]) RoleAssignments { + var ( + users = tenantPrincipals.Get(azure.User).IDBitmap() + usersWithAnyRole = cardinality.NewBitmap64() + usersWithoutRoles = cardinality.NewBitmap64() + servicePrincipals = tenantPrincipals.Get(azure.ServicePrincipal).IDBitmap() + ) + + // Calculate users with any role first + for _, bitmap := range roleMap { + usersWithAnyRole.Or(bitmap) + } + + usersWithAnyRole.And(users) + + // Calculate users without roles next + usersWithoutRoles.Or(users) + usersWithoutRoles.AndNot(usersWithAnyRole) + + slog.Info("Tenant Role Assignment Details", + slog.Uint64("num_users", users.Cardinality()), + slog.Uint64("num_service_principals", servicePrincipals.Cardinality()), + ) + + return RoleAssignments{ + TenantPrincipals: tenantPrincipals, + users: users, + usersWithAnyRole: usersWithAnyRole, + usersWithoutRoles: usersWithoutRoles, + servicePrincipals: servicePrincipals, + RoleMap: roleMap, + RoleAssignableGroupMembership: roleAssignableGroupMembership, } } -func TenantRoleAssignments(ctx context.Context, db graph.Database, tenant *graph.Node) (RoleAssignments, error) { +func FetchTenantRoleAssignments(ctx context.Context, db graph.Database, tenant *graph.Node) (RoleAssignments, error) { + defer trace.Function(ctx, "FetchTenantRoleAssignments")() + var roleAssignments RoleAssignments - return roleAssignments, db.ReadTransaction(ctx, func(tx graph.Transaction) error { - if fetchedRoleAssignments, err := initTenantRoleAssignments(tx, tenant); err != nil { + + if !IsTenantNode(tenant) { + return RoleAssignments{}, fmt.Errorf("cannot initialize tenant role assignments - node %d must be of kind %s", tenant.ID, 
azure.Tenant) + } + + if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { + if tenantPrincipalsNodeSet, err := TenantPrincipals(tx, tenant); err != nil && !graph.IsErrNotFound(err) { return err } else if roles, err := TenantRoles(tx, tenant); err != nil { return err } else { + var ( + tenantPrincipalsNodeKindSet = tenantPrincipalsNodeSet.KindSet() + roleAssignableGroupMembership = cardinality.NewBitmap64() + roleMap = map[string]cardinality.Duplex[uint64]{} + ) + // for each of the role assignable groups returned, fetch the users who are members - for _, group := range fetchedRoleAssignments.Principals.Get(azure.Group) { + for _, group := range tenantPrincipalsNodeKindSet.Get(azure.Group) { if members, err := FetchRoleAssignableGroupMembersUsers(tx, group, 0, 0); err != nil { return err } else { // set all users who have role assignable group membership - fetchedRoleAssignments.RoleAssignableGroupMembership.Or(members.IDBitmap()) + roleAssignableGroupMembership.Or(members.IDBitmap()) } } - return roles.KindSet().EachNode(func(node *graph.Node) error { + + for _, node := range roles { if roleTemplateID, err := node.Properties.Get(azure.RoleTemplateID.String()).String(); err != nil { if !graph.IsErrPropertyNotFound(err) { return err @@ -256,13 +287,18 @@ func TenantRoleAssignments(ctx context.Context, db graph.Database, tenant *graph return err } } else { - fetchedRoleAssignments.RoleMap[roleTemplateID] = members.IDBitmap() + roleMap[roleTemplateID] = members.IDBitmap() } - roleAssignments = fetchedRoleAssignments - return nil - }) + } + + roleAssignments = NewTenantRoleAssignments(tenant, tenantPrincipalsNodeKindSet, roleAssignableGroupMembership, roleMap) + return nil } - }) + }); err != nil { + return RoleAssignments{}, err + } + + return roleAssignments, nil } // RoleMembers returns the NodeSet of members for a given set of roles diff --git a/packages/go/analysis/azure/role_approver.go b/packages/go/analysis/azure/role_approver.go index 
e1ef1de02c6..a7bf6cc3765 100644 --- a/packages/go/analysis/azure/role_approver.go +++ b/packages/go/analysis/azure/role_approver.go @@ -21,6 +21,7 @@ import ( "log/slog" "github.com/specterops/bloodhound/packages/go/analysis" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/graphschema/azure" "github.com/specterops/bloodhound/packages/go/graphschema/common" "github.com/specterops/dawgs/graph" @@ -56,7 +57,7 @@ func CreateApproverEdge( ctx context.Context, db graph.Database, tenantNode *graph.Node, - operation analysis.StatTrackedOperation[analysis.CreatePostRelationshipJob], + operation analysis.StatTrackedOperation[post.EnsureRelationshipJob], ) error { // Extract the tenant's objectid to match against AZRole tenantid properties tenantObjectID, err := tenantNode.Properties.Get(common.ObjectID.String()).String() @@ -99,7 +100,7 @@ func CreateApproverEdge( if err := operation.Operation.SubmitReader(func( ctx context.Context, tx graph.Transaction, - outC chan<- analysis.CreatePostRelationshipJob, + outC chan<- post.EnsureRelationshipJob, ) error { // Step 3a: Read the primaryApprovers lists (user and group GUIDs) userApproversID, err := fetchedAZRole.Properties.Get( @@ -146,7 +147,7 @@ func CreateApproverEdge( func handleDefaultAdminRoles( ctx context.Context, db graph.Database, - outC chan<- analysis.CreatePostRelationshipJob, + outC chan<- post.EnsureRelationshipJob, tenantNode, fetchedAZRole *graph.Node, ) error { // Step 3b.ii: Find Global Administrator and Privileged Role Administrator roles in this tenant @@ -172,7 +173,7 @@ func handleDefaultAdminRoles( // Step 3b.iii: Create AZRoleApprover edges from each default admin role to the target AZRole for _, fetchedNode := range fetchedNodes { // Enqueue creation of AZRoleApprover edge: from admin role → target AZRole - channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: 
fetchedNode.ID, ToID: fetchedAZRole.ID, Kind: azure.AZRoleApprover, @@ -196,7 +197,7 @@ func handleDefaultAdminRoles( func handlePrincipalApprovers( ctx context.Context, db graph.Database, - outC chan<- analysis.CreatePostRelationshipJob, + outC chan<- post.EnsureRelationshipJob, principalIDs []string, fetchedAZRole *graph.Node, ) error { @@ -235,7 +236,7 @@ func handlePrincipalApprovers( } // Step 3c.ii.2: Create AZRoleApprover edge from approver node to target AZRole - if !channels.Submit(ctx, outC, analysis.CreatePostRelationshipJob{ + if !channels.Submit(ctx, outC, post.EnsureRelationshipJob{ FromID: fetchedNode.ID, ToID: fetchedAZRole.ID, Kind: azure.AZRoleApprover, diff --git a/packages/go/analysis/azure/tenant.go b/packages/go/analysis/azure/tenant.go index c220560f110..1422b1b4761 100644 --- a/packages/go/analysis/azure/tenant.go +++ b/packages/go/analysis/azure/tenant.go @@ -23,6 +23,7 @@ import ( "github.com/specterops/bloodhound/packages/go/bhlog/measure" "github.com/specterops/bloodhound/packages/go/graphschema/azure" + "github.com/specterops/bloodhound/packages/go/trace" "github.com/specterops/dawgs/graph" "github.com/specterops/dawgs/ops" "github.com/specterops/dawgs/query" @@ -84,6 +85,8 @@ func TenantPrincipals(tx graph.Transaction, tenant *graph.Node) (graph.NodeSet, } func FetchTenants(ctx context.Context, db graph.Database) (graph.NodeSet, error) { + defer trace.Function(ctx, "FetchTenants")() + var nodeSet graph.NodeSet if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { var err error diff --git a/packages/go/analysis/delta/tracker.go b/packages/go/analysis/delta/tracker.go new file mode 100644 index 00000000000..4d1a273636b --- /dev/null +++ b/packages/go/analysis/delta/tracker.go @@ -0,0 +1,277 @@ +// Copyright 2026 Specter Ops, Inc. +// +// Licensed under the Apache License, Version 2.0 +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +package delta + +import ( + "context" + "encoding/binary" + "log/slog" + "slices" + "sync" + + "github.com/cespare/xxhash/v2" + "github.com/specterops/bloodhound/packages/go/trace" + "github.com/specterops/dawgs/graph" + "github.com/specterops/dawgs/query" +) + +// KeyEncoder encodes node and edge identifiers into hash keys using xxhash. It is used +// to generate unique keys for tracking entities in a Tracker. +type KeyEncoder struct { + digester *xxhash.Digest + buffer [8]byte +} + +// NewKeyEncoder creates a new key encoder instance with default settings. +func NewKeyEncoder() *KeyEncoder { + return &KeyEncoder{ + digester: xxhash.New(), + } +} + +// NodeKey computes the hash key for a given node ID and list of kinds. +func (s *KeyEncoder) NodeKey(node uint64, kinds graph.Kinds) uint64 { + s.digester.Reset() + + // Node identifier and sorted kinds make up a node key + binary.LittleEndian.PutUint64(s.buffer[:], node) + s.digester.Write(s.buffer[:]) + + kindStrs := kinds.Strings() + slices.Sort(kindStrs) + + for _, kindStr := range kindStrs { + s.digester.WriteString(kindStr) + } + + // Sum the digest + return s.digester.Sum64() +} + +// EdgeKey computes the hash key for an edge defined by start/end IDs and kind. 
+func (s *KeyEncoder) EdgeKey(start, end uint64, kind graph.Kind) uint64 { + s.digester.Reset() + + // Start and end identifiers and the edge's kind make up an edge key + binary.LittleEndian.PutUint64(s.buffer[:], start) + s.digester.Write(s.buffer[:]) + + binary.LittleEndian.PutUint64(s.buffer[:], end) + s.digester.Write(s.buffer[:]) + + // Edge type + s.digester.WriteString(kind.String()) + + // Sum the digest + return s.digester.Sum64() +} + +// KeyEncoderPool manages a pool of KeyEncoder instances for efficient reuse. +type KeyEncoderPool struct { + encoders *sync.Pool +} + +// NewEdgeEncoderPool creates a new pool for edge key encoding. +func NewEdgeEncoderPool() *KeyEncoderPool { + return &KeyEncoderPool{ + encoders: &sync.Pool{ + New: func() any { + return NewKeyEncoder() + }, + }, + } +} + +// EdgeKey retrieves or allocates an encoder from the pool and computes the edge key. +func (s *KeyEncoderPool) EdgeKey(start, end uint64, kind graph.Kind) uint64 { + var ( + raw = s.encoders.Get() + encoder, typeOK = raw.(*KeyEncoder) + ) + + if !typeOK { + encoder = NewKeyEncoder() + } + + key := encoder.EdgeKey(start, end, kind) + + if typeOK { + s.encoders.Put(raw) + } + + return key +} + +// trackedEntity represents a single entity being tracked within a Tracker. +type trackedEntity struct { + ID uint64 + Key uint64 +} + +// Tracker tracks edges and nodes as hashed keys that can be looked and checked. The Tracker also +// maintains a list of seen entities and provides methods to detect deletions. +type Tracker struct { + entities []trackedEntity + seenKeys map[uint64]struct{} + seenKeysLock sync.RWMutex + encoderPool *KeyEncoderPool +} + +// Seen returns the number of unique keys currently tracked and seen by either HasNode or HasEdge. +func (s *Tracker) Seen() int { + s.seenKeysLock.RLock() + defer s.seenKeysLock.RUnlock() + + return len(s.seenKeys) +} + +// Deleted returns a slice of IDs for edges that were not seen during the operation. 
+func (s *Tracker) Deleted() []uint64 { + s.seenKeysLock.RLock() + defer s.seenKeysLock.RUnlock() + + deletedEdges := make([]uint64, 0, len(s.entities)-len(s.seenKeys)) + + for _, edge := range s.entities { + if _, seen := s.seenKeys[edge.Key]; !seen { + deletedEdges = append(deletedEdges, edge.ID) + } + } + + return deletedEdges +} + +// HasEdge checks whether a specific edge exists in the tracker. If found, it marks the key as seen. +func (s *Tracker) HasEdge(start, end uint64, edgeKind graph.Kind) bool { + var ( + edgeKey = s.encoderPool.EdgeKey(start, end, edgeKind) + _, found = slices.BinarySearchFunc(s.entities, edgeKey, func(e trackedEntity, t uint64) int { + if e.Key < t { + return -1 + } + + if e.Key > t { + return 1 + } + + return 0 + }) + ) + + if found { + s.seenKeysLock.Lock() + s.seenKeys[edgeKey] = struct{}{} + s.seenKeysLock.Unlock() + } + + return found +} + +// EdgeTrackerBuilder builds a Tracker from a sequence of tracked edges. +type EdgeTrackerBuilder struct { + edges []trackedEntity + encoderPool *KeyEncoderPool +} + +// NewTrackerBuilder creates a new builder for constructing a Tracker. +func NewTrackerBuilder() *EdgeTrackerBuilder { + return &EdgeTrackerBuilder{ + encoderPool: NewEdgeEncoderPool(), + } +} + +// TrackEdge adds an edge to the builder for later tracking. +func (s *EdgeTrackerBuilder) TrackEdge(edge, start, end uint64, kind graph.Kind) { + s.edges = append(s.edges, trackedEntity{ + ID: edge, + Key: s.encoderPool.EdgeKey(start, end, kind), + }) +} + +// Build constructs a Tracker from the accumulated edges. +func (s *EdgeTrackerBuilder) Build() *Tracker { + // Sort the edges before building the tracker + slices.SortStableFunc(s.edges, func(a, b trackedEntity) int { + if a.Key < b.Key { + return -1 + } + + if a.Key > b.Key { + return 1 + } + + return 0 + }) + + return &Tracker{ + entities: s.edges, + encoderPool: s.encoderPool, + + // Assume that the tracker will have a high hit ratio. 
This may be better exposed as a function parameter + // but for now this seems like a safe bet. + seenKeys: make(map[uint64]struct{}, len(s.edges)), + } +} + +// FetchTracker retrieves all relevant edges that match one of the edge kinds given from the database. It uses +// this data to then build a Tracker. +func FetchTracker(ctx context.Context, db graph.Database, edgeKinds graph.Kinds) (*Tracker, error) { + var ( + tracef = trace.Function(ctx, "FetchTracker") + builder = NewTrackerBuilder() + numResults = uint64(0) + ) + + if err := db.ReadTransaction(ctx, func(tx graph.Transaction) error { + return tx.Relationships().Filter(query.And( + query.Not(query.KindIn(query.Start(), graph.StringKind("Meta"), graph.StringKind("MetaDetail"))), + query.KindIn(query.Relationship(), edgeKinds...), + query.Not(query.KindIn(query.End(), graph.StringKind("Meta"), graph.StringKind("MetaDetail"))), + )).Query( + func(results graph.Result) error { + var ( + edgeID graph.ID + startID graph.ID + edgeKind graph.Kind + endID graph.ID + ) + + for results.Next() { + if err := results.Scan(&edgeID, &startID, &edgeKind, &endID); err != nil { + return err + } + + builder.TrackEdge(edgeID.Uint64(), startID.Uint64(), endID.Uint64(), edgeKind) + numResults += 1 + } + + results.Close() + return results.Error() + }, + query.Returning( + query.RelationshipID(), + query.StartID(), + query.KindsOf(query.Relationship()), + query.EndID(), + ), + ) + }); err != nil { + return nil, err + } + + tracef(slog.Uint64("num_edges_tracked", numResults)) + return builder.Build(), nil +} diff --git a/packages/go/analysis/delta/tracker_test.go b/packages/go/analysis/delta/tracker_test.go new file mode 100644 index 00000000000..3276d32af40 --- /dev/null +++ b/packages/go/analysis/delta/tracker_test.go @@ -0,0 +1,208 @@ +// Copyright 2026 Specter Ops, Inc. +// +// Licensed under the Apache License, Version 2.0 +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +package delta + +import ( + "sync" + "testing" + + "github.com/specterops/dawgs/graph" + "github.com/stretchr/testify/require" +) + +var ( + kindA = graph.StringKind("A") + kindB = graph.StringKind("B") + kindC = graph.StringKind("C") +) + +func TestKeyEncoder_Key_Deterministic(t *testing.T) { + enc := NewKeyEncoder() + + start, end := uint64(12345), uint64(67890) + + key1 := enc.EdgeKey(start, end, kindA) + key2 := enc.EdgeKey(start, end, kindA) + + if key1 != key2 { + t.Fatalf("KeyEncoder.Key should be deterministic, got %d and %d", key1, key2) + } +} + +func TestKeyEncoder_Key_VaryingInputs(t *testing.T) { + enc := NewKeyEncoder() + start, end := uint64(1), uint64(2) + + keys := make(map[uint64]struct{}) + keys[enc.EdgeKey(start, end, kindA)] = struct{}{} + keys[enc.EdgeKey(start+10, end, kindA)] = struct{}{} + keys[enc.EdgeKey(start, end, kindB)] = struct{}{} + keys[enc.EdgeKey(end, start, kindA)] = struct{}{} + + if len(keys) != 4 { + t.Fatalf("Expected 4 distinct keys for different inputs, got %d {%v+}", len(keys), keys) + } + + _, exists := keys[enc.EdgeKey(start, end, kindA)] + require.True(t, exists) + + _, exists = keys[enc.EdgeKey(start+10, end, kindA)] + require.True(t, exists) + + _, exists = keys[enc.EdgeKey(start, end, kindB)] + require.True(t, exists) + + _, exists = keys[enc.EdgeKey(end, start, kindA)] + require.True(t, exists) +} + +// Helper to compute a key without using the internal encoder (for expected ordering checks) +func computeKey(start, end uint64, k graph.Kind) uint64 { + 
enc := NewKeyEncoder() + return enc.EdgeKey(start, end, k) +} + +func TestTrackerBuilder_Build_SortsKeysAndIDs(t *testing.T) { + builder := NewTrackerBuilder() + + // Insert edges in unsorted order. + builder.TrackEdge(100, 5, 10, kindA) // edgeID 100 + builder.TrackEdge(101, 2, 8, kindB) // edgeID 101 + builder.TrackEdge(102, 7, 3, kindC) // edgeID 102 + + sub := builder.Build() + + // Verify that edgeKeys are sorted ascending. + for i := 1; i < len(sub.entities); i++ { + if sub.entities[i-1].Key > sub.entities[i].Key { + t.Fatalf("edgeKeys not sorted: %v", sub.entities) + } + } + + // Verify that edgeIDs have been reordered to match the sorted keys. + // Compute the keys manually to know the expected order. + keys := []uint64{ + computeKey(5, 10, kindA), // edge 100 + computeKey(2, 8, kindB), // edge 101 + computeKey(7, 3, kindC), // edge 102 + } + + // Sort the keys to get the expected order. + sortedIdx := make([]int, len(keys)) + for i := range sortedIdx { + sortedIdx[i] = i + } + + // Simple bubble sort on indices based on keys (just for test readability) + for i := range len(keys) { + for j := i + 1; j < len(keys); j++ { + if keys[sortedIdx[i]] > keys[sortedIdx[j]] { + sortedIdx[i], sortedIdx[j] = sortedIdx[j], sortedIdx[i] + } + } + } + expectedOrder := []uint64{ + []uint64{100, 101, 102}[sortedIdx[0]], + []uint64{100, 101, 102}[sortedIdx[1]], + []uint64{100, 101, 102}[sortedIdx[2]], + } + + if len(sub.entities) != len(expectedOrder) { + t.Fatalf("unexpected number of edges") + } + + for i, edge := range sub.entities { + if edge.ID != expectedOrder[i] { + t.Fatalf("edge ID at index %d expected %d, got %d", i, expectedOrder[i], edge.ID) + } + } +} + +func TestTracker_HasEdge_And_DeletedEdges(t *testing.T) { + builder := NewTrackerBuilder() + + // Edge set we will actually add to the subgraph. 
+ builder.TrackEdge(10, 1, 2, kindA) // edgeID 10 + builder.TrackEdge(20, 3, 4, kindB) // edgeID 20 + builder.TrackEdge(30, 5, 6, kindC) // edgeID 30 + + sub := builder.Build() + + // Query a subset of edges – deliberately omit the second one. + if !sub.HasEdge(1, 2, kindA) { + t.Fatalf("expected edge (1,2,kindA) to be present") + } + if sub.HasEdge(3, 5, kindB) { + t.Fatalf("expected edge (3,5,kindB) to be *absent* from HasEdge calls") + } + if !sub.HasEdge(5, 6, kindC) { + t.Fatalf("expected edge (5,6,kindC) to be present") + } + + // DeletedEdges should now contain only the ID of the edge we never queried. + deleted := sub.Deleted() + if len(deleted) != 1 { + t.Fatalf("expected exactly one deleted edge, got %d", len(deleted)) + } + if deleted[0] != 20 { + t.Fatalf("expected deleted edge ID to be 20, got %d", deleted[0]) + } +} + +func TestTracker_HasEdge_DuplicateCalls(t *testing.T) { + builder := NewTrackerBuilder() + builder.TrackEdge(55, 11, 22, kindA) + sub := builder.Build() + + // Call HasEdge many times – it should stay true and not affect DeletedEdges. 
+ for i := range 5 { + if !sub.HasEdge(11, 22, kindA) { + t.Fatalf("edge should be found on iteration %d", i) + } + } + + if got := sub.Deleted(); len(got) != 0 { + t.Fatalf("expected no deleted edges after repeated HasEdge calls, got %v", got) + } +} + +func TestTracker_ConcurrentHasEdge(t *testing.T) { + builder := NewTrackerBuilder() + // Build a larger set of edges (10 edges) + for i := range 10 { + builder.TrackEdge(uint64(100+i), uint64(i), uint64(i+100), kindA) + } + sub := builder.Build() + + var wg sync.WaitGroup + query := func(startIdx, endIdx int) { + defer wg.Done() + for i := startIdx; i < endIdx; i++ { + if ok := sub.HasEdge(uint64(i), uint64(i+100), kindA); !ok { + t.Errorf("expected edge %d to exist", i) + } + } + } + + wg.Add(2) + go query(0, 5) // first half + go query(5, 10) // second half + wg.Wait() + + if del := sub.Deleted(); len(del) != 0 { + t.Fatalf("expected no deleted edges after concurrent queries, got %v", del) + } +} diff --git a/packages/go/analysis/hybrid/hybrid.go b/packages/go/analysis/hybrid/hybrid.go index db4844aaecb..589f862a611 100644 --- a/packages/go/analysis/hybrid/hybrid.go +++ b/packages/go/analysis/hybrid/hybrid.go @@ -24,6 +24,7 @@ import ( "github.com/specterops/bloodhound/packages/go/analysis" "github.com/specterops/bloodhound/packages/go/analysis/azure" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/bhlog/attr" "github.com/specterops/bloodhound/packages/go/bhlog/measure" adSchema "github.com/specterops/bloodhound/packages/go/graphschema/ad" @@ -35,7 +36,7 @@ import ( "github.com/specterops/dawgs/util/channels" ) -func PostHybrid(ctx context.Context, db graph.Database) (*analysis.AtomicPostProcessingStats, error) { +func PostHybrid(ctx context.Context, db graph.Database) (*post.AtomicPostProcessingStats, error) { defer measure.ContextLogAndMeasure( ctx, slog.LevelInfo, @@ -48,7 +49,7 @@ func PostHybrid(ctx context.Context, db graph.Database) 
(*analysis.AtomicPostPro // Fetch all Azure tenants first tenants, err := azure.FetchTenants(ctx, db) if err != nil { - return &analysis.AtomicPostProcessingStats{}, fmt.Errorf("fetching Entra tenants: %w", err) + return &post.AtomicPostProcessingStats{}, fmt.Errorf("fetching Entra tenants: %w", err) } // Spin up a new parallel operation to speed up processing @@ -115,7 +116,7 @@ func PostHybrid(ctx context.Context, db graph.Database) (*analysis.AtomicPostPro } } - if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- analysis.CreatePostRelationshipJob) error { + if err := operation.Operation.SubmitReader(func(ctx context.Context, tx graph.Transaction, outC chan<- post.EnsureRelationshipJob) error { for azUser, potentialADUser := range entraToADMap { var adUser = potentialADUser @@ -129,7 +130,7 @@ func PostHybrid(ctx context.Context, db graph.Database) (*analysis.AtomicPostPro } } - SyncedToEntraUserRelationship := analysis.CreatePostRelationshipJob{ + SyncedToEntraUserRelationship := post.EnsureRelationshipJob{ FromID: adUser, ToID: azUser, Kind: adSchema.SyncedToEntraUser, @@ -139,7 +140,7 @@ func PostHybrid(ctx context.Context, db graph.Database) (*analysis.AtomicPostPro return nil } - SyncedToADUserRelationship := analysis.CreatePostRelationshipJob{ + SyncedToADUserRelationship := post.EnsureRelationshipJob{ FromID: azUser, ToID: adUser, Kind: azureSchema.SyncedToADUser, diff --git a/packages/go/analysis/impact/aggregator.go b/packages/go/analysis/impact/aggregator.go deleted file mode 100644 index 6b2cd4590cd..00000000000 --- a/packages/go/analysis/impact/aggregator.go +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright 2023 Specter Ops, Inc. -// -// Licensed under the Apache License, Version 2.0 -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -package impact - -import ( - "fmt" - "log/slog" - "sync" - - "github.com/specterops/bloodhound/packages/go/bhlog/measure" - "github.com/specterops/dawgs/cardinality" - "github.com/specterops/dawgs/graph" -) - -// PathAggregator is a cardinality aggregator for paths and shortcut paths. -// -// When encoding shortcut paths the aggregator will track node dependencies for nodes that otherwise would have missing -// cardinality entries. Dependencies are organized as an adjacency list for each node. These adjacency lists combine to -// make a dependency graph of cardinalities that can be traversed. -// -// Once all paths are encoded, shortcut or otherwise, into the aggregator, users may then resolve the full cardinality -// of nodes by calling the cardinality functions of the aggregator. Resolution is accomplished using a recursive -// depth-first strategy. -type PathAggregator interface { - Cardinality(targets ...uint64) cardinality.Provider[uint64] - AddPath(path *graph.PathSegment) - AddShortcut(path *graph.PathSegment) -} - -type ThreadSafeAggregator struct { - aggregator PathAggregator - lock *sync.RWMutex -} - -func (s ThreadSafeAggregator) Cardinality(targets ...uint64) cardinality.Provider[uint64] { - s.lock.Lock() - defer s.lock.Unlock() - - return s.aggregator.Cardinality(targets...) 
-} - -func (s ThreadSafeAggregator) AddPath(path *graph.PathSegment) { - s.lock.Lock() - defer s.lock.Unlock() - - s.aggregator.AddPath(path) -} - -func (s ThreadSafeAggregator) AddShortcut(path *graph.PathSegment) { - s.lock.Lock() - defer s.lock.Unlock() - - s.aggregator.AddShortcut(path) -} - -func NewThreadSafeAggregator(aggregator PathAggregator) PathAggregator { - return &ThreadSafeAggregator{ - aggregator: aggregator, - lock: &sync.RWMutex{}, - } -} - -type aggregator struct { - resolved cardinality.Duplex[uint64] - cardinalities *graph.IndexedSlice[uint64, cardinality.Provider[uint64]] - dependencies map[uint64]cardinality.Duplex[uint64] - newCardinalityProvider cardinality.ProviderConstructor[uint64] -} - -func NewAggregator(newCardinalityProvider cardinality.ProviderConstructor[uint64]) PathAggregator { - return aggregator{ - cardinalities: graph.NewIndexedSlice[uint64, cardinality.Provider[uint64]](), - dependencies: map[uint64]cardinality.Duplex[uint64]{}, - resolved: cardinality.NewBitmap64(), - newCardinalityProvider: newCardinalityProvider, - } -} - -// pushDependency adds a new dependency for the given target. 
-func (s aggregator) pushDependency(target, dependency uint64) { - if dependencies, hasDependencies := s.dependencies[target]; hasDependencies { - dependencies.Add(dependency) - } else { - newDependencies := cardinality.NewBitmap64() - newDependencies.Add(dependency) - - s.dependencies[target] = newDependencies - } -} - -// popDependencies will take the simplex cardinality provider reference for the given target, remove it from the -// containing map in the aggregator and then return it -func (s aggregator) popDependencies(targetID uint64) []uint64 { - dependencies, hasDependencies := s.dependencies[targetID] - delete(s.dependencies, targetID) - - if hasDependencies { - return dependencies.Slice() - } - - return nil -} - -func (s aggregator) getImpact(targetID uint64) cardinality.Provider[uint64] { - return s.cardinalities.GetOr(targetID, s.newCardinalityProvider) -} - -// resolution is a cursor type that tracks the resolution of a node's impact -type resolution struct { - // target is the uint64 ID of the node being resolved - target uint64 - - // impact stores the cardinality of the target's impact - impact cardinality.Provider[uint64] - - // completions are cardinality providers that will have this resolution's impact merged into them - completions []cardinality.Provider[uint64] - - // dependencies contains a slice of uint64 node IDs that this resolution depends on - dependencies []uint64 -} - -// resolve takes the target uint64 ID of a node and calculates the cardinality of nodes that have a path that traverse -// it -func (s aggregator) resolve(targetID uint64) cardinality.Provider[uint64] { - var ( - targetImpact = s.getImpact(targetID) - resolutions = map[uint64]*resolution{ - targetID: { - target: targetID, - impact: targetImpact, - dependencies: s.popDependencies(targetID), - }, - } - stack = []uint64{targetID} - ) - - for len(stack) > 0 { - // Pick up the next resolution - next := resolutions[stack[len(stack)-1]] - - // Exhaust the resolution's 
dependencies - if len(next.dependencies) > 0 { - nextDependency := next.dependencies[len(next.dependencies)-1] - next.dependencies = next.dependencies[:len(next.dependencies)-1] - - if s.resolved.Contains(nextDependency) { - // If this dependency has already been resolved, fetch and or it with this resolution's pathMembers - next.impact.Or(s.cardinalities.Get(nextDependency)) - } else if inProgressResolution, hasResolution := resolutions[nextDependency]; hasResolution { - // If this dependency is in the process of being resolved; track this node (var next) as a completion - // to or with the in progress resolutions pathMembers once fully resolved - inProgressResolution.completions = append(inProgressResolution.completions, next.impact) - } else { - // For each dependency not already resolved or in-progress is descended into as a new resolution - stack = append(stack, nextDependency) - resolutions[nextDependency] = &resolution{ - target: nextDependency, - impact: s.getImpact(nextDependency), - completions: []cardinality.Provider[uint64]{next.impact}, - dependencies: s.popDependencies(nextDependency), - } - } - } else { - // Pop the resolution from our dependency unwind - stack = stack[:len(stack)-1] - } - } - - // First resolution pass for completion dependencies - for _, nextResolution := range resolutions { - for _, nextCompletion := range nextResolution.completions { - nextCompletion.Or(nextResolution.impact) - } - } - - // Second resolution pass for completion dependencies that were not fully resolved on the first pass - for _, nextResolution := range resolutions { - for _, nextCompletion := range nextResolution.completions { - nextCompletion.Or(nextResolution.impact) - } - - s.resolved.Add(nextResolution.target) - } - - return targetImpact -} - -func (s aggregator) Cardinality(targets ...uint64) cardinality.Provider[uint64] { - slog.Debug(fmt.Sprintf("Calculating pathMembers cardinality for %d targets", len(targets))) - defer 
measure.MeasureWithThreshold(slog.LevelDebug, "Calculated pathMembers cardinality", slog.Int("num_targets", len(targets)))() - - impact := s.newCardinalityProvider() - - for _, target := range targets { - if s.resolved.Contains(target) { - impact.Or(s.cardinalities.Get(target)) - } else { - impact.Or(s.resolve(target)) - } - } - - return impact -} - -func (s aggregator) AddPath(path *graph.PathSegment) { - impactingNodes := []uint64{ - path.Node.ID.Uint64(), - } - - for cursor := path.Trunk; cursor != nil; cursor = cursor.Trunk { - // Only pull the pathMembers from the map if we have nodes that should be counted for this cursor - if len(impactingNodes) > 0 { - s.getImpact(cursor.Node.ID.Uint64()).Add(impactingNodes...) - } - - impactingNodes = append(impactingNodes, cursor.Node.ID.Uint64()) - } -} - -func (s aggregator) AddShortcut(path *graph.PathSegment) { - var ( - terminalUint32ID = path.Node.ID.Uint64() - impactingNodes = []uint64{ - terminalUint32ID, - } - ) - - for cursor := path.Trunk; cursor != nil; cursor = cursor.Trunk { - cursorNodeUint32ID := cursor.Node.ID.Uint64() - - // Add the terminal shortcut as a dependency to each ascending node - s.pushDependency(cursorNodeUint32ID, terminalUint32ID) - - // Only pull the pathMembers from the map if we have nodes that should be counted for this cursor - if len(impactingNodes) > 0 { - s.getImpact(cursorNodeUint32ID).Add(impactingNodes...) - } - - impactingNodes = append(impactingNodes, cursor.Node.ID.Uint64()) - } -} - -func (s aggregator) Resolved() cardinality.Duplex[uint64] { - return s.resolved -} diff --git a/packages/go/analysis/impact/aggregator_test.go b/packages/go/analysis/impact/aggregator_test.go deleted file mode 100644 index 286c4960f8d..00000000000 --- a/packages/go/analysis/impact/aggregator_test.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2023 Specter Ops, Inc. -// -// Licensed under the Apache License, Version 2.0 -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -package impact_test - -import ( - "testing" - - "github.com/specterops/bloodhound/packages/go/analysis/impact" - "github.com/specterops/dawgs/cardinality" - "github.com/specterops/dawgs/graph" - "github.com/stretchr/testify/require" -) - -var ( - aKind = graph.StringKind("A") - edgeKind = graph.StringKind("EDGE") - nextID = graph.ID(0) -) - -func resetNextID() { - nextID = 0 -} - -func getNextID() graph.ID { - id := nextID - nextID++ - - return id -} - -func descend(trunk *graph.PathSegment, nextNode *graph.Node) *graph.PathSegment { - return trunk.Descend(nextNode, rel(nextNode, trunk.Node)) -} - -func rel(start, end *graph.Node) *graph.Relationship { - return graph.NewRelationship(getNextID(), start.ID, end.ID, nil, edgeKind) -} - -func node(nodeKinds ...graph.Kind) *graph.Node { - return graph.NewNode(getNextID(), nil, nodeKinds...) -} - -func requireImpact(t *testing.T, agg impact.PathAggregator, nodeID uint64, containedNodes ...uint64) { - nodeImpact := agg.Cardinality(nodeID).(cardinality.Duplex[uint64]) - - if int(nodeImpact.Cardinality()) != len(containedNodes) { - t.Fatalf("Expected node %d to contain %d impacting nodes but saw %d: %v", int(nodeID), len(containedNodes), int(nodeImpact.Cardinality()), nodeImpact.Slice()) - } - - for _, containedNode := range containedNodes { - require.Truef(t, nodeImpact.Contains(containedNode), "Expected node %d to contain node %d. 
Impact for node 0: %v", int(nodeID), int(containedNode), nodeImpact.Slice()) - } -} - -func TestAggregator_Impact(t *testing.T) { - resetNextID() - - var ( - node0 = node(aKind) - node1 = node(aKind) - node2 = node(aKind) - node3 = node(aKind) - node4 = node(aKind) - node5 = node(aKind) - node6 = node(aKind) - node7 = node(aKind) - node8 = node(aKind) - node9 = node(aKind) - node10 = node(aKind) - node11 = node(aKind) - - rootSegment = graph.NewRootPathSegment(node0) - - node1Segment = descend(rootSegment, node1) - node3Segment = descend(node1Segment, node3) - node5Segment = descend(node3Segment, node5) - node8Segment = descend(node5Segment, node8) - node8to10Shortcut = descend(node8Segment, node10) - - node6Segment = descend(node3Segment, node6) - node6to7Shortcut = descend(node6Segment, node7) - - node11Segment = descend(rootSegment, node11) - node11to4Shortcut = descend(node11Segment, node4) - - node2Segment = descend(rootSegment, node2) - node4Segment = descend(node2Segment, node4) - node7Segment = descend(node4Segment, node7) - node9Segment = descend(node7Segment, node9) - - node2to3Shortcut = descend(node2Segment, node3) - node7to3Shortcut = descend(node7Segment, node3) - - // Node 10 is Terminal for the node9 and node11 segments - node9to10Terminal = descend(node9Segment, node10) - node11to10Terminal = descend(node11Segment, node10) - - // Make sure to use an exact cardinality container (bitset in this case) - agg = impact.NewAggregator(func() cardinality.Provider[uint64] { - return cardinality.NewBitmap64() - }) - ) - - agg.AddPath(node9to10Terminal) - agg.AddPath(node11to10Terminal) - - agg.AddShortcut(node2to3Shortcut) - agg.AddShortcut(node11to4Shortcut) - agg.AddShortcut(node6to7Shortcut) - agg.AddShortcut(node7to3Shortcut) - agg.AddShortcut(node8to10Shortcut) - - // Validate node 2 impact values and resolutions - requireImpact(t, agg, 2, 3, 4, 5, 6, 7, 8, 9, 10) - - // Validate node 1 impact values and resolutions - requireImpact(t, agg, 1, 3, 5, 6, 7, 
8, 9, 10) - - // Validate node 11 impact values and resolutions - requireImpact(t, agg, 11, 3, 4, 5, 6, 7, 8, 9, 10) - - // Validate cached resolutions are correct for node 2 - requireImpact(t, agg, 2, 3, 4, 5, 6, 7, 8, 9, 10) -} diff --git a/packages/go/analysis/impact/aggregator_test_diagram.png b/packages/go/analysis/impact/aggregator_test_diagram.png deleted file mode 100644 index c71e8d9ced5..00000000000 Binary files a/packages/go/analysis/impact/aggregator_test_diagram.png and /dev/null differ diff --git a/packages/go/analysis/post.go b/packages/go/analysis/post.go index cd016b4d031..b414e7c8e45 100644 --- a/packages/go/analysis/post.go +++ b/packages/go/analysis/post.go @@ -23,6 +23,7 @@ import ( "sort" "strings" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/bhlog/attr" "github.com/specterops/bloodhound/packages/go/bhlog/level" "github.com/specterops/bloodhound/packages/go/bhlog/measure" @@ -47,20 +48,6 @@ func statsSortedKeys(value map[graph.Kind]int) []graph.Kind { return kinds } -func atomicStatsSortedKeys(value map[graph.Kind]*int32) []graph.Kind { - kinds := make([]graph.Kind, 0, len(value)) - - for key := range value { - kinds = append(kinds, key) - } - - sort.Slice(kinds, func(i, j int) bool { - return kinds[i].String() > kinds[j].String() - }) - - return kinds -} - type PostProcessingStats struct { RelationshipsCreated map[graph.Kind]int RelationshipsDeleted map[graph.Kind]int @@ -114,59 +101,15 @@ func (s PostProcessingStats) LogStats() { } } -//These were created for the new composition method. 
It was scrapped for the current initiative, but will be useful later -//type CompositionInfo struct { -// CompositionID int64 -// EdgeIDs []graph.ID -// NodeIDs []graph.ID -//} -// -//func (s CompositionInfo) HasComposition() bool { -// return len(s.EdgeIDs) > 0 || len(s.NodeIDs) > 0 -//} - -// -//func (s CompositionInfo) GetCompositionEdges() model.EdgeCompositionEdges { -// edges := make(model.EdgeCompositionEdges, len(s.EdgeIDs)) -// for i, edgeID := range s.EdgeIDs { -// edges[i] = model.EdgeCompositionEdge{ -// PostProcessedEdgeID: s.CompositionID, -// CompositionEdgeID: edgeID.Int64(), -// } -// } -// -// return edges -//} - -//func (s CompositionInfo) GetCompositionNodes() model.EdgeCompositionNodes { -// edges := make(model.EdgeCompositionNodes, len(s.EdgeIDs)) -// for i, nodeID := range s.NodeIDs { -// edges[i] = model.EdgeCompositionNode{ -// PostProcessedEdgeID: s.CompositionID, -// CompositionNodeID: nodeID.Int64(), -// } -// } -// -// return edges -//} - -type CreatePostRelationshipJob struct { - FromID graph.ID - ToID graph.ID - Kind graph.Kind - RelProperties map[string]any - //CompositionInfo CompositionInfo -} - type DeleteRelationshipJob struct { Kind graph.Kind ID graph.ID } -func DeleteTransitEdges(ctx context.Context, db graph.Database, baseKinds graph.Kinds, targetRelationships graph.Kinds) (*AtomicPostProcessingStats, error) { +func DeleteTransitEdges(ctx context.Context, db graph.Database, baseKinds graph.Kinds, targetRelationships graph.Kinds) (*post.AtomicPostProcessingStats, error) { var ( relationshipIDs []graph.ID - stats = NewAtomicPostProcessingStats() + stats = post.NewAtomicPostProcessingStats() operationName = fmt.Sprintf("Delete %v post-processed relationships", strings.Join(targetRelationships.Strings(), ", ")) ) diff --git a/packages/go/analysis/post/job.go b/packages/go/analysis/post/job.go new file mode 100644 index 00000000000..491382a2906 --- /dev/null +++ b/packages/go/analysis/post/job.go @@ -0,0 +1,28 @@ +// Copyright 
2026 Specter Ops, Inc. +// +// Licensed under the Apache License, Version 2.0 +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +package post + +import "github.com/specterops/dawgs/graph" + +// EnsureRelationshipJob is an asynchronous graph assertion. If the edge does not +// exist in the graph between the from and to node IDs with the given kind then +// the edge added to a batch creation process to be pushed down to the database. +type EnsureRelationshipJob struct { + FromID graph.ID + ToID graph.ID + Kind graph.Kind + RelProperties map[string]any +} diff --git a/packages/go/analysis/post/sink.go b/packages/go/analysis/post/sink.go new file mode 100644 index 00000000000..484307f9ef3 --- /dev/null +++ b/packages/go/analysis/post/sink.go @@ -0,0 +1,230 @@ +// Copyright 2026 Specter Ops, Inc. +// +// Licensed under the Apache License, Version 2.0 +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 +package post + +import ( + "context" + "log/slog" + "runtime" + "sync" + "time" + + "github.com/specterops/bloodhound/packages/go/analysis/delta" + "github.com/specterops/bloodhound/packages/go/bhlog/attr" + "github.com/specterops/bloodhound/packages/go/graphschema/common" + "github.com/specterops/bloodhound/packages/go/metrics" + "github.com/specterops/bloodhound/packages/go/trace" + "github.com/specterops/dawgs/graph" + "github.com/specterops/dawgs/util/channels" +) + +var ( + postOperationsVec = metrics.CounterVec("post_processing_ops", "analysis", map[string]string{}, []string{ + "kind", + "operation", + }) +) + +func newPropertiesWithLastSeen() *graph.Properties { + newProperties := graph.NewProperties() + newProperties.Set(common.LastSeen.String(), time.Now().UTC()) + + return newProperties +} + +// FilteredRelationshipSink is an asynchronous graph relationship writer that ensures only new relationships are +// inserted and removes unused ones. It uses a delta tracker to track changes between graphs and avoids reinserting +// existing edges. Any edge not visited during processing is treated as obsolete and will be deleted after the +// operation completes. +type FilteredRelationshipSink struct { + operationName string + db graph.Database + edgeTracker *delta.Tracker + jobC chan EnsureRelationshipJob + stats AtomicPostProcessingStats + wg sync.WaitGroup +} + +// NewFilteredRelationshipSink creates a new filtered relationship sink initialized with a given database, delta tracker, and operation name. 
+func NewFilteredRelationshipSink(ctx context.Context, operationName string, db graph.Database, deltaSubgraph *delta.Tracker) *FilteredRelationshipSink { + newSink := &FilteredRelationshipSink{ + db: db, + edgeTracker: deltaSubgraph, + operationName: operationName, + jobC: make(chan EnsureRelationshipJob), + stats: NewAtomicPostProcessingStats(), + } + + newSink.start(ctx) + return newSink +} + +// insertWorker processes incoming jobs by inserting them into the database using batch operations. It uses common properties +// (with last seen timestamp) and applies custom relationship properties if provided. +func (s *FilteredRelationshipSink) insertWorker(ctx context.Context, commonProps *graph.Properties, insertC chan EnsureRelationshipJob) { + if err := s.db.BatchOperation(ctx, func(batch graph.Batch) error { + for { + if nextJob, shouldContinue := channels.Receive(ctx, insertC); !shouldContinue { + break + } else { + relProps := commonProps + + if len(nextJob.RelProperties) > 0 { + relProps = commonProps.Clone() + + for key, val := range nextJob.RelProperties { + relProps.Set(key, val) + } + } + + if err := batch.CreateRelationshipByIDs(nextJob.FromID, nextJob.ToID, nextJob.Kind, relProps); err != nil { + slog.Error("Create Relationship Error", slog.String("err", err.Error())) + } + + s.stats.AddRelationshipsCreated(nextJob.Kind, 1) + + postOperationsVec.With(map[string]string{ + "kind": nextJob.Kind.String(), + "operation": "edge_insert", + }).Add(1) + } + } + + return nil + }); err != nil { + slog.Error("FilteredRelationshipSink Error", attr.Error(err)) + } +} + +// deltaFilterWorker filters out duplicate edges before they reach the insert worker. It checks whether +// an edge has already been tracked in the delta subgraph; if not, it forwards it to the insert channel. 
+func (s *FilteredRelationshipSink) deltaFilterWorker(ctx context.Context, filterC, insertC chan EnsureRelationshipJob) { + for { + nextJob, shouldContinue := channels.Receive(ctx, filterC) + + if !shouldContinue { + break + } + + if !s.edgeTracker.HasEdge(nextJob.FromID.Uint64(), nextJob.ToID.Uint64(), nextJob.Kind) { + if !channels.Submit(ctx, insertC, nextJob) { + break + } + } else { + postOperationsVec.With(map[string]string{ + "kind": nextJob.Kind.String(), + "operation": "filtered", + }).Add(1) + } + } +} + +// deleteMissingEdges removes any lingering edges that were not part of the current operation. This ensures +// that only valid relationships remain after the sink completes its work. +func (s *FilteredRelationshipSink) deleteMissingEdges(ctx context.Context) error { + deletedEdges := s.edgeTracker.Deleted() + + defer trace.Method(ctx, "FilteredRelationshipSink", "deleteMissingEdges", slog.Int("num_edges", len(deletedEdges)))() + + if err := s.db.BatchOperation(ctx, func(batch graph.Batch) error { + for _, deletedEdge := range deletedEdges { + if err := batch.DeleteRelationship(graph.ID(deletedEdge)); err != nil { + return err + } + } + + return nil + }); err != nil { + return err + } + + postOperationsVec.With(map[string]string{ + "kind": "all", + "operation": "edge_delete", + }).Add(float64(len(deletedEdges))) + + return nil +} + +// worker is the main goroutine responsible for managing the entire lifecycle of the sink. It +// coordinates between filtering, insertion, and deletion phases and handles shutdown gracefully. 
+func (s *FilteredRelationshipSink) worker(ctx context.Context) error { + defer trace.Method(ctx, "FilteredRelationshipSink", "worker", slog.String("operation", s.operationName))() + defer s.wg.Done() + + var ( + filterC = make(chan EnsureRelationshipJob) + insertC = make(chan EnsureRelationshipJob) + filterWG sync.WaitGroup + insertWG sync.WaitGroup + ) + + insertWG.Add(1) + + go func() { + defer insertWG.Done() + s.insertWorker(ctx, newPropertiesWithLastSeen(), insertC) + }() + + // FIXME: Really, really need a better CPU heuristic or config value + for workerID := 0; workerID < runtime.NumCPU()/2+1; workerID += 1 { + filterWG.Add(1) + + go func(workerID int) { + defer filterWG.Done() + s.deltaFilterWorker(ctx, filterC, insertC) + }(workerID) + } + + for { + if nextJob, shouldContinue := channels.Receive(ctx, s.jobC); !shouldContinue { + break + } else if !channels.Submit(ctx, filterC, nextJob) { + break + } + } + + close(filterC) + filterWG.Wait() + + close(insertC) + insertWG.Wait() + + // Remove any lingering edges after the operation completes + return s.deleteMissingEdges(ctx) +} + +// Stats returns a pointer to the atomic statistics structure tracking processed relationships. +func (s *FilteredRelationshipSink) Stats() *AtomicPostProcessingStats { + return &s.stats +} + +// start begins execution of the sink's main worker loop. +func (s *FilteredRelationshipSink) start(ctx context.Context) { + s.wg.Add(1) + go s.worker(ctx) +} + +// Submit submits a new job to be processed by the sink. +func (s *FilteredRelationshipSink) Submit(ctx context.Context, nextJob EnsureRelationshipJob) bool { + return channels.Submit(ctx, s.jobC, nextJob) +} + +// Done signals the end of processing and waits for all workers to complete. 
+func (s *FilteredRelationshipSink) Done() { + close(s.jobC) + s.wg.Wait() +} diff --git a/packages/go/analysis/post/stats.go b/packages/go/analysis/post/stats.go new file mode 100644 index 00000000000..c00079b5dcb --- /dev/null +++ b/packages/go/analysis/post/stats.go @@ -0,0 +1,121 @@ +// Copyright 2026 Specter Ops, Inc. +// +// Licensed under the Apache License, Version 2.0 +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +package post + +import ( + "fmt" + "log/slog" + "sort" + "sync" + "sync/atomic" + + "github.com/specterops/bloodhound/packages/go/bhlog/level" + "github.com/specterops/dawgs/graph" +) + +func atomicStatsSortedKeys(value map[graph.Kind]*int32) []graph.Kind { + kinds := make([]graph.Kind, 0, len(value)) + + for key := range value { + kinds = append(kinds, key) + } + + sort.Slice(kinds, func(i, j int) bool { + return kinds[i].String() > kinds[j].String() + }) + + return kinds +} + +type AtomicPostProcessingStats struct { + RelationshipsCreated map[graph.Kind]*int32 + RelationshipsDeleted map[graph.Kind]*int32 + mutex *sync.Mutex +} + +func NewAtomicPostProcessingStats() AtomicPostProcessingStats { + return AtomicPostProcessingStats{ + RelationshipsCreated: make(map[graph.Kind]*int32), + RelationshipsDeleted: make(map[graph.Kind]*int32), + mutex: &sync.Mutex{}, + } +} + +func (s *AtomicPostProcessingStats) AddRelationshipsCreated(kind graph.Kind, numCreated int32) { + s.mutex.Lock() + defer s.mutex.Unlock() + + if val, ok := s.RelationshipsCreated[kind]; !ok { + 
s.RelationshipsCreated[kind] = &numCreated + } else { + atomic.AddInt32(val, numCreated) + } +} + +func (s *AtomicPostProcessingStats) AddRelationshipsDeleted(kind graph.Kind, numDeleted int32) { + s.mutex.Lock() + defer s.mutex.Unlock() + + if val, ok := s.RelationshipsDeleted[kind]; !ok { + s.RelationshipsDeleted[kind] = &numDeleted + } else { + atomic.AddInt32(val, numDeleted) + } +} + +func (s *AtomicPostProcessingStats) Merge(other *AtomicPostProcessingStats) { + s.mutex.Lock() + defer s.mutex.Unlock() + + for key, value := range other.RelationshipsCreated { + if val, ok := s.RelationshipsCreated[key]; !ok { + s.RelationshipsCreated[key] = value + } else { + atomic.AddInt32(val, *value) + } + } + + for key, value := range other.RelationshipsDeleted { + if val, ok := s.RelationshipsDeleted[key]; !ok { + s.RelationshipsDeleted[key] = value + } else { + atomic.AddInt32(val, *value) + } + } +} + +func (s *AtomicPostProcessingStats) LogStats() { + // Only output stats during debug runs + if level.GlobalAccepts(slog.LevelDebug) { + return + } + + slog.Debug("Relationships deleted before post-processing:") + + for _, relationship := range atomicStatsSortedKeys(s.RelationshipsDeleted) { + if numDeleted := int(*s.RelationshipsDeleted[relationship]); numDeleted > 0 { + slog.Debug(fmt.Sprintf(" %s %d", relationship.String(), numDeleted)) + } + } + + slog.Debug("Relationships created after post-processing:") + + for _, relationship := range atomicStatsSortedKeys(s.RelationshipsCreated) { + if numCreated := int(*s.RelationshipsCreated[relationship]); numCreated > 0 { + slog.Debug(fmt.Sprintf(" %s %d", relationship.String(), numCreated)) + } + } +} diff --git a/packages/go/analysis/post_operation.go b/packages/go/analysis/post_operation.go index 6d8b036f9a2..b0764e6a729 100644 --- a/packages/go/analysis/post_operation.go +++ b/packages/go/analysis/post_operation.go @@ -18,67 +18,61 @@ package analysis import ( "context" - "fmt" - "log/slog" - "sync" - "sync/atomic" "time" - 
"github.com/specterops/bloodhound/packages/go/bhlog/attr" - "github.com/specterops/bloodhound/packages/go/bhlog/level" - "github.com/specterops/bloodhound/packages/go/bhlog/measure" + "github.com/specterops/bloodhound/packages/go/analysis/post" "github.com/specterops/bloodhound/packages/go/graphschema/common" + "github.com/specterops/bloodhound/packages/go/trace" "github.com/specterops/dawgs/graph" "github.com/specterops/dawgs/ops" ) +func NewPropertiesWithLastSeen() *graph.Properties { + newProperties := graph.NewProperties() + newProperties.Set(common.LastSeen.String(), time.Now().UTC()) + + return newProperties +} + type StatTrackedOperation[T any] struct { - Stats AtomicPostProcessingStats + Stats post.AtomicPostProcessingStats Operation *ops.Operation[T] } -func NewPostRelationshipOperation(ctx context.Context, db graph.Database, operationName string) StatTrackedOperation[CreatePostRelationshipJob] { - operation := StatTrackedOperation[CreatePostRelationshipJob]{} +func NewPostRelationshipOperation(ctx context.Context, db graph.Database, operationName string) StatTrackedOperation[post.EnsureRelationshipJob] { + operation := StatTrackedOperation[post.EnsureRelationshipJob]{} operation.NewOperation(ctx, db) - operation.Operation.SubmitWriter(func(ctx context.Context, batch graph.Batch, inC <-chan CreatePostRelationshipJob) error { - defer measure.ContextMeasure( - ctx, - slog.LevelInfo, - operationName, - attr.Namespace("analysis"), - attr.Function("NewPostRelationshipOperation"), - attr.Scope("routine"), - )() - - var ( - relProp = NewPropertiesWithLastSeen() - ) + operation.Operation.SubmitWriter(func(ctx context.Context, batch graph.Batch, inC <-chan post.EnsureRelationshipJob) error { + defer trace.Function(ctx, "PostRelationshipOperation")() + + relProp := NewPropertiesWithLastSeen() for nextJob := range inC { + relProps := relProp + if len(nextJob.RelProperties) > 0 { - tempRelProp := relProp.Clone() + relProps = relProp.Clone() + for key, val := range 
nextJob.RelProperties { - tempRelProp.Set(key, val) - } - if err := batch.CreateRelationshipByIDs(nextJob.FromID, nextJob.ToID, nextJob.Kind, tempRelProp); err != nil { - return err - } - } else { - if err := batch.CreateRelationshipByIDs(nextJob.FromID, nextJob.ToID, nextJob.Kind, relProp); err != nil { - return err + relProps.Set(key, val) } } + if err := batch.CreateRelationshipByIDs(nextJob.FromID, nextJob.ToID, nextJob.Kind, relProps); err != nil { + return err + } + operation.Stats.AddRelationshipsCreated(nextJob.Kind, 1) } return nil }) + return operation } func (s *StatTrackedOperation[T]) NewOperation(ctx context.Context, db graph.Database) { - s.Stats = NewAtomicPostProcessingStats() + s.Stats = post.NewAtomicPostProcessingStats() s.Operation = ops.StartNewOperation[T](ops.OperationContext{ Parent: ctx, DB: db, @@ -90,90 +84,3 @@ func (s *StatTrackedOperation[T]) NewOperation(ctx context.Context, db graph.Dat func (s *StatTrackedOperation[T]) Done() error { return s.Operation.Done() } - -type AtomicPostProcessingStats struct { - RelationshipsCreated map[graph.Kind]*int32 - RelationshipsDeleted map[graph.Kind]*int32 - mutex *sync.Mutex -} - -func NewAtomicPostProcessingStats() AtomicPostProcessingStats { - return AtomicPostProcessingStats{ - RelationshipsCreated: make(map[graph.Kind]*int32), - RelationshipsDeleted: make(map[graph.Kind]*int32), - mutex: &sync.Mutex{}, - } -} - -func (s *AtomicPostProcessingStats) AddRelationshipsCreated(kind graph.Kind, numCreated int32) { - s.mutex.Lock() - defer s.mutex.Unlock() - - if val, ok := s.RelationshipsCreated[kind]; !ok { - s.RelationshipsCreated[kind] = &numCreated - } else { - atomic.AddInt32(val, numCreated) - } -} - -func (s *AtomicPostProcessingStats) AddRelationshipsDeleted(kind graph.Kind, numDeleted int32) { - s.mutex.Lock() - defer s.mutex.Unlock() - - if val, ok := s.RelationshipsDeleted[kind]; !ok { - s.RelationshipsDeleted[kind] = &numDeleted - } else { - atomic.AddInt32(val, numDeleted) - } -} - 
-func (s *AtomicPostProcessingStats) Merge(other *AtomicPostProcessingStats) { - s.mutex.Lock() - defer s.mutex.Unlock() - - for key, value := range other.RelationshipsCreated { - if val, ok := s.RelationshipsCreated[key]; !ok { - s.RelationshipsCreated[key] = value - } else { - atomic.AddInt32(val, *value) - } - } - - for key, value := range other.RelationshipsDeleted { - if val, ok := s.RelationshipsDeleted[key]; !ok { - s.RelationshipsDeleted[key] = value - } else { - atomic.AddInt32(val, *value) - } - } -} - -func (s *AtomicPostProcessingStats) LogStats() { - // Only output stats during debug runs - if level.GlobalAccepts(slog.LevelDebug) { - return - } - - slog.Debug("Relationships deleted before post-processing:") - - for _, relationship := range atomicStatsSortedKeys(s.RelationshipsDeleted) { - if numDeleted := int(*s.RelationshipsDeleted[relationship]); numDeleted > 0 { - slog.Debug(fmt.Sprintf(" %s %d", relationship.String(), numDeleted)) - } - } - - slog.Debug("Relationships created after post-processing:") - - for _, relationship := range atomicStatsSortedKeys(s.RelationshipsCreated) { - if numCreated := int(*s.RelationshipsCreated[relationship]); numCreated > 0 { - slog.Debug(fmt.Sprintf(" %s %d", relationship.String(), numCreated)) - } - } -} - -func NewPropertiesWithLastSeen() *graph.Properties { - newProperties := graph.NewProperties() - newProperties.Set(common.LastSeen.String(), time.Now().UTC()) - - return newProperties -} diff --git a/packages/go/bhlog/attr/attr.go b/packages/go/bhlog/attr/attr.go index dda1e19adbc..b028c19b150 100644 --- a/packages/go/bhlog/attr/attr.go +++ b/packages/go/bhlog/attr/attr.go @@ -16,7 +16,10 @@ // attr supplies custom slog.Attr constructors package attr -import "log/slog" +import ( + "log/slog" + "time" +) // Error consistently includes an error message via standard logging in the "err" field. 
func Error(value error) slog.Attr { @@ -44,3 +47,27 @@ func Scope(value string) slog.Attr { func Function(value string) slog.Attr { return slog.String("fn", value) } + +func Operation(operation string) slog.Attr { + return slog.String("operation", operation) +} + +func Enter() slog.Attr { + return slog.String("state", "enter") +} + +func Exit() slog.Attr { + return slog.String("state", "exit") +} + +func Elapsed(duration time.Duration) slog.Attr { + return slog.Duration("elapsed", duration) +} + +func ElapsedSince(then time.Time) slog.Attr { + return Elapsed(time.Since(then)) +} + +func Measurement(id uint64) slog.Attr { + return slog.Uint64("measurement", id) +} diff --git a/packages/go/graphschema/azure/azure.go b/packages/go/graphschema/azure/azure.go index a3e05d7ae5d..822970b8bd9 100644 --- a/packages/go/graphschema/azure/azure.go +++ b/packages/go/graphschema/azure/azure.go @@ -429,7 +429,7 @@ func PathfindingRelationships() []graph.Kind { return []graph.Kind{AvereContributor, Contributor, GetCertificates, GetKeys, GetSecrets, HasRole, MemberOf, Owner, RunsAs, VMContributor, AutomationContributor, KeyVaultContributor, VMAdminLogin, AddMembers, AddSecret, ExecuteCommand, GlobalAdmin, PrivilegedAuthAdmin, Grant, GrantSelf, PrivilegedRoleAdmin, ResetPassword, UserAccessAdministrator, Owns, CloudAppAdmin, AppAdmin, AddOwner, ManagedIdentity, AKSContributor, NodeResourceGroup, WebsiteContributor, LogicAppContributor, AZMGAddMember, AZMGAddOwner, AZMGAddSecret, AZMGGrantAppRoles, AZMGGrantRole, SyncedToADUser, AZRoleEligible, AZRoleApprover, Contains} } func PostProcessedRelationships() []graph.Kind { - return []graph.Kind{AddSecret, ExecuteCommand, ResetPassword, AddMembers, GlobalAdmin, PrivilegedRoleAdmin, PrivilegedAuthAdmin, AZMGAddMember, AZMGAddOwner, AZMGAddSecret, AZMGGrantAppRoles, AZMGGrantRole, SyncedToADUser, AZRoleApprover} + return []graph.Kind{AddSecret, ExecuteCommand, AZMGAddMember, AZMGAddOwner, AZMGAddSecret, AZMGGrantAppRoles, AZMGGrantRole, 
SyncedToADUser, AZRoleApprover} } func NodeKinds() []graph.Kind { return []graph.Kind{Entity, VMScaleSet, App, Role, Device, FunctionApp, Group, KeyVault, ManagementGroup, ResourceGroup, ServicePrincipal, Subscription, Tenant, User, VM, ManagedCluster, ContainerRegistry, WebApp, LogicApp, AutomationAccount} diff --git a/packages/go/metrics/registry.go b/packages/go/metrics/registry.go new file mode 100644 index 00000000000..437aacd8686 --- /dev/null +++ b/packages/go/metrics/registry.go @@ -0,0 +1,165 @@ +// Copyright 2026 Specter Ops, Inc. +// +// Licensed under the Apache License, Version 2.0 +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 +package metrics + +import ( + "strings" + "sync" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +type registry struct { + lock *sync.Mutex + prometheusRegistry *prometheus.Registry + counters map[string]prometheus.Counter + counterVecs map[string]*prometheus.CounterVec + gauges map[string]prometheus.Gauge +} + +func metricKey(name, namespace string, labels map[string]string) string { + builder := strings.Builder{} + + builder.WriteString(namespace) + builder.WriteString(name) + + for key, value := range labels { + builder.WriteString(key) + builder.WriteString(value) + } + + return builder.String() +} + +func (s *registry) Counter(name, namespace string, constLabels map[string]string) prometheus.Counter { + s.lock.Lock() + defer s.lock.Unlock() + + key := metricKey(name, namespace, constLabels) + + if counter, hasCounter := s.counters[key]; hasCounter { + return counter + } else { + newCounter := promauto.With(s.prometheusRegistry).NewCounter(prometheus.CounterOpts{ + Name: name, + Namespace: namespace, + ConstLabels: constLabels, + }) + + s.counters[key] = newCounter + newCounter.Add(0) + + return newCounter + } +} + +func (s *registry) CounterVec(name, namespace string, constLabels map[string]string, variableLabelNames []string) *prometheus.CounterVec { + s.lock.Lock() + defer s.lock.Unlock() + + key := metricKey(name, namespace, constLabels) + + if counterVec, hasCounter := s.counterVecs[key]; hasCounter { + return counterVec + } else { + newCounterVec := promauto.With(s.prometheusRegistry).NewCounterVec(prometheus.CounterOpts{ + Name: name, + Namespace: namespace, + ConstLabels: constLabels, + }, variableLabelNames) + + s.counterVecs[key] = newCounterVec + return newCounterVec + } +} + +func (s *registry) Gauge(name, namespace string, constLabels map[string]string) prometheus.Gauge { + 
s.lock.Lock() + defer s.lock.Unlock() + + key := metricKey(name, namespace, constLabels) + + if gauge, hasGauge := s.gauges[key]; hasGauge { + return gauge + } else { + newGauge := promauto.With(s.prometheusRegistry).NewGauge(prometheus.GaugeOpts{ + Name: name, + Namespace: namespace, + ConstLabels: constLabels, + }) + + s.gauges[key] = newGauge + newGauge.Set(0) + + return newGauge + } +} + +var ( + globalRegistry *registry +) + +func init() { + prometheusRegistry := prometheus.NewRegistry() + + // Default collectors for Golang and process stats. This will panic on failure to register. + prometheusRegistry.MustRegister( + collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), + collectors.NewGoCollector(), + ) + + globalRegistry = ®istry{ + lock: &sync.Mutex{}, + prometheusRegistry: prometheusRegistry, + counters: map[string]prometheus.Counter{}, + counterVecs: map[string]*prometheus.CounterVec{}, + gauges: map[string]prometheus.Gauge{}, + } +} + +func Counter(name, namespace string, labels map[string]string) prometheus.Counter { + return globalRegistry.Counter(name, namespace, labels) +} + +func CounterVec(name, namespace string, labels map[string]string, variableLabelNames []string) *prometheus.CounterVec { + return globalRegistry.CounterVec(name, namespace, labels, variableLabelNames) +} + +func Gauge(name, namespace string, labels map[string]string) prometheus.Gauge { + return globalRegistry.Gauge(name, namespace, labels) +} + +func Registerer() *prometheus.Registry { + return globalRegistry.prometheusRegistry +} + +func NewCounter(opts prometheus.CounterOpts) prometheus.Counter { + return promauto.With(Registerer()).NewCounter(opts) +} + +func NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge { + return promauto.With(Registerer()).NewGauge(opts) +} + +func Register(collector prometheus.Collector) error { + return Registerer().Register(collector) +} + +func Unregister(collector prometheus.Collector) { + Registerer().Unregister(collector) +} diff 
--git a/packages/go/trace/trace.go b/packages/go/trace/trace.go new file mode 100644 index 00000000000..5a67382883e --- /dev/null +++ b/packages/go/trace/trace.go @@ -0,0 +1,133 @@ +// Copyright 2026 Specter Ops, Inc. +// +// Licensed under the Apache License, Version 2.0 +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +package trace + +import ( + "context" + "log/slog" + "sync/atomic" + "time" + + "github.com/specterops/bloodhound/packages/go/bhlog/attr" + "github.com/specterops/bloodhound/packages/go/metrics" +) + +type measureCtxKey struct{} + +var ( + nextContextID = &atomic.Uint64{} +) + +func combineArgs(args ...any) []any { + var all []any + + for _, arg := range args { + switch typedArg := arg.(type) { + case []any: + all = append(all, typedArg...) 
+ case any: + all = append(all, typedArg) + } + } + + return all +} + +type Trace struct { + ID uint64 + Started time.Time + Level slog.Level + Namespace string + Component string +} + +func newContext(level slog.Level, namespace, component string) *Trace { + return &Trace{ + ID: nextContextID.Add(1), + Started: time.Now(), + Level: level, + Namespace: namespace, + Component: component, + } +} + +func withContext(ctx context.Context, newMeasureCtx *Trace) context.Context { + return context.WithValue(ctx, measureCtxKey{}, newMeasureCtx) +} + +func fromContext(ctx context.Context) (*Trace, bool) { + if measureCtx := ctx.Value(measureCtxKey{}); measureCtx != nil { + typedMeasureCtx, typeOK := measureCtx.(*Trace) + return typedMeasureCtx, typeOK + } + + return nil, false +} + +func Context(ctx context.Context, level slog.Level, namespace, component string) context.Context { + return withContext(ctx, newContext(level, namespace, component)) +} + +func Function(ctx context.Context, function string, startArgs ...any) func(args ...any) { + var ( + level = slog.LevelInfo + then = time.Now() + traceCtx, hasTraceCtx = fromContext(ctx) + commonArgs []any + ) + + commonArgs = combineArgs([]any{ + attr.Scope("process"), + attr.Function(function), + }, startArgs) + + if hasTraceCtx { + level = traceCtx.Level + + commonArgs = combineArgs(commonArgs, []any{ + attr.Namespace(traceCtx.Namespace), + attr.Measurement(traceCtx.ID), + }) + } + + slog.Log(ctx, level, "Function Trace", combineArgs( + commonArgs, + attr.Enter(), + startArgs, + )...) + + return func(exitArgs ...any) { + elapsed := time.Since(then) + + if hasTraceCtx { + metrics.Counter("function_trace", traceCtx.Namespace, map[string]string{ + "fn": function, + }).Add(elapsed.Seconds()) + } + + slog.Log(ctx, level, "Function Trace", combineArgs( + commonArgs, + attr.Exit(), + startArgs, + attr.Elapsed(elapsed), + exitArgs, + )...) 
+ } +} + +func Method(ctx context.Context, receiver, function string, startArgs ...any) func(args ...any) { + return Function(ctx, receiver+"."+function, startArgs...) +} diff --git a/packages/javascript/bh-shared-ui/src/components/index.ts b/packages/javascript/bh-shared-ui/src/components/index.ts index c5d39e9c5d2..d82c283a010 100644 --- a/packages/javascript/bh-shared-ui/src/components/index.ts +++ b/packages/javascript/bh-shared-ui/src/components/index.ts @@ -33,9 +33,9 @@ export { default as CollectorCard } from './CollectorCard'; export * from './CollectorCardList'; export { default as CollectorCardList } from './CollectorCardList'; export * from './ColumnHeaders'; -export * from './ConditionalTooltip'; export * from './CommunityIcon'; export { default as CommunityIcon } from './CommunityIcon'; +export * from './ConditionalTooltip'; export * from './ConfirmationDialog'; export { default as ConfirmationDialog } from './ConfirmationDialog'; export * from './CreateMenu';