Skip to content

Commit

Permalink
Merge branch 'main' into audit-log-new-actions
Browse files Browse the repository at this point in the history
  • Loading branch information
juggernot325 committed Feb 5, 2024
2 parents 910a342 + d1e07b9 commit dfd5b2c
Show file tree
Hide file tree
Showing 5 changed files with 76 additions and 18 deletions.
57 changes: 43 additions & 14 deletions cmd/api/src/daemons/datapipe/analysis.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,12 +18,13 @@ package datapipe

import (
"context"
"errors"
"fmt"
"github.com/specterops/bloodhound/log"

"github.com/specterops/bloodhound/analysis"
adAnalysis "github.com/specterops/bloodhound/analysis/ad"
"github.com/specterops/bloodhound/dawgs/graph"
"github.com/specterops/bloodhound/errors"
"github.com/specterops/bloodhound/src/analysis/ad"
"github.com/specterops/bloodhound/src/analysis/azure"
"github.com/specterops/bloodhound/src/config"
Expand All @@ -33,57 +34,85 @@ import (
"github.com/specterops/bloodhound/src/services/dataquality"
)

var (
ErrAnalysisFailed = errors.New("analysis failed")
ErrAnalysisPartiallyCompleted = errors.New("analysis partially completed")
)

// RunAnalysisOperations executes the full post-ingest analysis pipeline:
// AD node fixups, domain association, well-known group linking, asset group
// isolation tagging, tier zero tagging, AD and Azure post-processing, asset
// group isolation collection, and data quality stats.
//
// Errors from individual stages are collected and logged rather than aborting
// the pipeline, so later stages still run. The return value summarizes only
// the four major stages (AD post, Azure post, AGI collection, data quality):
// ErrAnalysisFailed when all four failed, ErrAnalysisPartiallyCompleted when
// some but not all failed, and nil otherwise. Failures in the earlier fixup
// and tagging stages are logged but do not affect the returned error.
func RunAnalysisOperations(ctx context.Context, db database.Database, graphDB graph.Database, _ config.Configuration) error {
	var collectedErrors []error

	if err := adAnalysis.FixWellKnownNodeTypes(ctx, graphDB); err != nil {
		collectedErrors = append(collectedErrors, fmt.Errorf("fix well known node types failed: %w", err))
	}

	if err := adAnalysis.RunDomainAssociations(ctx, graphDB); err != nil {
		collectedErrors = append(collectedErrors, fmt.Errorf("domain association and pruning failed: %w", err))
	}

	if err := adAnalysis.LinkWellKnownGroups(ctx, graphDB); err != nil {
		collectedErrors = append(collectedErrors, fmt.Errorf("well known group linking failed: %w", err))
	}

	if err := updateAssetGroupIsolationTags(ctx, db, graphDB); err != nil {
		collectedErrors = append(collectedErrors, fmt.Errorf("asset group isolation tagging failed: %w", err))
	}

	if err := TagActiveDirectoryTierZero(ctx, graphDB); err != nil {
		collectedErrors = append(collectedErrors, fmt.Errorf("active directory tier zero tagging failed: %w", err))
	}

	if err := ParallelTagAzureTierZero(ctx, graphDB); err != nil {
		collectedErrors = append(collectedErrors, fmt.Errorf("azure tier zero tagging failed: %w", err))
	}

	// Per-stage failure flags for the four major stages; these alone decide
	// the returned sentinel error below.
	var (
		adFailed          = false
		azureFailed       = false
		agiFailed         = false
		dataQualityFailed = false
	)

	// TODO: Cleanup #ADCSFeatureFlag after full launch.
	if adcsFlag, err := db.GetFlagByKey(appcfg.FeatureAdcs); err != nil {
		collectedErrors = append(collectedErrors, fmt.Errorf("error retrieving ADCS feature flag: %w", err))
	} else if stats, err := ad.Post(ctx, graphDB, adcsFlag.Enabled); err != nil {
		collectedErrors = append(collectedErrors, fmt.Errorf("error during ad post: %w", err))
		adFailed = true
	} else {
		stats.LogStats()
	}

	if stats, err := azure.Post(ctx, graphDB); err != nil {
		collectedErrors = append(collectedErrors, fmt.Errorf("error during azure post: %w", err))
		azureFailed = true
	} else {
		stats.LogStats()
	}

	if err := agi.RunAssetGroupIsolationCollections(ctx, db, graphDB, analysis.GetNodeKindDisplayLabel); err != nil {
		collectedErrors = append(collectedErrors, fmt.Errorf("asset group isolation collection failed: %w", err))
		agiFailed = true
	}

	if err := dataquality.SaveDataQuality(ctx, db, graphDB); err != nil {
		collectedErrors = append(collectedErrors, fmt.Errorf("error saving data quality stat: %v", err))
		dataQualityFailed = true
	}

	// Surface every collected error; ranging an empty slice is a no-op so no
	// length guard is needed.
	for _, err := range collectedErrors {
		log.Errorf("Analysis error encountered: %v", err)
	}

	if adFailed && azureFailed && agiFailed && dataQualityFailed {
		return ErrAnalysisFailed
	} else if adFailed || azureFailed || agiFailed || dataQualityFailed {
		return ErrAnalysisPartiallyCompleted
	}

	return nil
}
12 changes: 8 additions & 4 deletions cmd/api/src/daemons/datapipe/datapipe.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ package datapipe

import (
"context"
"errors"
"github.com/specterops/bloodhound/src/bootstrap"
"os"
"path/filepath"
Expand Down Expand Up @@ -107,10 +108,13 @@ func (s *Daemon) analyze() {
log.LogAndMeasure(log.LevelInfo, "Graph Analysis")()

if err := RunAnalysisOperations(s.ctx, s.db, s.graphdb, s.cfg); err != nil {
log.Errorf("Graph analysis failed: %v", err)
FailAnalyzedFileUploadJobs(s.ctx, s.db)

s.status.Update(model.DatapipeStatusIdle, false)
if errors.Is(err, ErrAnalysisFailed) {
FailAnalyzedFileUploadJobs(s.ctx, s.db)
s.status.Update(model.DatapipeStatusIdle, false)
} else if errors.Is(err, ErrAnalysisPartiallyCompleted) {
PartialCompleteFileUploadJobs(s.ctx, s.db)
s.status.Update(model.DatapipeStatusIdle, true)
}
} else {
CompleteAnalyzedFileUploadJobs(s.ctx, s.db)

Expand Down
18 changes: 18 additions & 0 deletions cmd/api/src/daemons/datapipe/jobs.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,24 @@ func FailAnalyzedFileUploadJobs(ctx context.Context, db database.Database) {
}
}

// PartialCompleteFileUploadJobs moves every file upload job that is currently
// in the analyzing state into the partially-complete state. Individual update
// failures are logged and do not stop the remaining jobs from being updated.
func PartialCompleteFileUploadJobs(ctx context.Context, db database.Database) {
	// Because our database interfaces do not yet accept contexts this is a best-effort check to ensure that we do not
	// commit state transitions when we are shutting down.
	if ctx.Err() != nil {
		return
	}

	analyzingJobs, fetchErr := db.GetFileUploadJobsWithStatus(model.JobStatusAnalyzing)
	if fetchErr != nil {
		log.Errorf("Failed to load file upload jobs under analysis: %v", fetchErr)
		return
	}

	for _, analyzingJob := range analyzingJobs {
		if updateErr := fileupload.UpdateFileUploadJobStatus(db, analyzingJob, model.JobStatusPartiallyComplete, "Partially Completed"); updateErr != nil {
			log.Errorf("Failed updating file upload job %d to partially completed status: %v", analyzingJob.ID, updateErr)
		}
	}
}

func CompleteAnalyzedFileUploadJobs(ctx context.Context, db database.Database) {
// Because our database interfaces do not yet accept contexts this is a best-effort check to ensure that we do not
// commit state transitions when we are shutting down.
Expand Down
5 changes: 5 additions & 0 deletions cmd/api/src/model/jobs.go
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,7 @@ const (
JobStatusFailed JobStatus = 5
JobStatusIngesting JobStatus = 6
JobStatusAnalyzing JobStatus = 7
JobStatusPartiallyComplete JobStatus = 8
)

func allJobStatuses() []JobStatus {
Expand All @@ -130,6 +131,7 @@ func allJobStatuses() []JobStatus {
JobStatusFailed,
JobStatusIngesting,
JobStatusAnalyzing,
JobStatusPartiallyComplete,
}
}

Expand Down Expand Up @@ -170,6 +172,9 @@ func (s JobStatus) String() string {

case JobStatusAnalyzing:
return "ANALYZING"

case JobStatusPartiallyComplete:
return "PARTIALLYCOMPLETE"

default:
return "INVALIDSTATUS"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ export enum FileUploadJobStatus {
FAILED = 5,
INGESTING = 6,
ANALYZING = 7,
PARTIALLY_COMPLETE = 8,
}

export const FileUploadJobStatusToString: Record<FileUploadJobStatus, string> = {
Expand All @@ -47,4 +48,5 @@ export const FileUploadJobStatusToString: Record<FileUploadJobStatus, string> =
5: 'Failed',
6: 'Ingesting',
7: 'Analyzing',
8: 'Partially Complete',
};

0 comments on commit dfd5b2c

Please sign in to comment.