diff --git a/cmd/api/src/daemons/datapipe/analysis.go b/cmd/api/src/daemons/datapipe/analysis.go index 3ca3be73f9..8f606f0343 100644 --- a/cmd/api/src/daemons/datapipe/analysis.go +++ b/cmd/api/src/daemons/datapipe/analysis.go @@ -18,12 +18,13 @@ package datapipe import ( "context" + "errors" "fmt" + "github.com/specterops/bloodhound/log" "github.com/specterops/bloodhound/analysis" adAnalysis "github.com/specterops/bloodhound/analysis/ad" "github.com/specterops/bloodhound/dawgs/graph" - "github.com/specterops/bloodhound/errors" "github.com/specterops/bloodhound/src/analysis/ad" "github.com/specterops/bloodhound/src/analysis/azure" "github.com/specterops/bloodhound/src/config" @@ -33,57 +34,85 @@ import ( "github.com/specterops/bloodhound/src/services/dataquality" ) +var ( + ErrAnalysisFailed = errors.New("analysis failed") + ErrAnalysisPartiallyCompleted = errors.New("analysis partially completed") +) + func RunAnalysisOperations(ctx context.Context, db database.Database, graphDB graph.Database, _ config.Configuration) error { var ( - collector = &errors.ErrorCollector{} + collectedErrors []error ) if err := adAnalysis.FixWellKnownNodeTypes(ctx, graphDB); err != nil { - collector.Collect(fmt.Errorf("fix well known node types failed: %w", err)) + collectedErrors = append(collectedErrors, fmt.Errorf("fix well known node types failed: %w", err)) } if err := adAnalysis.RunDomainAssociations(ctx, graphDB); err != nil { - collector.Collect(fmt.Errorf("domain association and pruning failed: %w", err)) + collectedErrors = append(collectedErrors, fmt.Errorf("domain association and pruning failed: %w", err)) } if err := adAnalysis.LinkWellKnownGroups(ctx, graphDB); err != nil { - collector.Collect(fmt.Errorf("well known group linking failed: %w", err)) + collectedErrors = append(collectedErrors, fmt.Errorf("well known group linking failed: %w", err)) } if err := updateAssetGroupIsolationTags(ctx, db, graphDB); err != nil { - collector.Collect(fmt.Errorf("asset group 
isolation tagging failed: %w", err)) + collectedErrors = append(collectedErrors, fmt.Errorf("asset group isolation tagging failed: %w", err)) } if err := TagActiveDirectoryTierZero(ctx, graphDB); err != nil { - collector.Collect(fmt.Errorf("active directory tier zero tagging failed: %w", err)) + collectedErrors = append(collectedErrors, fmt.Errorf("active directory tier zero tagging failed: %w", err)) } if err := ParallelTagAzureTierZero(ctx, graphDB); err != nil { - collector.Collect(fmt.Errorf("azure tier zero tagging failed: %w", err)) + collectedErrors = append(collectedErrors, fmt.Errorf("azure tier zero tagging failed: %w", err)) } + var ( + adFailed = false + azureFailed = false + agiFailed = false + dataQualityFailed = false + ) + // TODO: Cleanup #ADCSFeatureFlag after full launch. if adcsFlag, err := db.GetFlagByKey(appcfg.FeatureAdcs); err != nil { - collector.Collect(fmt.Errorf("error retrieving ADCS feature flag: %w", err)) + collectedErrors = append(collectedErrors, fmt.Errorf("error retrieving ADCS feature flag: %w", err)) } else if stats, err := ad.Post(ctx, graphDB, adcsFlag.Enabled); err != nil { - collector.Collect(fmt.Errorf("error during ad post: %w", err)) + collectedErrors = append(collectedErrors, fmt.Errorf("error during ad post: %w", err)) + adFailed = true } else { stats.LogStats() } if stats, err := azure.Post(ctx, graphDB); err != nil { - collector.Collect(fmt.Errorf("error during azure post: %w", err)) + collectedErrors = append(collectedErrors, fmt.Errorf("error during azure post: %w", err)) + azureFailed = true } else { stats.LogStats() } if err := agi.RunAssetGroupIsolationCollections(ctx, db, graphDB, analysis.GetNodeKindDisplayLabel); err != nil { - collector.Collect(fmt.Errorf("asset group isolation collection failed: %w", err)) + collectedErrors = append(collectedErrors, fmt.Errorf("asset group isolation collection failed: %w", err)) + agiFailed = true } if err := dataquality.SaveDataQuality(ctx, db, graphDB); err != nil { - 
collector.Collect(fmt.Errorf("error saving data quality stat: %v", err)) + collectedErrors = append(collectedErrors, fmt.Errorf("error saving data quality stat: %v", err)) + dataQualityFailed = true + } + + if len(collectedErrors) > 0 { + for _, err := range collectedErrors { + log.Errorf("Analysis error encountered: %v", err) + } + } + + if adFailed && azureFailed && agiFailed && dataQualityFailed { + return ErrAnalysisFailed + } else if adFailed || azureFailed || agiFailed || dataQualityFailed { + return ErrAnalysisPartiallyCompleted } - return collector.Return() + return nil } diff --git a/cmd/api/src/daemons/datapipe/datapipe.go b/cmd/api/src/daemons/datapipe/datapipe.go index 3b6dc9d358..ef2b2e2d71 100644 --- a/cmd/api/src/daemons/datapipe/datapipe.go +++ b/cmd/api/src/daemons/datapipe/datapipe.go @@ -19,6 +19,7 @@ package datapipe import ( "context" + "errors" "github.com/specterops/bloodhound/src/bootstrap" "os" "path/filepath" @@ -107,10 +108,13 @@ func (s *Daemon) analyze() { log.LogAndMeasure(log.LevelInfo, "Graph Analysis")() if err := RunAnalysisOperations(s.ctx, s.db, s.graphdb, s.cfg); err != nil { - log.Errorf("Graph analysis failed: %v", err) - FailAnalyzedFileUploadJobs(s.ctx, s.db) - - s.status.Update(model.DatapipeStatusIdle, false) + if errors.Is(err, ErrAnalysisFailed) { + FailAnalyzedFileUploadJobs(s.ctx, s.db) + s.status.Update(model.DatapipeStatusIdle, false) + } else if errors.Is(err, ErrAnalysisPartiallyCompleted) { + PartialCompleteFileUploadJobs(s.ctx, s.db) + s.status.Update(model.DatapipeStatusIdle, true) + } } else { CompleteAnalyzedFileUploadJobs(s.ctx, s.db) diff --git a/cmd/api/src/daemons/datapipe/jobs.go b/cmd/api/src/daemons/datapipe/jobs.go index 392792f29b..c6e1546e02 100644 --- a/cmd/api/src/daemons/datapipe/jobs.go +++ b/cmd/api/src/daemons/datapipe/jobs.go @@ -54,6 +54,24 @@ func FailAnalyzedFileUploadJobs(ctx context.Context, db database.Database) { } } +func PartialCompleteFileUploadJobs(ctx context.Context, db 
database.Database) { + // Because our database interfaces do not yet accept contexts this is a best-effort check to ensure that we do not + // commit state transitions when we are shutting down. + if ctx.Err() != nil { + return + } + + if fileUploadJobsUnderAnalysis, err := db.GetFileUploadJobsWithStatus(model.JobStatusAnalyzing); err != nil { + log.Errorf("Failed to load file upload jobs under analysis: %v", err) + } else { + for _, job := range fileUploadJobsUnderAnalysis { + if err := fileupload.UpdateFileUploadJobStatus(db, job, model.JobStatusPartiallyComplete, "Partially Completed"); err != nil { + log.Errorf("Failed updating file upload job %d to partially completed status: %v", job.ID, err) + } + } + } +} + func CompleteAnalyzedFileUploadJobs(ctx context.Context, db database.Database) { // Because our database interfaces do not yet accept contexts this is a best-effort check to ensure that we do not // commit state transitions when we are shutting down. diff --git a/cmd/api/src/model/jobs.go b/cmd/api/src/model/jobs.go index 4a0ca0841e..a7f942f9e1 100644 --- a/cmd/api/src/model/jobs.go +++ b/cmd/api/src/model/jobs.go @@ -117,6 +117,7 @@ const ( JobStatusFailed JobStatus = 5 JobStatusIngesting JobStatus = 6 JobStatusAnalyzing JobStatus = 7 + JobStatusPartiallyComplete JobStatus = 8 ) func allJobStatuses() []JobStatus { @@ -130,6 +131,7 @@ func allJobStatuses() []JobStatus { JobStatusFailed, JobStatusIngesting, JobStatusAnalyzing, + JobStatusPartiallyComplete, } } @@ -170,6 +172,9 @@ func (s JobStatus) String() string { case JobStatusAnalyzing: return "ANALYZING" + + case JobStatusPartiallyComplete: + return "PARTIALLYCOMPLETE" default: return "INVALIDSTATUS" diff --git a/packages/javascript/bh-shared-ui/src/components/FinishedIngestLog/types.ts b/packages/javascript/bh-shared-ui/src/components/FinishedIngestLog/types.ts index 0e7f785072..ecb1fb93ff 100644 --- a/packages/javascript/bh-shared-ui/src/components/FinishedIngestLog/types.ts +++ 
b/packages/javascript/bh-shared-ui/src/components/FinishedIngestLog/types.ts @@ -35,6 +35,7 @@ export enum FileUploadJobStatus { FAILED = 5, INGESTING = 6, ANALYZING = 7, + PARTIALLY_COMPLETE = 8, } export const FileUploadJobStatusToString: Record<FileUploadJobStatus, string> = { @@ -47,4 +48,5 @@ export const FileUploadJobStatusToString: Record<FileUploadJobStatus, string> = 5: 'Failed', 6: 'Ingesting', 7: 'Analyzing', + 8: 'Partially Complete', };