diff --git a/CHANGELOG.md b/CHANGELOG.md index 2e5adf53d..0741ed4e5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ This is the changelog for [Nosey Parker](https://github.com/praetorian-inc/nosey All notable changes to the project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project aspires to use [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +Note that the use of semantic versioning applies to the command-line interface and output formats; the Rust crate APIs are considered an implementation detail at this point. ## Unreleased @@ -22,14 +23,30 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), This fixes a bug in v0.20.0 where provenance entries from an extensible enumerator could _only_ be JSON objects, instead of arbitrary JSON values as claimed by the documentation. +- The datastore schema has changed in order to support a new finding deduplication mechanism ([#239](https://github.com/praetorian-inc/noseyparker/pull/239)). + Datastores from previous versions of Nosey Parker are not supported. + +- The `report` command now reports at most 3 provenance entries per match by default ([#239](https://github.com/praetorian-inc/noseyparker/pull/239)). + This can be overridden with the new `--max-provenance=N` option. + +- The `report` command now includes finding and match IDs in its default "human" format ([#239](https://github.com/praetorian-inc/noseyparker/pull/239)). + +- The `scan` command now prints a simplified summary at the end, without the unpopulated status columns ([#239](https://github.com/praetorian-inc/noseyparker/pull/239)). + ### Fixes - The `Blynk Organization Client Credentials` rule now has a non-varying number of capture groups +- Fixed a typo in the `report` command that could cause a diagnostic message about suppressed matches to be incorrect ([#239](https://github.com/praetorian-inc/noseyparker/pull/239)). 
+ ### Changes - The `Slack Bot Token` rule has been modified to match additional cases. - The `rules check` command now more thoroughly checks the number of capture groups of each rule ### Additions +- A new finding deduplication mechanism is enabled by default when reporting ([#239](https://github.com/praetorian-inc/noseyparker/pull/239)). + This mechanism suppresses matches and findings that overlap with others if they are less specific. + For example, a single blob might contain text that matches _both_ the `HTTP Bearer Token` and `Slack User Token` rules; the less-specific `HTTP Bearer Token` match will be suppressed. + - New rules have been added: - `Connection String in .NET Configuration` ([#238](https://github.com/praetorian-inc/noseyparker/pull/238)) @@ -43,6 +60,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), Only a few rules have descriptions so far. Use `rules list -f json` to see. +- The `report` command has a new `--max-provenance=N` option that limits the number of provenance entries displayed for any single match ([#239](https://github.com/praetorian-inc/noseyparker/pull/239)). + A negative number means "no limit". + The default value is 3. + ## [v0.21.0](https://github.com/praetorian-inc/noseyparker/releases/v0.21.0) (2024-11-20) diff --git a/crates/noseyparker-cli/src/args.rs b/crates/noseyparker-cli/src/args.rs index 1a323f63d..be9fb57dc 100644 --- a/crates/noseyparker-cli/src/args.rs +++ b/crates/noseyparker-cli/src/args.rs @@ -987,7 +987,7 @@ pub struct ContentFilteringArgs { /// Do not scan files larger than the specified size /// /// The value is parsed as a floating point literal, and hence fractional values can be supplied. - /// A negative value means "no limit". + /// A non-positive value means "no limit". /// Note that scanning requires reading the entire contents of each file into memory, so using an excessively large limit may be problematic. 
#[arg( long("max-file-size"), @@ -1069,7 +1069,7 @@ pub struct ReportArgs { pub struct ReportFilterArgs { /// Limit the number of matches per finding to at most N /// - /// A negative value means "no limit". + /// A non-positive value means "no limit". #[arg( long, default_value_t = 3, @@ -1078,7 +1078,18 @@ pub struct ReportFilterArgs { )] pub max_matches: i64, - /// Only report findings that have a mean score of at least N. + /// Limit the number of provenance entries per match to at most N + /// + /// A non-positive value means "no limit". + #[arg( + long, + default_value_t = 3, + value_name = "N", + allow_negative_numbers = true + )] + pub max_provenance: i64, + + /// Only report findings that have a mean score of at least N /// /// Scores are floating point numbers in the range [0, 1]. /// Use the value `0` to disable this filtering. @@ -1090,6 +1101,13 @@ pub struct ReportFilterArgs { /// Include only findings with the assigned status #[arg(long, value_name = "STATUS")] pub finding_status: Option, + + /// Suppress redundant matches and findings + /// + /// A match is considered redundant to another if they overlap significantly within the same + /// blob and satisfy a handful of heuristics. 
+ #[arg(long, default_value_t=true, action=ArgAction::Set, value_name="BOOL")] + pub suppress_redundant: bool, } #[derive(ValueEnum, Debug, Display, Clone, Copy)] diff --git a/crates/noseyparker-cli/src/cmd_report.rs b/crates/noseyparker-cli/src/cmd_report.rs index f37bdb022..a473110e2 100644 --- a/crates/noseyparker-cli/src/cmd_report.rs +++ b/crates/noseyparker-cli/src/cmd_report.rs @@ -37,6 +37,12 @@ pub fn run(global_args: &GlobalArgs, args: &ReportArgs) -> Result<()> { Some(args.filter_args.max_matches.try_into().unwrap()) }; + let max_provenance = if args.filter_args.max_provenance <= 0 { + None + } else { + Some(args.filter_args.max_provenance.try_into().unwrap()) + }; + let min_score = if args.filter_args.min_score <= 0.0 { None } else { @@ -57,6 +63,8 @@ pub fn run(global_args: &GlobalArgs, args: &ReportArgs) -> Result<()> { let reporter = DetailsReporter { datastore, max_matches, + max_provenance, + suppress_redundant: args.filter_args.suppress_redundant, min_score, finding_status: args.filter_args.finding_status, styles, @@ -67,7 +75,9 @@ pub fn run(global_args: &GlobalArgs, args: &ReportArgs) -> Result<()> { struct DetailsReporter { datastore: Datastore, max_matches: Option, + max_provenance: Option, min_score: Option, + suppress_redundant: bool, finding_status: Option, styles: Styles, } @@ -89,66 +99,43 @@ impl DetailsReporter { fn get_finding_metadata(&self) -> Result> { let datastore = &self.datastore; let mut group_metadata = datastore - .get_finding_metadata() + .get_finding_metadata(self.suppress_redundant) .context("Failed to get match group metadata from datastore")?; - // How many findings were suppressed due to their status not matching? - let mut num_suppressed_for_status: usize = 0; - - // How many findings were suppressed due to their status not matching? 
- let mut num_suppressed_for_score: usize = 0; - // Suppress findings with non-matching status if let Some(status) = self.finding_status { - group_metadata.retain(|md| { - if statuses_match(status, md.statuses.0.as_slice()) { - true - } else { - num_suppressed_for_status += 1; - false - } - }) - } + let old_len = group_metadata.len(); + group_metadata.retain(|md| statuses_match(status, md.statuses.0.as_slice())); + let num_suppressed = old_len - group_metadata.len(); - // Suppress findings with non-matching score - if let Some(min_score) = self.min_score { - group_metadata.retain(|md| match md.mean_score { - Some(mean_score) if mean_score < min_score => { - num_suppressed_for_score += 1; - false - } - _ => true, - }) - } - - if num_suppressed_for_status > 0 { - let finding_status = self.finding_status.unwrap(); - - if num_suppressed_for_status == 1 { + if num_suppressed == 1 { info!( - "Note: 1 finding with status not matching {finding_status} was suppressed; \ - rerun without `--finding-status={finding_status}` to show it" + "Note: 1 finding with status not matching {status} was suppressed; \ + rerun without `--finding-status={status}` to show it" ); - } else { + } else if num_suppressed > 1 { info!( - "Note: {num_suppressed_for_status} findings with status not matching \ - `{finding_status}` were suppressed; \ - rerun without `--finding-status={finding_status}` to show them" + "Note: {num_suppressed} findings with status not matching \ + `{status}` were suppressed; \ + rerun without `--finding-status={status}` to show them" ); } } - if num_suppressed_for_score > 0 { - let min_score = self.min_score.unwrap(); + // Suppress findings with non-matching score + if let Some(min_score) = self.min_score { + let old_len = group_metadata.len(); + group_metadata.retain(|md| md.mean_score.map(|s| s >= min_score).unwrap_or(true)); + let num_suppressed = old_len - group_metadata.len(); - if num_suppressed_for_status == 1 { + if num_suppressed == 1 { info!( "Note: 1 finding 
with meanscore less than {min_score} was suppressed; \ rerun with `--min-score=0` to show it" ); - } else { + } else if num_suppressed > 1 { info!( - "Note: {num_suppressed_for_score} findings with mean score less than \ + "Note: {num_suppressed} findings with mean score less than \ {min_score} were suppressed; \ rerun with `--min-score=0` to show them" ); @@ -162,7 +149,12 @@ impl DetailsReporter { fn get_matches(&self, metadata: &FindingMetadata) -> Result> { Ok(self .datastore - .get_finding_data(metadata, self.max_matches) + .get_finding_data( + metadata, + self.max_matches, + self.max_provenance, + self.suppress_redundant, + ) .with_context(|| format!("Failed to get matches for finding {metadata:?}")) .expect("should be able to find get matches for finding") .into_iter() @@ -182,6 +174,10 @@ impl DetailsReporter { self.styles.style_heading.apply_to(val) } + fn style_id(&self, val: D) -> StyledObject { + self.styles.style_id.apply_to(val) + } + fn style_match(&self, val: D) -> StyledObject { self.styles.style_match.apply_to(val) } @@ -286,6 +282,9 @@ struct ReportMatch { /// An optional status assigned to the match status: Option, + + /// The match structural IDs that this match is considered redundant to + redundant_to: Vec, } impl From for ReportMatch { @@ -297,6 +296,7 @@ impl From for ReportMatch { score: e.match_score, comment: e.match_comment, status: e.match_status, + redundant_to: e.redundant_to, } } } diff --git a/crates/noseyparker-cli/src/cmd_report/human_format.rs b/crates/noseyparker-cli/src/cmd_report/human_format.rs index 6ac50cb34..488999e5d 100644 --- a/crates/noseyparker-cli/src/cmd_report/human_format.rs +++ b/crates/noseyparker-cli/src/cmd_report/human_format.rs @@ -10,8 +10,9 @@ impl DetailsReporter { let finding = Finding { metadata, matches }; writeln!( &mut writer, - "{}", - self.style_finding_heading(format!("Finding {finding_num}/{num_findings}")) + "{} (id {})", + self.style_finding_heading(format!("Finding 
{finding_num}/{num_findings}")), + self.style_id(&finding.metadata.finding_id), )?; writeln!(&mut writer, "{}", PrettyFinding(self, &finding))?; } @@ -90,7 +91,7 @@ impl<'a> Display for PrettyFinding<'a> { f, "{}", reporter.style_heading(format!( - "Showing {}/{} occurrences:", + "Showing {}/{} matches:", finding.num_matches_available(), finding.total_matches() )) @@ -109,14 +110,25 @@ impl<'a> Display for PrettyFinding<'a> { score, comment, status, + redundant_to, } = rm; writeln!( f, - "{}", - reporter.style_heading(format!("Occurrence {i}/{}", finding.total_matches())), + "{} (id {})", + reporter.style_heading(format!("Match {i}/{}", finding.total_matches())), + reporter.style_id(&m.structural_id), )?; + if !redundant_to.is_empty() { + writeln!( + f, + "{} {}", + reporter.style_heading("Redundant to:"), + redundant_to.join(", "), + )?; + } + // write out match status if set if let Some(status) = status { let status = match status { @@ -145,7 +157,6 @@ impl<'a> Display for PrettyFinding<'a> { ) }; - // FIXME: limit the total number of provenance entries displayed for p in provenance.iter() { match p { Provenance::File(e) => { diff --git a/crates/noseyparker-cli/src/cmd_report/styles.rs b/crates/noseyparker-cli/src/cmd_report/styles.rs index 12ff2d2f1..c0f68c9d3 100644 --- a/crates/noseyparker-cli/src/cmd_report/styles.rs +++ b/crates/noseyparker-cli/src/cmd_report/styles.rs @@ -6,6 +6,7 @@ pub struct Styles { pub style_heading: Style, pub style_match: Style, pub style_metadata: Style, + pub style_id: Style, } impl Styles { @@ -23,6 +24,7 @@ impl Styles { let style_heading = Style::new().bold().force_styling(styles_enabled); let style_match = Style::new().yellow().force_styling(styles_enabled); let style_metadata = Style::new().bright().blue().force_styling(styles_enabled); + let style_id = Style::new().bright().green().force_styling(styles_enabled); Self { style_finding_heading, @@ -30,6 +32,7 @@ impl Styles { style_heading, style_match, style_metadata, + 
style_id, } } } diff --git a/crates/noseyparker-cli/src/cmd_scan.rs b/crates/noseyparker-cli/src/cmd_scan.rs index ece2b8e77..76d09ebe0 100644 --- a/crates/noseyparker-cli/src/cmd_scan.rs +++ b/crates/noseyparker-cli/src/cmd_scan.rs @@ -549,7 +549,7 @@ pub fn run(global_args: &args::GlobalArgs, args: &args::ScanArgs) -> Result<()> .unwrap() .context("Failed to enumerate inputs")?; - let (datastore, num_matches, num_new_matches) = datastore_thread + let (mut datastore, num_matches, num_new_matches) = datastore_thread .join() .unwrap() .context("Failed to save results to the datastore")?; @@ -561,6 +561,8 @@ pub fn run(global_args: &args::GlobalArgs, args: &args::ScanArgs) -> Result<()> progress.finish(); + datastore.check_match_redundancies()?; + // --------------------------------------------------------------------------------------------- // Finalize and report // --------------------------------------------------------------------------------------------- @@ -614,7 +616,7 @@ pub fn run(global_args: &args::GlobalArgs, args: &args::ScanArgs) -> Result<()> .get_summary() .context("Failed to get finding summary") .unwrap(); - let table = crate::cmd_summarize::summary_table(&summary); + let table = crate::cmd_summarize::summary_table(&summary, /* simple= */ true); println!(); table.print_tty(global_args.use_color(std::io::stdout()))?; } diff --git a/crates/noseyparker-cli/src/cmd_summarize.rs b/crates/noseyparker-cli/src/cmd_summarize.rs index 02f698794..3ef2d911f 100644 --- a/crates/noseyparker-cli/src/cmd_summarize.rs +++ b/crates/noseyparker-cli/src/cmd_summarize.rs @@ -6,7 +6,10 @@ use noseyparker::datastore::{Datastore, FindingSummary}; use crate::args::{GlobalArgs, SummarizeArgs, SummarizeOutputFormat}; use crate::reportable::Reportable; -struct FindingSummaryReporter(FindingSummary); +struct FindingSummaryReporter { + summary: FindingSummary, + simple: bool, +} impl Reportable for FindingSummaryReporter { type Format = SummarizeOutputFormat; @@ -22,23 +25,19 
@@ impl Reportable for FindingSummaryReporter { impl FindingSummaryReporter { fn human_format(&self, mut writer: W) -> Result<()> { - let summary = &self.0; writeln!(writer)?; - let table = summary_table(summary); // FIXME: this doesn't preserve ANSI styling on the table - table.print(&mut writer)?; + summary_table(&self.summary, self.simple).print(&mut writer)?; Ok(()) } fn json_format(&self, writer: W) -> Result<()> { - let summary = &self.0; - serde_json::to_writer_pretty(writer, &summary)?; + serde_json::to_writer_pretty(writer, &self.summary)?; Ok(()) } fn jsonl_format(&self, mut writer: W) -> Result<()> { - let summary = &self.0; - for entry in summary.0.iter() { + for entry in self.summary.0.iter() { serde_json::to_writer(&mut writer, entry)?; writeln!(&mut writer)?; } @@ -57,10 +56,14 @@ pub fn run(global_args: &GlobalArgs, args: &SummarizeArgs) -> Result<()> { .get_summary() .context("Failed to get finding summary") .unwrap(); - FindingSummaryReporter(summary).report(args.output_args.format, output) + FindingSummaryReporter { + simple: false, + summary, + } + .report(args.output_args.format, output) } -pub fn summary_table(summary: &FindingSummary) -> prettytable::Table { +pub(crate) fn summary_table(summary: &FindingSummary, simple: bool) -> prettytable::Table { use prettytable::format::{FormatBuilder, LinePosition, LineSeparator}; use prettytable::row; @@ -70,30 +73,51 @@ pub fn summary_table(summary: &FindingSummary) -> prettytable::Table { .padding(1, 1) .build(); - let mut table: prettytable::Table = summary - .0 - .iter() - .map(|e| { - row![ - l -> &e.rule_name, - r -> HumanCount(e.distinct_count.try_into().unwrap()), - r -> HumanCount(e.total_count.try_into().unwrap()), - r -> HumanCount(e.accept_count.try_into().unwrap()), - r -> HumanCount(e.reject_count.try_into().unwrap()), - r -> HumanCount(e.mixed_count.try_into().unwrap()), - r -> HumanCount(e.unlabeled_count.try_into().unwrap()), - ] - }) - .collect(); - table.set_format(f); - 
table.set_titles(row![ - lb -> "Rule", - cb -> "Findings", - cb -> "Matches", - cb -> "Accepted", - cb -> "Rejected", - cb -> "Mixed", - cb -> "Unlabeled", - ]); - table + if simple { + let mut table: prettytable::Table = summary + .0 + .iter() + .map(|e| { + row![ + l -> &e.rule_name, + r -> HumanCount(e.distinct_count.try_into().unwrap()), + r -> HumanCount(e.total_count.try_into().unwrap()), + ] + }) + .collect(); + table.set_format(f); + table.set_titles(row![ + lb -> "Rule", + cb -> "Findings", + cb -> "Matches", + ]); + table + } else { + let mut table: prettytable::Table = summary + .0 + .iter() + .map(|e| { + row![ + l -> &e.rule_name, + r -> HumanCount(e.distinct_count.try_into().unwrap()), + r -> HumanCount(e.total_count.try_into().unwrap()), + r -> HumanCount(e.accept_count.try_into().unwrap()), + r -> HumanCount(e.reject_count.try_into().unwrap()), + r -> HumanCount(e.mixed_count.try_into().unwrap()), + r -> HumanCount(e.unlabeled_count.try_into().unwrap()), + ] + }) + .collect(); + table.set_format(f); + table.set_titles(row![ + lb -> "Rule", + cb -> "Findings", + cb -> "Matches", + cb -> "Accepted", + cb -> "Rejected", + cb -> "Mixed", + cb -> "Unlabeled", + ]); + table + } } diff --git a/crates/noseyparker-cli/tests/generate/snapshots/test_noseyparker__generate__generate_json_schema.snap b/crates/noseyparker-cli/tests/generate/snapshots/test_noseyparker__generate__generate_json_schema.snap index a82023ac9..e3e10fa21 100644 --- a/crates/noseyparker-cli/tests/generate/snapshots/test_noseyparker__generate__generate_json_schema.snap +++ b/crates/noseyparker-cli/tests/generate/snapshots/test_noseyparker__generate__generate_json_schema.snap @@ -171,6 +171,12 @@ expression: stdout "minimum": 0.0, "type": "integer" }, + "num_redundant_matches": { + "description": "The number of matches in the group that are considered redundant", + "format": "uint", + "minimum": 0.0, + "type": "integer" + }, "rule_name": { "description": "The name of the rule that detected 
each match", "type": "string" @@ -197,6 +203,7 @@ expression: stdout "groups", "matches", "num_matches", + "num_redundant_matches", "rule_name", "rule_structural_id", "rule_text_id", @@ -372,6 +379,13 @@ expression: stdout "provenance": { "$ref": "#/definitions/ProvenanceSet" }, + "redundant_to": { + "description": "The match structural IDs that this match is considered redundant to", + "items": { + "type": "string" + }, + "type": "array" + }, "rule_name": { "description": "The name of the rule that produced this match", "type": "string" @@ -424,6 +438,7 @@ expression: stdout "groups", "location", "provenance", + "redundant_to", "rule_name", "rule_structural_id", "rule_text_id", diff --git a/crates/noseyparker-cli/tests/help/snapshots/test_noseyparker__help__help_report-2.snap b/crates/noseyparker-cli/tests/help/snapshots/test_noseyparker__help__help_report-2.snap index 3e2905e3c..227221d67 100644 --- a/crates/noseyparker-cli/tests/help/snapshots/test_noseyparker__help__help_report-2.snap +++ b/crates/noseyparker-cli/tests/help/snapshots/test_noseyparker__help__help_report-2.snap @@ -20,12 +20,19 @@ Filtering Options: --max-matches Limit the number of matches per finding to at most N - A negative value means "no limit". + A non-positive value means "no limit". + + [default: 3] + + --max-provenance + Limit the number of provenance entries per match to at most N + + A non-positive value means "no limit". [default: 3] --min-score - Only report findings that have a mean score of at least N. + Only report findings that have a mean score of at least N Scores are floating point numbers in the range [0, 1]. Use the value `0` to disable this filtering. 
@@ -43,6 +50,15 @@ Filtering Options: - mixed: Findings with both `accept` and `reject` matches - null: Findings without any `accept` or `reject` matches + --suppress-redundant + Suppress redundant matches and findings + + A match is considered redundant to another if they overlap significantly within the same + blob and satisfy a handful of heuristics. + + [default: true] + [possible values: true, false] + Output Options: -o, --output Write output to the specified path diff --git a/crates/noseyparker-cli/tests/help/snapshots/test_noseyparker__help__help_report_short-2.snap b/crates/noseyparker-cli/tests/help/snapshots/test_noseyparker__help__help_report_short-2.snap index 1e34c1fd0..f74e60c0a 100644 --- a/crates/noseyparker-cli/tests/help/snapshots/test_noseyparker__help__help_report_short-2.snap +++ b/crates/noseyparker-cli/tests/help/snapshots/test_noseyparker__help__help_report_short-2.snap @@ -11,11 +11,15 @@ Options: -h, --help Print help (see more with '--help') Filtering Options: - --max-matches Limit the number of matches per finding to at most N [default: 3] - --min-score Only report findings that have a mean score of at least N [default: - 0.05] - --finding-status Include only findings with the assigned status [possible values: - accept, reject, mixed, null] + --max-matches Limit the number of matches per finding to at most N [default: 3] + --max-provenance Limit the number of provenance entries per match to at most N + [default: 3] + --min-score Only report findings that have a mean score of at least N + [default: 0.05] + --finding-status Include only findings with the assigned status [possible values: + accept, reject, mixed, null] + --suppress-redundant Suppress redundant matches and findings [default: true] [possible + values: true, false] Output Options: -o, --output Write output to the specified path diff --git a/crates/noseyparker-cli/tests/help/snapshots/test_noseyparker__help__help_scan-2.snap 
b/crates/noseyparker-cli/tests/help/snapshots/test_noseyparker__help__help_scan-2.snap index fcc0729a4..0b2be4b29 100644 --- a/crates/noseyparker-cli/tests/help/snapshots/test_noseyparker__help__help_scan-2.snap +++ b/crates/noseyparker-cli/tests/help/snapshots/test_noseyparker__help__help_scan-2.snap @@ -178,7 +178,7 @@ Content Filtering Options: Do not scan files larger than the specified size The value is parsed as a floating point literal, and hence fractional values can be - supplied. A negative value means "no limit". Note that scanning requires reading the + supplied. A non-positive value means "no limit". Note that scanning requires reading the entire contents of each file into memory, so using an excessively large limit may be problematic. diff --git a/crates/noseyparker-cli/tests/help/snapshots/test_noseyparker__help__help_scan_nogithub-2.snap b/crates/noseyparker-cli/tests/help/snapshots/test_noseyparker__help__help_scan_nogithub-2.snap index 2bbca40b6..e990eb22a 100644 --- a/crates/noseyparker-cli/tests/help/snapshots/test_noseyparker__help__help_scan_nogithub-2.snap +++ b/crates/noseyparker-cli/tests/help/snapshots/test_noseyparker__help__help_scan_nogithub-2.snap @@ -126,7 +126,7 @@ Content Filtering Options: Do not scan files larger than the specified size The value is parsed as a floating point literal, and hence fractional values can be - supplied. A negative value means "no limit". Note that scanning requires reading the + supplied. A non-positive value means "no limit". Note that scanning requires reading the entire contents of each file into memory, so using an excessively large limit may be problematic. 
diff --git a/crates/noseyparker-cli/tests/report/mod.rs b/crates/noseyparker-cli/tests/report/mod.rs index 95d9a9158..4f6b535aa 100644 --- a/crates/noseyparker-cli/tests/report/mod.rs +++ b/crates/noseyparker-cli/tests/report/mod.rs @@ -1,4 +1,6 @@ use super::*; + +use indoc::indoc; pub use pretty_assertions::{assert_eq, assert_ne}; #[test] @@ -127,6 +129,95 @@ fn report_finding_status() { assert_eq!(findings.as_array().unwrap().len(), 1); } +#[test] +fn max_provenance_exceeded() { + let scan_env = ScanEnv::new(); + + // 4 inputs with the same content + let i1 = scan_env.input_file_with_secret("i1.txt"); + let i2 = scan_env.input_file_with_secret("i2.txt"); + let i3 = scan_env.input_file_with_secret("i3.txt"); + let i4 = scan_env.input_file_with_secret("i4.txt"); + + noseyparker_success!( + "scan", + "-d", + scan_env.dspath(), + i1.path(), + i2.path(), + i3.path(), + i4.path() + ) + .stdout(match_scan_stats("416 B", 4, 1, 1)); + + // default setting truncates provenance to 3 + { + let cmd = noseyparker_success!("report", "-d", scan_env.dspath(), "--format=json"); + let output: serde_json::Value = serde_json::from_slice(&cmd.get_output().stdout).unwrap(); + let ps = &output[0]["matches"][0]["provenance"]; + assert!(ps.is_array(), "not an array: {ps:?}"); + assert_eq!(ps.as_array().unwrap().len(), 3); + } + + // unlimited setting gives us 4 + for limit in ["-1", "0"] { + let cmd = noseyparker_success!( + "report", + "-d", + scan_env.dspath(), + "--format=json", + "--max-provenance", + limit + ); + let output: serde_json::Value = serde_json::from_slice(&cmd.get_output().stdout).unwrap(); + let ps = &output[0]["matches"][0]["provenance"]; + assert!(ps.is_array(), "not an array: {ps:?}"); + assert_eq!(ps.as_array().unwrap().len(), 4); + } +} + +#[test] +fn redundant_matches() { + let scan_env = ScanEnv::new(); + let input = scan_env.input_file_with_contents( + "input.txt", + indoc! 
{r#" + aws_access_key_id = 'AKIADEADBEEFDEADBEEF' + aws_secret_access_key = 'FakeValues99cl9bqJFVA3iFUm+yqVe08HxhXFE/' + "#}, + ); + + noseyparker_success!("scan", "-d", scan_env.dspath(), input.path()) + .stdout(match_scan_stats("110 B", 1, 3, 3)); + + // Should have only a single finding / match reported by default settings + { + let cmd = noseyparker_success!("report", "-d", scan_env.dspath(), "--format=json"); + let output: serde_json::Value = serde_json::from_slice(&cmd.get_output().stdout).unwrap(); + + assert_eq!(output.as_array().unwrap().len(), 1); + let ms = &output[0]["matches"]; + assert_eq!(ms.as_array().unwrap().len(), 1); + } + + // Should have 3 findings with 1 match each with `--suppress-redundant=false` + { + let cmd = noseyparker_success!( + "report", + "-d", + scan_env.dspath(), + "--format=json", + "--suppress-redundant=false" + ); + let output: serde_json::Value = serde_json::from_slice(&cmd.get_output().stdout).unwrap(); + + assert_eq!(output.as_array().unwrap().len(), 3); + for f in output.as_array().unwrap() { + assert_eq!(f["matches"].as_array().unwrap().len(), 1); + } + } +} + // Test that the `report` command uses colors as expected when running under a pty: // - When running with the output going to stdout (default), colors are used // - When running with the explicitly written to a file, colors are not used diff --git a/crates/noseyparker-cli/tests/report/snapshots/test_noseyparker__report__report_output_colors1.snap b/crates/noseyparker-cli/tests/report/snapshots/test_noseyparker__report__report_output_colors1.snap index f0e61a9a9..032c6bfd3 100644 --- a/crates/noseyparker-cli/tests/report/snapshots/test_noseyparker__report__report_output_colors1.snap +++ b/crates/noseyparker-cli/tests/report/snapshots/test_noseyparker__report__report_output_colors1.snap @@ -2,11 +2,11 @@ source: crates/noseyparker-cli/tests/report/mod.rs expression: output1_contents --- -Finding 1/1 +Finding 1/1 (id d551329ba5578559646aa49467be47e9d496578d) Rule: 
GitHub Personal Access Token Group: ghp_XIxB7KMNdAr3zqWtQqhE94qglHqOzn1D1stg - Occurrence 1/1 + Match 1/1 (id 155cdfa3e16d6abc09ecb8a2f659c2f84f7b91fc) File: Blob: Lines: 3:12-3:51 diff --git a/crates/noseyparker-cli/tests/report/snapshots/test_noseyparker__report__report_unlimited_matches-2.snap b/crates/noseyparker-cli/tests/report/snapshots/test_noseyparker__report__report_unlimited_matches-2.snap index d1aec2fb5..4a28a2ba4 100644 --- a/crates/noseyparker-cli/tests/report/snapshots/test_noseyparker__report__report_unlimited_matches-2.snap +++ b/crates/noseyparker-cli/tests/report/snapshots/test_noseyparker__report__report_unlimited_matches-2.snap @@ -2,11 +2,11 @@ source: crates/noseyparker-cli/tests/report/mod.rs expression: stdout --- -Finding 1/1 +Finding 1/1 (id d551329ba5578559646aa49467be47e9d496578d) Rule: GitHub Personal Access Token Group: ghp_XIxB7KMNdAr3zqWtQqhE94qglHqOzn1D1stg - Occurrence 1/1 + Match 1/1 (id 155cdfa3e16d6abc09ecb8a2f659c2f84f7b91fc) File: Blob: Lines: 3:12-3:51 diff --git a/crates/noseyparker-cli/tests/scan/appmaker/snapshots/test_noseyparker__scan__appmaker__scan_workflow_from_git_url-4.snap b/crates/noseyparker-cli/tests/scan/appmaker/snapshots/test_noseyparker__scan__appmaker__scan_workflow_from_git_url-4.snap index 29f9a1a0d..84bd44859 100644 --- a/crates/noseyparker-cli/tests/scan/appmaker/snapshots/test_noseyparker__scan__appmaker__scan_workflow_from_git_url-4.snap +++ b/crates/noseyparker-cli/tests/scan/appmaker/snapshots/test_noseyparker__scan__appmaker__scan_workflow_from_git_url-4.snap @@ -59,6 +59,7 @@ expression: read_json(report_json.path()).unwrap() "repo_path": "" } ], + "redundant_to": [], "rule_name": "AWS API Credentials", "rule_structural_id": "39d60c56d8a84ca6ab5999de8fea93657e3cae99", "rule_text_id": "np.aws.6", @@ -74,6 +75,7 @@ expression: read_json(report_json.path()).unwrap() ], "mean_score": null, "num_matches": 1, + "num_redundant_matches": 0, "rule_name": "AWS API Credentials", "rule_structural_id": 
"39d60c56d8a84ca6ab5999de8fea93657e3cae99", "rule_text_id": "np.aws.6", @@ -133,6 +135,7 @@ expression: read_json(report_json.path()).unwrap() "repo_path": "" } ], + "redundant_to": [], "rule_name": "AWS API Key", "rule_structural_id": "1e4113c48323df7405840eede9a2be89a9797520", "rule_text_id": "np.aws.1", @@ -148,6 +151,7 @@ expression: read_json(report_json.path()).unwrap() ], "mean_score": null, "num_matches": 1, + "num_redundant_matches": 0, "rule_name": "AWS API Key", "rule_structural_id": "1e4113c48323df7405840eede9a2be89a9797520", "rule_text_id": "np.aws.1", @@ -207,6 +211,7 @@ expression: read_json(report_json.path()).unwrap() "repo_path": "" } ], + "redundant_to": [], "rule_name": "AWS API Key", "rule_structural_id": "1e4113c48323df7405840eede9a2be89a9797520", "rule_text_id": "np.aws.1", @@ -222,80 +227,7 @@ expression: read_json(report_json.path()).unwrap() ], "mean_score": null, "num_matches": 1, - "rule_name": "AWS API Key", - "rule_structural_id": "1e4113c48323df7405840eede9a2be89a9797520", - "rule_text_id": "np.aws.1", - "statuses": [] - }, - { - "comment": null, - "finding_id": "01e1da0929b8d870274f935862cc930ebabd08ab", - "groups": [ - "QUtJQUpVWlBMVE5NWTJKU1hMWUE=" - ], - "matches": [ - { - "blob_id": "fba046509dc93ea42e3be8bf0f8bd181ae3ddb20", - "blob_metadata": { - "charset": null, - "id": "fba046509dc93ea42e3be8bf0f8bd181ae3ddb20", - "mime_essence": null, - "num_bytes": 523 - }, - "comment": null, - "groups": [ - "QUtJQUpVWlBMVE5NWTJKU1hMWUE=" - ], - "location": { - "offset_span": { - "end": 377, - "start": 357 - }, - "source_span": { - "end": { - "column": 27, - "line": 9 - }, - "start": { - "column": 8, - "line": 9 - } - } - }, - "provenance": [ - { - "first_commit": { - "blob_path": "my.env", - "commit_metadata": { - "author_email": "david.ascher@gmail.com", - "author_name": "David Ascher", - "author_timestamp": "1378126038 +0100", - "commit_id": "9ba2c5654ef78e49ce0173b81f1bcf8f25fcb36a", - "committer_email": "david.ascher@gmail.com", - 
"committer_name": "David Ascher", - "committer_timestamp": "1378126038 +0100", - "message": "fix both 254 and 257\n" - } - }, - "kind": "git_repo", - "repo_path": "" - } - ], - "rule_name": "AWS API Key", - "rule_structural_id": "1e4113c48323df7405840eede9a2be89a9797520", - "rule_text_id": "np.aws.1", - "score": null, - "snippet": { - "after": "\nS3_OBJECT_PREFIX=flathead\nS3_SECRET=DvHxHLMsO0fSnNqI82efq4sO8QefdMSz3l2q2Xk1\nSHARE_URL_PREFIX=https://s3.amazonaws.com/com.mozillalabs.appmaker/\n", - "before": " a janitor for arrogant rich people; so I clean their computer keyboareds with the toilet brush\nPATH=bin:node_modules/.bin:/usr/local/bin:/usr/bin:/bin\nPORT=5002\nPUBLISH_HOST=appalot.me\nPUBLISH_HOST_PREFIX=http://\nS3_BUCKET=com.mozillalabs.appmaker\nS3_KEY=", - "matching": "AKIAJUZPLTNMY2JSXLYA" - }, - "status": null, - "structural_id": "a05586d6f6356a67f91b8d693b24f6a7e14a3b7e" - } - ], - "mean_score": null, - "num_matches": 1, + "num_redundant_matches": 0, "rule_name": "AWS API Key", "rule_structural_id": "1e4113c48323df7405840eede9a2be89a9797520", "rule_text_id": "np.aws.1", @@ -355,6 +287,7 @@ expression: read_json(report_json.path()).unwrap() "repo_path": "" } ], + "redundant_to": [], "rule_name": "AWS S3 Bucket", "rule_structural_id": "a09eba269cc7230d53713d75a4c04a5bad6044a5", "rule_text_id": "np.s3.2", @@ -370,6 +303,7 @@ expression: read_json(report_json.path()).unwrap() ], "mean_score": null, "num_matches": 1, + "num_redundant_matches": 0, "rule_name": "AWS S3 Bucket", "rule_structural_id": "a09eba269cc7230d53713d75a4c04a5bad6044a5", "rule_text_id": "np.s3.2", @@ -429,6 +363,7 @@ expression: read_json(report_json.path()).unwrap() "repo_path": "" } ], + "redundant_to": [], "rule_name": "AWS S3 Bucket", "rule_structural_id": "a09eba269cc7230d53713d75a4c04a5bad6044a5", "rule_text_id": "np.s3.2", @@ -488,6 +423,7 @@ expression: read_json(report_json.path()).unwrap() "repo_path": "" } ], + "redundant_to": [], "rule_name": "AWS S3 Bucket", 
"rule_structural_id": "a09eba269cc7230d53713d75a4c04a5bad6044a5", "rule_text_id": "np.s3.2", @@ -547,6 +483,7 @@ expression: read_json(report_json.path()).unwrap() "repo_path": "" } ], + "redundant_to": [], "rule_name": "AWS S3 Bucket", "rule_structural_id": "a09eba269cc7230d53713d75a4c04a5bad6044a5", "rule_text_id": "np.s3.2", @@ -562,6 +499,7 @@ expression: read_json(report_json.path()).unwrap() ], "mean_score": null, "num_matches": 11, + "num_redundant_matches": 0, "rule_name": "AWS S3 Bucket", "rule_structural_id": "a09eba269cc7230d53713d75a4c04a5bad6044a5", "rule_text_id": "np.s3.2", @@ -621,6 +559,7 @@ expression: read_json(report_json.path()).unwrap() "repo_path": "" } ], + "redundant_to": [], "rule_name": "AWS S3 Bucket", "rule_structural_id": "a09eba269cc7230d53713d75a4c04a5bad6044a5", "rule_text_id": "np.s3.2", @@ -636,6 +575,7 @@ expression: read_json(report_json.path()).unwrap() ], "mean_score": null, "num_matches": 1, + "num_redundant_matches": 0, "rule_name": "AWS S3 Bucket", "rule_structural_id": "a09eba269cc7230d53713d75a4c04a5bad6044a5", "rule_text_id": "np.s3.2", @@ -695,6 +635,7 @@ expression: read_json(report_json.path()).unwrap() "repo_path": "" } ], + "redundant_to": [], "rule_name": "AWS Secret Access Key", "rule_structural_id": "faaf86b6ca922630c4bf6425ee7fb688410c490b", "rule_text_id": "np.aws.2", @@ -710,6 +651,7 @@ expression: read_json(report_json.path()).unwrap() ], "mean_score": null, "num_matches": 1, + "num_redundant_matches": 0, "rule_name": "AWS Secret Access Key", "rule_structural_id": "faaf86b6ca922630c4bf6425ee7fb688410c490b", "rule_text_id": "np.aws.2", @@ -786,6 +728,7 @@ expression: read_json(report_json.path()).unwrap() "repo_path": "" } ], + "redundant_to": [], "rule_name": "Amazon Resource Name", "rule_structural_id": "9b6dbcab66d56d9b6b9b3abbe3269f0eefcfd7da", "rule_text_id": "np.arn.1", @@ -801,6 +744,7 @@ expression: read_json(report_json.path()).unwrap() ], "mean_score": null, "num_matches": 1, + 
"num_redundant_matches": 0, "rule_name": "Amazon Resource Name", "rule_structural_id": "9b6dbcab66d56d9b6b9b3abbe3269f0eefcfd7da", "rule_text_id": "np.arn.1", @@ -877,6 +821,7 @@ expression: read_json(report_json.path()).unwrap() "repo_path": "" } ], + "redundant_to": [], "rule_name": "Amazon Resource Name", "rule_structural_id": "9b6dbcab66d56d9b6b9b3abbe3269f0eefcfd7da", "rule_text_id": "np.arn.1", @@ -892,6 +837,7 @@ expression: read_json(report_json.path()).unwrap() ], "mean_score": null, "num_matches": 1, + "num_redundant_matches": 0, "rule_name": "Amazon Resource Name", "rule_structural_id": "9b6dbcab66d56d9b6b9b3abbe3269f0eefcfd7da", "rule_text_id": "np.arn.1", @@ -968,6 +914,7 @@ expression: read_json(report_json.path()).unwrap() "repo_path": "" } ], + "redundant_to": [], "rule_name": "Amazon Resource Name", "rule_structural_id": "9b6dbcab66d56d9b6b9b3abbe3269f0eefcfd7da", "rule_text_id": "np.arn.1", @@ -983,159 +930,12 @@ expression: read_json(report_json.path()).unwrap() ], "mean_score": null, "num_matches": 1, + "num_redundant_matches": 0, "rule_name": "Amazon Resource Name", "rule_structural_id": "9b6dbcab66d56d9b6b9b3abbe3269f0eefcfd7da", "rule_text_id": "np.arn.1", "statuses": [] }, - { - "comment": null, - "finding_id": "a2795aa2248cdcbf164e7ab85873e6044e508c16", - "groups": [ - "RHZIeEhMTXNPMGZTbk5xSTgyZWZxNHNPOFFlZmRNU3ozbDJxMlhrMQ==" - ], - "matches": [ - { - "blob_id": "fba046509dc93ea42e3be8bf0f8bd181ae3ddb20", - "blob_metadata": { - "charset": null, - "id": "fba046509dc93ea42e3be8bf0f8bd181ae3ddb20", - "mime_essence": null, - "num_bytes": 523 - }, - "comment": null, - "groups": [ - "RHZIeEhMTXNPMGZTbk5xSTgyZWZxNHNPOFFlZmRNU3ozbDJxMlhrMQ==" - ], - "location": { - "offset_span": { - "end": 454, - "start": 407 - }, - "source_span": { - "end": { - "column": 50, - "line": 11 - }, - "start": { - "column": 4, - "line": 11 - } - } - }, - "provenance": [ - { - "first_commit": { - "blob_path": "my.env", - "commit_metadata": { - "author_email": 
"david.ascher@gmail.com", - "author_name": "David Ascher", - "author_timestamp": "1378126038 +0100", - "commit_id": "9ba2c5654ef78e49ce0173b81f1bcf8f25fcb36a", - "committer_email": "david.ascher@gmail.com", - "committer_name": "David Ascher", - "committer_timestamp": "1378126038 +0100", - "message": "fix both 254 and 257\n" - } - }, - "kind": "git_repo", - "repo_path": "" - } - ], - "rule_name": "Generic Secret", - "rule_structural_id": "3a961eccebcf7356ad803ec8e1a711d01801b9d7", - "rule_text_id": "np.generic.1", - "score": null, - "snippet": { - "after": "\nSHARE_URL_PREFIX=https://s3.amazonaws.com/com.mozillalabs.appmaker/\n", - "before": "eir computer keyboareds with the toilet brush\nPATH=bin:node_modules/.bin:/usr/local/bin:/usr/bin:/bin\nPORT=5002\nPUBLISH_HOST=appalot.me\nPUBLISH_HOST_PREFIX=http://\nS3_BUCKET=com.mozillalabs.appmaker\nS3_KEY=AKIAJUZPLTNMY2JSXLYA\nS3_OBJECT_PREFIX=flathead\nS3_", - "matching": "SECRET=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" - }, - "status": null, - "structural_id": "c471047ad5c5cda2503dd11cde88ef5bad0e582b" - } - ], - "mean_score": null, - "num_matches": 1, - "rule_name": "Generic Secret", - "rule_structural_id": "3a961eccebcf7356ad803ec8e1a711d01801b9d7", - "rule_text_id": "np.generic.1", - "statuses": [] - }, - { - "comment": null, - "finding_id": "d1e7983992d3b49f7084eab3817a6c38e24c9e17", - "groups": [ - "YUxjY2NMckhiZFd0Q25ONzVNS0k3Tm1iOU5aUUhaNWNTeHdPb20=" - ], - "matches": [ - { - "blob_id": "4355c1c85f858cb4e2737b543ab87007ff71f48a", - "blob_metadata": { - "charset": null, - "id": "4355c1c85f858cb4e2737b543ab87007ff71f48a", - "mime_essence": null, - "num_bytes": 1299 - }, - "comment": null, - "groups": [ - "YUxjY2NMckhiZFd0Q25ONzVNS0k3Tm1iOU5aUUhaNWNTeHdPb20=" - ], - "location": { - "offset_span": { - "end": 1187, - "start": 1138 - }, - "source_span": { - "end": { - "column": 1187, - "line": 1 - }, - "start": { - "column": 1139, - "line": 1 - } - } - }, - "provenance": [ - { - "first_commit": { - "blob_path": 
"tmp.env", - "commit_metadata": { - "author_email": "david.ascher@gmail.com", - "author_name": "David Ascher", - "author_timestamp": "1384157507 -0800", - "commit_id": "efda07fbb9e3a2c172f07e441c29b6988f90fe2f", - "committer_email": "david.ascher@gmail.com", - "committer_name": "David Ascher", - "committer_timestamp": "1384157507 -0800", - "message": "cleanup\n" - } - }, - "kind": "git_repo", - "repo_path": "" - } - ], - "rule_name": "Generic Secret", - "rule_structural_id": "3a961eccebcf7356ad803ec8e1a711d01801b9d7", - "rule_text_id": "np.generic.1", - "score": null, - "snippet": { - "after": "\",\"DISPLAY\":\"/tmp/launch-xo2Dks/org.macosforge.xquartz:0\",\"SECURITYSESSIONID\":\"186a4\",\"_\":\"/usr/local/bin/node\"}", - "before": "eworks/JavaVM.framework/Versions/1.6/Home\",\"LANG\":\"en_CA.UTF-8\",\"BING_APP_ID\":\"yX/AEjX7Iz6zSSDo+rvTxDCvTEjuQDGC+fNdVgk6bZs=\",\"SHLVL\":\"2\",\"HOME\":\"/Users/davida\",\"PYTHONPATH\":\"/Users/davida/lib/python2.6/site-packages:\",\"LOGNAME\":\"davida\",\"PORT\":\"1234\",\"AWS_", - "matching": "SECRET\":\"D+aLcccLrHbdWtCnN75MKI7Nmb9NZQHZ5cSxwOom" - }, - "status": null, - "structural_id": "865163cf72afa33926f788881c1d290419d175a4" - } - ], - "mean_score": null, - "num_matches": 1, - "rule_name": "Generic Secret", - "rule_structural_id": "3a961eccebcf7356ad803ec8e1a711d01801b9d7", - "rule_text_id": "np.generic.1", - "statuses": [] - }, { "comment": null, "finding_id": "3667958b271fb8c205883602cbf1e666c7ff9b14", @@ -1190,6 +990,7 @@ expression: read_json(report_json.path()).unwrap() "repo_path": "" } ], + "redundant_to": [], "rule_name": "Generic Secret", "rule_structural_id": "3a961eccebcf7356ad803ec8e1a711d01801b9d7", "rule_text_id": "np.generic.1", @@ -1205,6 +1006,7 @@ expression: read_json(report_json.path()).unwrap() ], "mean_score": null, "num_matches": 1, + "num_redundant_matches": 0, "rule_name": "Generic Secret", "rule_structural_id": "3a961eccebcf7356ad803ec8e1a711d01801b9d7", "rule_text_id": "np.generic.1", diff --git 
a/crates/noseyparker-cli/tests/scan/appmaker/snapshots/test_noseyparker__scan__appmaker__scan_workflow_from_git_url-5.snap b/crates/noseyparker-cli/tests/scan/appmaker/snapshots/test_noseyparker__scan__appmaker__scan_workflow_from_git_url-5.snap index e46f369d4..1c1eef1b3 100644 --- a/crates/noseyparker-cli/tests/scan/appmaker/snapshots/test_noseyparker__scan__appmaker__scan_workflow_from_git_url-5.snap +++ b/crates/noseyparker-cli/tests/scan/appmaker/snapshots/test_noseyparker__scan__appmaker__scan_workflow_from_git_url-5.snap @@ -2,12 +2,12 @@ source: crates/noseyparker-cli/tests/scan/appmaker/mod.rs expression: "std::fs::read_to_string(report_txt.path()).unwrap()" --- -Finding 1/14 +Finding 1/11 (id 4b68f1a2eebf34b00aee81bd944e3403921b8cce) Rule: AWS API Credentials Group 1: AKIAJUZPLTNMY2JSXLYA Group 2: DvHxHLMsO0fSnNqI82efq4sO8QefdMSz3l2q2Xk1 - Occurrence 1/1 + Match 1/1 (id 4514a0252fd7c1d2b57d1e2c8ca9e38cded33685) Git repo: Commit: first seen in 9ba2c5654ef78e49ce0173b81f1bcf8f25fcb36a @@ -32,11 +32,11 @@ Group 2: DvHxHLMsO0fSnNqI82efq4sO8QefdMSz3l2q2Xk1 -Finding 2/14 +Finding 2/11 (id 384ff44ebe6409d664f0d31189828b2e5ffbf45b) Rule: AWS API Key Group: AKIAJEEUOSJXYB2BKLMA - Occurrence 1/1 + Match 1/1 (id 54871c4ee7ca7aa5c101acccaa2928bfc9bbd93e) Git repo: Commit: first seen in efda07fbb9e3a2c172f07e441c29b6988f90fe2f @@ -51,11 +51,11 @@ Group: AKIAJEEUOSJXYB2BKLMA {"TERM_PROGRAM":"Apple_Terminal","TERM":"xterm-256color","SHELL":"/usr/local/bin/fish","CLICOLOR":"1","TMPDIR":"/var/folders/xj/5v3cc4y9039fx_fhk67bvg8w0000gn/T/","AWS_ID":"AKIAJEEUOSJXYB2BKLMA","Apple_PubSub_Socket_Render":"/tmp/launch-rHzB0S/Render","TERM_PROGRAM_VERSION":"326","TERM_SESSION_ID":"C99AE63D-8AC8-40EC-9A43-7C14ACEBE413","ANT_HOME":"/usr/local/etc/ant","CMD_DURATION":"1m 22.1s","USER":"davida","SSH_AUTH_SOCK":"/tmp/launch-W9AsCX/L -Finding 3/14 +Finding 3/11 (id 1d29e95e861d51ce67b64ca481653a1800b860fe) Rule: AWS API Key Group: AKIAJQNF3BIPEDR6MXEA - Occurrence 1/1 + Match 1/1 (id 
1eef0046f69ca10f4057f74d39ed4a44edd5c4be) Git repo: Commit: first seen in 6ea21730dd131fd3bc315d19953761e2cd7bac07 @@ -70,40 +70,11 @@ Group: AKIAJQNF3BIPEDR6MXEA S3U+qflQBzMNZ3TqlsXsrqBfA12wcidr5jLQ","S3_OBJECT_PREFIX":"flathead","SSH_AUTH_SOCK":"/tmp/launch-W9AsCX/Listeners","__CF_USER_TEXT_ENCODING":"0x1F5:0:0","PATH":"/usr/bin:/bin:/usr/local/bin:/usr/X11R6/bin","__CHECKFIX1436934":"1","ASSET_HOST":"","S3_KEY":"AKIAJQNF3BIPEDR6MXEA","PWD":"/Users/davida/src/appmaker","LANG":"en_CA.UTF-8","PUBLISH_URL_PREFIX":"XXX","HOME":"/Users/davida","SHLVL":"2","LOGNAME":"davida","S3_BUCKET":"com.mozillalabs.appmaker","COOKIE_SECRET":"I hate working as a janitor for arrogant rich people; so I cl -Finding 4/14 -Rule: AWS API Key -Group: AKIAJUZPLTNMY2JSXLYA - - Occurrence 1/1 - Git repo: - Commit: first seen in 9ba2c5654ef78e49ce0173b81f1bcf8f25fcb36a - - Author: David Ascher - Date: 2013-09-02 - Summary: fix both 254 and 257 - Path: my.env - - Blob: - Lines: 9:8-9:27 - - a janitor for arrogant rich people; so I clean their computer keyboareds with the toilet brush - PATH=bin:node_modules/.bin:/usr/local/bin:/usr/bin:/bin - PORT=5002 - PUBLISH_HOST=appalot.me - PUBLISH_HOST_PREFIX=http:// - S3_BUCKET=com.mozillalabs.appmaker - S3_KEY=AKIAJUZPLTNMY2JSXLYA - S3_OBJECT_PREFIX=flathead - S3_SECRET=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 - SHARE_URL_PREFIX=https://s3.amazonaws.com/com.mozillalabs.appmaker/ - - - -Finding 5/14 +Finding 4/11 (id 812243c9b73e6798275802bf5dc253f0bae6a309) Rule: AWS S3 Bucket Group: s3-us-west-2.amazonaws.com/makerstrap - Occurrence 1/1 + Match 1/1 (id 0297f3cad50dec88eb2c749791944380a7a3788c) Git repo: Commit: first seen in 93a0d0b65978bb3b364f7494e28c187c363c4a73 @@ -135,12 +106,12 @@ Group: s3-us-west-2.amazonaws.com/makerstrap aria-labelledby= -Finding 6/14 +Finding 5/11 (id 04c5e5a446dfdf78849217ac2c4662aea584f867) Rule: AWS S3 Bucket Group: s3.amazonaws.com/com.mozillalabs.appmaker -Showing 3/11 occurrences: +Showing 3/11 matches: - Occurrence 
1/11 + Match 1/11 (id b465f491912d3abf8c5a1ba3d4a2456fccea1671) Git repo: Commit: first seen in ad1a555a827707a18a81f248cb30966a657fb89f @@ -160,7 +131,7 @@ Showing 3/11 occurrences:

Draw with Friends