⚠️ Only include probes which ran for probe format (#3991)

* add findings to check results struct

these don't make it to the JSON output format as they're
not copied to the jsonCheckResultV2 struct in AsJSON2()

Signed-off-by: Spencer Schrock <sschrock@google.com>

* populate CheckResult findings

It would be nice if the evaluation functions did this for us,
but it would require changes to the Create*ScoreResult functions.
It was simpler just to set it in one place at the check level.

Signed-off-by: Spencer Schrock <sschrock@google.com>

---------

Signed-off-by: Spencer Schrock <sschrock@google.com>
This commit is contained in:
Spencer Schrock 2024-04-04 14:43:30 -07:00 committed by GitHub
parent aeaee6099e
commit 76a9b0470a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
21 changed files with 62 additions and 38 deletions

View File

@ -66,8 +66,9 @@ type CheckResult struct {
Score int
Reason string
Details []CheckDetail
// Structured results.
Rules []string // TODO(X): add support.
// Findings from the check's probes.
Findings []finding.Finding
}
// CheckDetail contains information for each detail.

View File

@ -57,5 +57,7 @@ func BinaryArtifacts(c *checker.CheckRequest) checker.CheckResult {
return checker.CreateRuntimeErrorResult(CheckBinaryArtifacts, e)
}
return evaluation.BinaryArtifacts(CheckBinaryArtifacts, findings, c.Dlogger)
ret := evaluation.BinaryArtifacts(CheckBinaryArtifacts, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -54,5 +54,7 @@ func BranchProtection(c *checker.CheckRequest) checker.CheckResult {
}
// Return the score evaluation.
return evaluation.BranchProtection(CheckBranchProtection, findings, c.Dlogger)
ret := evaluation.BranchProtection(CheckBranchProtection, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -53,5 +53,7 @@ func CITests(c *checker.CheckRequest) checker.CheckResult {
return checker.CreateRuntimeErrorResult(CheckCITests, e)
}
return evaluation.CITests(CheckCITests, findings, c.Dlogger)
ret := evaluation.CITests(CheckCITests, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -54,5 +54,7 @@ func CIIBestPractices(c *checker.CheckRequest) checker.CheckResult {
}
// Return the score evaluation.
return evaluation.CIIBestPractices(CheckCIIBestPractices, findings, c.Dlogger)
ret := evaluation.CIIBestPractices(CheckCIIBestPractices, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -49,5 +49,6 @@ func CodeReview(c *checker.CheckRequest) checker.CheckResult {
}
// Return the score evaluation.
// TODO(#3049)
return evaluation.CodeReview(CheckCodeReview, c.Dlogger, &rawData)
}

View File

@ -54,5 +54,7 @@ func Contributors(c *checker.CheckRequest) checker.CheckResult {
}
// Return the score evaluation.
return evaluation.Contributors(CheckContributors, findings, c.Dlogger)
ret := evaluation.Contributors(CheckContributors, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -57,5 +57,7 @@ func DangerousWorkflow(c *checker.CheckRequest) checker.CheckResult {
return checker.CreateRuntimeErrorResult(CheckDangerousWorkflow, e)
}
return evaluation.DangerousWorkflow(CheckDangerousWorkflow, findings, c.Dlogger)
ret := evaluation.DangerousWorkflow(CheckDangerousWorkflow, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -57,5 +57,7 @@ func DependencyUpdateTool(c *checker.CheckRequest) checker.CheckResult {
}
// Return the score evaluation.
return evaluation.DependencyUpdateTool(CheckDependencyUpdateTool, findings, c.Dlogger)
ret := evaluation.DependencyUpdateTool(CheckDependencyUpdateTool, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -54,5 +54,7 @@ func Fuzzing(c *checker.CheckRequest) checker.CheckResult {
}
// Return the score evaluation.
return evaluation.Fuzzing(CheckFuzzing, findings, c.Dlogger)
ret := evaluation.Fuzzing(CheckFuzzing, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -56,5 +56,7 @@ func License(c *checker.CheckRequest) checker.CheckResult {
return checker.CreateRuntimeErrorResult(CheckLicense, e)
}
return evaluation.License(CheckLicense, findings, c.Dlogger)
ret := evaluation.License(CheckLicense, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -54,5 +54,7 @@ func Maintained(c *checker.CheckRequest) checker.CheckResult {
}
// Return the score evaluation.
return evaluation.Maintained(CheckMaintained, findings, c.Dlogger)
ret := evaluation.Maintained(CheckMaintained, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -65,5 +65,7 @@ func Packaging(c *checker.CheckRequest) checker.CheckResult {
return checker.CreateRuntimeErrorResult(CheckPackaging, e)
}
return evaluation.Packaging(CheckPackaging, findings, c.Dlogger)
ret := evaluation.Packaging(CheckPackaging, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -58,5 +58,7 @@ func TokenPermissions(c *checker.CheckRequest) checker.CheckResult {
}
// Return the score evaluation.
return evaluation.TokenPermissions(CheckTokenPermissions, findings, c.Dlogger)
ret := evaluation.TokenPermissions(CheckTokenPermissions, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -58,5 +58,7 @@ func PinningDependencies(c *checker.CheckRequest) checker.CheckResult {
}
// Return the score evaluation.
return evaluation.PinningDependencies(CheckPinnedDependencies, findings, c.Dlogger)
ret := evaluation.PinningDependencies(CheckPinnedDependencies, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -54,5 +54,7 @@ func SAST(c *checker.CheckRequest) checker.CheckResult {
}
// Return the score evaluation.
return evaluation.SAST(CheckSAST, findings, c.Dlogger)
ret := evaluation.SAST(CheckSAST, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -57,5 +57,7 @@ func SecurityPolicy(c *checker.CheckRequest) checker.CheckResult {
}
// Return the score evaluation.
return evaluation.SecurityPolicy(CheckSecurityPolicy, findings, c.Dlogger)
ret := evaluation.SecurityPolicy(CheckSecurityPolicy, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -54,5 +54,7 @@ func SignedReleases(c *checker.CheckRequest) checker.CheckResult {
}
// Return the score evaluation.
return evaluation.SignedReleases(CheckSignedReleases, findings, c.Dlogger)
ret := evaluation.SignedReleases(CheckSignedReleases, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -57,5 +57,7 @@ func Vulnerabilities(c *checker.CheckRequest) checker.CheckResult {
return checker.CreateRuntimeErrorResult(CheckVulnerabilities, e)
}
return evaluation.Vulnerabilities(CheckVulnerabilities, findings, c.Dlogger)
ret := evaluation.Vulnerabilities(CheckVulnerabilities, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -69,5 +69,7 @@ func WebHooks(c *checker.CheckRequest) checker.CheckResult {
}
// Return the score evaluation.
return evaluation.Webhooks(CheckWebHooks, findings, c.Dlogger)
ret := evaluation.Webhooks(CheckWebHooks, findings, c.Dlogger)
ret.Findings = findings
return ret
}

View File

@ -32,8 +32,6 @@ import (
"github.com/ossf/scorecard/v4/finding"
proberegistration "github.com/ossf/scorecard/v4/internal/probes"
"github.com/ossf/scorecard/v4/options"
"github.com/ossf/scorecard/v4/probes"
"github.com/ossf/scorecard/v4/probes/zrunner"
)
// errEmptyRepository indicates the repository is empty.
@ -164,25 +162,13 @@ func runScorecard(ctx context.Context,
// If the user runs checks
go runEnabledChecks(ctx, repo, request, checksToRun, resultsCh)
exposeFindings := os.Getenv(options.EnvVarScorecardExperimental) == "1"
for result := range resultsCh {
ret.Checks = append(ret.Checks, result)
}
if value, _ := os.LookupEnv(options.EnvVarScorecardExperimental); value == "1" {
// Run the probes.
var findings []finding.Finding
// TODO(#3049): only run the probes for checks.
// NOTE: We will need separate functions to support:
// - `--probes X,Y`
// - `--check-definitions-file path/to/config.yml
// NOTE: we discard the returned error because the errors are
// already contained in the findings and we want to return the findings
// to users.
// See https://github.com/ossf/scorecard/blob/main/probes/zrunner/runner.go#L34-L45.
// We also don't want the entire scorecard run to fail if a single error is encountered.
//nolint:errcheck
findings, _ = zrunner.Run(&ret.RawResults, probes.All)
ret.Findings = findings
if exposeFindings {
ret.Findings = append(ret.Findings, result.Findings...)
}
}
return ret, nil
}