Refactoring.

This commit is contained in:
Dan Lorenc 2020-10-13 09:32:38 -05:00
parent 939cf20832
commit 3608792e91
7 changed files with 31 additions and 90 deletions

View File

@ -14,14 +14,6 @@ A short motivational video clip to inspire us: https://youtu.be/rDMMYT3vkTk "You
1. Use this data to proactively improve the security posture of the critical projects the world depends on.
### Requirements
* The scorecard must only be composed of automate-able, objective data. For example, a project having 10 contributors doesn't necessarily mean it's more secure than a project with say 50 contributors. But, having two maintainers might be preferable to only having one - the larger bus factor and ability to provide code reviews is objectively better.
* The scorecard criteria can be as specific as possible and not limited to general recommendations. For example, for Go, we can recommend/require specific linters and analyzers to be run on the codebase.
* The scorecard can be populated for any open source project without any work or interaction from maintainers.
* Maintainers must be provided with a mechanism to correct any automated scorecard findings they feel were made in error, provide "hints" for anything we can't detect automatically, and even dispute the applicability of a given scorecard finding for that repository.
* Any criteria in the scorecard must be actionable. It should be possible, with help, for any project to "check all the boxes".
* Any solution to compile a scorecard should be usable by the greater open source community to monitor upstream security.
## Usage
The program only requires one argument to run, the name of the repo:
@ -88,6 +80,15 @@ A confidence of 10 indicates the check is completely sure of the result.
Many of the checks are based on heuristics, contributions are welcome to improve the detection!
### Requirements
* The scorecard must only be composed of automate-able, objective data. For example, a project having 10 contributors doesn't necessarily mean it's more secure than a project with say 50 contributors. But, having two maintainers might be preferable to only having one - the larger bus factor and ability to provide code reviews is objectively better.
* The scorecard criteria can be as specific as possible and not limited to general recommendations. For example, for Go, we can recommend/require specific linters and analyzers to be run on the codebase.
* The scorecard can be populated for any open source project without any work or interaction from maintainers.
* Maintainers must be provided with a mechanism to correct any automated scorecard findings they feel were made in error, provide "hints" for anything we can't detect automatically, and even dispute the applicability of a given scorecard finding for that repository.
* Any criteria in the scorecard must be actionable. It should be possible, with help, for any project to "check all the boxes".
* Any solution to compile a scorecard should be usable by the greater open source community to monitor upstream security.
## Contributing
See the [Contributing](contributing.md) documentation for guidance on how to contribute.

View File

@ -47,6 +47,20 @@ func MultiCheck(fns ...CheckFn) CheckFn {
}
}
// ProportionalResult builds a CheckResult from the ratio numerator/denominator.
// The check passes when the ratio meets threshold; in either case Confidence is
// scaled onto 0-10 in proportion to the ratio (high ratio => confident pass,
// low ratio => confident fail).
func ProportionalResult(numerator, denominator int, threshold float32) CheckResult {
	// Guard the ratio: float32 division by zero yields Inf/NaN, and converting
	// NaN to int is implementation-defined in Go. With no denominator there is
	// nothing to measure, so report a failure with zero confidence.
	if denominator == 0 {
		return CheckResult{
			Pass:       false,
			Confidence: 0,
		}
	}
	actual := float32(numerator) / float32(denominator)
	if actual >= threshold {
		return CheckResult{
			Pass:       true,
			Confidence: int(actual * 10),
		}
	}
	// int(actual*10) is already an int; the original wrapped the subtraction
	// in a second, redundant int conversion.
	return CheckResult{
		Pass:       false,
		Confidence: 10 - int(actual*10),
	}
}
type NamedCheck struct {
Name string
Fn CheckFn

View File

@ -54,18 +54,7 @@ func GithubCodeReview(c *checker.Checker) CheckResult {
}
}
// Threshold is 3/4 of merged PRs
actual := float32(totalReviewed) / float32(totalMerged)
if actual >= .75 {
return CheckResult{
Pass: true,
Confidence: int(actual * 10),
}
}
return CheckResult{
Pass: false,
Confidence: int(10 - int(actual*10)),
}
return ProportionalResult(totalReviewed, totalMerged, .75)
}
func IsPrReviewRequired(c *checker.Checker) CheckResult {
@ -112,20 +101,10 @@ func ProwCodeReview(c *checker.Checker) CheckResult {
}
}
}
// Threshold is 3/4 of merged PRs
actual := float32(totalReviewed) / float32(totalMerged)
if actual >= .75 {
return CheckResult{
Pass: true,
Confidence: int(actual * 10),
}
}
if totalReviewed == 0 {
return InconclusiveResult
}
return CheckResult{
Pass: false,
Confidence: int(10 - int(actual*10)),
}
return ProportionalResult(totalReviewed, totalMerged, .75)
}

View File

@ -30,15 +30,5 @@ func PullRequests(c *checker.Checker) CheckResult {
totalWithPrs++
}
}
actual := float32(totalWithPrs) / float32(total)
if actual >= .9 {
return CheckResult{
Pass: true,
Confidence: int(actual * 10),
}
}
return CheckResult{
Pass: false,
Confidence: int(10 - int(actual*10)),
}
return ProportionalResult(totalWithPrs, total, .9)
}

View File

@ -49,15 +49,5 @@ func SignedReleases(c *checker.Checker) CheckResult {
if totalReleases == 0 {
return InconclusiveResult
}
actual := float32(totalSigned) / float32(totalReleases)
if actual >= .75 {
return CheckResult{
Pass: true,
Confidence: int(actual * 10),
}
}
return CheckResult{
Pass: false,
Confidence: int(10 - int(actual*10)),
}
return ProportionalResult(totalSigned, totalReleases, .75)
}

View File

@ -31,16 +31,5 @@ func SignedTags(c *checker.Checker) CheckResult {
}
}
// Threshold is 3/4 of releases
actual := float32(totalSigned) / float32(totalReleases)
if actual >= .75 {
return CheckResult{
Pass: true,
Confidence: int(actual * 10),
}
}
return CheckResult{
Pass: false,
Confidence: int(10 - int(actual*10)),
}
return ProportionalResult(totalSigned, totalReleases, .75)
}

View File

@ -43,21 +43,10 @@ func GithubStatuses(c *checker.Checker) CheckResult {
}
}
}
// Threshold is 3/4 of merged PRs
actual := float32(totalTested) / float32(totalMerged)
if actual >= .75 {
return CheckResult{
Pass: true,
Confidence: int(actual * 10),
}
}
if totalTested == 0 {
return InconclusiveResult
}
return CheckResult{
Pass: false,
Confidence: int(10 - int(actual*10)),
}
return ProportionalResult(totalTested, totalMerged, .75)
}
func isTest(s string) bool {
@ -104,19 +93,8 @@ func GithubCheckRuns(c *checker.Checker) CheckResult {
}
}
}
// Threshold is 3/4 of merged PRs
actual := float32(totalTested) / float32(totalMerged)
if actual >= .75 {
return CheckResult{
Pass: true,
Confidence: int(actual * 10),
}
}
if totalTested == 0 {
return InconclusiveResult
}
return CheckResult{
Pass: false,
Confidence: int(10 - int(actual*10)),
}
return ProportionalResult(totalTested, totalMerged, .75)
}