Mirror of https://github.com/AdguardTeam/AdGuardHome.git (synced 2024-12-14 18:51:34 +03:00)
Merge pull request #7 in DNS/adguard-dns from metrics to master
* commit '1f0fdef8d6b2ce324e7009bb3f95626d87438d61':
  - Fix invalid element order for historical stats -- in the API declaration, values run from oldest to newest, not the other way around.
  - Rewrite how historical stats are stored and calculated.
  - coredns plugin -- convert the logic into a switch, logging unexpected, non-covered cases.
  - After filters were redownloaded and deemed to be fresh, tell the coredns server to reload.
  - coredns plugin -- on server reload, metrics disappeared, therefore they must be registered on each reload instead of once.
  - coredns plugin -- give feedback on how many rules were in the rule file.
  - dnsfilter -- update tests to check for the expected filter/nofilter reason as well.
  - Remove debug logging from the checks of whether coredns is alive.
  - Be more atomic when writing files -- this prevents other processes from seeing empty or partial files (see the sketch after this list).
  - Start coredns on launch, before we serve HTTP -- this checks whether the port is available.
  - Move starting of the coredns server into a separate function.
  - Sometimes the answer can be empty, so the question could be lost -- pass both to the query log.
  - Reduce the binary size of coredns by 60% by removing orchestration plugins such as kubernetes, route53, trace, etcd and federation.
  - Fix registration of metrics when the querylog is enabled.
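The "be more atomic" item above refers to writing a temporary file and then renaming it over the target, so other processes never observe a half-written file. A minimal sketch of that pattern, assuming a hypothetical helper name (writeFileAtomically) and made-up file contents; the repository applies the same steps inline in config.go and control.go:

package main

import (
	"io/ioutil"
	"log"
	"os"
)

// writeFileAtomically writes data to path+".tmp" first and then renames it
// over path. On POSIX systems the rename is atomic, so readers see either
// the old file or the complete new one, never a partial file.
func writeFileAtomically(path string, data []byte) error {
	tmp := path + ".tmp"
	if err := ioutil.WriteFile(tmp, data, 0644); err != nil {
		return err
	}
	return os.Rename(tmp, path)
}

func main() {
	if err := writeFileAtomically("example.conf", []byte("key: value\n")); err != nil {
		log.Fatal(err)
	}
}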
This commit is contained in: commit 74275bebdc
Makefile (1 changed line)

@@ -24,6 +24,7 @@ AdguardDNS: $(STATIC) *.go
 coredns: coredns_plugin/*.go dnsfilter/*.go
 echo mkfile_dir = $(mkfile_dir)
 go get -v -d github.com/coredns/coredns
+cd $(GOPATH)/src/github.com/coredns/coredns && perl -p -i.bak -e 's/^(trace|route53|federation|kubernetes|etcd):.*//' plugin.cfg
 cd $(GOPATH)/src/github.com/coredns/coredns && grep -q '^dnsfilter:' plugin.cfg || perl -p -i.bak -e 's|^log:log|log:log\ndnsfilter:github.com/AdguardTeam/AdguardDNS/coredns_plugin|' plugin.cfg
 grep '^dnsfilter:' $(GOPATH)/src/github.com/coredns/coredns/plugin.cfg ## used to check that plugin.cfg was successfully edited by sed
 cd $(GOPATH)/src/github.com/coredns/coredns && GOOS=$(NATIVE_GOOS) GOARCH=$(NATIVE_GOARCH) go generate
app.go (5 changed lines)

@@ -122,6 +122,11 @@ func main() {
 http.Handle("/", http.FileServer(box))
 registerControlHandlers()
 
+err = startDNSServer()
+if err != nil {
+log.Fatal(err)
+}
+
 URL := fmt.Sprintf("http://%s", address)
 log.Println("Go to " + URL)
 log.Fatal(http.ListenAndServe(address, nil))
config.go (13 changed lines)

@@ -108,11 +108,16 @@ func writeConfig() error {
 log.Printf("Couldn't generate YAML file: %s", err)
 return err
 }
-err = ioutil.WriteFile(configfile, yamlText, 0644)
+err = ioutil.WriteFile(configfile+".tmp", yamlText, 0644)
 if err != nil {
 log.Printf("Couldn't write YAML config: %s", err)
 return err
 }
+err = os.Rename(configfile+".tmp", configfile)
+if err != nil {
+log.Printf("Couldn't rename YAML config: %s", err)
+return err
+}
 return nil
 }

@@ -127,10 +132,14 @@ func writeCoreDNSConfig() error {
 log.Printf("Couldn't generate DNS config: %s", err)
 return err
 }
-err = ioutil.WriteFile(corefile, []byte(configtext), 0644)
+err = ioutil.WriteFile(corefile+".tmp", []byte(configtext), 0644)
 if err != nil {
 log.Printf("Couldn't write DNS config: %s", err)
 }
+err = os.Rename(corefile+".tmp", corefile)
+if err != nil {
+log.Printf("Couldn't rename DNS config: %s", err)
+}
 return err
 }
control.go (133 changed lines)

@@ -44,7 +44,6 @@ func tellCoreDNSToReload() {
 log.Printf("os.FindProcess(%d) returned err: %v\n", pid, err)
 return
 }
-log.Printf("os.FindProcess(%d) returned: %v, %v\n", pid, process, err)
 err = process.Signal(syscall.SIGUSR1)
 if err != nil {
 log.Printf("process.Signal on pid %d returned: %v\n", pid, err)

@@ -69,9 +68,10 @@ func isRunning() bool {
 if err != nil {
 log.Printf("os.FindProcess(%d) returned err: %v\n", pid, err)
 } else {
-log.Printf("os.FindProcess(%d) returned: %v, %v\n", pid, process, err)
 err := process.Signal(syscall.Signal(0))
+if err != nil {
 log.Printf("process.Signal on pid %d returned: %v\n", pid, err)
+}
 if err == nil {
 return true
 }

@@ -79,24 +79,22 @@ func isRunning() bool {
 }
 return false
 }
-func handleStart(w http.ResponseWriter, r *http.Request) {
+func startDNSServer() error {
 if isRunning() {
-http.Error(w, fmt.Sprintf("Unable to start coreDNS: Already running"), 400)
-return
+return fmt.Errorf("Unable to start coreDNS: Already running")
 }
 err := writeCoreDNSConfig()
 if err != nil {
-errortext := fmt.Sprintf("Unable to write coredns config: %s", err)
+errortext := fmt.Errorf("Unable to write coredns config: %s", err)
 log.Println(errortext)
-http.Error(w, errortext, http.StatusInternalServerError)
-return
+return errortext
 }
 err = writeFilterFile()
 if err != nil {
-errortext := fmt.Sprintf("Couldn't write filter file: %s", err)
+errortext := fmt.Errorf("Couldn't write filter file: %s", err)
 log.Println(errortext)
-http.Error(w, errortext, http.StatusInternalServerError)
-return
+return errortext
 }
 binarypath := filepath.Join(config.ourBinaryDir, config.CoreDNS.binaryFile)
 configpath := filepath.Join(config.ourBinaryDir, config.CoreDNS.coreFile)

@@ -105,14 +103,27 @@ func handleStart(w http.ResponseWriter, r *http.Request) {
 coreDNSCommand.Stderr = os.Stderr
 err = coreDNSCommand.Start()
 if err != nil {
-errortext := fmt.Sprintf("Unable to start coreDNS: %s", err)
+errortext := fmt.Errorf("Unable to start coreDNS: %s", err)
 log.Println(errortext)
-http.Error(w, errortext, http.StatusInternalServerError)
-return
+return errortext
 }
 log.Printf("coredns PID: %v\n", coreDNSCommand.Process.Pid)
-fmt.Fprintf(w, "OK, PID %d\n", coreDNSCommand.Process.Pid)
 go childwaiter()
+return nil
+}
+
+func handleStart(w http.ResponseWriter, r *http.Request) {
+if isRunning() {
+http.Error(w, fmt.Sprintf("Unable to start coreDNS: Already running"), 400)
+return
+}
+err := startDNSServer()
+if err != nil {
+http.Error(w, err.Error(), http.StatusInternalServerError)
+return
+}
+
+fmt.Fprintf(w, "OK, PID %d\n", coreDNSCommand.Process.Pid)
 }
 
 func childwaiter() {

@@ -188,21 +199,23 @@ func handleStatus(w http.ResponseWriter, r *http.Request) {
 // stats
 // -----
 func handleStats(w http.ResponseWriter, r *http.Request) {
-snap := &statistics.lastsnap
-// generate from last 3 minutes
-var last3mins statsSnapshot
-last3mins.filteredTotal = snap.filteredTotal - statistics.perMinute.filteredTotal[2]
-last3mins.filteredLists = snap.filteredLists - statistics.perMinute.filteredLists[2]
-last3mins.filteredSafebrowsing = snap.filteredSafebrowsing - statistics.perMinute.filteredSafebrowsing[2]
-last3mins.filteredParental = snap.filteredParental - statistics.perMinute.filteredParental[2]
-last3mins.totalRequests = snap.totalRequests - statistics.perMinute.totalRequests[2]
-last3mins.processingTimeSum = snap.processingTimeSum - statistics.perMinute.processingTimeSum[2]
-last3mins.processingTimeCount = snap.processingTimeCount - statistics.perMinute.processingTimeCount[2]
-// rate := computeRate(append([]float64(snap.totalRequests}, statistics.perMinute.totalRequests[0:2])
-
-data := generateMapFromSnap(last3mins)
-json, err := json.Marshal(data)
+histrical := generateMapFromStats(&statistics.perMinute, 0, 2)
+// sum them up
+summed := map[string]interface{}{}
+for key, values := range histrical {
+summedValue := 0.0
+floats, ok := values.([]float64)
+if !ok {
+continue
+}
+for _, v := range floats {
+summedValue += v
+}
+summed[key] = summedValue
+}
+summed["stats_period"] = "3 minutes"
+
+json, err := json.Marshal(summed)
 if err != nil {
 errortext := fmt.Sprintf("Unable to marshal status json: %s", err)
 log.Println(errortext)

@@ -221,28 +234,29 @@ func handleStats(w http.ResponseWriter, r *http.Request) {
 
 func handleStatsHistory(w http.ResponseWriter, r *http.Request) {
 // handle time unit and prepare our time window size
-limitTime := time.Now()
+now := time.Now()
-timeUnit := r.URL.Query().Get("time_unit")
+timeUnitString := r.URL.Query().Get("time_unit")
 var stats *periodicStats
-switch timeUnit {
+var timeUnit time.Duration
+switch timeUnitString {
 case "seconds":
-limitTime = limitTime.Add(statsHistoryElements * -1 * time.Second)
+timeUnit = time.Second
 stats = &statistics.perSecond
 case "minutes":
-limitTime = limitTime.Add(statsHistoryElements * -1 * time.Minute)
+timeUnit = time.Minute
 stats = &statistics.perMinute
 case "hours":
-limitTime = limitTime.Add(statsHistoryElements * -1 * time.Hour)
+timeUnit = time.Hour
 stats = &statistics.perHour
 case "days":
-limitTime = limitTime.Add(statsHistoryElements * -1 * time.Hour * 24)
+timeUnit = time.Hour * 24
 stats = &statistics.perDay
 default:
 http.Error(w, "Must specify valid time_unit parameter", 400)
 return
 }
 
-// check if start time is within supported time range
+// parse start and end time
 startTime, err := time.Parse(time.RFC3339, r.URL.Query().Get("start_time"))
 if err != nil {
 errortext := fmt.Sprintf("Must specify valid start_time parameter: %s", err)

@@ -250,12 +264,6 @@ func handleStatsHistory(w http.ResponseWriter, r *http.Request) {
 http.Error(w, errortext, 400)
 return
 }
-if startTime.Before(limitTime) {
-http.Error(w, "start_time parameter is outside of supported range", 501)
-return
-}
-
-// check if end time is within supported time range
 endTime, err := time.Parse(time.RFC3339, r.URL.Query().Get("end_time"))
 if err != nil {
 errortext := fmt.Sprintf("Must specify valid end_time parameter: %s", err)

@@ -263,28 +271,22 @@ func handleStatsHistory(w http.ResponseWriter, r *http.Request) {
 http.Error(w, errortext, 400)
 return
 }
-if endTime.Before(limitTime) {
+// check if start and time times are within supported time range
+timeRange := timeUnit * statsHistoryElements
+if startTime.Add(timeRange).Before(now) {
+http.Error(w, "start_time parameter is outside of supported range", 501)
+return
+}
+if endTime.Add(timeRange).Before(now) {
 http.Error(w, "end_time parameter is outside of supported range", 501)
 return
 }
 
-// calculate how which slice range we need to provide
-var start int
-var end int
-switch timeUnit {
-case "seconds":
-start = int(startTime.Sub(limitTime).Seconds())
-end = int(endTime.Sub(limitTime).Seconds())
-case "minutes":
-start = int(startTime.Sub(limitTime).Minutes())
-end = int(endTime.Sub(limitTime).Minutes())
-case "hours":
-start = int(startTime.Sub(limitTime).Hours())
-end = int(endTime.Sub(limitTime).Hours())
-case "days":
-start = int(startTime.Sub(limitTime).Hours() / 24.0)
-end = int(endTime.Sub(limitTime).Hours() / 24.0)
-}
+// calculate start and end of our array
+// basically it's how many hours/minutes/etc have passed since now
+start := int(now.Sub(endTime) / timeUnit)
+end := int(now.Sub(startTime) / timeUnit)
 
 // swap them around if they're inverted
 if start > end {

@@ -840,6 +842,7 @@ func refreshFiltersIfNeccessary() int {
 errortext := fmt.Sprintf("Couldn't write filter file: %s", err)
 log.Println(errortext)
 }
+tellCoreDNSToReload()
 }
 return updateCount
 }

@@ -918,11 +921,17 @@ func writeFilterFile() error {
 data = append(data, []byte(rule)...)
 data = append(data, '\n')
 }
-err := ioutil.WriteFile(filterpath, data, 0644)
+err := ioutil.WriteFile(filterpath+".tmp", data, 0644)
 if err != nil {
 log.Printf("Couldn't write filter file: %s", err)
 return err
 }
+
+err = os.Rename(filterpath+".tmp", filterpath)
+if err != nil {
+log.Printf("Couldn't rename filter file: %s", err)
+return err
+}
 return nil
 }
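The reworked handleStatsHistory no longer needs a per-unit switch to turn timestamps into array positions; it divides the elapsed duration by the chosen unit. A small sketch of that index calculation, assuming a hypothetical helper name (sliceRange); statsHistoryElements and the two formulas come from the diff, everything else is illustrative:

package main

import (
	"fmt"
	"time"
)

const statsHistoryElements = 60 + 1 // +1 for calculating delta

// sliceRange converts a [startTime, endTime] window into indexes of a
// newest-first history array: index 0 is now, index 1 is one timeUnit ago.
func sliceRange(now, startTime, endTime time.Time, timeUnit time.Duration) (start, end int) {
	start = int(now.Sub(endTime) / timeUnit)
	end = int(now.Sub(startTime) / timeUnit)
	if start > end { // swap them around if they're inverted
		start, end = end, start
	}
	return start, end
}

func main() {
	now := time.Now()
	start, end := sliceRange(now, now.Add(-10*time.Minute), now.Add(-2*time.Minute), time.Minute)
	fmt.Println(start, end) // 2 10 -- entries from 2 to 10 minutes ago
}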
@@ -132,7 +132,7 @@ func setupPlugin(c *caddy.Controller) (*Plugin, error) {
 }
 case "querylog":
 d.QueryLogEnabled = true
-once.Do(func() {
+onceQueryLog.Do(func() {
 go startQueryLogServer() // TODO: how to handle errors?
 })
 }

@@ -145,6 +145,7 @@ func setupPlugin(c *caddy.Controller) (*Plugin, error) {
 }
 defer file.Close()
 
+count := 0
 scanner := bufio.NewScanner(file)
 for scanner.Scan() {
 text := scanner.Text()

@@ -158,7 +159,9 @@ func setupPlugin(c *caddy.Controller) (*Plugin, error) {
 if err != nil {
 return nil, err
 }
+count++
 }
+log.Printf("Added %d rules from %s", count, filterFileName)
 
 if err = scanner.Err(); err != nil {
 return nil, err

@@ -184,10 +187,9 @@ func setup(c *caddy.Controller) error {
 })
 
 c.OnStartup(func() error {
-once.Do(func() {
 m := dnsserver.GetConfig(c).Handler("prometheus")
 if m == nil {
-return
+return nil
 }
 if x, ok := m.(*metrics.Metrics); ok {
 x.MustRegister(requests)

@@ -200,7 +202,6 @@ func setup(c *caddy.Controller) error {
 x.MustRegister(errorsTotal)
 x.MustRegister(d)
 }
-})
 return nil
 })
 c.OnShutdown(d.OnShutdown)

@@ -410,8 +411,9 @@ func (d *Plugin) serveDNSInternal(ctx context.Context, w dns.ResponseWriter, r *
 return dns.RcodeServerFailure, fmt.Errorf("plugin/dnsfilter: %s", err), dnsfilter.Result{}
 }
 
-// safebrowsing
-if result.IsFiltered == true && result.Reason == dnsfilter.FilteredSafeBrowsing {
+if result.IsFiltered {
+switch result.Reason {
+case dnsfilter.FilteredSafeBrowsing:
 // return cname safebrowsing.block.dns.adguard.com
 val := d.SafeBrowsingBlockHost
 rcode, err := d.replaceHostWithValAndReply(ctx, w, r, host, val, question)

@@ -419,30 +421,34 @@ func (d *Plugin) serveDNSInternal(ctx context.Context, w dns.ResponseWriter, r *
 return rcode, err, dnsfilter.Result{}
 }
 return rcode, err, result
-}
-
-// parental
-if result.IsFiltered == true && result.Reason == dnsfilter.FilteredParental {
-// return cname
+case dnsfilter.FilteredParental:
+// return cname family.block.dns.adguard.com
 val := d.ParentalBlockHost
 rcode, err := d.replaceHostWithValAndReply(ctx, w, r, host, val, question)
 if err != nil {
 return rcode, err, dnsfilter.Result{}
 }
 return rcode, err, result
-}
-
-// blacklist
-if result.IsFiltered == true && result.Reason == dnsfilter.FilteredBlackList {
+case dnsfilter.FilteredBlackList:
+// return NXdomain
 rcode, err := writeNXdomain(ctx, w, r)
 if err != nil {
 return rcode, err, dnsfilter.Result{}
 }
 return rcode, err, result
+default:
+log.Printf("SHOULD NOT HAPPEN -- got unknown reason for filtering: %T %v %s", result.Reason, result.Reason, result.Reason.String())
 }
-if result.IsFiltered == false && result.Reason == dnsfilter.NotFilteredWhiteList {
+} else {
+switch result.Reason {
+case dnsfilter.NotFilteredWhiteList:
 rcode, err := plugin.NextOrFailure(d.Name(), d.Next, ctx, w, r)
 return rcode, err, result
+case dnsfilter.NotFilteredNotFound:
+// do nothing, pass through to lower code
+default:
+log.Printf("SHOULD NOT HAPPEN -- got unknown reason for not filtering: %T %v %s", result.Reason, result.Reason, result.Reason.String())
+}
 }
 }
 rcode, err := plugin.NextOrFailure(d.Name(), d.Next, ctx, w, r)

@@ -498,11 +504,11 @@ func (d *Plugin) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
 
 // log
 if d.QueryLogEnabled {
-logRequest(rrw.Msg, result, time.Since(start), ip)
+logRequest(r, rrw.Msg, result, time.Since(start), ip)
 }
 return rcode, err
 }
 
 func (d *Plugin) Name() string { return "dnsfilter" }
 
-var once sync.Once
+var onceQueryLog sync.Once
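The serveDNSInternal change above replaces a chain of "if result.IsFiltered && result.Reason == …" checks with a switch over the reason and logs any value the code does not cover. A stripped-down sketch of that shape; the Reason type and its constants here are stand-ins for the dnsfilter package's own, and the returned strings are placeholders:

package main

import "log"

// Reason is a stand-in for dnsfilter.Reason.
type Reason int

const (
	FilteredBlackList Reason = iota
	FilteredSafeBrowsing
	FilteredParental
	NotFilteredNotFound
	NotFilteredWhiteList
)

func handle(isFiltered bool, reason Reason) string {
	if isFiltered {
		switch reason {
		case FilteredSafeBrowsing:
			return "reply with the safebrowsing block host"
		case FilteredParental:
			return "reply with the parental block host"
		case FilteredBlackList:
			return "reply with NXDOMAIN"
		default:
			log.Printf("SHOULD NOT HAPPEN -- unknown filtering reason: %v", reason)
		}
	} else {
		switch reason {
		case NotFilteredWhiteList:
			return "pass to the next plugin immediately"
		case NotFilteredNotFound:
			// do nothing, fall through to the normal path
		default:
			log.Printf("SHOULD NOT HAPPEN -- unknown non-filtering reason: %v", reason)
		}
	}
	return "normal resolution"
}

func main() {
	log.Println(handle(true, FilteredSafeBrowsing))
	log.Println(handle(false, NotFilteredNotFound))
}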
@@ -19,7 +19,8 @@ import (
 var logBuffer = ring.Ring{}
 
 type logEntry struct {
-R *dns.Msg
+Question *dns.Msg
+Answer *dns.Msg
 Result dnsfilter.Result
 Time time.Time
 Elapsed time.Duration

@@ -30,9 +31,10 @@ func init() {
 logBuffer.SetCapacity(1000)
 }
 
-func logRequest(r *dns.Msg, result dnsfilter.Result, elapsed time.Duration, ip string) {
+func logRequest(question *dns.Msg, answer *dns.Msg, result dnsfilter.Result, elapsed time.Duration, ip string) {
 entry := logEntry{
-R: r,
+Question: question,
+Answer: answer,
 Result: result,
 Time: time.Now(),
 Elapsed: elapsed,

@@ -57,21 +59,21 @@ func handler(w http.ResponseWriter, r *http.Request) {
 "client": entry.IP,
 }
 question := map[string]interface{}{
-"host": strings.ToLower(strings.TrimSuffix(entry.R.Question[0].Name, ".")),
-"type": dns.Type(entry.R.Question[0].Qtype).String(),
-"class": dns.Class(entry.R.Question[0].Qclass).String(),
+"host": strings.ToLower(strings.TrimSuffix(entry.Question.Question[0].Name, ".")),
+"type": dns.Type(entry.Question.Question[0].Qtype).String(),
+"class": dns.Class(entry.Question.Question[0].Qclass).String(),
 }
 jsonentry["question"] = question
 
-status, _ := response.Typify(entry.R, time.Now().UTC())
+status, _ := response.Typify(entry.Answer, time.Now().UTC())
 jsonentry["status"] = status.String()
 if len(entry.Result.Rule) > 0 {
 jsonentry["rule"] = entry.Result.Rule
 }
 
-if len(entry.R.Answer) > 0 {
+if entry.Answer != nil && len(entry.Answer.Answer) > 0 {
 var answers = []map[string]interface{}{}
-for _, k := range entry.R.Answer {
+for _, k := range entry.Answer.Answer {
 header := k.Header()
 answer := map[string]interface{}{
 "type": dns.TypeToString[header.Rrtype],
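Because the upstream answer can be empty, the query-log entry now keeps the original question alongside the answer and only reads answer fields after a nil check. A reduced sketch using the same miekg/dns message type; the Question/Answer field names follow the diff, while describe() and the sample domain are illustrative:

package main

import (
	"fmt"
	"strings"
	"time"

	"github.com/miekg/dns"
)

type logEntry struct {
	Question *dns.Msg
	Answer   *dns.Msg
	Time     time.Time
}

// describe falls back to the question when no answer was recorded,
// so the queried host is never lost from the log.
func describe(e logEntry) string {
	host := strings.TrimSuffix(e.Question.Question[0].Name, ".")
	if e.Answer == nil || len(e.Answer.Answer) == 0 {
		return host + ": no answer recorded"
	}
	return fmt.Sprintf("%s: %d answer records", host, len(e.Answer.Answer))
}

func main() {
	q := new(dns.Msg)
	q.SetQuestion("example.org.", dns.TypeA)
	fmt.Println(describe(logEntry{Question: q, Time: time.Now()}))
}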
@@ -388,40 +388,41 @@ var tests = []struct {
 testname string
 rules []string
 hostname string
-result bool
+isFiltered bool
+reason Reason
 }{
-{"sanity", []string{"||doubleclick.net^"}, "www.doubleclick.net", true},
+{"sanity", []string{"||doubleclick.net^"}, "www.doubleclick.net", true, FilteredBlackList},
-{"sanity", []string{"||doubleclick.net^"}, "nodoubleclick.net", false},
+{"sanity", []string{"||doubleclick.net^"}, "nodoubleclick.net", false, NotFilteredNotFound},
-{"sanity", []string{"||doubleclick.net^"}, "doubleclick.net.ru", false},
+{"sanity", []string{"||doubleclick.net^"}, "doubleclick.net.ru", false, NotFilteredNotFound},
-{"sanity", []string{"||doubleclick.net^"}, "wmconvirus.narod.ru", false},
+{"sanity", []string{"||doubleclick.net^"}, "wmconvirus.narod.ru", false, NotFilteredNotFound},
-{"blocking", blockingRules, "example.org", true},
+{"blocking", blockingRules, "example.org", true, FilteredBlackList},
-{"blocking", blockingRules, "test.example.org", true},
+{"blocking", blockingRules, "test.example.org", true, FilteredBlackList},
-{"blocking", blockingRules, "test.test.example.org", true},
+{"blocking", blockingRules, "test.test.example.org", true, FilteredBlackList},
-{"blocking", blockingRules, "testexample.org", false},
+{"blocking", blockingRules, "testexample.org", false, NotFilteredNotFound},
-{"blocking", blockingRules, "onemoreexample.org", false},
+{"blocking", blockingRules, "onemoreexample.org", false, NotFilteredNotFound},
-{"whitelist", whitelistRules, "example.org", true},
+{"whitelist", whitelistRules, "example.org", true, FilteredBlackList},
-{"whitelist", whitelistRules, "test.example.org", false},
+{"whitelist", whitelistRules, "test.example.org", false, NotFilteredWhiteList},
-{"whitelist", whitelistRules, "test.test.example.org", false},
+{"whitelist", whitelistRules, "test.test.example.org", false, NotFilteredWhiteList},
-{"whitelist", whitelistRules, "testexample.org", false},
+{"whitelist", whitelistRules, "testexample.org", false, NotFilteredNotFound},
-{"whitelist", whitelistRules, "onemoreexample.org", false},
+{"whitelist", whitelistRules, "onemoreexample.org", false, NotFilteredNotFound},
-{"important", importantRules, "example.org", false},
+{"important", importantRules, "example.org", false, NotFilteredWhiteList},
-{"important", importantRules, "test.example.org", true},
+{"important", importantRules, "test.example.org", true, FilteredBlackList},
-{"important", importantRules, "test.test.example.org", true},
+{"important", importantRules, "test.test.example.org", true, FilteredBlackList},
-{"important", importantRules, "testexample.org", false},
+{"important", importantRules, "testexample.org", false, NotFilteredNotFound},
-{"important", importantRules, "onemoreexample.org", false},
+{"important", importantRules, "onemoreexample.org", false, NotFilteredNotFound},
-{"regex", regexRules, "example.org", true},
+{"regex", regexRules, "example.org", true, FilteredBlackList},
-{"regex", regexRules, "test.example.org", false},
+{"regex", regexRules, "test.example.org", false, NotFilteredWhiteList},
-{"regex", regexRules, "test.test.example.org", false},
+{"regex", regexRules, "test.test.example.org", false, NotFilteredWhiteList},
-{"regex", regexRules, "testexample.org", true},
+{"regex", regexRules, "testexample.org", true, FilteredBlackList},
-{"regex", regexRules, "onemoreexample.org", true},
+{"regex", regexRules, "onemoreexample.org", true, FilteredBlackList},
-{"mask", maskRules, "test.example.org", true},
+{"mask", maskRules, "test.example.org", true, FilteredBlackList},
-{"mask", maskRules, "test2.example.org", true},
+{"mask", maskRules, "test2.example.org", true, FilteredBlackList},
-{"mask", maskRules, "example.com", true},
+{"mask", maskRules, "example.com", true, FilteredBlackList},
-{"mask", maskRules, "exampleeee.com", true},
+{"mask", maskRules, "exampleeee.com", true, FilteredBlackList},
-{"mask", maskRules, "onemoreexamsite.com", true},
+{"mask", maskRules, "onemoreexamsite.com", true, FilteredBlackList},
-{"mask", maskRules, "example.org", false},
+{"mask", maskRules, "example.org", false, NotFilteredNotFound},
-{"mask", maskRules, "testexample.org", false},
+{"mask", maskRules, "testexample.org", false, NotFilteredNotFound},
-{"mask", maskRules, "example.co.uk", false},
+{"mask", maskRules, "example.co.uk", false, NotFilteredNotFound},
 }
 
 func TestMatching(t *testing.T) {

@@ -439,8 +440,11 @@ func TestMatching(t *testing.T) {
 if err != nil {
 t.Errorf("Error while matching host %s: %s", test.hostname, err)
 }
-if ret.IsFiltered != test.result {
+if ret.IsFiltered != test.isFiltered {
-t.Errorf("Hostname %s has wrong result (%v must be %v)", test.hostname, ret, test.result)
+t.Errorf("Hostname %s has wrong result (%v must be %v)", test.hostname, ret.IsFiltered, test.isFiltered)
+}
+if ret.Reason != test.reason {
+t.Errorf("Hostname %s has wrong reason (%v must be %v)", test.hostname, ret.Reason.String(), test.reason.String())
 }
 })
 }
|
39
helpers.go
39
helpers.go
@ -51,33 +51,14 @@ func ensureDELETE(handler func(http.ResponseWriter, *http.Request)) func(http.Re
|
|||||||
// --------------------------
|
// --------------------------
|
||||||
// helper functions for stats
|
// helper functions for stats
|
||||||
// --------------------------
|
// --------------------------
|
||||||
func computeRate(input []float64) []float64 {
|
func getReversedSlice(input [statsHistoryElements]float64, start int, end int) []float64 {
|
||||||
output := make([]float64, 0)
|
output := make([]float64, 0)
|
||||||
for i := len(input) - 2; i >= 0; i-- {
|
for i := start; i <= end; i++ {
|
||||||
value := input[i]
|
output = append([]float64{input[i]}, output...)
|
||||||
diff := value - input[i+1]
|
|
||||||
output = append([]float64{diff}, output...)
|
|
||||||
}
|
}
|
||||||
return output
|
return output
|
||||||
}
|
}
|
||||||
|
|
||||||
func generateMapFromSnap(snap statsSnapshot) map[string]interface{} {
|
|
||||||
var avgProcessingTime float64
|
|
||||||
if snap.processingTimeCount > 0 {
|
|
||||||
avgProcessingTime = snap.processingTimeSum / snap.processingTimeCount
|
|
||||||
}
|
|
||||||
|
|
||||||
result := map[string]interface{}{
|
|
||||||
"dns_queries": snap.totalRequests,
|
|
||||||
"blocked_filtering": snap.filteredLists,
|
|
||||||
"replaced_safebrowsing": snap.filteredSafebrowsing,
|
|
||||||
"replaced_safesearch": snap.filteredSafesearch,
|
|
||||||
"replaced_parental": snap.filteredParental,
|
|
||||||
"avg_processing_time": avgProcessingTime,
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func generateMapFromStats(stats *periodicStats, start int, end int) map[string]interface{} {
|
func generateMapFromStats(stats *periodicStats, start int, end int) map[string]interface{} {
|
||||||
// clamp
|
// clamp
|
||||||
start = clamp(start, 0, statsHistoryElements)
|
start = clamp(start, 0, statsHistoryElements)
|
||||||
@ -85,8 +66,8 @@ func generateMapFromStats(stats *periodicStats, start int, end int) map[string]i
|
|||||||
|
|
||||||
avgProcessingTime := make([]float64, 0)
|
avgProcessingTime := make([]float64, 0)
|
||||||
|
|
||||||
count := computeRate(stats.processingTimeCount[start:end])
|
count := getReversedSlice(stats.entries[processingTimeCount], start, end)
|
||||||
sum := computeRate(stats.processingTimeSum[start:end])
|
sum := getReversedSlice(stats.entries[processingTimeSum], start, end)
|
||||||
for i := 0; i < len(count); i++ {
|
for i := 0; i < len(count); i++ {
|
||||||
var avg float64
|
var avg float64
|
||||||
if count[i] != 0 {
|
if count[i] != 0 {
|
||||||
@ -97,11 +78,11 @@ func generateMapFromStats(stats *periodicStats, start int, end int) map[string]i
|
|||||||
}
|
}
|
||||||
|
|
||||||
result := map[string]interface{}{
|
result := map[string]interface{}{
|
||||||
"dns_queries": computeRate(stats.totalRequests[start:end]),
|
"dns_queries": getReversedSlice(stats.entries[totalRequests], start, end),
|
||||||
"blocked_filtering": computeRate(stats.filteredLists[start:end]),
|
"blocked_filtering": getReversedSlice(stats.entries[filteredLists], start, end),
|
||||||
"replaced_safebrowsing": computeRate(stats.filteredSafebrowsing[start:end]),
|
"replaced_safebrowsing": getReversedSlice(stats.entries[filteredSafebrowsing], start, end),
|
||||||
"replaced_safesearch": computeRate(stats.filteredSafesearch[start:end]),
|
"replaced_safesearch": getReversedSlice(stats.entries[filteredSafesearch], start, end),
|
||||||
"replaced_parental": computeRate(stats.filteredParental[start:end]),
|
"replaced_parental": getReversedSlice(stats.entries[filteredParental], start, end),
|
||||||
"avg_processing_time": avgProcessingTime,
|
"avg_processing_time": avgProcessingTime,
|
||||||
}
|
}
|
||||||
return result
|
return result
|
||||||
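getReversedSlice exists because the history arrays are stored newest-first (index 0 is the current period) while the HTTP API declares values oldest-to-newest, so the selected window has to be reversed on the way out. A minimal demonstration of the new helper from the diff; the sample history values are invented:

package main

import "fmt"

const statsHistoryElements = 60 + 1 // +1 for calculating delta

// getReversedSlice picks entries [start..end] from a newest-first history
// array and returns them oldest-first, as the stats API expects.
func getReversedSlice(input [statsHistoryElements]float64, start int, end int) []float64 {
	output := make([]float64, 0)
	for i := start; i <= end; i++ {
		output = append([]float64{input[i]}, output...)
	}
	return output
}

func main() {
	var history [statsHistoryElements]float64
	history[0], history[1], history[2] = 30, 20, 10 // 30 is the newest value
	fmt.Println(getReversedSlice(history, 0, 2))    // [10 20 30] -- oldest first
}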
|
200
stats.go
200
stats.go
@ -8,70 +8,50 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"regexp"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
type periodicStats struct {
|
|
||||||
totalRequests []float64
|
|
||||||
|
|
||||||
filteredTotal []float64
|
|
||||||
filteredLists []float64
|
|
||||||
filteredSafebrowsing []float64
|
|
||||||
filteredSafesearch []float64
|
|
||||||
filteredParental []float64
|
|
||||||
|
|
||||||
processingTimeSum []float64
|
|
||||||
processingTimeCount []float64
|
|
||||||
|
|
||||||
lastRotate time.Time // last time this data was rotated
|
|
||||||
}
|
|
||||||
|
|
||||||
type statsSnapshot struct {
|
|
||||||
totalRequests float64
|
|
||||||
|
|
||||||
filteredTotal float64
|
|
||||||
filteredLists float64
|
|
||||||
filteredSafebrowsing float64
|
|
||||||
filteredSafesearch float64
|
|
||||||
filteredParental float64
|
|
||||||
|
|
||||||
processingTimeSum float64
|
|
||||||
processingTimeCount float64
|
|
||||||
}
|
|
||||||
|
|
||||||
type statsCollection struct {
|
|
||||||
perSecond periodicStats
|
|
||||||
perMinute periodicStats
|
|
||||||
perHour periodicStats
|
|
||||||
perDay periodicStats
|
|
||||||
lastsnap statsSnapshot
|
|
||||||
}
|
|
||||||
|
|
||||||
var statistics statsCollection
|
|
||||||
|
|
||||||
var client = &http.Client{
|
var client = &http.Client{
|
||||||
Timeout: time.Second * 30,
|
Timeout: time.Second * 30,
|
||||||
}
|
}
|
||||||
|
|
||||||
const statsHistoryElements = 60 + 1 // +1 for calculating delta
|
// as seen over HTTP
|
||||||
|
type statsEntry map[string]float64
|
||||||
|
type statsEntries map[string][statsHistoryElements]float64
|
||||||
|
|
||||||
var requestCountTotalRegex = regexp.MustCompile(`^coredns_dns_request_count_total`)
|
const (
|
||||||
var requestDurationSecondsSum = regexp.MustCompile(`^coredns_dns_request_duration_seconds_sum`)
|
statsHistoryElements = 60 + 1 // +1 for calculating delta
|
||||||
var requestDurationSecondsCount = regexp.MustCompile(`^coredns_dns_request_duration_seconds_count`)
|
totalRequests = `coredns_dns_request_count_total`
|
||||||
|
filteredTotal = `coredns_dnsfilter_filtered_total`
|
||||||
|
filteredLists = `coredns_dnsfilter_filtered_lists_total`
|
||||||
|
filteredSafebrowsing = `coredns_dnsfilter_filtered_safebrowsing_total`
|
||||||
|
filteredSafesearch = `coredns_dnsfilter_safesearch_total`
|
||||||
|
filteredParental = `coredns_dnsfilter_filtered_parental_total`
|
||||||
|
processingTimeSum = `coredns_dns_request_duration_seconds_sum`
|
||||||
|
processingTimeCount = `coredns_dns_request_duration_seconds_count`
|
||||||
|
)
|
||||||
|
|
||||||
func initPeriodicStats(stats *periodicStats) {
|
type periodicStats struct {
|
||||||
stats.totalRequests = make([]float64, statsHistoryElements)
|
entries statsEntries
|
||||||
stats.filteredTotal = make([]float64, statsHistoryElements)
|
lastRotate time.Time // last time this data was rotated
|
||||||
stats.filteredLists = make([]float64, statsHistoryElements)
|
}
|
||||||
stats.filteredSafebrowsing = make([]float64, statsHistoryElements)
|
|
||||||
stats.filteredSafesearch = make([]float64, statsHistoryElements)
|
type stats struct {
|
||||||
stats.filteredParental = make([]float64, statsHistoryElements)
|
perSecond periodicStats
|
||||||
stats.processingTimeSum = make([]float64, statsHistoryElements)
|
perMinute periodicStats
|
||||||
stats.processingTimeCount = make([]float64, statsHistoryElements)
|
perHour periodicStats
|
||||||
|
perDay periodicStats
|
||||||
|
|
||||||
|
lastSeen statsEntry
|
||||||
|
}
|
||||||
|
|
||||||
|
var statistics stats
|
||||||
|
|
||||||
|
func initPeriodicStats(periodic *periodicStats) {
|
||||||
|
periodic.entries = statsEntries{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@ -106,37 +86,22 @@ func isConnRefused(err error) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func sliceRotate(slice *[]float64) {
|
func statsRotate(periodic *periodicStats, now time.Time) {
|
||||||
a := (*slice)[:len(*slice)-1]
|
for key, values := range periodic.entries {
|
||||||
*slice = append([]float64{0}, a...)
|
newValues := [statsHistoryElements]float64{}
|
||||||
|
for i := 1; i < len(values); i++ {
|
||||||
|
newValues[i] = values[i-1]
|
||||||
}
|
}
|
||||||
|
periodic.entries[key] = newValues
|
||||||
func statsRotate(stats *periodicStats, now time.Time) {
|
|
||||||
sliceRotate(&stats.totalRequests)
|
|
||||||
sliceRotate(&stats.filteredTotal)
|
|
||||||
sliceRotate(&stats.filteredLists)
|
|
||||||
sliceRotate(&stats.filteredSafebrowsing)
|
|
||||||
sliceRotate(&stats.filteredSafesearch)
|
|
||||||
sliceRotate(&stats.filteredParental)
|
|
||||||
sliceRotate(&stats.processingTimeSum)
|
|
||||||
sliceRotate(&stats.processingTimeCount)
|
|
||||||
stats.lastRotate = now
|
|
||||||
}
|
}
|
||||||
|
periodic.lastRotate = now
|
||||||
func handleValue(input string, target *float64) {
|
|
||||||
value, err := strconv.ParseFloat(input, 64)
|
|
||||||
if err != nil {
|
|
||||||
log.Println("Failed to parse number input:", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
*target = value
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// called every second, accumulates stats for each second, minute, hour and day
|
// called every second, accumulates stats for each second, minute, hour and day
|
||||||
func collectStats() {
|
func collectStats() {
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
// rotate each second
|
// rotate each second
|
||||||
// NOTE: since we are called every second, always rotate, otherwise aliasing problems cause the rotation to skip
|
// NOTE: since we are called every second, always rotate perSecond, otherwise aliasing problems cause the rotation to skip
|
||||||
if true {
|
if true {
|
||||||
statsRotate(&statistics.perSecond, now)
|
statsRotate(&statistics.perSecond, now)
|
||||||
}
|
}
|
||||||
@ -172,6 +137,8 @@ func collectStats() {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
entry := statsEntry{}
|
||||||
|
|
||||||
// handle body
|
// handle body
|
||||||
scanner := bufio.NewScanner(strings.NewReader(string(body)))
|
scanner := bufio.NewScanner(strings.NewReader(string(body)))
|
||||||
for scanner.Scan() {
|
for scanner.Scan() {
|
||||||
@ -181,38 +148,61 @@ func collectStats() {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
splitted := strings.Split(line, " ")
|
splitted := strings.Split(line, " ")
|
||||||
switch {
|
if len(splitted) < 2 {
|
||||||
case splitted[0] == "coredns_dnsfilter_filtered_total":
|
continue
|
||||||
handleValue(splitted[1], &statistics.lastsnap.filteredTotal)
|
|
||||||
case splitted[0] == "coredns_dnsfilter_filtered_lists_total":
|
|
||||||
handleValue(splitted[1], &statistics.lastsnap.filteredLists)
|
|
||||||
case splitted[0] == "coredns_dnsfilter_filtered_safebrowsing_total":
|
|
||||||
handleValue(splitted[1], &statistics.lastsnap.filteredSafebrowsing)
|
|
||||||
case splitted[0] == "coredns_dnsfilter_filtered_parental_total":
|
|
||||||
handleValue(splitted[1], &statistics.lastsnap.filteredParental)
|
|
||||||
case requestCountTotalRegex.MatchString(splitted[0]):
|
|
||||||
handleValue(splitted[1], &statistics.lastsnap.totalRequests)
|
|
||||||
case requestDurationSecondsSum.MatchString(splitted[0]):
|
|
||||||
handleValue(splitted[1], &statistics.lastsnap.processingTimeSum)
|
|
||||||
case requestDurationSecondsCount.MatchString(splitted[0]):
|
|
||||||
handleValue(splitted[1], &statistics.lastsnap.processingTimeCount)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// put the snap into per-second, per-minute, per-hour and per-day
|
value, err := strconv.ParseFloat(splitted[1], 64)
|
||||||
assignSnapToStats(&statistics.perSecond)
|
if err != nil {
|
||||||
assignSnapToStats(&statistics.perMinute)
|
log.Printf("Failed to parse number input %s: %s", splitted[1], err)
|
||||||
assignSnapToStats(&statistics.perHour)
|
continue
|
||||||
assignSnapToStats(&statistics.perDay)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func assignSnapToStats(stats *periodicStats) {
|
key := splitted[0]
|
||||||
stats.totalRequests[0] = statistics.lastsnap.totalRequests
|
index := strings.IndexByte(key, '{')
|
||||||
stats.filteredTotal[0] = statistics.lastsnap.filteredTotal
|
if index >= 0 {
|
||||||
stats.filteredLists[0] = statistics.lastsnap.filteredLists
|
key = key[:index]
|
||||||
stats.filteredSafebrowsing[0] = statistics.lastsnap.filteredSafebrowsing
|
}
|
||||||
stats.filteredSafesearch[0] = statistics.lastsnap.filteredSafesearch
|
|
||||||
stats.filteredParental[0] = statistics.lastsnap.filteredParental
|
// empty keys are not ok
|
||||||
stats.processingTimeSum[0] = statistics.lastsnap.processingTimeSum
|
if key == "" {
|
||||||
stats.processingTimeCount[0] = statistics.lastsnap.processingTimeCount
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
got, ok := entry[key]
|
||||||
|
if ok {
|
||||||
|
value += got
|
||||||
|
}
|
||||||
|
entry[key] = value
|
||||||
|
}
|
||||||
|
|
||||||
|
// calculate delta
|
||||||
|
delta := calcDelta(entry, statistics.lastSeen)
|
||||||
|
|
||||||
|
// apply delta to second/minute/hour/day
|
||||||
|
applyDelta(&statistics.perSecond, delta)
|
||||||
|
applyDelta(&statistics.perMinute, delta)
|
||||||
|
applyDelta(&statistics.perHour, delta)
|
||||||
|
applyDelta(&statistics.perDay, delta)
|
||||||
|
|
||||||
|
// save last seen
|
||||||
|
statistics.lastSeen = entry
|
||||||
|
}
|
||||||
|
|
||||||
|
func calcDelta(current, seen statsEntry) statsEntry {
|
||||||
|
delta := statsEntry{}
|
||||||
|
for key, currentValue := range current {
|
||||||
|
seenValue := seen[key]
|
||||||
|
deltaValue := currentValue - seenValue
|
||||||
|
delta[key] = deltaValue
|
||||||
|
}
|
||||||
|
return delta
|
||||||
|
}
|
||||||
|
|
||||||
|
func applyDelta(current *periodicStats, delta statsEntry) {
|
||||||
|
for key, deltaValue := range delta {
|
||||||
|
currentValues := current.entries[key]
|
||||||
|
currentValues[0] += deltaValue
|
||||||
|
current.entries[key] = currentValues
|
||||||
|
}
|
||||||
}
|
}
|
||||||
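The rewritten stats.go no longer copies a snapshot into slot 0 of every slice; it scrapes the Prometheus counters, computes the delta against the previously seen values, and adds that delta to slot 0 of each period's ring. A condensed sketch of that accumulation step; the types and function names (statsEntry, periodicStats, calcDelta, applyDelta) follow the diff, while the sample counter values are invented:

package main

import "fmt"

const statsHistoryElements = 60 + 1

type statsEntry map[string]float64
type statsEntries map[string][statsHistoryElements]float64

type periodicStats struct {
	entries statsEntries
}

// calcDelta returns how much each counter grew since the last scrape.
func calcDelta(current, seen statsEntry) statsEntry {
	delta := statsEntry{}
	for key, currentValue := range current {
		delta[key] = currentValue - seen[key]
	}
	return delta
}

// applyDelta adds the growth to slot 0 (the current period) of each ring.
func applyDelta(p *periodicStats, delta statsEntry) {
	for key, deltaValue := range delta {
		values := p.entries[key]
		values[0] += deltaValue
		p.entries[key] = values
	}
}

func main() {
	perMinute := periodicStats{entries: statsEntries{}}
	lastSeen := statsEntry{"coredns_dns_request_count_total": 100}
	current := statsEntry{"coredns_dns_request_count_total": 115}

	applyDelta(&perMinute, calcDelta(current, lastSeen))
	fmt.Println(perMinute.entries["coredns_dns_request_count_total"][0]) // 15
}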