package dnsfilter

import (
	"bufio"
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/binary"
	"encoding/gob"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/joomcode/errorx"

	"github.com/AdguardTeam/dnsproxy/upstream"
	"github.com/AdguardTeam/golibs/cache"
	"github.com/AdguardTeam/golibs/log"
	"github.com/AdguardTeam/urlfilter"
	"github.com/bluele/gcache"
	"github.com/miekg/dns"
	"golang.org/x/net/publicsuffix"
)

const defaultHTTPTimeout = 5 * time.Minute
const defaultHTTPMaxIdleConnections = 100

const defaultSafebrowsingServer = "sb.adtidy.org"
const defaultSafebrowsingURL = "%s://%s/safebrowsing-lookup-hash.html?prefixes=%s"
const defaultParentalServer = "pctrl.adguard.com"
const defaultParentalURL = "%s://%s/check-parental-control-hash?prefixes=%s&sensitivity=%d"
const defaultParentalSensitivity = 13 // use "TEEN" by default

const maxDialCacheSize = 2 // the number of host names for safebrowsing and parental control
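
// Illustration only (the prefix values below are made up, not a specification of
// the services): with the default servers and HTTPS, the lookup URLs produced from
// the format strings above look roughly like
//
//	https://sb.adtidy.org/safebrowsing-lookup-hash.html?prefixes=0A1B2C3D/4E5F6071/
//	https://pctrl.adguard.com/check-parental-control-hash?prefixes=0A1B2C3D/&sensitivity=13
//
// where "prefixes" is the list of 4-byte hash prefixes built by hostnameToHashParam().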

// ServiceEntry - blocked service array element
type ServiceEntry struct {
	Name  string
	Rules []*urlfilter.NetworkRule
}

// RequestFilteringSettings holds custom filtering settings for a single request
type RequestFilteringSettings struct {
	FilteringEnabled    bool
	SafeSearchEnabled   bool
	SafeBrowsingEnabled bool
	ParentalEnabled     bool
	ServicesRules       []ServiceEntry
}

// RewriteEntry is a rewrite array element
type RewriteEntry struct {
	Domain string `yaml:"domain"`
	Answer string `yaml:"answer"` // IP address or canonical name
}
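
// For reference, a sketch of how rewrite entries appear in the YAML configuration
// (field names follow the yaml tags above; the domains and addresses are examples):
//
//	rewrites:
//	- domain: example.org
//	  answer: 127.0.0.1
//	- domain: alias.example.org
//	  answer: example.org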

// Config allows you to configure DNS filtering with New() or just change variables directly.
type Config struct {
	ParentalSensitivity int    `yaml:"parental_sensitivity"` // must be either 3, 10, 13 or 17
	ParentalEnabled     bool   `yaml:"parental_enabled"`
	UsePlainHTTP        bool   `yaml:"-"` // use plain HTTP for requests to parental and safe browsing servers
	SafeSearchEnabled   bool   `yaml:"safesearch_enabled"`
	SafeBrowsingEnabled bool   `yaml:"safebrowsing_enabled"`
	ResolverAddress     string `yaml:"-"` // DNS server address

	SafeBrowsingCacheSize uint `yaml:"safebrowsing_cache_size"` // (in bytes)
	SafeSearchCacheSize   uint `yaml:"safesearch_cache_size"`   // (in bytes)
	ParentalCacheSize     uint `yaml:"parental_cache_size"`     // (in bytes)
	CacheTime             uint `yaml:"cache_time"`              // Element's TTL (in minutes)

	Rewrites []RewriteEntry `yaml:"rewrites"`

	// Called when the configuration is changed by HTTP request
	ConfigModified func() `yaml:"-"`

	// Register an HTTP handler
	HTTPRegister func(string, string, func(http.ResponseWriter, *http.Request)) `yaml:"-"`
}

// LookupStats stores stats collected during safebrowsing or parental checks
type LookupStats struct {
	Requests   uint64 // number of HTTP requests that were sent
	CacheHits  uint64 // number of lookups that didn't need HTTP requests
	Pending    int64  // number of currently pending HTTP requests
	PendingMax int64  // maximum number of pending HTTP requests
}

// Stats stores LookupStats for safebrowsing, parental and safesearch
type Stats struct {
	Safebrowsing LookupStats
	Parental     LookupStats
	Safesearch   LookupStats
}

// Parameters to pass to filters-initializer goroutine
type filtersInitializerParams struct {
	filters map[int]string
}

// Dnsfilter holds added rules and performs hostname matches against the rules
type Dnsfilter struct {
	rulesStorage    *urlfilter.RuleStorage
	filteringEngine *urlfilter.DNSEngine
	engineLock      sync.RWMutex

	// HTTP lookups for safebrowsing and parental
	client    http.Client     // handle for http client -- single instance as recommended by docs
	transport *http.Transport // handle for http transport used by http client

	parentalServer     string // access via methods
	safeBrowsingServer string // access via methods

	Config   // for direct access by library users, even a = assignment
	confLock sync.RWMutex

	// Channel for passing data to filters-initializer goroutine
	filtersInitializerChan chan filtersInitializerParams
	filtersInitializerLock sync.Mutex
}

// Filter represents a filter list
type Filter struct {
	ID       int64  // auto-assigned when filter is added (see nextFilterID)
	Data     []byte `yaml:"-"` // List of rules divided by '\n'
	FilePath string `yaml:"-"` // Path to a filtering rules file
}

// Reason holds an enum detailing why a request was filtered or not filtered
type Reason int

const (
	// reasons for not filtering

	// NotFilteredNotFound - host was not found in any checks, default value for result
	NotFilteredNotFound Reason = iota
	// NotFilteredWhiteList - the host is explicitly whitelisted
	NotFilteredWhiteList
	// NotFilteredError - there was a transient error during the check
	NotFilteredError

	// reasons for filtering

	// FilteredBlackList - the host was matched to be an advertising host
	FilteredBlackList
	// FilteredSafeBrowsing - the host was matched to be malicious/phishing
	FilteredSafeBrowsing
	// FilteredParental - the host was matched to be outside of parental control settings
	FilteredParental
	// FilteredInvalid - the request was invalid and was not processed
	FilteredInvalid
	// FilteredSafeSearch - the host was replaced with its safesearch variant
	FilteredSafeSearch
	// FilteredBlockedService - the host is blocked by "blocked services" settings
	FilteredBlockedService

	// ReasonRewrite - rewrite rule was applied
	ReasonRewrite
)

var reasonNames = []string{
	"NotFilteredNotFound",
	"NotFilteredWhiteList",
	"NotFilteredError",

	"FilteredBlackList",
	"FilteredSafeBrowsing",
	"FilteredParental",
	"FilteredInvalid",
	"FilteredSafeSearch",
	"FilteredBlockedService",

	"Rewrite",
}

func (r Reason) String() string {
	if uint(r) >= uint(len(reasonNames)) {
		return ""
	}
	return reasonNames[r]
}

// GetConfig - get configuration
func (d *Dnsfilter) GetConfig() RequestFilteringSettings {
	c := RequestFilteringSettings{}
	// d.confLock.RLock()
	c.SafeSearchEnabled = d.Config.SafeSearchEnabled
	c.SafeBrowsingEnabled = d.Config.SafeBrowsingEnabled
	c.ParentalEnabled = d.Config.ParentalEnabled
	// d.confLock.RUnlock()
	return c
}

// WriteDiskConfig - write configuration
func (d *Dnsfilter) WriteDiskConfig(c *Config) {
	*c = d.Config
}

// SetFilters - set new filters (synchronously or asynchronously)
// When filters are set asynchronously, the old filters continue working until the new filters are ready.
// In this case the caller must ensure that the old filter files are intact.
func (d *Dnsfilter) SetFilters(filters map[int]string, async bool) error {
	if async {
		params := filtersInitializerParams{
			filters: filters,
		}

		d.filtersInitializerLock.Lock() // prevent multiple writers from adding more than 1 task
		// remove all pending tasks
		stop := false
		for !stop {
			select {
			case <-d.filtersInitializerChan:
				//
			default:
				stop = true
			}
		}

		d.filtersInitializerChan <- params
		d.filtersInitializerLock.Unlock()
		return nil
	}

	err := d.initFiltering(filters)
	if err != nil {
		log.Error("Can't initialize filtering subsystem: %s", err)
		return err
	}

	return nil
}

// Starts initializing new filters upon a signal from the channel
func (d *Dnsfilter) filtersInitializer() {
	for {
		params := <-d.filtersInitializerChan
		err := d.initFiltering(params.filters)
		if err != nil {
			log.Error("Can't initialize filtering subsystem: %s", err)
			continue
		}
	}
}

// Close - close the object
func (d *Dnsfilter) Close() {
	if d != nil && d.transport != nil {
		d.transport.CloseIdleConnections()
	}
	if d.rulesStorage != nil {
		d.rulesStorage.Close()
	}
}

type dnsFilterContext struct {
	stats             Stats
	dialCache         gcache.Cache // "host" -> "IP" cache for safebrowsing and parental control servers
	safebrowsingCache cache.Cache
	parentalCache     cache.Cache
	safeSearchCache   cache.Cache
}

var gctx dnsFilterContext // global dnsfilter context

// Result holds state of hostname check
type Result struct {
	IsFiltered bool   `json:",omitempty"` // True if the host name is filtered
	Reason     Reason `json:",omitempty"` // Reason for blocking / unblocking
	Rule       string `json:",omitempty"` // Original rule text
	IP         net.IP `json:",omitempty"` // Not nil only in the case of a hosts file syntax
	FilterID   int64  `json:",omitempty"` // Filter ID the rule belongs to

	// for ReasonRewrite:
	CanonName string   `json:",omitempty"` // CNAME value
	IPList    []net.IP `json:",omitempty"` // list of IP addresses

	// for FilteredBlockedService:
	ServiceName string `json:",omitempty"` // Name of the blocked service
}

// Matched can be used to see if any match at all was found, no matter filtered or not
func (r Reason) Matched() bool {
	return r != NotFilteredNotFound
}

// CheckHost tries to match the host against filtering rules, then safebrowsing and parental if they are enabled
func (d *Dnsfilter) CheckHost(host string, qtype uint16, setts *RequestFilteringSettings) (Result, error) {
	// sometimes DNS clients will try to resolve ".", which is a request to get root servers
	if host == "" {
		return Result{Reason: NotFilteredNotFound}, nil
	}
	host = strings.ToLower(host)
	// prevent recursion
	if host == d.parentalServer || host == d.safeBrowsingServer {
		return Result{}, nil
	}

	var result Result
	var err error

	result = d.processRewrites(host, qtype)
	if result.Reason == ReasonRewrite {
		return result, nil
	}

	// try filter lists first
	if setts.FilteringEnabled {
		result, err = d.matchHost(host, qtype)
		if err != nil {
			return result, err
		}
		if result.Reason.Matched() {
			return result, nil
		}
	}

	if len(setts.ServicesRules) != 0 {
		result = matchBlockedServicesRules(host, setts.ServicesRules)
		if result.Reason.Matched() {
			return result, nil
		}
	}

	// check safeSearch if no match
	if setts.SafeSearchEnabled {
		result, err = d.checkSafeSearch(host)
		if err != nil {
			log.Printf("Failed to do safesearch HTTP lookup, ignoring check: %v", err)
			return Result{}, nil
		}

		if result.Reason.Matched() {
			return result, nil
		}
	}

	// check safebrowsing if no match
	if setts.SafeBrowsingEnabled {
		result, err = d.checkSafeBrowsing(host)
		if err != nil {
			// failed to do HTTP lookup -- treat it as if we got empty response, but don't save cache
			log.Printf("Failed to do safebrowsing HTTP lookup, ignoring check: %v", err)
			return Result{}, nil
		}
		if result.Reason.Matched() {
			return result, nil
		}
	}

	// check parental if no match
	if setts.ParentalEnabled {
		result, err = d.checkParental(host)
		if err != nil {
			// failed to do HTTP lookup -- treat it as if we got empty response, but don't save cache
			log.Printf("Failed to do parental HTTP lookup, ignoring check: %v", err)
			return Result{}, nil
		}
		if result.Reason.Matched() {
			return result, nil
		}
	}

	// nothing matched, return nothing
	return Result{}, nil
}

// Process rewrites table
// . Find CNAME for a domain name
// . if found, set domain name to canonical name
// . Find A or AAAA record for a domain name
// . if found, return IP addresses
func (d *Dnsfilter) processRewrites(host string, qtype uint16) Result {
	var res Result

	d.confLock.RLock()
	defer d.confLock.RUnlock()

	for _, r := range d.Rewrites {
		if r.Domain != host {
			continue
		}

		ip := net.ParseIP(r.Answer)
		if ip == nil {
			log.Debug("Rewrite: CNAME for %s is %s", host, r.Answer)
			host = r.Answer
			res.CanonName = r.Answer
			res.Reason = ReasonRewrite
			break
		}
	}

	for _, r := range d.Rewrites {
		if r.Domain != host {
			continue
		}

		ip := net.ParseIP(r.Answer)
		if ip == nil {
			continue
		}
		ip4 := ip.To4()

		if qtype == dns.TypeA && ip4 != nil {
			res.IPList = append(res.IPList, ip4)
			log.Debug("Rewrite: A for %s is %s", host, ip4)

		} else if qtype == dns.TypeAAAA && ip4 == nil {
			res.IPList = append(res.IPList, ip)
			log.Debug("Rewrite: AAAA for %s is %s", host, ip)
		}
	}

	if len(res.IPList) != 0 {
		res.Reason = ReasonRewrite
	}

	return res
}
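
// matchBlockedServicesRules checks the host against the network rules of every
// blocked-service entry and returns a FilteredBlockedService result for the first
// rule that matches, or an empty Result if nothing matches.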
func matchBlockedServicesRules(host string, svcs []ServiceEntry) Result {
	req := urlfilter.NewRequestForHostname(host)
	res := Result{}

	for _, s := range svcs {
		for _, rule := range s.Rules {
			if rule.Match(req) {
				res.Reason = FilteredBlockedService
				res.IsFiltered = true
				res.ServiceName = s.Name
				res.Rule = rule.Text()
				log.Debug("Blocked Services: matched rule: %s  host: %s  service: %s",
					res.Rule, host, s.Name)
				return res
			}
		}
	}
	return res
}

// Cache entry format:
//
//	expire  [4]byte  // UNIX time (seconds) when the entry expires, big-endian
//	res     Result   // gob-encoded Result
func (d *Dnsfilter) setCacheResult(cache cache.Cache, host string, res Result) {
	var buf bytes.Buffer

	expire := uint(time.Now().Unix()) + d.Config.CacheTime*60
	exp := make([]byte, 4)
	binary.BigEndian.PutUint32(exp, uint32(expire))
	_, _ = buf.Write(exp)

	enc := gob.NewEncoder(&buf)
	err := enc.Encode(res)
	if err != nil {
		log.Error("gob.Encode(): %s", err)
		return
	}
	_ = cache.Set([]byte(host), buf.Bytes())
	log.Debug("Stored in cache %p: %s", cache, host)
}
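
// getCachedResult returns the cached Result for the host, or false if the entry
// is missing or its expiration timestamp (the first 4 bytes) has already passed.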
func getCachedResult(cache cache.Cache, host string) (Result, bool) {
	data := cache.Get([]byte(host))
	if data == nil {
		return Result{}, false
	}

	exp := int(binary.BigEndian.Uint32(data[:4]))
	if exp <= int(time.Now().Unix()) {
		cache.Del([]byte(host))
		return Result{}, false
	}

	var buf bytes.Buffer
	buf.Write(data[4:])
	dec := gob.NewDecoder(&buf)
	r := Result{}
	err := dec.Decode(&r)
	if err != nil {
		log.Debug("gob.Decode(): %s", err)
		return Result{}, false
	}

	return r, true
}

// hostnameToHashParam hashes the host name and each of its parent domains (up to,
// but not including, the effective TLD) and builds the "prefixes" URL parameter
// from the first 4 bytes of every hash.
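// Illustrative example (hash values are made up): for "www.example.org" the hashes
// of "www.example.org" and "example.org" are computed ("org" is the public suffix
// and is skipped; with addslash a '/' is appended to each name before hashing), so
// the returned parameter looks like "0A1B2C3D/4E5F6071/".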
func hostnameToHashParam(host string, addslash bool) (string, map[string]bool) {
	var hashparam bytes.Buffer
	hashes := map[string]bool{}
	tld, icann := publicsuffix.PublicSuffix(host)
	if !icann {
		// private suffixes like cloudfront.net
		tld = ""
	}
	curhost := host
	for {
		if curhost == "" {
			// we've reached end of string
			break
		}
		if tld != "" && curhost == tld {
			// we've reached the TLD, don't hash it
			break
		}
		tohash := []byte(curhost)
		if addslash {
			tohash = append(tohash, '/')
		}
		sum := sha256.Sum256(tohash)
		hexhash := fmt.Sprintf("%X", sum)
		hashes[hexhash] = true
		hashparam.WriteString(fmt.Sprintf("%02X%02X%02X%02X/", sum[0], sum[1], sum[2], sum[3]))
		pos := strings.IndexByte(curhost, byte('.'))
		if pos < 0 {
			break
		}
		curhost = curhost[pos+1:]
	}
	return hashparam.String(), hashes
}
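
// checkSafeSearch returns a safe-search replacement IP for the host if safe search
// is supported for it; results are cached in gctx.safeSearchCache.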
func (d *Dnsfilter) checkSafeSearch(host string) (Result, error) {
	if log.GetLevel() >= log.DEBUG {
		timer := log.StartTimer()
		defer timer.LogElapsed("SafeSearch HTTP lookup for %s", host)
	}

	// Check cache. Return cached result if it was found
	cachedValue, isFound := getCachedResult(gctx.safeSearchCache, host)
	if isFound {
		atomic.AddUint64(&gctx.stats.Safesearch.CacheHits, 1)
		log.Tracef("%s: found in SafeSearch cache", host)
		return cachedValue, nil
	}

	safeHost, ok := d.SafeSearchDomain(host)
	if !ok {
		return Result{}, nil
	}

	res := Result{IsFiltered: true, Reason: FilteredSafeSearch}
	if ip := net.ParseIP(safeHost); ip != nil {
		res.IP = ip
		d.setCacheResult(gctx.safeSearchCache, host, res)
		return res, nil
	}

	// TODO this address should be resolved with upstream that was configured in dnsforward
	addrs, err := net.LookupIP(safeHost)
	if err != nil {
		log.Tracef("SafeSearchDomain for %s was found but lookup for %s failed: %s", host, safeHost, err)
		return Result{}, err
	}

	for _, i := range addrs {
		if ipv4 := i.To4(); ipv4 != nil {
			res.IP = ipv4
			break
		}
	}

	if len(res.IP) == 0 {
		return Result{}, fmt.Errorf("no ipv4 addresses in safe search response for %s", safeHost)
	}

	// Cache result
	d.setCacheResult(gctx.safeSearchCache, host, res)
	return res, nil
}
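
// checkSafeBrowsing queries the safe browsing service with hashed host name
// prefixes and reports whether the host is known to be malicious or phishing.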
func (d *Dnsfilter) checkSafeBrowsing(host string) (Result, error) {
	if log.GetLevel() >= log.DEBUG {
		timer := log.StartTimer()
		defer timer.LogElapsed("SafeBrowsing HTTP lookup for %s", host)
	}

	format := func(hashparam string) string {
		schema := "https"
		if d.UsePlainHTTP {
			schema = "http"
		}
		url := fmt.Sprintf(defaultSafebrowsingURL, schema, d.safeBrowsingServer, hashparam)
		return url
	}
	handleBody := func(body []byte, hashes map[string]bool) (Result, error) {
		result := Result{}
		scanner := bufio.NewScanner(strings.NewReader(string(body)))
		for scanner.Scan() {
			line := scanner.Text()
			splitted := strings.Split(line, ":")
			if len(splitted) < 3 {
				continue
			}
			hash := splitted[2]
			if _, ok := hashes[hash]; ok {
				// it's in the hash
				result.IsFiltered = true
				result.Reason = FilteredSafeBrowsing
				result.Rule = splitted[0]
				break
			}
		}

		if err := scanner.Err(); err != nil {
			// error, don't save cache
			return Result{}, err
		}
		return result, nil
	}

	// check cache
	cachedValue, isFound := getCachedResult(gctx.safebrowsingCache, host)
	if isFound {
		atomic.AddUint64(&gctx.stats.Safebrowsing.CacheHits, 1)
		log.Tracef("%s: found in the lookup cache %p", host, gctx.safebrowsingCache)
		return cachedValue, nil
	}

	result, err := d.lookupCommon(host, &gctx.stats.Safebrowsing, true, format, handleBody)

	if err == nil {
		d.setCacheResult(gctx.safebrowsingCache, host, result)
	}

	return result, err
}
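
// checkParental queries the parental control service with hashed host name
// prefixes, using the configured sensitivity level.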
func (d *Dnsfilter) checkParental(host string) (Result, error) {
	if log.GetLevel() >= log.DEBUG {
		timer := log.StartTimer()
		defer timer.LogElapsed("Parental HTTP lookup for %s", host)
	}

	format := func(hashparam string) string {
		schema := "https"
		if d.UsePlainHTTP {
			schema = "http"
		}
		sensitivity := d.ParentalSensitivity
		if sensitivity == 0 {
			sensitivity = defaultParentalSensitivity
		}
		url := fmt.Sprintf(defaultParentalURL, schema, d.parentalServer, hashparam, sensitivity)
		return url
	}
	handleBody := func(body []byte, hashes map[string]bool) (Result, error) {
		// parse json
		var m []struct {
			Blocked   bool   `json:"blocked"`
			ClientTTL int    `json:"clientTtl"`
			Reason    string `json:"reason"`
			Hash      string `json:"hash"`
		}
		err := json.Unmarshal(body, &m)
		if err != nil {
			// error, don't save cache
			log.Printf("Couldn't parse json '%s': %s", body, err)
			return Result{}, err
		}

		result := Result{}

		for i := range m {
			if !hashes[m[i].Hash] {
				continue
			}
			if m[i].Blocked {
				result.IsFiltered = true
				result.Reason = FilteredParental
				result.Rule = fmt.Sprintf("parental %s", m[i].Reason)
				break
			}
		}
		return result, nil
	}

	// check cache
	cachedValue, isFound := getCachedResult(gctx.parentalCache, host)
	if isFound {
		atomic.AddUint64(&gctx.stats.Parental.CacheHits, 1)
		log.Tracef("%s: found in the lookup cache %p", host, gctx.parentalCache)
		return cachedValue, nil
	}

	result, err := d.lookupCommon(host, &gctx.stats.Parental, false, format, handleBody)

	if err == nil {
		d.setCacheResult(gctx.parentalCache, host, result)
	}

	return result, err
}

type formatHandler func(hashparam string) string
type bodyHandler func(body []byte, hashes map[string]bool) (Result, error)

// real implementation of lookup/check
func (d *Dnsfilter) lookupCommon(host string, lookupstats *LookupStats, hashparamNeedSlash bool, format formatHandler, handleBody bodyHandler) (Result, error) {
	// convert hostname to hash parameters
	hashparam, hashes := hostnameToHashParam(host, hashparamNeedSlash)

	// format URL with our hashes
	url := format(hashparam)

	// do HTTP request
	atomic.AddUint64(&lookupstats.Requests, 1)
	atomic.AddInt64(&lookupstats.Pending, 1)
	updateMax(&lookupstats.Pending, &lookupstats.PendingMax)
	resp, err := d.client.Get(url)
	atomic.AddInt64(&lookupstats.Pending, -1)
	if resp != nil && resp.Body != nil {
		defer resp.Body.Close()
	}
	if err != nil {
		// error, don't save cache
		return Result{}, err
	}

	// get body text
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// error, don't save cache
		return Result{}, err
	}

	// handle status code
	switch {
	case resp.StatusCode == 204:
		// empty result, save cache
		return Result{}, nil
	case resp.StatusCode != 200:
		return Result{}, fmt.Errorf("HTTP status code: %d", resp.StatusCode)
	}

	result, err := handleBody(body, hashes)
	if err != nil {
		return Result{}, err
	}

	return result, nil
}

//
// Adding rule and matching against the rules
//

// Return TRUE if file exists
func fileExists(fn string) bool {
	_, err := os.Stat(fn)
	if err != nil {
		return false
	}
	return true
}

// Initialize urlfilter objects
func (d *Dnsfilter) initFiltering(filters map[int]string) error {
	listArray := []urlfilter.RuleList{}
	for id, dataOrFilePath := range filters {
		var list urlfilter.RuleList

		if id == 0 {
			list = &urlfilter.StringRuleList{
				ID:             0,
				RulesText:      dataOrFilePath,
				IgnoreCosmetic: true,
			}

		} else if !fileExists(dataOrFilePath) {
			list = &urlfilter.StringRuleList{
				ID:             id,
				IgnoreCosmetic: true,
			}

		} else if runtime.GOOS == "windows" {
			// On Windows we don't pass a file to urlfilter because
			// it's difficult to update this file while it's being used.
			data, err := ioutil.ReadFile(dataOrFilePath)
			if err != nil {
				return fmt.Errorf("ioutil.ReadFile(): %s: %s", dataOrFilePath, err)
			}
			list = &urlfilter.StringRuleList{
				ID:             id,
				RulesText:      string(data),
				IgnoreCosmetic: true,
			}

		} else {
			var err error
			list, err = urlfilter.NewFileRuleList(id, dataOrFilePath, true)
			if err != nil {
				return fmt.Errorf("urlfilter.NewFileRuleList(): %s: %s", dataOrFilePath, err)
			}
		}
		listArray = append(listArray, list)
	}

	rulesStorage, err := urlfilter.NewRuleStorage(listArray)
	if err != nil {
		return fmt.Errorf("urlfilter.NewRuleStorage(): %s", err)
	}
	filteringEngine := urlfilter.NewDNSEngine(rulesStorage)

	d.engineLock.Lock()
	if d.rulesStorage != nil {
		d.rulesStorage.Close()
	}
	d.rulesStorage = rulesStorage
	d.filteringEngine = filteringEngine
	d.engineLock.Unlock()
	log.Debug("initialized filtering engine")

	return nil
}
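
// A note on hosts-file style rules (behavior implemented below): a rule like
// "0.0.0.0 example.org" produces IP 0.0.0.0 for A queries and, for AAAA queries,
// the IPv6 unspecified address "::" so that the client gets a consistent block.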
// matchHost is a low-level way to check only if hostname is filtered by rules, skipping expensive safebrowsing and parental lookups
func (d *Dnsfilter) matchHost(host string, qtype uint16) (Result, error) {
	d.engineLock.RLock()
	defer d.engineLock.RUnlock()
	if d.filteringEngine == nil {
		return Result{}, nil
	}

	rules, ok := d.filteringEngine.Match(host)
	if !ok {
		return Result{}, nil
	}

	log.Tracef("%d rules matched for host '%s'", len(rules), host)

	for _, rule := range rules {

		log.Tracef("Found rule for host '%s': '%s'  list_id: %d",
			host, rule.Text(), rule.GetFilterListID())

		res := Result{}
		res.Reason = FilteredBlackList
		res.IsFiltered = true
		res.FilterID = int64(rule.GetFilterListID())
		res.Rule = rule.Text()

		if netRule, ok := rule.(*urlfilter.NetworkRule); ok {

			if netRule.Whitelist {
				res.Reason = NotFilteredWhiteList
				res.IsFiltered = false
			}
			return res, nil

		} else if hostRule, ok := rule.(*urlfilter.HostRule); ok {

			if qtype == dns.TypeA && hostRule.IP.To4() != nil {
				// either IPv4 or IPv4-mapped IPv6 address
				res.IP = hostRule.IP.To4()
				return res, nil

			} else if qtype == dns.TypeAAAA {
				ip4 := hostRule.IP.To4()
				if ip4 == nil {
					res.IP = hostRule.IP
					return res, nil
				}
				if bytes.Equal(ip4, []byte{0, 0, 0, 0}) {
					// send IP="::" response for a rule "0.0.0.0 blockdomain"
					res.IP = net.IPv6zero
					return res, nil
				}
			}
			continue

		} else {
			log.Tracef("Rule type is unsupported: '%s'  list_id: %d",
				rule.Text(), rule.GetFilterListID())
		}
	}

	return Result{}, nil
}

//
// lifecycle helper functions
//

// Return TRUE if this host's IP should be cached
func (d *Dnsfilter) shouldBeInDialCache(host string) bool {
	return host == d.safeBrowsingServer ||
		host == d.parentalServer
}

// Search for an IP address by host name
func searchInDialCache(host string) string {
	rawValue, err := gctx.dialCache.Get(host)
	if err != nil {
		return ""
	}

	ip, _ := rawValue.(string)
	log.Debug("Found in cache: %s -> %s", host, ip)
	return ip
}

// Add "hostname" -> "IP address" entry to cache
func addToDialCache(host, ip string) {
	err := gctx.dialCache.Set(host, ip)
	if err != nil {
		log.Debug("dialCache.Set: %s", err)
	}
	log.Debug("Added to cache: %s -> %s", host, ip)
}

type dialFunctionType func(ctx context.Context, network, addr string) (net.Conn, error)

// Connect to a remote server resolving hostname using our own DNS server
func (d *Dnsfilter) createCustomDialContext(resolverAddr string) dialFunctionType {
	return func(ctx context.Context, network, addr string) (net.Conn, error) {
		log.Tracef("network:%v  addr:%v", network, addr)

		host, port, err := net.SplitHostPort(addr)
		if err != nil {
			return nil, err
		}

		dialer := &net.Dialer{
			Timeout: time.Minute * 5,
		}

		if net.ParseIP(host) != nil {
			con, err := dialer.DialContext(ctx, network, addr)
			return con, err
		}

		cache := d.shouldBeInDialCache(host)
		if cache {
			ip := searchInDialCache(host)
			if len(ip) != 0 {
				addr = fmt.Sprintf("%s:%s", ip, port)
				return dialer.DialContext(ctx, network, addr)
			}
		}

		r := upstream.NewResolver(resolverAddr, 30*time.Second)
		addrs, e := r.LookupIPAddr(ctx, host)
		log.Tracef("LookupIPAddr: %s: %v", host, addrs)
		if e != nil {
			return nil, e
		}

		if len(addrs) == 0 {
			return nil, fmt.Errorf("couldn't lookup host: %s", host)
		}

		var dialErrs []error
		for _, a := range addrs {
			addr = fmt.Sprintf("%s:%s", a.String(), port)
			con, err := dialer.DialContext(ctx, network, addr)
			if err != nil {
				dialErrs = append(dialErrs, err)
				continue
			}

			if cache {
				addToDialCache(host, a.String())
			}

			return con, err
		}
		return nil, errorx.DecorateMany(fmt.Sprintf("couldn't dial to %s", addr), dialErrs...)
	}
}

// New creates a properly initialized DNS filter that is ready to be used
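//
// A minimal usage sketch (illustrative only; error handling and most options omitted):
//
//	f := New(&Config{SafeBrowsingEnabled: true}, nil)
//	setts := &RequestFilteringSettings{FilteringEnabled: true, SafeBrowsingEnabled: true}
//	res, _ := f.CheckHost("example.org", dns.TypeA, setts)
//	_ = res.IsFiltered
//	f.Close()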
func New(c *Config, filters map[int]string) *Dnsfilter {
	if c != nil {
		cacheConf := cache.Config{
			EnableLRU: true,
		}

		// initialize objects only once

		if gctx.safebrowsingCache == nil {
			cacheConf.MaxSize = c.SafeBrowsingCacheSize
			gctx.safebrowsingCache = cache.New(cacheConf)
		}

		if gctx.safeSearchCache == nil {
			cacheConf.MaxSize = c.SafeSearchCacheSize
			gctx.safeSearchCache = cache.New(cacheConf)
		}

		if gctx.parentalCache == nil {
			cacheConf.MaxSize = c.ParentalCacheSize
			gctx.parentalCache = cache.New(cacheConf)
		}

		if len(c.ResolverAddress) != 0 && gctx.dialCache == nil {
			dur := time.Duration(c.CacheTime) * time.Minute
			gctx.dialCache = gcache.New(maxDialCacheSize).LRU().Expiration(dur).Build()
		}
	}

	d := new(Dnsfilter)

	// Customize the Transport to have larger connection pool,
	// We are not (re)using http.DefaultTransport because of race conditions found by tests
	d.transport = &http.Transport{
		Proxy:                 http.ProxyFromEnvironment,
		MaxIdleConns:          defaultHTTPMaxIdleConnections, // default 100
		MaxIdleConnsPerHost:   defaultHTTPMaxIdleConnections, // default 2
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}
	if c != nil && len(c.ResolverAddress) != 0 {
		d.transport.DialContext = d.createCustomDialContext(c.ResolverAddress)
	}
	d.client = http.Client{
		Transport: d.transport,
		Timeout:   defaultHTTPTimeout,
	}
	d.safeBrowsingServer = defaultSafebrowsingServer
	d.parentalServer = defaultParentalServer
	if c != nil {
		d.Config = *c
	}

	if filters != nil {
		err := d.initFiltering(filters)
		if err != nil {
			log.Error("Can't initialize filtering subsystem: %s", err)
			d.Close()
			return nil
		}
	}

	d.filtersInitializerChan = make(chan filtersInitializerParams, 1)
	go d.filtersInitializer()

	if d.Config.HTTPRegister != nil { // for tests
		d.registerSecurityHandlers()
		d.registerRewritesHandlers()
	}
	return d
}

//
// config manipulation helpers
//

// SetSafeBrowsingServer lets you optionally change the hostname used for safe browsing lookups
func (d *Dnsfilter) SetSafeBrowsingServer(host string) {
	if len(host) == 0 {
		d.safeBrowsingServer = defaultSafebrowsingServer
	} else {
		d.safeBrowsingServer = host
	}
}

// SetHTTPTimeout lets you optionally change timeout during lookups
func (d *Dnsfilter) SetHTTPTimeout(t time.Duration) {
	d.client.Timeout = t
}

// ResetHTTPTimeout resets lookup timeouts
func (d *Dnsfilter) ResetHTTPTimeout() {
	d.client.Timeout = defaultHTTPTimeout
}

// SafeSearchDomain returns replacement address for search engine
func (d *Dnsfilter) SafeSearchDomain(host string) (string, bool) {
	if d.SafeSearchEnabled {
		val, ok := safeSearchDomains[host]
		return val, ok
	}
	return "", false
}

//
// stats
//

// GetStats returns DNS filtering stats collected since startup
func (d *Dnsfilter) GetStats() Stats {
	return gctx.stats
}