Assets cache (#54)

* add caching control for assets
* regen site
Umputun 2021-04-23 02:02:36 -05:00 committed by GitHub
parent ef8298bb9c
commit c590c3246d
5 changed files with 100 additions and 35 deletions

View File

@ -120,14 +120,16 @@ User can also turn stdout log on with `--logger.stdout`. It won't affect the fil
## Assets Server
Users may turn the assets server on (off by default) to serve static files. As long as `--assets.location` set it treats every non-proxied request under `assets.root` as a request for static files. The assets server can be used without any proxy providers; in this mode, reproxy acts as a simple web server for the static content.
In addition to the common assets server, multiple custom static servers are supported. Each provider has a different way to define such a static rule, and some providers may not support it at all. For example, multiple static servers make sense in static (command line provider), file provider, and even useful with docker providers.
1. static provider - if source element prefixed by `assets:` it will be treated as file-server. For example `*,assets:/web,/var/www,` will serve all `/web/*` request with a file server on top of `/var/www` directory.
2. file provider - setting optional field `assets: true`
3. docker provider - `reproxy.assets=web-root:location`, i.e. `reproxy.assets=/web:/var/www`.
Assets server supports caching control with the `--assets.cache=<duration>` parameter. `0s` duration (default) turns caching control off.
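For illustration only (the paths and duration below are mine, not part of this commit): running reproxy with `--assets.location=/var/www --assets.root=/ --assets.cache=12h` would have the assets server answer static requests with `Cache-Control: public, max-age=43200`, while the default `0s` leaves the header off entirely.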
## More options
- `--gzip` enables gzip compression for responses.
@ -173,6 +175,7 @@ ssl:
assets:
  -a, --assets.location= assets location [$ASSETS_LOCATION]
      --assets.root= assets web root (default: /) [$ASSETS_ROOT]
      --assets.cache= cache duration for assets (default: 0s) [$ASSETS_CACHE]
logger:
      --logger.stdout enable stdout logging [$LOGGER_STDOUT]

View File

@ -40,8 +40,9 @@ var opts struct {
	} `group:"ssl" namespace:"ssl" env-namespace:"SSL"`

	Assets struct {
		Location      string        `short:"a" long:"location" env:"LOCATION" default:"" description:"assets location"`
		WebRoot       string        `long:"root" env:"ROOT" default:"/" description:"assets web root"`
		CacheDuration time.Duration `long:"cache" env:"CACHE" default:"0s" description:"cache duration for assets"`
	} `group:"assets" namespace:"assets" env-namespace:"ASSETS"`

	Logger struct {
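A side note, not part of the diff: judging by the `0s` default, the cache option takes a Go duration string, and any value that parses to zero keeps caching headers off. A minimal sketch of the same parsing rules (the `12h` value is a made-up example):

package main

import (
	"fmt"
	"time"
)

func main() {
	// "12h" is a made-up example; the flag default shown above is "0s"
	d, err := time.ParseDuration("12h")
	if err != nil {
		panic(err)
	}
	fmt.Println(d > 0) // true, so the assets server would add a Cache-Control header
}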
@ -109,6 +110,7 @@ func main() {
	setupLog(opts.Dbg)
	log.Printf("[DEBUG] options: %+v", opts)

	ctx, cancel := context.WithCancel(context.Background())

	go func() { // catch signal and invoke graceful termination
		stop := make(chan os.Signal, 1)
@ -169,18 +171,19 @@ func main() {
	}()

	px := &proxy.Http{
		Version:             revision,
		Matcher:             svc,
		Address:             opts.Listen,
		MaxBodySize:         opts.MaxSize,
		AssetsLocation:      opts.Assets.Location,
		AssetsWebRoot:       opts.Assets.WebRoot,
		AssetsCacheDuration: opts.Assets.CacheDuration,
		GzEnabled:           opts.GzipEnabled,
		SSLConfig:           sslConfig,
		ProxyHeaders:        opts.ProxyHeaders,
		AccessLog:           accessLog,
		StdOutEnabled:       opts.Logger.StdOut,
		Signature:           opts.Signature,
		Timeouts: proxy.Timeouts{
			ReadHeader: opts.Timeouts.ReadHeader,
			Write:      opts.Timeouts.Write,

View File

@ -24,19 +24,20 @@ import (
// Http is a proxy server for both http and https
type Http struct { // nolint golint
	Matcher
	Address             string
	AssetsLocation      string
	AssetsWebRoot       string
	AssetsCacheDuration time.Duration
	MaxBodySize         int64
	GzEnabled           bool
	ProxyHeaders        []string
	SSLConfig           SSLConfig
	Version             string
	AccessLog           io.Writer
	StdOutEnabled       bool
	Signature           bool
	Timeouts            Timeouts
	Metrics             Metrics
}

// Matcher source info (server and route) to the destination url
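For orientation, here is a minimal sketch of how a caller sets the new field (the directory, web root, and duration are illustrative, and the import path is assumed from the repository layout; the real wiring lives in main.go as shown above):

package main

import (
	"time"

	"github.com/umputun/reproxy/app/proxy" // import path assumed from the repo layout
)

func main() {
	px := &proxy.Http{
		AssetsLocation:      "/var/www",     // hypothetical directory with static files
		AssetsWebRoot:       "/",            // serve them from the web root
		AssetsCacheDuration: 12 * time.Hour, // non-zero enables Cache-Control on asset responses
	}
	_ = px // the real main() fills in many more fields before starting the server
}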
@ -189,9 +190,7 @@ func (h *Http) proxyHandler() http.HandlerFunc {
	if h.AssetsLocation != "" && h.AssetsWebRoot != "" {
		fs, err := R.FileServer(h.AssetsWebRoot, h.AssetsLocation)
		if err == nil {
			assetsHandler = h.cachingHandler(fs).ServeHTTP
		}
	}
@ -229,7 +228,7 @@ func (h *Http) proxyHandler() http.HandlerFunc {
				http.Error(w, "Server error", http.StatusInternalServerError)
				return
			}
			h.cachingHandler(fs).ServeHTTP(w, r)
		}
	}
}
@ -311,6 +310,16 @@ func (h *Http) stdoutLogHandler(enable bool, lh func(next http.Handler) http.Han
	}
}

func (h *Http) cachingHandler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if h.AssetsCacheDuration > 0 {
			w.Header().Set("Cache-Control", "public, max-age="+strconv.Itoa(int(h.AssetsCacheDuration.Seconds())))
		}
		next.ServeHTTP(w, r)
	})
}
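To make the header value concrete, a small standalone sketch that mirrors the formatting in cachingHandler (the durations are example values, not taken from the commit):

package main

import (
	"fmt"
	"strconv"
	"time"
)

// cacheControl reproduces the formatting used by cachingHandler above:
// the duration is truncated to whole seconds and emitted as max-age.
func cacheControl(d time.Duration) string {
	return "public, max-age=" + strconv.Itoa(int(d.Seconds()))
}

func main() {
	fmt.Println(cacheControl(10 * time.Second)) // public, max-age=10
	fmt.Println(cacheControl(12 * time.Hour))   // public, max-age=43200
}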
func (h *Http) makeHTTPServer(addr string, router http.Handler) *http.Server {
	return &http.Server{
		Addr: addr,

View File

@ -4,13 +4,17 @@ import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"net/http"
	"net/http/httptest"
	"os"
	"path"
	"strconv"
	"testing"
	"time"

	R "github.com/go-pkgz/rest"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
@ -252,3 +256,48 @@ func TestHttp_toHttp(t *testing.T) {
	}
}
func TestHttp_cachingHandler(t *testing.T) {
	// prepare a temp directory with a couple of static files
	dir, e := ioutil.TempDir(os.TempDir(), "reproxy")
	require.NoError(t, e)
	e = ioutil.WriteFile(path.Join(dir, "1.html"), []byte("1.htm"), 0600)
	assert.NoError(t, e)
	e = ioutil.WriteFile(path.Join(dir, "2.html"), []byte("2.htm"), 0600)
	assert.NoError(t, e)
	defer os.RemoveAll(dir)

	fh, e := R.FileServer("/static", dir)
	require.NoError(t, e)

	h := Http{AssetsCacheDuration: 10 * time.Second, AssetsLocation: dir, AssetsWebRoot: "/static"}
	hh := R.Wrap(fh, h.cachingHandler)
	ts := httptest.NewServer(hh)
	defer ts.Close()
	client := http.Client{Timeout: 599 * time.Second}

	{ // existing asset gets both Cache-Control and Last-Modified
		resp, err := client.Get(ts.URL + "/static/1.html")
		require.NoError(t, err)
		assert.Equal(t, http.StatusOK, resp.StatusCode)
		t.Logf("headers: %+v", resp.Header)
		assert.Equal(t, "public, max-age=10", resp.Header.Get("Cache-Control"))
		assert.NotEqual(t, "", resp.Header.Get("Last-Modified"))
	}

	{ // missing asset still gets Cache-Control but no Last-Modified
		resp, err := client.Get(ts.URL + "/static/bad.html")
		require.NoError(t, err)
		assert.Equal(t, http.StatusNotFound, resp.StatusCode)
		t.Logf("headers: %+v", resp.Header)
		assert.Equal(t, "public, max-age=10", resp.Header.Get("Cache-Control"))
		assert.Equal(t, "", resp.Header.Get("Last-Modified"))
	}

	{ // path-traversal attempt is rejected the same way
		resp, err := client.Get(ts.URL + "/%2e%2e%2f%2e%2e%2f%2e%2e%2f/etc/passwd")
		require.NoError(t, err)
		assert.Equal(t, http.StatusNotFound, resp.StatusCode)
		t.Logf("headers: %+v", resp.Header)
		assert.Equal(t, "public, max-age=10", resp.Header.Get("Cache-Control"))
		assert.Equal(t, "", resp.Header.Get("Last-Modified"))
	}
}
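Worth noting from the test above: cachingHandler sets the header before delegating to the wrapped file server, so Cache-Control shows up even on 404 responses (including the traversal attempt), while Last-Modified only appears for files that actually exist.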

View File

@ -9,7 +9,7 @@ srv.example.com:
- { route: &quot;^/api/svc2/(.*)&quot;, dest: &quot;http://127.0.0.2:8080/blah2/$1/abc&quot; }
</code></pre><p>This is a dynamic provider and file change will be applied automatically.</p><h3 id="docker">Docker <a href="#docker"></a></h3><p>Docker provider supports a fully automatic discovery (with <code>--docker.auto</code>) with no extra configuration and by default redirects all requests like <code>https://server/&lt;container_name&gt;/(.*)</code> to the internal IP of the given container and the exposed port. Only active (running) containers will be detected.</p><p>This default can be changed with labels:</p><ul><li><code>reproxy.server</code> - server (hostname) to match. Also can be a list of comma-separated servers.</li><li><code>reproxy.route</code> - source route (location)</li><li><code>reproxy.dest</code> - destination path. Note: this is not full url, but just the path which will be appended to container's ip:port</li><li><code>reproxy.port</code> - destination port for the discovered container</li><li><code>reproxy.ping</code> - ping path for the destination container.</li><li><code>reproxy.enabled</code> - enable (<code>yes</code>, <code>true</code>, <code>1</code>) or disable (<code>no</code>, <code>false</code>, <code>0</code>) container from reproxy destinations.</li></ul><p>Pls note: without <code>--docker.auto</code> the destination container has to have at least one of <code>reproxy.*</code> labels to be considered as a potential destination.</p><p>With <code>--docker.auto</code>, all containers with exposed port will be considered as routing destinations. There are 3 ways to restrict it:</p><ul><li>Exclude some containers explicitly with <code>--docker.exclude</code>, i.e. <code>--docker.exclude=c1 --docker.exclude=c2 ...</code></li><li>Allow only a particular docker network with <code>--docker.network</code></li><li>Set the label <code>reproxy.enabled=false</code> or <code>reproxy.enabled=no</code> or <code>reproxy.enabled=0</code></li></ul><p>This is a dynamic provider and any change in container's status will be applied automatically.</p><h2 id="ssl-support">SSL support <a href="#ssl-support"></a></h2><p>SSL mode (by default none) can be set to <code>auto</code> (ACME/LE certificates), <code>static</code> (existing certificate) or <code>none</code>. If <code>auto</code> turned on SSL certificate will be issued automatically for all discovered server names. User can override it by setting <code>--ssl.fqdn</code> value(s)</p><h2 id="logging">Logging <a href="#logging"></a></h2><p>By default no request log generated. This can be turned on by setting <code>--logger.enabled</code>. The log (auto-rotated) has <a href="http://httpd.apache.org/docs/2.2/logs.html#combined">Apache Combined Log Format</a></p><p>User can also turn stdout log on with <code>--logger.stdout</code>. It won't affect the file logging but will output some minimal info about processed requests, something like this:</p><pre><code>2021/04/16 01:17:25.601 [INFO] GET - /echo/image.png - xxx.xxx.xxx.xxx - 200 (155400) - 371.661251ms
2021/04/16 01:18:18.959 [INFO] GET - /api/v1/params - xxx.xxx.xxx.xxx - 200 (74) - 1.217669m
</code></pre><h2 id="assets-server">Assets Server <a href="#assets-server"></a></h2><p>Users may turn the assets server on (off by default) to serve static files. As long as <code>--assets.location</code> set it treats every non-proxied request under <code>assets.root</code> as a request for static files. The assets server can be used without any proxy providers; in this mode, reproxy acts as a simple web server for the static content.</p><p>In addition to the common assets server, multiple custom static servers are supported. Each provider has a different way to define such a static rule, and some providers may not support it at all. For example, multiple static servers make sense in static (command line provider), file provider, and even useful with docker providers.</p><ol><li>static provider - if source element prefixed by <code>assets:</code> it will be treated as file-server. For example <code>*,assets:/web,/var/www,</code> will serve all <code>/web/*</code> request with a file server on top of <code>/var/www</code> directory.</li><li>file provider - setting optional field <code>assets: true</code></li><li>docker provider - <code>reproxy.assets=web-root:location</code>, i.e. <code>reproxy.assets=/web:/var/www</code>.</li></ol><p>Assets server supports caching control with the <code>--assets.cache=&lt;duration&gt;</code> parameter. <code>0s</code> duration (default) turns caching control off.</p><h2 id="more-options">More options <a href="#more-options"></a></h2><ul><li><code>--gzip</code> enables gzip compression for responses.</li><li><code>--max=N</code> allows to set the maximum size of request (default 64k)</li><li><code>--header</code> sets extra header(s) added to each proxied request</li><li><code>--timeout.*</code> various timeouts for both server and proxy transport. See <code>timeout</code> section in <a href="#all-application-options">All Application Options</a></li></ul><h2 id="ping-and-health-checks">Ping and health checks <a href="#ping-and-health-checks"></a></h2><p>reproxy provides 2 endpoints for this purpose:</p><ul><li><code>/ping</code> responds with <code>pong</code> and indicates what reproxy up and running</li><li><code>/health</code> returns <code>200 OK</code> status if all destination servers responded to their ping request with <code>200</code> or <code>417 Expectation Failed</code> if any of servers responded with non-200 code. It also returns json body with details about passed/failed services.</li></ul><h2 id="management-api">Management API <a href="#management-api"></a></h2><p>Optional, can be turned on with <code>--mgmt.enabled</code>. Exposes 2 endpoints on <code>mgmt.listen</code> address:port:</p><ul><li><code>GET /routes</code> - list of all discovered routes</li><li><code>GET /metrics</code> - returns prometheus metrics (<code>http_requests_total</code>, <code>response_status</code> and <code>http_response_time_seconds</code>)</li></ul><p><em>see also <a href="https://github.com/umputun/reproxy/examples/metrics">examples/metrics</a></em></p><h2 id="all-application-options">All Application Options <a href="#all-application-options"></a></h2><pre><code> -l, --listen= listen on host:port (default: 127.0.0.1:8080) [$LISTEN]
  -m, --max= max request size (default: 64000) [$MAX_SIZE]
  -g, --gzip enable gz compression [$GZIP]
  -x, --header= proxy headers [$HEADER]
@ -28,6 +28,7 @@ ssl:
assets:
  -a, --assets.location= assets location [$ASSETS_LOCATION]
      --assets.root= assets web root (default: /) [$ASSETS_ROOT]
      --assets.cache= cache duration for assets (default: 0s) [$ASSETS_CACHE]
logger:
      --logger.stdout enable stdout logging [$LOGGER_STDOUT]
@ -71,7 +72,7 @@ mgmt:
Help Options:
  -h, --help Show this help message
</code></pre><h2 id="status">Status <a href="#status"></a></h2><p>The project is under active development and may have breaking changes till <code>v1</code> released.</p><footer class="flex mt-4"><div class="ml-auto text-xs">Updated&nbsp;<time datetime="2021-59-23">Apr 23, 2021</time> <a class="ml-2 inline-block no-underline font-normal rounded" target="_blank" href="https://github.com/umputun/reproxy/edit/master/README.md">Edit</a></div></footer></arcicle></main><script>const sidebarElement = document.getElementById('sidebar')
const sidebarOpenButton = document.getElementById('sidebar-open-button')
const sidebarCloseButton = document.getElementById('sidebar-close-button')
const sidebarLinks = sidebarElement.querySelectorAll('a')