2020-08-06 20:58:47 +03:00
|
|
|
package source
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"io"
|
2023-08-12 21:54:14 +03:00
|
|
|
"log/slog"
|
2024-01-15 04:45:34 +03:00
|
|
|
"net/http"
|
2020-08-06 20:58:47 +03:00
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2024-01-15 04:45:34 +03:00
|
|
|
"strconv"
|
2020-08-06 20:58:47 +03:00
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
"github.com/neilotoole/fscache"
|
2020-08-06 20:58:47 +03:00
|
|
|
|
2020-08-23 13:42:15 +03:00
|
|
|
"github.com/neilotoole/sq/libsq/core/cleanup"
|
|
|
|
"github.com/neilotoole/sq/libsq/core/errz"
|
2024-01-15 04:45:34 +03:00
|
|
|
"github.com/neilotoole/sq/libsq/core/ioz"
|
|
|
|
"github.com/neilotoole/sq/libsq/core/ioz/checksum"
|
|
|
|
"github.com/neilotoole/sq/libsq/core/ioz/contextio"
|
|
|
|
"github.com/neilotoole/sq/libsq/core/ioz/download"
|
|
|
|
"github.com/neilotoole/sq/libsq/core/ioz/httpz"
|
|
|
|
"github.com/neilotoole/sq/libsq/core/ioz/lockfile"
|
2023-11-20 04:06:36 +03:00
|
|
|
"github.com/neilotoole/sq/libsq/core/lg"
|
|
|
|
"github.com/neilotoole/sq/libsq/core/lg/lga"
|
|
|
|
"github.com/neilotoole/sq/libsq/core/lg/lgm"
|
2024-01-15 04:45:34 +03:00
|
|
|
"github.com/neilotoole/sq/libsq/core/options"
|
|
|
|
"github.com/neilotoole/sq/libsq/core/progress"
|
2020-08-06 20:58:47 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
// Files is the centralized API for interacting with files.
//
// Why does Files exist? There's a need for functionality to
// transparently get a Reader for remote or local files, and most importantly,
// an ability for multiple goroutines to read/sample a file while
// it's being read (mainly to "sample" the file type, e.g. to determine
// if it's an XLSX file etc.). Currently we use fscache under the hood
// for this, but our implementation is not satisfactory: in particular,
// the implementation currently requires that we read the entire source
// file into fscache before it's available to be read (which is awful
// if we're reading long-running pipe from stdin). This entire thing
// needs to be revisited. Maybe Files even becomes a fs.FS.
type Files struct {
	// mu guards Files' mutable state; methods such as Open, Filesize,
	// AddStdin, Ping and Close acquire it.
	mu sync.Mutex

	// log is the logger used by Files (set from ctx in NewFiles).
	log *slog.Logger

	// cacheDir is the root cache directory; the per-process fscache
	// dir is created beneath it.
	cacheDir string

	// tempDir holds transient files; it is removed by Files.Close.
	tempDir string

	// clnup is the cleanup sequence executed by Files.Close.
	clnup *cleanup.Cleanup

	// optRegistry provides access to sq's options.
	optRegistry *options.Registry

	// cfgLockFn is the lock func for sq's config.
	cfgLockFn lockfile.LockFunc

	// downloads is a map of source handles the download.Download
	// for that source.
	downloads map[string]*download.Download

	// fillerWgs is used to wait for asynchronous filling of the cache
	// to complete (including downloads).
	fillerWgs *sync.WaitGroup

	// fscache is used to cache files, providing convenient access
	// to multiple readers via Files.newReader.
	fscache *fscache.FSCache

	// fscacheDir is the unique-per-execution directory backing fscache;
	// it is removed by Files.Close.
	fscacheDir string

	// fscacheEntryMetas contains metadata about fscache entries.
	// Entries are added by Files.addStdin, and consumed by
	// Files.Filesize.
	fscacheEntryMetas map[string]*fscacheEntryMeta

	// detectFns is the set of functions that can detect
	// the type of a file.
	detectFns []DriverDetectFunc
}
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
// NewFiles returns a new Files instance. If cleanFscache is true, the fscache
|
|
|
|
// is cleaned on Files.Close.
|
|
|
|
func NewFiles(
|
|
|
|
ctx context.Context,
|
|
|
|
optReg *options.Registry,
|
|
|
|
cfgLock lockfile.LockFunc,
|
|
|
|
tmpDir, cacheDir string,
|
|
|
|
) (*Files, error) {
|
|
|
|
log := lg.FromContext(ctx)
|
|
|
|
log.Debug("Creating new Files instance", "tmp_dir", tmpDir, "cache_dir", cacheDir)
|
2020-08-06 20:58:47 +03:00
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
if optReg == nil {
|
|
|
|
optReg = &options.Registry{}
|
|
|
|
}
|
|
|
|
|
|
|
|
fs := &Files{
|
|
|
|
optRegistry: optReg,
|
|
|
|
cacheDir: cacheDir,
|
|
|
|
cfgLockFn: cfgLock,
|
|
|
|
tempDir: tmpDir,
|
|
|
|
clnup: cleanup.New(),
|
|
|
|
log: lg.FromContext(ctx),
|
|
|
|
downloads: map[string]*download.Download{},
|
|
|
|
fillerWgs: &sync.WaitGroup{},
|
|
|
|
fscacheEntryMetas: make(map[string]*fscacheEntryMeta),
|
|
|
|
}
|
|
|
|
|
|
|
|
// We want a unique dir for each execution. Note that fcache is deleted
|
|
|
|
// on cleanup (unless something bad happens and sq doesn't
|
|
|
|
// get a chance to clean up). But, why take the chance; we'll just give
|
|
|
|
// fcache a unique dir each time.
|
|
|
|
fs.fscacheDir = filepath.Join(cacheDir, "fscache", strconv.Itoa(os.Getpid())+"_"+checksum.Rand())
|
|
|
|
|
|
|
|
if err := ioz.RequireDir(fs.fscacheDir); err != nil {
|
2020-08-06 20:58:47 +03:00
|
|
|
return nil, errz.Err(err)
|
|
|
|
}
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
var err error
|
|
|
|
if fs.fscache, err = fscache.New(fs.fscacheDir, os.ModePerm, time.Hour); err != nil {
|
2020-08-06 20:58:47 +03:00
|
|
|
return nil, errz.Err(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return fs, nil
|
|
|
|
}
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
// Filesize returns the file size of src.Location. If the source is being
|
|
|
|
// loaded asynchronously, this function may block until loading completes.
|
|
|
|
// An error is returned if src is not a document/file source.
|
|
|
|
// For remote files, this method should only be invoked after the file
|
|
|
|
// has completed downloading, or an error will be returned.
|
|
|
|
func (fs *Files) Filesize(ctx context.Context, src *Source) (size int64, err error) {
|
|
|
|
locTyp := getLocType(src.Location)
|
|
|
|
switch locTyp {
|
|
|
|
case locTypeLocalFile:
|
|
|
|
// It's a filepath
|
|
|
|
var fi os.FileInfo
|
|
|
|
if fi, err = os.Stat(src.Location); err != nil {
|
|
|
|
return 0, errz.Err(err)
|
|
|
|
}
|
|
|
|
return fi.Size(), nil
|
2020-08-06 20:58:47 +03:00
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
case locTypeRemoteFile:
|
|
|
|
fs.mu.Lock()
|
|
|
|
defer fs.mu.Unlock()
|
|
|
|
var dl *download.Download
|
|
|
|
if dl, err = fs.downloadFor(ctx, src); err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
2020-08-06 20:58:47 +03:00
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
return dl.Filesize(ctx)
|
2020-08-06 20:58:47 +03:00
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
case locTypeSQL:
|
|
|
|
return 0, errz.Errorf("invalid to get size of SQL source: %s", src.Handle)
|
|
|
|
|
|
|
|
case locTypeStdin:
|
|
|
|
fs.mu.Lock()
|
|
|
|
entryMeta, ok := fs.fscacheEntryMetas[StdinHandle]
|
|
|
|
fs.mu.Unlock()
|
|
|
|
if !ok {
|
|
|
|
return 0, errz.Errorf("stdin not present in cache")
|
|
|
|
}
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return 0, ctx.Err()
|
|
|
|
case <-entryMeta.done:
|
|
|
|
return entryMeta.written, entryMeta.err
|
|
|
|
}
|
|
|
|
|
|
|
|
default:
|
|
|
|
return 0, errz.Errorf("unknown source location type: %s", RedactLocation(src.Location))
|
2020-08-06 20:58:47 +03:00
|
|
|
}
|
2024-01-15 04:45:34 +03:00
|
|
|
}
|
2020-08-06 20:58:47 +03:00
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
// fscacheEntryMeta contains metadata about a fscache entry.
// When the cache entry has been filled, the done channel
// is closed and the written and err fields are set.
// This mechanism allows Files.Filesize to block until
// the asynchronous filling of the cache entry has completed.
type fscacheEntryMeta struct {
	// key is the fscache key for this entry (e.g. a source handle).
	key string

	// done is closed when the async fill completes; written and err
	// must not be read before done is closed.
	done chan struct{}

	// written is the number of bytes copied into the cache entry.
	written int64

	// err is the error, if any, that occurred while filling the entry.
	err error
}
|
|
|
|
|
2023-05-22 18:08:14 +03:00
|
|
|
// AddStdin copies f to fs's cache: the stdin data in f
|
2020-08-23 13:42:15 +03:00
|
|
|
// is later accessible via fs.Open(src) where src.Handle
|
2024-01-15 04:45:34 +03:00
|
|
|
// is StdinHandle; f's type can be detected via DetectStdinType.
|
|
|
|
// Note that f is ultimately closed by a goroutine spawned by
|
|
|
|
// this method, but may not be closed at the time of return.
|
|
|
|
func (fs *Files) AddStdin(ctx context.Context, f *os.File) error {
|
2020-08-06 20:58:47 +03:00
|
|
|
fs.mu.Lock()
|
|
|
|
defer fs.mu.Unlock()
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
// FIXME: This might be the spot where we can add the cleanup
|
|
|
|
// for the stdin cache dir, because it should always be deleted
|
|
|
|
// when sq exits. But, first we probably need to refactor the
|
|
|
|
// interaction with driver.Grips.
|
2020-08-06 20:58:47 +03:00
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
err := fs.addStdin(ctx, StdinHandle, f) // f is closed by addStdin
|
|
|
|
return errz.Wrapf(err, "failed to add %s to fscache", StdinHandle)
|
2020-08-06 20:58:47 +03:00
|
|
|
}
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
// addStdin asynchronously copies f to fs's cache. f is closed
// when the async copy completes. This method should only be used
// for stdin; for regular files, use Files.addRegularFile.
// The caller must hold fs.mu (AddStdin does so).
func (fs *Files) addStdin(ctx context.Context, handle string, f *os.File) error {
	log := lg.FromContext(ctx).With(lga.Handle, handle, lga.File, f.Name())

	// Adding the same handle twice is a programming error.
	if _, ok := fs.fscacheEntryMetas[handle]; ok {
		return errz.Errorf("%s already added to fscache", handle)
	}

	cacheRdr, cacheWrtr, cacheWrtrErrFn, err := fs.fscache.GetWithErr(handle)
	if err != nil {
		return errz.Err(err)
	}

	// The reader isn't needed here; readers are obtained later via
	// Files.newReader.
	defer lg.WarnIfCloseError(log, lgm.CloseFileReader, cacheRdr)

	if cacheWrtr == nil {
		// Shouldn't happen
		if cacheRdr != nil {
			return errz.Errorf("no fscache writer for %s (but fscache reader exists when it shouldn't)", handle)
		}

		return errz.Errorf("no fscache writer for %s", handle)
	}

	// We create an entry meta for this handle. This entry will be
	// filled asynchronously in the ioz.CopyAsync callback below.
	// The entry can then be consumed by Files.Filesize.
	entryMeta := &fscacheEntryMeta{
		key:  handle,
		done: make(chan struct{}),
	}
	fs.fscacheEntryMetas[handle] = entryMeta

	fs.fillerWgs.Add(1)
	start := time.Now()
	pw := progress.NewWriter(ctx, "Reading "+handle, -1, cacheWrtr)
	ioz.CopyAsync(pw, contextio.NewReader(ctx, f),
		func(written int64, err error) {
			defer fs.fillerWgs.Done()
			defer lg.WarnIfCloseError(log, lgm.CloseFileReader, f)
			// Record the outcome, then close done to release any
			// goroutine blocked in Files.Filesize.
			entryMeta.written = written
			entryMeta.err = err
			close(entryMeta.done)

			elapsed := time.Since(start)
			if err == nil {
				log.Info("Async fscache fill: completed", lga.Copied, written, lga.Elapsed, elapsed)
				lg.WarnIfCloseError(log, "Close fscache writer", cacheWrtr)
				pw.Stop()
				return
			}

			log.Error("Async fscache fill: failure", lga.Copied, written, lga.Elapsed, elapsed, lga.Err, err)

			pw.Stop()
			cacheWrtrErrFn(err)
			// We deliberately don't close cacheWrtr here,
			// because cacheWrtrErrFn handles that work.
		},
	)

	log.Debug("Async fscache fill: dispatched")
	return nil
}
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
// addRegularFile maps f to fs's cache, returning a reader which the
|
2020-08-06 20:58:47 +03:00
|
|
|
// caller is responsible for closing. f is closed by this method.
|
2024-01-15 04:45:34 +03:00
|
|
|
// Do not add stdin via this function; instead use addStdin.
|
|
|
|
func (fs *Files) addRegularFile(ctx context.Context, f *os.File, key string) (fscache.ReadAtCloser, error) {
|
|
|
|
log := lg.FromContext(ctx)
|
|
|
|
log.Debug("Adding regular file", lga.Key, key, lga.Path, f.Name())
|
|
|
|
|
|
|
|
defer lg.WarnIfCloseError(log, lgm.CloseFileReader, f)
|
2020-08-06 20:58:47 +03:00
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
if key == StdinHandle {
|
|
|
|
// This is a programming error; the caller should have
|
|
|
|
// instead invoked addStdin. Probably should panic here.
|
|
|
|
return nil, errz.New("illegal to add stdin via Files.addRegularFile")
|
2020-08-06 20:58:47 +03:00
|
|
|
}
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
if fs.fscache.Exists(key) {
|
|
|
|
return nil, errz.Errorf("file already exists in cache: %s", key)
|
2020-08-06 20:58:47 +03:00
|
|
|
}
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
if err := fs.fscache.MapFile(f.Name()); err != nil {
|
|
|
|
return nil, errz.Wrapf(err, "failed to map file into fscache: %s", f.Name())
|
2020-08-06 20:58:47 +03:00
|
|
|
}
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
r, _, err := fs.fscache.Get(key)
|
|
|
|
return r, errz.Err(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// filepath returns the file path of src.Location. An error is returned
|
|
|
|
// if the source's driver type is not a document type (e.g. it is a
|
|
|
|
// SQL driver). If src is a remote (http) location, the returned filepath
|
|
|
|
// is that of the cached download file. If that file is not present, an
|
|
|
|
// error is returned.
|
|
|
|
func (fs *Files) filepath(src *Source) (string, error) {
|
|
|
|
switch getLocType(src.Location) {
|
|
|
|
case locTypeLocalFile:
|
|
|
|
return src.Location, nil
|
|
|
|
case locTypeRemoteFile:
|
|
|
|
dlDir, err := fs.downloadDirFor(src)
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
dlFile := filepath.Join(dlDir, "body")
|
|
|
|
if !ioz.FileAccessible(dlFile) {
|
|
|
|
return "", errz.Errorf("remote file for %s not downloaded at: %s", src.Handle, dlFile)
|
|
|
|
}
|
|
|
|
return dlFile, nil
|
|
|
|
case locTypeSQL:
|
|
|
|
return "", errz.Errorf("cannot get filepath of SQL source: %s", src.Handle)
|
|
|
|
case locTypeStdin:
|
|
|
|
return "", errz.Errorf("cannot get filepath of stdin source: %s", src.Handle)
|
|
|
|
default:
|
|
|
|
return "", errz.Errorf("unknown source location type for %s: %s", src.Handle, RedactLocation(src.Location))
|
|
|
|
}
|
2020-08-06 20:58:47 +03:00
|
|
|
}
|
|
|
|
|
2020-08-23 13:42:15 +03:00
|
|
|
// Open returns a new io.ReadCloser for src.Location.
|
2020-08-06 20:58:47 +03:00
|
|
|
// If src.Handle is StdinHandle, AddStdin must first have
|
|
|
|
// been invoked. The caller must close the reader.
|
2024-01-15 04:45:34 +03:00
|
|
|
func (fs *Files) Open(ctx context.Context, src *Source) (io.ReadCloser, error) {
|
2020-08-06 20:58:47 +03:00
|
|
|
fs.mu.Lock()
|
|
|
|
defer fs.mu.Unlock()
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
lg.FromContext(ctx).Debug("Files.Open", lga.Src, src)
|
|
|
|
return fs.newReader(ctx, src)
|
2020-08-23 13:42:15 +03:00
|
|
|
}
|
2020-08-06 20:58:47 +03:00
|
|
|
|
2020-08-23 13:42:15 +03:00
|
|
|
// OpenFunc returns a func that invokes fs.Open for src.Location.
func (fs *Files) OpenFunc(src *Source) FileOpenFunc {
	// The closure captures src; ctx is supplied by the eventual caller.
	return func(ctx context.Context) (io.ReadCloser, error) {
		return fs.Open(ctx, src)
	}
}
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
func (fs *Files) newReader(ctx context.Context, src *Source) (io.ReadCloser, error) {
|
|
|
|
loc := src.Location
|
|
|
|
locTyp := getLocType(loc)
|
|
|
|
switch locTyp {
|
|
|
|
case locTypeUnknown:
|
|
|
|
return nil, errz.Errorf("unknown source location type: %s", loc)
|
|
|
|
case locTypeSQL:
|
|
|
|
return nil, errz.Errorf("invalid to read SQL source: %s", loc)
|
|
|
|
case locTypeStdin:
|
|
|
|
r, w, err := fs.fscache.Get(StdinHandle)
|
2020-08-06 20:58:47 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, errz.Err(err)
|
|
|
|
}
|
|
|
|
if w != nil {
|
|
|
|
return nil, errz.New("@stdin not cached: has AddStdin been invoked yet?")
|
|
|
|
}
|
|
|
|
|
|
|
|
return r, nil
|
|
|
|
}
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
// Well, it's either a local or remote file.
|
|
|
|
// Let's see if it's cached.
|
|
|
|
if fs.fscache.Exists(loc) {
|
|
|
|
r, _, err := fs.fscache.Get(loc)
|
2020-08-06 20:58:47 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
return r, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// It's not cached.
|
|
|
|
if locTyp == locTypeLocalFile {
|
|
|
|
f, err := os.Open(loc)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errz.Err(err)
|
|
|
|
}
|
|
|
|
// fs.addRegularFile closes f, so we don't have to do it.
|
|
|
|
r, err := fs.addRegularFile(ctx, f, loc)
|
2020-08-06 20:58:47 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return r, nil
|
|
|
|
}
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
_, r, err := fs.openRemoteFile(ctx, src, false)
|
|
|
|
return r, err
|
2020-08-06 20:58:47 +03:00
|
|
|
}
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
// Ping implements a ping mechanism for document
|
|
|
|
// sources (local or remote files).
|
|
|
|
func (fs *Files) Ping(ctx context.Context, src *Source) error {
|
|
|
|
fs.mu.Lock()
|
|
|
|
defer fs.mu.Unlock()
|
2020-08-06 20:58:47 +03:00
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
switch getLocType(src.Location) {
|
|
|
|
case locTypeLocalFile:
|
|
|
|
if _, err := os.Stat(src.Location); err != nil {
|
|
|
|
return errz.Wrapf(err, "ping: failed to stat file source %s: %s", src.Handle, src.Location)
|
2020-08-06 20:58:47 +03:00
|
|
|
}
|
2024-01-15 04:45:34 +03:00
|
|
|
return nil
|
2020-08-06 20:58:47 +03:00
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
case locTypeRemoteFile:
|
|
|
|
req, err := http.NewRequestWithContext(ctx, http.MethodHead, src.Location, nil)
|
2020-08-06 20:58:47 +03:00
|
|
|
if err != nil {
|
2024-01-15 04:45:34 +03:00
|
|
|
return errz.Wrapf(err, "ping: %s", src.Handle)
|
2020-08-06 20:58:47 +03:00
|
|
|
}
|
2024-01-15 04:45:34 +03:00
|
|
|
c := fs.httpClientFor(ctx, src)
|
|
|
|
resp, err := c.Do(req) //nolint:bodyclose
|
|
|
|
if err != nil {
|
|
|
|
return errz.Wrapf(err, "ping: %s", src.Handle)
|
|
|
|
}
|
|
|
|
defer lg.WarnIfCloseError(fs.log, lgm.CloseHTTPResponseBody, resp.Body)
|
|
|
|
if resp.StatusCode != http.StatusOK {
|
|
|
|
return errz.Errorf("ping: %s: expected {%s} but got {%s}",
|
|
|
|
src.Handle, httpz.StatusText(http.StatusOK), httpz.StatusText(resp.StatusCode))
|
|
|
|
}
|
|
|
|
return nil
|
2020-08-06 20:58:47 +03:00
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
default:
|
|
|
|
// Shouldn't happen
|
|
|
|
return errz.Errorf("ping: %s is not a document source", src.Handle)
|
2020-08-06 20:58:47 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
// Close closes any open resources and waits for any goroutines
|
|
|
|
// to complete.
|
|
|
|
func (fs *Files) Close() error {
|
|
|
|
fs.mu.Lock()
|
|
|
|
defer fs.mu.Unlock()
|
2021-01-02 07:10:02 +03:00
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
fs.log.Debug("Files.Close: waiting for goroutines to complete")
|
|
|
|
fs.fillerWgs.Wait()
|
2020-08-06 20:58:47 +03:00
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
fs.log.Debug("Files.Close: executing cleanup", lga.Count, fs.clnup.Len())
|
|
|
|
err := fs.clnup.Run()
|
|
|
|
err = errz.Append(err, fs.fscache.Clean())
|
|
|
|
err = errz.Append(err, errz.Wrap(os.RemoveAll(fs.fscacheDir), "remove fscache dir"))
|
|
|
|
err = errz.Append(err, errz.Wrap(os.RemoveAll(fs.tempDir), "remove files temp dir"))
|
|
|
|
fs.doCacheSweep()
|
2020-12-30 21:57:58 +03:00
|
|
|
|
2024-01-15 04:45:34 +03:00
|
|
|
return err
|
2020-08-06 20:58:47 +03:00
|
|
|
}
|
|
|
|
|
2020-08-09 00:23:30 +03:00
|
|
|
// CleanupE adds fn to the cleanup sequence invoked by fs.Close.
//
// REVISIT: This CleanupE method really is an odd fish. It's only used
// by the test helper. Probably it can be removed?
func (fs *Files) CleanupE(fn func() error) {
	fs.clnup.AddE(fn)
}
|
|
|
|
|
2020-08-23 13:42:15 +03:00
|
|
|
// FileOpenFunc is a func that opens a ReadCloser. The caller
// is responsible for closing the returned ReadCloser.
type FileOpenFunc func(ctx context.Context) (io.ReadCloser, error)
|