#199: Config overhaul (#214)

* refactor: partially moved over driver.Tuning params to options

* All knobs moved to options

* sq config edit: now has comments for options

* Major work complete on config/options overhaul

* Major work complete on config/options overhaul

* Updated help text for 'sq version'
This commit is contained in:
Neil O'Toole 2023-05-03 06:36:10 -06:00 committed by GitHub
parent 4521008b72
commit 3180334c0c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
83 changed files with 1021 additions and 627 deletions

View File

@ -5,19 +5,60 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
Breaking changes are annotated with ☢️.
## Upcoming
This release overhauls `sq`'s config mechanism.
This release completely overhauls `sq`'s config mechanism. There are a
handful of minor breaking changes ☢️.
### Added
- `sq config get` prints config. DOCS
- `sq config set` sets config values.
- `sq config edit` edits config.
- Editor can be specified via `$EDITOR` or `$SQ_EDITOR`.
- `sq config location` prints the location of the config dir.
- `--config` flag is now honored globally.
- Many more knobs are exposed in config.
### Changed
- Envar `SQ_CONFIG` replaces `SQ_CONFIGDIR`.
- ☢️ Envar `SQ_CONFIG` replaces `SQ_CONFIGDIR`.
- ☢️ Format flag `--table` is renamed to `--text`. This is changed because the flag
often outputs text in a table format, but sometimes it's just plain text. Thus
`table` was not quite accurate.
- ☢️ The flag to explicitly specify a driver when piping input to `sq` has been
renamed from `--driver` to `--ingest.driver`. This change is made to align
the naming of all the ingest options and reduce ambiguity.
```shell
# previously
$ cat mystery.data | sq --driver=csv '.data'
# now
$ cat mystery.data | sq --ingest.driver=csv '.data'
```
- ☢️ `sq add` no longer has the generic `--opts x=y` mechanism. This flag was
ambiguous and confusing. Instead use explicit option flags.
```shell
# previously
$ sq add ./actor.csv --opts=header=false
# now
$ sq add ./actor.csv --ingest.header=false
```
- ☢️ The short form of the `sq add --handle` flag has been changed from `-h` to
`-n`. While this is not ideal, the `-h` shorthand is already in use everywhere
else as the short form of `--header`.
```shell
# previously
$ sq add ./actor.csv -h @actor
# now
$ sq add ./actor.csv -n @actor
```
## [v0.33.0] - 2023-04-15
@ -119,7 +160,7 @@ make working with lots of sources much easier.
- The `count` function has been changed ([docs](https://sq.io/docs/query#count))
- Added no-args version: `.actor | count` equivalent to `SELECT COUNT(*) AS "count" FROM "actor"`.
- **BREAKING CHANGE**: The "star" version (`.actor | count(*)`) is no longer supported; use the
- ☢️ The "star" version (`.actor | count(*)`) is no longer supported; use the
naked version instead.
- Function columns are now named according to the `sq` token, not the SQL token.
```shell

View File

@ -22,6 +22,8 @@ import (
"strings"
"sync"
"github.com/spf13/pflag"
"github.com/neilotoole/sq/cli/flag"
"github.com/neilotoole/sq/libsq/core/lg"
@ -62,11 +64,11 @@ func Execute(ctx context.Context, stdin *os.File, stdout, stderr io.Writer, args
// resulting in a command being executed. The caller must
// invoke rc.Close.
func ExecuteWith(ctx context.Context, rc *RunContext, args []string) error {
log := lg.From(ctx)
log := lg.FromContext(ctx)
log.Debug("EXECUTE", "args", strings.Join(args, " "))
log.Debug("Build info", "build", buildinfo.Info())
log.Debug("Config",
lga.Version, rc.Config.Version,
"config.version", rc.Config.Version,
lga.Path, rc.ConfigStore.Location(),
)
@ -169,12 +171,6 @@ func newCommandTree(rc *RunContext) (rootCmd *cobra.Command) {
rootCmd.SetErr(rc.ErrOut)
rootCmd.Flags().SortFlags = false
// The --help flag must be explicitly added to rootCmd,
// or else cobra tries to do its own (unwanted) thing.
// The behavior of cobra in this regard seems to have
// changed? This particular incantation currently does the trick.
rootCmd.Flags().Bool(flag.Help, false, "Show sq help")
helpCmd := addCmd(rc, rootCmd, newHelpCmd())
rootCmd.SetHelpCommand(helpCmd)
@ -266,7 +262,22 @@ func addCmd(rc *RunContext, parentCmd, cmd *cobra.Command) *cobra.Command {
cmd.SilenceErrors = true
cmd.SilenceUsage = true
cmd.Flags().SetNormalizeFunc(applyFlagAliases)
parentCmd.AddCommand(cmd)
return cmd
}
// applyFlagAliases is a pflag normalize func (registered via
// cmd.Flags().SetNormalizeFunc) that maps legacy flag names to their
// current equivalents, so old invocations keep working.
func applyFlagAliases(f *pflag.FlagSet, name string) pflag.NormalizedName {
	// Defensive: a nil FlagSet has nothing to normalize against.
	if f == nil {
		return pflag.NormalizedName(name)
	}

	// Legacy: flag --text was once named --table.
	// Note: the original switch had an empty default branch; a plain
	// if is the idiomatic form for a single alias.
	if name == "table" {
		name = flag.Text
	}

	return pflag.NormalizedName(name)
}

View File

@ -8,7 +8,7 @@ import (
"os"
"strings"
"github.com/neilotoole/sq/libsq/core/options"
"github.com/neilotoole/sq/drivers/csv"
"github.com/neilotoole/sq/cli/flag"
@ -37,13 +37,13 @@ Note that sq generated the handle "@actor". But you can explicitly specify
a handle.
# Add a postgres source with handle "@sakila/pg"
$ sq add -h @sakila/pg 'postgres://user:pass@localhost/sakila'
$ sq add --handle @sakila/pg 'postgres://user:pass@localhost/sakila'
This handle format "@sakila/pg" includes a group, "sakila". Using a group
is entirely optional: it is a way to organize sources. For example:
$ sq add -h @dev/pg 'postgres://user:pass@dev.db.example.com/sakila'
$ sq add -h @prod/pg 'postgres://user:pass@prod.db.acme.com/sakila'
$ sq add --handle @dev/pg 'postgres://user:pass@dev.db.example.com/sakila'
$ sq add --handle @prod/pg 'postgres://user:pass@prod.db.acme.com/sakila'
The format of LOCATION is driver-specific, but is generally a DB connection
string, a file path, or a URL.
@ -73,13 +73,9 @@ there instead of prompting the user:
$ echo 'open:;"_Ses@me' > password.txt
$ sq add 'postgres://user@localhost/sakila' -p < password.txt
Flag --opts sets source-specific options. Generally, opts are relevant
to document driver types (such as a CSV file). The most common
use is to specify that the document has a header row:
There are various driver-specific options available. For example:
$ sq add actor.csv --opts=header=true
Use query string encoding for multiple options, e.g. "--opts a=b&x=y".
$ sq add actor.csv --ingest.header=false --driver.csv.delim=colon
If flag --driver is omitted, sq will attempt to determine the
type from LOCATION via file suffix, content type, etc. If the result
@ -112,41 +108,51 @@ More examples:
Password: ****
# Explicitly set flags
$ sq add --handle=@sakila_pg --driver=postgres 'postgres://user:pass@localhost/sakila'
$ sq add --handle @sakila_pg --driver postgres 'postgres://user:pass@localhost/sakila'
# Same as above, but with short flags
$ sq add -h @sakila_pg --d postgres 'postgres://user:pass@localhost/sakila'
$ sq add -n @sakila_pg -d postgres 'postgres://user:pass@localhost/sakila'
# Add a SQL Server source; will have generated handle @sakila_mssql or similar
# Add a SQL Server source; will have generated handle @sakila
$ sq add 'sqlserver://user:pass@localhost?database=sakila'
# Add a sqlite db, and immediately make it the active source
$ sq add ./testdata/sqlite1.db --active
# Add an Excel spreadsheet, with options
$ sq add ./testdata/test1.xlsx --opts=header=true
$ sq add ./testdata/test1.xlsx --ingest.header=true
# Add a CSV source, with options
$ sq add ./testdata/person.csv --opts=header=true
$ sq add ./testdata/person.csv --ingest.header=true
# Add a CSV source from a URL (will be downloaded)
$ sq add https://sq.io/testdata/actor.csv
# Add a source, and make it the active source (and group)
$ sq add ./actor.csv -h @csv/actor`,
$ sq add ./actor.csv --handle @csv/actor
# Add a currently unreachable source
$ sq add 'postgres://user:pass@db.offline.com/sakila' --skip-verify`,
Short: "Add data source",
Long: `Add data source specified by LOCATION, optionally identified by @HANDLE.`,
}
cmd.Flags().StringP(flag.Driver, flag.DriverShort, "", flag.DriverUsage)
panicOn(cmd.RegisterFlagCompletionFunc(flag.Driver, completeDriverType))
cmd.Flags().StringP(flag.SrcOptions, "", "", flag.SrcOptionsUsage)
cmd.Flags().BoolP(flag.JSON, flag.JSONShort, false, flag.JSONUsage)
cmd.Flags().StringP(flag.AddDriver, flag.AddDriverShort, "", flag.AddDriverUsage)
panicOn(cmd.RegisterFlagCompletionFunc(flag.AddDriver, completeDriverType))
cmd.Flags().StringP(flag.Handle, flag.HandleShort, "", flag.HandleUsage)
cmd.Flags().BoolP(flag.PasswordPrompt, flag.PasswordPromptShort, false, flag.PasswordPromptUsage)
cmd.Flags().Bool(flag.SkipVerify, false, flag.SkipVerifyUsage)
cmd.Flags().BoolP(flag.JSON, flag.JSONShort, false, flag.JSONUsage)
cmd.Flags().BoolP(flag.AddActive, flag.AddActiveShort, false, flag.AddActiveUsage)
cmd.Flags().Bool(flag.IngestHeader, false, flag.IngestHeaderUsage)
cmd.Flags().Bool(flag.CSVEmptyAsNull, true, flag.CSVEmptyAsNullUsage)
cmd.Flags().String(flag.CSVDelim, flag.CSVDelimDefault, flag.CSVDelimUsage)
panicOn(cmd.RegisterFlagCompletionFunc(flag.CSVDelim, completeStrings(1, csv.NamedDelims()...)))
return cmd
}
@ -158,8 +164,8 @@ func execSrcAdd(cmd *cobra.Command, args []string) error {
var err error
var typ source.DriverType
if cmdFlagChanged(cmd, flag.Driver) {
val, _ := cmd.Flags().GetString(flag.Driver)
if cmdFlagChanged(cmd, flag.AddDriver) {
val, _ := cmd.Flags().GetString(flag.AddDriver)
typ = source.DriverType(strings.TrimSpace(val))
} else {
typ, err = rc.files.DriverType(cmd.Context(), loc)
@ -197,21 +203,6 @@ func execSrcAdd(cmd *cobra.Command, args []string) error {
return errz.Errorf("source handle already exists: %s", handle)
}
var opts options.Options
if cmdFlagChanged(cmd, flag.SrcOptions) {
val, _ := cmd.Flags().GetString(flag.SrcOptions)
val = strings.TrimSpace(val)
_ = val
// FIXME: Deal with option flags
// if val != "" {
// opts, err = options.ParseOptions(val)
// if err != nil {
// return err
// }
// }
}
if typ == sqlite3.Type {
// Special handling for SQLite, because it's a file-based DB.
loc, err = sqlite3.MungeLocation(loc)
@ -224,27 +215,36 @@ func execSrcAdd(cmd *cobra.Command, args []string) error {
// or sq prompts the user.
if cmdFlagTrue(cmd, flag.PasswordPrompt) {
var passwd []byte
passwd, err = readPassword(cmd.Context(), rc.Stdin, rc.Out, rc.writers.pr)
if err != nil {
if passwd, err = readPassword(cmd.Context(), rc.Stdin, rc.Out, rc.writers.pr); err != nil {
return err
}
loc, err = source.LocationWithPassword(loc, string(passwd))
if err != nil {
if loc, err = source.LocationWithPassword(loc, string(passwd)); err != nil {
return err
}
}
src, err := newSource(cmd.Context(), rc.driverReg, typ, handle, loc, opts)
o, err := getSrcOptionsFromFlags(cmd.Flags(), rc.OptionsRegistry, typ)
if err != nil {
return err
}
err = cfg.Collection.Add(src)
src, err := newSource(
cmd.Context(),
rc.driverReg,
typ,
handle,
loc,
o,
)
if err != nil {
return err
}
if err = cfg.Collection.Add(src); err != nil {
return err
}
if cfg.Collection.Active() == nil || cmdFlagTrue(cmd, flag.AddActive) {
// If no current active data source, use this one, OR if
// flagAddActive is true.
@ -252,7 +252,7 @@ func execSrcAdd(cmd *cobra.Command, args []string) error {
return err
}
// However, we do not set the active group to the src's group.
// However, we do not set the active group to be the new src's group.
// In UX testing, it led to confused users.
}
@ -263,6 +263,7 @@ func execSrcAdd(cmd *cobra.Command, args []string) error {
if !cmdFlagTrue(cmd, flag.SkipVerify) {
// Typically we want to ping the source before adding it.
// But, sometimes not, for example if a source is temporarily offline.
if err = drvr.Ping(cmd.Context(), src); err != nil {
return err
}
@ -272,6 +273,10 @@ func execSrcAdd(cmd *cobra.Command, args []string) error {
return err
}
if src, err = rc.Config.Collection.Get(src.Handle); err != nil {
return err
}
return rc.writers.srcw.Source(rc.Config.Collection, src)
}

View File

@ -110,7 +110,7 @@ func TestCmdAdd_SQLite_Path(t *testing.T) {
const h1 = `@s1`
ru := newRun(ctx, t, nil)
require.NoError(t, ru.Exec("add", "-j", "sqlite3://test.db", "-h", h1))
require.NoError(t, ru.Exec("add", "-j", "sqlite3://test.db", "--handle", h1))
got := ru.BindMap()
absPath, err := filepath.Abs("test.db")
@ -134,7 +134,7 @@ func TestCmdAdd_Active(t *testing.T) {
// Add a new source. It should become the active source.
ru = newRun(ctx, t, ru)
require.NoError(t, ru.Exec("add", proj.Abs(sakila.PathCSVActor), "-h", h1))
require.NoError(t, ru.Exec("add", proj.Abs(sakila.PathCSVActor), "--handle", h1))
ru = newRun(ctx, t, ru)
require.NoError(t, ru.Exec("src", "-j"))
m := ru.BindMap()
@ -143,7 +143,7 @@ func TestCmdAdd_Active(t *testing.T) {
// Add a second src, without the --active flag. The active src
// should remain h1.
ru = newRun(ctx, t, ru)
require.NoError(t, ru.Exec("add", proj.Abs(sakila.PathCSVActor), "-h", h2))
require.NoError(t, ru.Exec("add", proj.Abs(sakila.PathCSVActor), "--handle", h2))
ru = newRun(ctx, t, ru)
require.NoError(t, ru.Exec("src", "-j"))
m = ru.BindMap()
@ -152,7 +152,7 @@ func TestCmdAdd_Active(t *testing.T) {
// Add a third src, this time with the --active flag. The active src
// should become h3.
ru = newRun(ctx, t, ru)
require.NoError(t, ru.Exec("add", proj.Abs(sakila.PathCSVActor), "-h", h3, "--active"))
require.NoError(t, ru.Exec("add", proj.Abs(sakila.PathCSVActor), "--handle", h3, "--active"))
ru = newRun(ctx, t, ru)
require.NoError(t, ru.Exec("src", "-j"))
m = ru.BindMap()
@ -160,7 +160,7 @@ func TestCmdAdd_Active(t *testing.T) {
// Same again with a fourth src, but this time using the shorthand -a flag.
ru = newRun(ctx, t, ru)
require.NoError(t, ru.Exec("add", proj.Abs(sakila.PathCSVActor), "-h", h4, "-a"))
require.NoError(t, ru.Exec("add", proj.Abs(sakila.PathCSVActor), "--handle", h4, "-a"))
ru = newRun(ctx, t, ru)
require.NoError(t, ru.Exec("src", "-j"))
m = ru.BindMap()

View File

@ -11,17 +11,23 @@ func newConfigCmd() *cobra.Command {
Use: "config",
Args: cobra.NoArgs,
Short: "Manage config",
Long: "Manage config.",
Long: `View and edit base and source-specific config.`,
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
Example: ` # Print config location
$ sq config location
# Show default options
# Show base config
$ sq config get
# Edit default options
# Show base config including unset and default values.
$ sq config get -v
# Set config value
$ sq config set format json
# Edit base config
$ sq config edit
# Edit config for source
@ -49,7 +55,6 @@ func newConfigLocationCmd() *cobra.Command {
Origin: env`,
}
cmd.Flags().BoolP(flag.Table, flag.TableShort, false, flag.TableUsage)
cmd.Flags().BoolP(flag.JSON, flag.JSONShort, false, flag.JSONUsage)
cmd.Flags().BoolP(flag.YAML, flag.YAMLShort, false, flag.YAMLUsage)
return cmd

View File

@ -1,14 +1,15 @@
package cli
import (
"bufio"
"bytes"
"os"
"strings"
"github.com/neilotoole/sq/libsq/core/options"
"github.com/neilotoole/sq/libsq/source"
"github.com/neilotoole/sq/libsq/core/options"
"github.com/neilotoole/sq/libsq/core/lg/lga"
"github.com/neilotoole/sq/libsq/core/lg"
@ -27,15 +28,27 @@ func newConfigEditCmd() *cobra.Command {
Use: "edit [@HANDLE]",
Args: cobra.MaximumNArgs(1),
ValidArgsFunction: completeHandle(1),
RunE: execConfigEdit,
Short: "Edit config or source options",
Long: `Edit config or source options in the editor specified in envar $SQ_EDITOR or $EDITOR.`,
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return execConfigEditOptions(cmd, args)
}
return execConfigEditSource(cmd, args)
},
Short: "Edit config or source options",
Long: `Edit config or source options in the editor specified in envar $SQ_EDITOR or $EDITOR.`,
Example: ` # Edit default options
$ sq config edit
# Edit default options, but show additional help/context.
$ sq config edit -v
# Edit config for source @sakila
$ sq config edit @sakila
# Same as above, with additional help/context.
$ sq config edit @sakila -v
# Use a different editor
$ SQ_EDITOR=nano sq config edit`,
}
@ -43,23 +56,22 @@ func newConfigEditCmd() *cobra.Command {
return cmd
}
// execConfigEdit is the RunE for "sq config edit". With no args, the
// base config options are edited; with a @HANDLE arg, that source's
// config is edited instead.
func execConfigEdit(cmd *cobra.Command, args []string) error {
	if len(args) > 0 {
		return execConfigEditSource(cmd, args)
	}
	return execConfigEditOptions(cmd, args)
}
// execConfigEditOptions edits the default options.
func execConfigEditOptions(cmd *cobra.Command, _ []string) error {
ctx := cmd.Context()
rc, log := RunContextFrom(ctx), logFrom(cmd)
cfg := rc.Config
before, err := ioz.MarshalYAML(cfg.Options)
cmdOpts, err := getOptionsFromCmd(cmd)
if err != nil {
return err
}
verbose := OptVerbose.Get(cmdOpts)
optsText, err := getOptionsEditableText(rc.OptionsRegistry, rc.Config.Options, verbose)
if err != nil {
return err
}
before := []byte(optsText)
ed := shelleditor.NewDefaultEditor(editorEnvs...)
after, tmpFile, err := ed.LaunchTempFile("sq", ".yml", bytes.NewReader(before))
@ -92,21 +104,61 @@ func execConfigEditOptions(cmd *cobra.Command, _ []string) error {
return nil
}
// execConfigEditSource edits an individual source's config.
func execConfigEditSource(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
rc, log := RunContextFrom(ctx), logFrom(cmd)
cfg := rc.Config
cmdOpts, err := getOptionsFromCmd(cmd)
if err != nil {
return err
}
verbose := OptVerbose.Get(cmdOpts)
src, err := cfg.Collection.Get(args[0])
if err != nil {
return err
}
before, err := ioz.MarshalYAML(src)
opts := rc.OptionsRegistry.Opts()
opts = filterOptionsForSrc(src.Type, opts...)
srcReg := &options.Registry{}
srcReg.Add(opts...)
tmpSrc := src.Clone()
tmpSrc.Options = nil
header, err := ioz.MarshalYAML(tmpSrc)
if err != nil {
return err
}
sb := strings.Builder{}
sb.Write(header)
sb.WriteString("options:\n")
optionsText, err := getOptionsEditableText(srcReg, src.Options, verbose)
if err != nil {
return err
}
// Add indentation
sc := bufio.NewScanner(strings.NewReader(optionsText))
var line string
for sc.Scan() {
line = sc.Text()
if line != "" {
sb.WriteString(" ") // indent
}
sb.WriteString(line)
sb.WriteRune('\n')
}
if err = sc.Err(); err != nil {
return errz.Err(err)
}
before := []byte(sb.String())
ed := shelleditor.NewDefaultEditor(editorEnvs...)
fname := strings.ReplaceAll(src.Handle[1:], "/", "__")
after, tmpFile, err := ed.LaunchTempFile(fname, ".yml", bytes.NewReader(before))
@ -124,7 +176,7 @@ func execConfigEditSource(cmd *cobra.Command, args []string) error {
return nil
}
src2 := source.Source{}
src2 := &source.Source{}
if err = ioz.UnmarshallYAML(after, &src2); err != nil {
return err
}
@ -138,9 +190,9 @@ func execConfigEditSource(cmd *cobra.Command, args []string) error {
}
}
*src = src2
*src = *src2
// TODO: if --verbose, show diff
// TODO: if --verbose, show diff between config before and after.
if err = rc.ConfigStore.Save(ctx, cfg); err != nil {
return err
}
@ -149,3 +201,65 @@ func execConfigEditSource(cmd *cobra.Command, args []string) error {
lga.Src, src2.Handle, lga.Path, rc.ConfigStore.Location())
return nil
}
// getOptionsEditableText renders the options from reg as YAML text
// suitable for presenting to the user in an editor. Options that are
// not explicitly set in o are rendered commented-out (prefixed "#").
// In verbose mode, every option is printed, each preceded by its
// help comment; otherwise the explicitly-set options are printed
// first, followed by the unset (commented) ones.
func getOptionsEditableText(reg *options.Registry, o options.Options, verbose bool) (string, error) {
	var buf strings.Builder

	// writeOpt marshals a single opt (with its current or default
	// value) as YAML and appends it to buf, prefixing "#" when the
	// opt is not explicitly set in o.
	writeOpt := func(opt options.Opt) error {
		if !o.IsSet(opt) {
			buf.WriteString("#")
		}
		b, err := ioz.MarshalYAML(map[string]any{opt.Key(): opt.GetAny(o)})
		if err != nil {
			return err
		}
		buf.WriteString(string(b))
		return nil
	}

	if verbose {
		// Verbose: print every opt, each preceded by its comment,
		// with a blank line between entries.
		for i, opt := range reg.Opts() {
			if i > 0 {
				buf.WriteRune('\n')
			}
			buf.WriteString("# ")
			buf.WriteString(strings.ReplaceAll(opt.Comment(), "\n", "\n# "))
			buf.WriteRune('\n')
			if err := writeOpt(opt); err != nil {
				return "", err
			}
		}
		return buf.String(), nil
	}

	// Not verbose: the explicitly-set opts come first.
	for _, opt := range reg.Opts() {
		if !o.IsSet(opt) {
			continue
		}
		if err := writeOpt(opt); err != nil {
			return "", err
		}
	}

	// Blank-line separator between the set and unset groups
	// (heuristic: assumes o's keys correspond to opts in reg —
	// matches the original condition).
	if len(o) > 0 && len(o) != len(reg.Opts()) {
		buf.WriteRune('\n')
	}

	// Then the unset opts, commented out via writeOpt's "#" prefix.
	for _, opt := range reg.Opts() {
		if o.IsSet(opt) {
			continue
		}
		if err := writeOpt(opt); err != nil {
			return "", err
		}
	}

	return buf.String(), nil
}

View File

@ -12,14 +12,14 @@ func newConfigGetCmd() *cobra.Command {
Use: "get",
Short: "Show config",
Long: `Show config. By default, only explicitly set options are shown.
Use the --verbose flag (in text output format) to see default options.`,
Use the --verbose flag (in text output format) to see all options.`,
Args: cobra.MaximumNArgs(1),
ValidArgsFunction: completeOptKey,
RunE: execConfigGet,
Example: ` # Show all config
Example: ` # Show base config
$ sq config get
# Also show defaults
# Also show defaults and unset options.
$ sq config get -v
# Show individual option
@ -28,7 +28,7 @@ Use the --verbose flag (in text output format) to see default options.`,
# Show config for source
$ sq config get --src @sakila
# Show config for source, including defaults
# Show config for source, including defaults and unset options.
$ sq config get --src @sakila -v
# Show individual option for src
@ -38,7 +38,6 @@ Use the --verbose flag (in text output format) to see default options.`,
$ sq config get --src @active`,
}
cmd.Flags().BoolP(flag.Table, flag.TableShort, false, flag.TableUsage)
cmd.Flags().BoolP(flag.JSON, flag.JSONShort, false, flag.JSONUsage)
cmd.Flags().BoolP(flag.YAML, flag.YAMLShort, false, flag.YAMLUsage)
@ -72,7 +71,7 @@ func execConfigGet(cmd *cobra.Command, args []string) error {
// Create a new registry that only contains Opts applicable
// to this source.
opts := filterOptionsForSrc(src, reg.Opts()...)
opts := filterOptionsForSrc(src.Type, reg.Opts()...)
reg = &options.Registry{}
reg.Add(opts...)
}

View File

@ -17,7 +17,8 @@ func newConfigSetCmd() *cobra.Command {
Args: cobra.ExactArgs(2),
ValidArgsFunction: completeConfigSet,
Short: "Set config value",
Long: `Set config value globally, or for a specific source.`,
Long: `Set config value globally, or for a specific source.
Use "sq config get -v" to see available options.`,
Example: ` # Set default output format
$ sq config set format json
@ -28,7 +29,6 @@ func newConfigSetCmd() *cobra.Command {
$ sq config set --src @sakila conn.max-open 50`,
}
cmd.Flags().BoolP(flag.Table, flag.TableShort, false, flag.TableUsage)
cmd.Flags().BoolP(flag.JSON, flag.JSONShort, false, flag.JSONUsage)
cmd.Flags().BoolP(flag.YAML, flag.YAMLShort, false, flag.YAMLUsage)
@ -80,9 +80,9 @@ func execConfigSet(cmd *cobra.Command, args []string) error {
}
if src != nil {
lg.From(ctx).Info("Set default config value", lga.Val, o)
lg.FromContext(ctx).Info("Set default config value", lga.Val, o)
} else {
lg.From(ctx).Info("Set source config value", lga.Src, src, lga.Val, o)
lg.FromContext(ctx).Info("Set source config value", lga.Src, src, lga.Val, o)
}
return rc.writers.configw.SetOption(rc.OptionsRegistry, o, opt)

View File

@ -31,10 +31,6 @@ func newDriverListCmd() *cobra.Command {
cmd.Flags().BoolP(flag.JSON, flag.JSONShort, false, flag.JSONUsage)
cmd.Flags().BoolP(flag.YAML, flag.YAMLShort, false, flag.YAMLUsage)
cmd.Flags().BoolP(flag.Table, flag.TableShort, false, flag.TableUsage)
cmd.Flags().BoolP(flag.Header, flag.HeaderShort, false, flag.HeaderUsage)
cmd.Flags().BoolP(flag.NoHeader, flag.NoHeaderShort, false, flag.NoHeaderUsage)
cmd.MarkFlagsMutuallyExclusive(flag.Header, flag.NoHeader)
return cmd
}

View File

@ -1,6 +1,8 @@
package cli
import "github.com/spf13/cobra"
import (
"github.com/spf13/cobra"
)
func newHelpCmd() *cobra.Command {
cmd := &cobra.Command{

View File

@ -45,7 +45,6 @@ If @HANDLE is not provided, the active data source is assumed.`,
}
cmd.Flags().BoolP(flag.JSON, flag.JSONShort, false, flag.JSONUsage)
cmd.Flags().BoolP(flag.Table, flag.TableShort, false, flag.TableUsage)
cmd.Flags().BoolP(flag.YAML, flag.YAMLShort, false, flag.YAMLUsage)
return cmd

View File

@ -39,9 +39,6 @@ any further descendants.
$ sq ls -g prod`,
}
cmd.Flags().BoolP(flag.Header, flag.HeaderShort, true, flag.HeaderUsage)
cmd.Flags().BoolP(flag.NoHeader, flag.NoHeaderShort, false, flag.NoHeaderUsage)
cmd.MarkFlagsMutuallyExclusive(flag.Header, flag.NoHeader)
cmd.Flags().BoolP(flag.JSON, flag.JSONShort, false, flag.JSONUsage)
cmd.Flags().BoolP(flag.ListGroup, flag.ListGroupShort, false, flag.ListGroupUsage)

View File

@ -62,10 +62,9 @@ The exit code is 1 if ping fails for any of the sources.`,
$ sq ping --tsv @my1`,
}
cmd.Flags().BoolP(flag.Table, flag.TableShort, false, flag.TableUsage)
cmd.Flags().BoolP(flag.JSON, flag.JSONShort, false, flag.JSONUsage)
cmd.Flags().BoolP(flag.CSV, flag.CSVShort, false, flag.CSVUsage)
cmd.Flags().BoolP(flag.TSV, flag.TSVShort, false, flag.TSVUsage)
cmd.Flags().BoolP(flag.JSON, flag.JSONShort, false, flag.JSONUsage)
cmd.Flags().Duration(flag.PingTimeout, time.Second*10, flag.PingTimeoutUsage)
return cmd
}
@ -111,7 +110,7 @@ func execPing(cmd *cobra.Command, args []string) error {
srcs = lo.Uniq(srcs)
cmdOpts, err := getCmdOptions(cmd)
cmdOpts, err := getOptionsFromCmd(cmd)
if err != nil {
return err
}
@ -142,7 +141,7 @@ func pingSources(ctx context.Context, dp driver.Provider, srcs []*source.Source,
return err
}
log := lg.From(ctx)
log := lg.FromContext(ctx)
defer lg.WarnIfFuncError(log, "Close ping writer", w.Close)
resultCh := make(chan pingResult, len(srcs))

View File

@ -21,7 +21,7 @@ write output to a database table.
You can query using sq's own jq-like syntax, or in native SQL.
Use "sq inspect" to view schema metadata. Use the "sq tbl" commands
to copy, truncate and drop tables.
to copy, truncate and drop tables.
See docs and more: https://sq.io`,
Example: ` # pipe an Excel file and output the first 10 rows from sheet1
@ -54,10 +54,10 @@ See docs and more: https://sq.io`,
# output in JSON
$ sq -j '.person | .uid, .username, .email'
# output in table format (with header)
# output in text format (with header)
$ sq -th '.person | .uid, .username, .email'
# output in table format (no header)
# output in text format (no header)
$ sq -t '.person | .uid, .username, .email'
# output to a HTML file
@ -79,12 +79,22 @@ See docs and more: https://sq.io`,
$ sq tbl truncate @sakila_sl3.actor2
# drop table
$ sq tbl drop @sakila_sl3.actor2
`,
$ sq tbl drop @sakila_sl3.actor2`,
}
// The --help flag must be explicitly added to rootCmd,
// or else cobra tries to do its own (unwanted) thing.
// The behavior of cobra in this regard seems to have
// changed? This particular incantation currently does the trick.
cmd.PersistentFlags().Bool(flag.Help, false, "Show help")
addQueryCmdFlags(cmd)
cmd.Flags().Bool(flag.Version, false, flag.VersionUsage)
cmd.PersistentFlags().BoolP(flag.Text, flag.TextShort, false, flag.TextUsage)
cmd.PersistentFlags().BoolP(flag.Header, flag.HeaderShort, true, flag.HeaderUsage)
cmd.PersistentFlags().BoolP(flag.NoHeader, flag.NoHeaderShort, false, flag.NoHeaderUsage)
cmd.MarkFlagsMutuallyExclusive(flag.Header, flag.NoHeader)
cmd.PersistentFlags().BoolP(flag.Monochrome, flag.MonochromeShort, false, flag.MonochromeUsage)
cmd.PersistentFlags().BoolP(flag.Verbose, flag.VerboseShort, false, flag.VerboseUsage)
cmd.PersistentFlags().String(flag.Config, "", flag.ConfigUsage)

View File

@ -155,7 +155,7 @@ func execSLQInsert(ctx context.Context, rc *RunContext, mArgs map[string]string,
inserter := libsq.NewDBWriter(
destDB,
destTbl,
driver.Tuning.RecordChSize,
driver.OptTuningRecChanSize.Get(destSrc.Options),
libsq.DBWriterCreateTableIfNotExistsHook(destTbl),
)
@ -229,7 +229,7 @@ func execSLQPrint(ctx context.Context, rc *RunContext, mArgs map[string]string)
//
// $ sq '.person' --> $ sq '@active.person'
func preprocessUserSLQ(ctx context.Context, rc *RunContext, args []string) (string, error) {
log, reg, dbases, coll := lg.From(ctx), rc.driverReg, rc.databases, rc.Config.Collection
log, reg, dbases, coll := lg.FromContext(ctx), rc.driverReg, rc.databases, rc.Config.Collection
activeSrc := coll.Active()
if len(args) == 0 {
@ -337,41 +337,34 @@ func preprocessUserSLQ(ctx context.Context, rc *RunContext, args []string) (stri
// addQueryCmdFlags sets the common flags for the slq/sql commands.
func addQueryCmdFlags(cmd *cobra.Command) {
cmd.Flags().StringP(flag.Output, flag.OutputShort, "", flag.OutputUsage)
cmd.Flags().BoolP(flag.JSON, flag.JSONShort, false, flag.JSONUsage)
cmd.Flags().BoolP(flag.JSONA, flag.JSONAShort, false, flag.JSONAUsage)
cmd.Flags().BoolP(flag.JSONL, flag.JSONLShort, false, flag.JSONLUsage)
cmd.Flags().BoolP(flag.Table, flag.TableShort, false, flag.TableUsage)
cmd.Flags().BoolP(flag.XML, flag.XMLShort, false, flag.XMLUsage)
cmd.Flags().BoolP(flag.XLSX, flag.XLSXShort, false, flag.XLSXUsage)
cmd.Flags().BoolP(flag.CSV, flag.CSVShort, false, flag.CSVUsage)
cmd.Flags().BoolP(flag.TSV, flag.TSVShort, false, flag.TSVUsage)
cmd.Flags().BoolP(flag.Raw, flag.RawShort, false, flag.RawUsage)
cmd.Flags().Bool(flag.HTML, false, flag.HTMLUsage)
cmd.Flags().Bool(flag.Markdown, false, flag.MarkdownUsage)
cmd.Flags().BoolP(flag.Raw, flag.RawShort, false, flag.RawUsage)
cmd.Flags().BoolP(flag.XLSX, flag.XLSXShort, false, flag.XLSXUsage)
cmd.Flags().BoolP(flag.XML, flag.XMLShort, false, flag.XMLUsage)
cmd.Flags().Bool(flag.Pretty, true, flag.PrettyUsage)
cmd.Flags().BoolP(flag.Header, flag.HeaderShort, false, flag.HeaderUsage)
cmd.Flags().BoolP(flag.NoHeader, flag.NoHeaderShort, false, flag.NoHeaderUsage)
cmd.MarkFlagsMutuallyExclusive(flag.Header, flag.NoHeader)
cmd.Flags().BoolP(flag.Pretty, "", true, flag.PrettyUsage)
cmd.Flags().StringP(flag.Insert, "", "", flag.InsertUsage)
cmd.Flags().StringP(flag.Output, flag.OutputShort, "", flag.OutputUsage)
cmd.Flags().String(flag.Insert, "", flag.InsertUsage)
panicOn(cmd.RegisterFlagCompletionFunc(flag.Insert,
(&handleTableCompleter{onlySQL: true, handleRequired: true}).complete))
cmd.Flags().StringP(flag.ActiveSrc, "", "", flag.ActiveSrcUsage)
cmd.Flags().String(flag.ActiveSrc, "", flag.ActiveSrcUsage)
panicOn(cmd.RegisterFlagCompletionFunc(flag.ActiveSrc, completeHandle(0)))
// The driver flag can be used if data is piped to sq over stdin
cmd.Flags().StringP(flag.Driver, "", "", flag.QueryDriverUsage)
panicOn(cmd.RegisterFlagCompletionFunc(flag.Driver, completeDriverType))
cmd.Flags().String(flag.IngestDriver, "", flag.IngestDriverUsage)
panicOn(cmd.RegisterFlagCompletionFunc(flag.IngestDriver, completeDriverType))
cmd.Flags().BoolP(flag.IngestHeader, "", false, flag.IngestHeaderUsage)
cmd.Flags().BoolP(flag.CSVEmptyAsNull, "", true, flag.CSVEmptyAsNullUsage)
cmd.Flags().StringP(flag.CSVDelim, "", flag.CSVDelimDefault, flag.CSVDelimUsage)
cmd.Flags().Bool(flag.IngestHeader, false, flag.IngestHeaderUsage)
cmd.Flags().Bool(flag.CSVEmptyAsNull, true, flag.CSVEmptyAsNullUsage)
cmd.Flags().String(flag.CSVDelim, flag.CSVDelimDefault, flag.CSVDelimUsage)
panicOn(cmd.RegisterFlagCompletionFunc(flag.CSVDelim, completeStrings(1, csv.NamedDelims()...)))
}

View File

@ -156,7 +156,7 @@ func execSQLInsert(ctx context.Context, rc *RunContext, fromSrc, destSrc *source
inserter := libsq.NewDBWriter(
destDB,
destTbl,
driver.Tuning.RecordChSize,
driver.OptTuningRecChanSize.Get(destSrc.Options),
libsq.DBWriterCreateTableIfNotExistsHook(destTbl),
)
err = libsq.QuerySQL(ctx, fromDB, inserter, args[0])
@ -169,7 +169,7 @@ func execSQLInsert(ctx context.Context, rc *RunContext, fromSrc, destSrc *source
return errz.Wrapf(err, "insert %s.%s failed", destSrc.Handle, destTbl)
}
lg.From(ctx).Debug(lgm.RowsAffected, lga.Count, affected)
lg.FromContext(ctx).Debug(lgm.RowsAffected, lga.Count, affected)
// TODO: Should really use a Printer here
fmt.Fprintf(rc.Out, stringz.Plu("Inserted %d row(s) into %s\n",

View File

@ -169,7 +169,6 @@ only applies to SQL sources.`,
}
cmd.Flags().BoolP(flag.JSON, flag.JSONShort, false, flag.JSONUsage)
cmd.Flags().BoolP(flag.Table, flag.TableShort, false, flag.TableUsage)
return cmd
}

View File

@ -24,8 +24,10 @@ func newVersionCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "version",
Short: "Show version info",
Long: "Show version info. Use --verbose for more detail.",
RunE: execVersion,
Long: `Show version info. Use --verbose for more detail.
The output will note if a new version of sq is available.
Before upgrading, check the changelog: https://sq.io/changelog`,
RunE: execVersion,
Example: ` # Show version (note that an update is available)
$ sq version
sq v0.32.0 Update available: v0.33.0

View File

@ -31,7 +31,9 @@ import (
var OptShellCompletionTimeout = options.NewDuration(
"shell-completion.timeout",
time.Millisecond*500,
"How long shell completion should wait before giving up.",
`How long shell completion should wait before giving up. This can
become relevant when shell completion inspects a source's metadata, e.g. to
offer a list of tables in a source.`,
)
// completionFunc is a shell completion function.
@ -167,7 +169,7 @@ func completeOptKey(cmd *cobra.Command, _ []string, toComplete string) ([]string
return nil, cobra.ShellCompDirectiveError
}
opts := filterOptionsForSrc(src, rc.OptionsRegistry.Opts()...)
opts := filterOptionsForSrc(src.Type, rc.OptionsRegistry.Opts()...)
keys = lo.Map(opts, func(item options.Opt, index int) string {
return item.Key()
})
@ -308,14 +310,14 @@ func (c *handleTableCompleter) completeTableOnly(ctx context.Context, rc *RunCon
) ([]string, cobra.ShellCompDirective) {
activeSrc := rc.Config.Collection.Active()
if activeSrc == nil {
lg.From(ctx).Error("Active source is nil")
lg.FromContext(ctx).Error("Active source is nil")
return nil, cobra.ShellCompDirectiveError
}
if c.onlySQL {
isSQL, err := handleIsSQLDriver(rc, activeSrc.Handle)
if err != nil {
lg.Unexpected(lg.From(ctx), err)
lg.Unexpected(lg.FromContext(ctx), err)
return nil, cobra.ShellCompDirectiveError
}
if !isSQL {
@ -325,7 +327,7 @@ func (c *handleTableCompleter) completeTableOnly(ctx context.Context, rc *RunCon
tables, err := getTableNamesForHandle(ctx, rc, activeSrc.Handle)
if err != nil {
lg.Unexpected(lg.From(ctx), err)
lg.Unexpected(lg.FromContext(ctx), err)
return nil, cobra.ShellCompDirectiveError
}
@ -358,7 +360,7 @@ func (c *handleTableCompleter) completeHandle(ctx context.Context, rc *RunContex
// partial table name, such as "@sakila_sl3.fil"
handle, partialTbl, err := source.ParseTableHandle(strings.TrimSuffix(toComplete, "."))
if err != nil {
lg.Unexpected(lg.From(ctx), err)
lg.Unexpected(lg.FromContext(ctx), err)
return nil, cobra.ShellCompDirectiveError
}
@ -366,7 +368,7 @@ func (c *handleTableCompleter) completeHandle(ctx context.Context, rc *RunContex
var isSQL bool
isSQL, err = handleIsSQLDriver(rc, handle)
if err != nil {
lg.Unexpected(lg.From(ctx), err)
lg.Unexpected(lg.FromContext(ctx), err)
return nil, cobra.ShellCompDirectiveError
}
@ -377,7 +379,7 @@ func (c *handleTableCompleter) completeHandle(ctx context.Context, rc *RunContex
tables, err := getTableNamesForHandle(ctx, rc, handle)
if err != nil {
lg.Unexpected(lg.From(ctx), err)
lg.Unexpected(lg.FromContext(ctx), err)
return nil, cobra.ShellCompDirectiveError
}
@ -399,7 +401,7 @@ func (c *handleTableCompleter) completeHandle(ctx context.Context, rc *RunContex
if c.onlySQL {
isSQL, err := handleIsSQLDriver(rc, handle)
if err != nil {
lg.Unexpected(lg.From(ctx), err)
lg.Unexpected(lg.FromContext(ctx), err)
return nil, cobra.ShellCompDirectiveError
}
if !isSQL {
@ -427,7 +429,7 @@ func (c *handleTableCompleter) completeHandle(ctx context.Context, rc *RunContex
// This means that we aren't able to get metadata for this source.
// This could be because the source is temporarily offline. The
// best we can do is just to return the handle, without the tables.
lg.WarnIfError(lg.From(ctx), "Fetch metadata", err)
lg.WarnIfError(lg.FromContext(ctx), "Fetch metadata", err)
return matchingHandles, cobra.ShellCompDirectiveNoFileComp | cobra.ShellCompDirectiveNoSpace
}
@ -453,7 +455,7 @@ func (c *handleTableCompleter) completeEither(ctx context.Context, rc *RunContex
var activeSrcTables []string
isSQL, err := handleIsSQLDriver(rc, activeSrc.Handle)
if err != nil {
lg.Unexpected(lg.From(ctx), err)
lg.Unexpected(lg.FromContext(ctx), err)
return nil, cobra.ShellCompDirectiveError
}
@ -463,7 +465,7 @@ func (c *handleTableCompleter) completeEither(ctx context.Context, rc *RunContex
// This can happen if the active source is offline.
// Log the error, but continue below, because we still want to
// list the handles.
lg.From(ctx).Warn("completion: failed to get table metadata from active source",
lg.FromContext(ctx).Warn("completion: failed to get table metadata from active source",
lga.Err, err, lga.Src, activeSrc)
}
}
@ -477,7 +479,7 @@ func (c *handleTableCompleter) completeEither(ctx context.Context, rc *RunContex
if c.onlySQL {
isSQL, err := handleIsSQLDriver(rc, src.Handle)
if err != nil {
lg.Unexpected(lg.From(ctx), err)
lg.Unexpected(lg.FromContext(ctx), err)
return nil, cobra.ShellCompDirectiveError
}
if !isSQL {

View File

@ -1,2 +1,2 @@
options:
format.header: not_a_bool
header: not_a_bool

View File

@ -2,8 +2,8 @@ config.version: v0.34.0
options:
ping.timeout: 10s
shell-completion.timeout: 1s
format: table
format.header: true
format: text
header: true
collection:
active.source: '@sl1'
scratch: ""

View File

@ -1,4 +1,4 @@
config_version: v0.34.0
config.version: v0.34.0
options:
collection:

View File

@ -1,4 +1,4 @@
config_version: v0.34.0
config.version: v0.34.0
collection:
active_source: ""

View File

@ -1,4 +1,4 @@
config_version: v0.34.0
config.version: v0.34.0
options:
collection:

View File

@ -1,10 +1,10 @@
# This file has an empty active source.
config_version: v0.34.0
config.version: v0.34.0
options:
output_format: table
output_header: false
ping_timeout: 10s
shell_completion_timeout: 500ms
format: table
header: false
ping.timeout: 10s
shell-completion.timeout: 500ms
collection:
active.source: ""
scratch: ""

View File

@ -36,7 +36,7 @@ type UpgradeRegistry map[string]UpgradeFunc
// and targetVersion. Typically this is checked by Load, but can be
// explicitly invoked for testing etc.
func (fs *Store) doUpgrade(ctx context.Context, startVersion, targetVersion string) (*config.Config, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
log.Debug("Starting config upgrade", lga.From, startVersion, lga.To, targetVersion)
if !semver.IsValid(targetVersion) {
@ -161,7 +161,7 @@ func checkNeedsUpgrade(ctx context.Context, path string) (needsUpgrade bool, fou
return false, "", err
}
lg.From(ctx).Debug("Found config version in file",
lg.FromContext(ctx).Debug("Found config version in file",
lga.Version, foundVers, lga.Path, path)
if semver.Compare(foundVers, MinConfigVersion) < 0 {

View File

@ -1,7 +1,7 @@
config.version: v0.34.0
options:
format: json
format.header: true
header: true
ping.timeout: 1m40s
shell-completion.timeout: 500ms
collection:

View File

@ -23,7 +23,7 @@ const Version = "v0.34.0"
// - "sources" is renamed to "collection".
// - "config.version" is set to "v0.34.0".
func Upgrade(ctx context.Context, before []byte) (after []byte, err error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
log.Info("Starting config upgrade step", lga.To, Version)
// Load data
@ -47,7 +47,7 @@ func Upgrade(ctx context.Context, before []byte) (after []byte, err error) {
opts["format"] = opts["output_format"]
delete(opts, "output_format")
opts["format.header"] = opts["output_header"]
opts["header"] = opts["output_header"]
delete(opts, "output_header")
opts["ping.timeout"] = opts["ping_timeout"]
delete(opts, "ping_timeout")

View File

@ -62,7 +62,7 @@ func (fs *Store) Location() string {
// Load reads config from disk. It implements Store.
func (fs *Store) Load(ctx context.Context) (*config.Config, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
log.Debug("Loading config from file", lga.Path, fs.Path)
if fs.UpgradeRegistry != nil {

View File

@ -22,7 +22,7 @@ import (
// redundancy; ultimately err will print if non-nil (even if
// rc or any of its fields are nil).
func printError(ctx context.Context, rc *RunContext, err error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
if err == nil {
log.Warn("printError called with nil error")
@ -108,7 +108,7 @@ func bootstrapIsFormatJSON(rc *RunContext) bool {
return false
}
defaultFormat := format.Table
defaultFormat := format.Text
if rc.Config != nil {
defaultFormat = OptOutputFormat.Get(rc.Config.Options)
}

View File

@ -12,9 +12,12 @@ const (
CSVShort = "c"
CSVUsage = "Output CSV"
Driver = "driver"
DriverShort = "d"
DriverUsage = "Explicitly specify the data source driver to use"
AddDriver = "driver"
AddDriverShort = "d"
AddDriverUsage = "Explicitly specify the driver to use"
IngestDriver = "ingest.driver"
IngestDriverUsage = "Explicitly specify the driver to use for ingesting data"
HTML = "html"
HTMLUsage = "Output HTML table"
@ -28,7 +31,7 @@ const (
NoHeaderUsage = "Don't print header row"
Handle = "handle"
HandleShort = "h"
HandleShort = "n"
HandleUsage = "Handle for the source"
ListGroup = "group"
@ -72,9 +75,6 @@ const (
Pretty = "pretty"
PrettyUsage = "Pretty-print output"
QueryDriverUsage = "Explicitly specify the data source driver to use when piping input"
QuerySrcOptionsUsage = "Driver-dependent data source options when piping input"
Raw = "raw"
RawShort = "r"
RawUsage = "Output each record field in raw format without any encoding or delimiter"
@ -88,6 +88,7 @@ const (
// SrcOptions is deprecated.
//
// //Deprecated: Use specific options like flag.IngestHeader.
// FIXME: get rid of SrcOptions.
SrcOptions = "opts"
SrcOptionsUsage = "Driver-dependent data source options"
@ -95,10 +96,9 @@ const (
TSVShort = "T"
TSVUsage = "Output TSV"
Table = "table" // TODO: Rename "table" to "text" (output is not always a table).
TableShort = "t"
TableUsage = "Output text table"
Text = "text"
TextShort = "t"
TextUsage = "Output text"
TblData = "data"
TblDataUsage = "Copy table data"

View File

@ -119,5 +119,5 @@ func TestRegisterDefaultOpts(t *testing.T) {
log.Debug("options.Registry (after)", "reg", reg)
keys := reg.Keys()
require.Len(t, keys, 11)
require.Len(t, keys, 19)
}

View File

@ -23,11 +23,11 @@ import (
func defaultLogging(ctx context.Context) (log *slog.Logger, h slog.Handler, closer func() error, err error) {
logFilePath, ok := os.LookupEnv(config.EnvarLogPath)
if !ok || strings.TrimSpace(logFilePath) == "" {
lg.From(ctx).Debug("Logging: not enabled via envar", lga.Key, config.EnvarLogPath)
lg.FromContext(ctx).Debug("Logging: not enabled via envar", lga.Key, config.EnvarLogPath)
return lg.Discard(), nil, nil, nil
}
lg.From(ctx).Debug("Logging: enabled via envar", lga.Key, config.EnvarLogPath, lga.Val, logFilePath)
lg.FromContext(ctx).Debug("Logging: enabled via envar", lga.Key, config.EnvarLogPath, lga.Val, logFilePath)
// Let's try to create the dir holding the logfile... if it already exists,
// then os.MkdirAll will just no-op
@ -98,7 +98,7 @@ func logFrom(cmd *cobra.Command) *slog.Logger {
return lg.Discard()
}
log := lg.From(ctx)
log := lg.FromContext(ctx)
if log == nil {
return lg.Discard()
}

View File

@ -15,11 +15,11 @@ import (
"golang.org/x/exp/slices"
)
// getFlagOptions builds options.Options from flags. In effect, a flag
// getOptionsFromFlags builds options.Options from flags. In effect, a flag
// such as --ingest.header is mapped to an option.Opt of the same name.
//
// See also: getCmdOptions, applySourceOptions, applyCollectionOptions.
func getFlagOptions(flags *pflag.FlagSet, reg *options.Registry) (options.Options, error) {
// See also: getOptionsFromCmd, applySourceOptions, applyCollectionOptions.
func getOptionsFromFlags(flags *pflag.FlagSet, reg *options.Registry) (options.Options, error) {
o := options.Options{}
err := reg.Visit(func(opt options.Opt) error {
key := opt.Key()
@ -52,11 +52,24 @@ func getFlagOptions(flags *pflag.FlagSet, reg *options.Registry) (options.Option
return o, nil
}
// getCmdOptions returns the options.Options generated by merging
// getSrcOptionsFromFlags returns the options.Options applicable to
// the driver type.
//
// See also: getOptionsFromFlags, getOptionsFromCmd, applySourceOptions, applyCollectionOptions.
func getSrcOptionsFromFlags(flags *pflag.FlagSet, reg *options.Registry,
typ source.DriverType,
) (options.Options, error) {
srcOpts := filterOptionsForSrc(typ, reg.Opts()...)
srcReg := &options.Registry{}
srcReg.Add(srcOpts...)
return getOptionsFromFlags(flags, srcReg)
}
// getOptionsFromCmd returns the options.Options generated by merging
// config options and flag options.
//
// See also: getFlagOptions, applySourceOptions, applyCollectionOptions.
func getCmdOptions(cmd *cobra.Command) (options.Options, error) {
// See also: getOptionsFromFlags, applySourceOptions, applyCollectionOptions.
func getOptionsFromCmd(cmd *cobra.Command) (options.Options, error) {
rc := RunContextFrom(cmd.Context())
var configOpts options.Options
if rc.Config != nil && rc.Config.Options != nil {
@ -65,7 +78,7 @@ func getCmdOptions(cmd *cobra.Command) (options.Options, error) {
configOpts = options.Options{}
}
flagOpts, err := getFlagOptions(cmd.Flags(), rc.OptionsRegistry)
flagOpts, err := getOptionsFromFlags(cmd.Flags(), rc.OptionsRegistry)
if err != nil {
return nil, err
}
@ -77,7 +90,7 @@ func getCmdOptions(cmd *cobra.Command) (options.Options, error) {
// The src.Options field may be replaced or mutated. It will always
// be non-nil (unless an error is returned).
//
// See also: getFlagOptions, getCmdOptions, applyCollectionOptions.
// See also: getOptionsFromFlags, getOptionsFromCmd, applyCollectionOptions.
func applySourceOptions(cmd *cobra.Command, src *source.Source) error {
rc := RunContextFrom(cmd.Context())
@ -86,7 +99,7 @@ func applySourceOptions(cmd *cobra.Command, src *source.Source) error {
defaultOpts = options.Options{}
}
flagOpts, err := getFlagOptions(cmd.Flags(), rc.OptionsRegistry)
flagOpts, err := getOptionsFromFlags(cmd.Flags(), rc.OptionsRegistry)
if err != nil {
return err
}
@ -105,7 +118,7 @@ func applySourceOptions(cmd *cobra.Command, src *source.Source) error {
// each source in coll. The sources may have their Source.Options field
// mutated.
//
// See also: getCmdOptions, getFlagOptions, applySourceOptions.
// See also: getOptionsFromCmd, getOptionsFromFlags, applySourceOptions.
func applyCollectionOptions(cmd *cobra.Command, coll *source.Collection) error {
return coll.Visit(func(src *source.Source) error {
return applySourceOptions(cmd, src)
@ -117,14 +130,22 @@ func applyCollectionOptions(cmd *cobra.Command, coll *source.Collection) error {
func RegisterDefaultOpts(reg *options.Registry) {
reg.Add(
OptOutputFormat,
OptVerbose,
OptPrintHeader,
OptShellCompletionTimeout,
OptMonochrome,
OptPretty,
OptPingTimeout,
OptShellCompletionTimeout,
driver.OptConnMaxOpen,
driver.OptConnMaxIdle,
driver.OptConnMaxIdleTime,
driver.OptConnMaxLifetime,
driver.OptMaxRetryInterval,
driver.OptTuningErrgroupLimit,
driver.OptTuningRecChanSize,
OptTuningFlushThreshold,
drivers.OptIngestHeader,
drivers.OptIngestSampleSize,
csv.OptDelim,
csv.OptEmptyAsNull,
)
@ -132,8 +153,8 @@ func RegisterDefaultOpts(reg *options.Registry) {
// filterOptionsForSrc returns a new slice containing only those
// opts that are applicable to src.
func filterOptionsForSrc(src *source.Source, opts ...options.Opt) []options.Opt {
if len(opts) == 0 || src == nil {
func filterOptionsForSrc(typ source.DriverType, opts ...options.Opt) []options.Opt {
if len(opts) == 0 {
return opts
}
@ -157,7 +178,7 @@ func filterOptionsForSrc(src *source.Source, opts ...options.Opt) []options.Opt
// If the opt has key "driver.csv.delim", we want to reject it.
// Thus, if the key has contains "driver", then it must also contain
// the src driver type.
if strings.Contains(key, "driver") && !strings.Contains(key, string(src.Type)) {
if strings.Contains(key, "driver") && !strings.Contains(key, string(typ)) {
return true
}

View File

@ -12,9 +12,12 @@ func (f *Format) UnmarshalText(text []byte) error {
switch Format(text) {
default:
return errz.Errorf("unknown output format {%s}", string(text))
case JSON, JSONA, JSONL, Table, Raw,
case JSON, JSONA, JSONL, Text, Raw,
HTML, Markdown, XLSX, XML,
CSV, TSV, YAML:
case "table":
// Legacy: the "text" format used to be named "table".
// text = []byte(Text)
}
*f = Format(text)
@ -28,27 +31,27 @@ func (f Format) String() string {
// Output format values.
const (
Text Format = "text"
JSON Format = "json"
JSONL Format = "jsonl"
JSONA Format = "jsona"
Table Format = "table"
Raw Format = "raw"
HTML Format = "html"
Markdown Format = "markdown"
XLSX Format = "xlsx"
XML Format = "xml"
CSV Format = "csv"
TSV Format = "tsv"
Raw Format = "raw"
YAML Format = "yaml"
)
// All returns a new slice containing all format.Format values.
func All() []Format {
return []Format{
Text,
JSON,
JSONL,
JSONA,
Table,
Raw,
HTML,
Markdown,

View File

@ -147,7 +147,7 @@ func (w *stdWriter) writeRecord(rec sqlz.Record) error {
w.outBuf.Write(w.b)
w.b = w.b[:0]
if w.outBuf.Len() > output.FlushThreshold {
if w.outBuf.Len() > w.pr.FlushThreshold {
return w.Flush()
}
@ -366,7 +366,7 @@ func (w *lineRecordWriter) writeRecord(rec sqlz.Record) error {
b = append(b, w.tpl[len(w.recMeta)]...)
w.outBuf.Write(b)
if w.outBuf.Len() > output.FlushThreshold {
if w.outBuf.Len() > w.pr.FlushThreshold {
return w.Flush()
}

View File

@ -1,11 +1,18 @@
package output
import "github.com/fatih/color"
import (
"github.com/fatih/color"
"golang.org/x/exp/slog"
)
// Printing describes color and pretty-printing options.
type Printing struct {
monochrome bool
// FlushThreshold is the size in bytes after which an output writer
// should flush any internal buffer.
FlushThreshold int
// ShowHeader indicates that a header (e.g. a header row) should
// be printed where applicable.
ShowHeader bool
@ -91,37 +98,56 @@ type Printing struct {
// are enabled. The default indent is two spaces.
func NewPrinting() *Printing {
pr := &Printing{
ShowHeader: true,
Verbose: false,
Pretty: true,
Redact: true,
monochrome: false,
Indent: " ",
Active: color.New(color.FgGreen, color.Bold),
Bold: color.New(color.Bold),
Bool: color.New(color.FgYellow),
Bytes: color.New(color.Faint),
Datetime: color.New(color.FgGreen, color.Faint),
Duration: color.New(color.FgGreen, color.Faint),
Error: color.New(color.FgRed, color.Bold),
Faint: color.New(color.Faint),
Handle: color.New(color.FgBlue),
Header: color.New(color.FgBlue, color.Bold),
Hilite: color.New(color.FgHiBlue),
Key: color.New(color.FgBlue, color.Bold),
Location: color.New(color.FgGreen),
Normal: color.New(),
Null: color.New(color.Faint),
Number: color.New(color.FgCyan),
Punc: color.New(color.Bold),
String: color.New(color.FgGreen),
Success: color.New(color.FgGreen, color.Bold),
ShowHeader: true,
Verbose: false,
Pretty: true,
Redact: true,
FlushThreshold: 1000,
monochrome: false,
Indent: " ",
Active: color.New(color.FgGreen, color.Bold),
Bold: color.New(color.Bold),
Bool: color.New(color.FgYellow),
Bytes: color.New(color.Faint),
Datetime: color.New(color.FgGreen, color.Faint),
Duration: color.New(color.FgGreen, color.Faint),
Error: color.New(color.FgRed, color.Bold),
Faint: color.New(color.Faint),
Handle: color.New(color.FgBlue),
Header: color.New(color.FgBlue),
// Header: color.New(color.FgBlue, color.Bold),
Hilite: color.New(color.FgHiBlue),
Key: color.New(color.FgBlue, color.Bold),
Location: color.New(color.FgGreen),
Normal: color.New(),
Null: color.New(color.Faint),
Number: color.New(color.FgCyan),
Punc: color.New(color.Bold),
String: color.New(color.FgGreen),
Success: color.New(color.FgGreen, color.Bold),
}
pr.EnableColor(true)
return pr
}
// LogValue implements slog.LogValuer.
func (pr *Printing) LogValue() slog.Value {
if pr == nil {
return slog.Value{}
}
return slog.GroupValue(
slog.Bool("verbose", pr.Verbose),
slog.Bool("header", pr.ShowHeader),
slog.Bool("monochrome", pr.monochrome),
slog.Bool("pretty", pr.Pretty),
slog.Bool("redact", pr.Redact),
slog.Int("flush-threshold", pr.FlushThreshold),
slog.String("indent", pr.Indent),
)
}
func (pr *Printing) colors() []*color.Color {
return []*color.Color{
pr.Active, pr.Bold, pr.Bold, pr.Bytes, pr.Datetime, pr.Duration,

View File

@ -119,8 +119,6 @@ func (w *configWriter) doPrintOptions(reg *options.Registry, o options.Options,
setKeys := o.Keys()
unsetKeys, _ := lo.Difference(allKeys, setKeys)
// keyColor := pr.
for _, k := range unsetKeys {
opt := reg.Get(k)
row := []string{
@ -136,16 +134,21 @@ func (w *configWriter) doPrintOptions(reg *options.Registry, o options.Options,
}
// SetOption implements output.ConfigWriter.
func (w *configWriter) SetOption(reg *options.Registry, o options.Options, opt options.Opt) error {
func (w *configWriter) SetOption(_ *options.Registry, o options.Options, opt options.Opt) error {
if !w.tbl.pr.Verbose {
// No output unless verbose
return nil
}
reg2 := &options.Registry{}
reg2.Add(opt)
o = options.Options{opt.Key(): opt.GetAny(o)}
// It's verbose
o = options.Effective(o, opt)
w.tbl.pr.ShowHeader = false
return w.Options(reg, o)
w.tbl.pr.ShowHeader = true
w.doPrintOptions(reg2, o, false)
return nil
}
func getOptColor(pr *output.Printing, opt options.Opt) *color.Color {

View File

@ -117,9 +117,3 @@ type ConfigWriter interface {
// SetOption is called when an option is set.
SetOption(reg *options.Registry, o options.Options, opt options.Opt) error
}
// FlushThreshold is the size in bytes after which a writer
// should flush any internal buffer.
//
// TODO: Move FlushThreshold to config
const FlushThreshold = 1000

View File

@ -211,7 +211,7 @@ func (w *recordWriter) writeRecord(rec sqlz.Record) error {
w.outBuf.WriteString(w.tplRecEnd)
if w.outBuf.Len() > output.FlushThreshold {
if w.outBuf.Len() > w.pr.FlushThreshold {
return w.Flush()
}

View File

@ -7,6 +7,8 @@ import (
"path/filepath"
"sync"
"github.com/neilotoole/sq/drivers"
"github.com/neilotoole/sq/cli/config/yamlstore"
v0_34_0 "github.com/neilotoole/sq/cli/config/yamlstore/upgrades/v0.34.0"
"github.com/neilotoole/sq/libsq/core/lg/slogbuf"
@ -124,6 +126,7 @@ func newDefaultRunContext(ctx context.Context,
) (*RunContext, *slog.Logger, error) {
// logbuf holds log records until defaultLogging is completed.
log, logbuf := slogbuf.New()
log = log.With(lga.Pid, os.Getpid())
rc := &RunContext{
Stdin: stdin,
@ -145,7 +148,6 @@ func newDefaultRunContext(ctx context.Context,
args, rc.OptionsRegistry, upgrades)
log, logHandler, logCloser, logErr := defaultLogging(ctx)
rc.clnup = cleanup.New().AddE(logCloser)
if logErr != nil {
stderrLog, h := stderrLogger()
@ -158,6 +160,9 @@ func newDefaultRunContext(ctx context.Context,
return rc, log, err
}
}
if log != nil {
log = log.With(lga.Pid, os.Getpid())
}
if rc.Config == nil {
rc.Config = config.New()
@ -191,7 +196,7 @@ func (rc *RunContext) init(ctx context.Context) error {
// It must only be invoked once.
func (rc *RunContext) doInit(ctx context.Context) error {
rc.clnup = cleanup.New()
cfg, log := rc.Config, lg.From(ctx)
cfg, log := rc.Config, lg.FromContext(ctx)
// If the --output=/some/file flag is set, then we need to
// override rc.Out (which is typically stdout) to point it at
@ -218,7 +223,11 @@ func (rc *RunContext) doInit(ctx context.Context) error {
rc.Out = f
}
rc.writers, rc.Out, rc.ErrOut = newWriters(rc.Cmd, rc.Config.Options, rc.Out, rc.ErrOut)
cmdOpts, err := getOptionsFromCmd(rc.Cmd)
if err != nil {
return err
}
rc.writers, rc.Out, rc.ErrOut = newWriters(rc.Cmd, cmdOpts, rc.Out, rc.ErrOut)
var scratchSrcFunc driver.ScratchSrcFunc
@ -232,7 +241,6 @@ func (rc *RunContext) doInit(ctx context.Context) error {
}
}
var err error
rc.files, err = source.NewFiles(ctx)
if err != nil {
lg.WarnIfFuncError(log, lga.Cleanup, rc.clnup.Run)
@ -263,7 +271,12 @@ func (rc *RunContext) doInit(ctx context.Context) error {
rc.driverReg.AddProvider(json.TypeJSON, jsonp)
rc.driverReg.AddProvider(json.TypeJSONA, jsonp)
rc.driverReg.AddProvider(json.TypeJSONL, jsonp)
rc.files.AddDriverDetectors(json.DetectJSON, json.DetectJSONA, json.DetectJSONL)
sampleSize := drivers.OptIngestSampleSize.Get(cfg.Options)
rc.files.AddDriverDetectors(
json.DetectJSON(sampleSize),
json.DetectJSONA(sampleSize),
json.DetectJSONL(sampleSize),
)
rc.driverReg.AddProvider(xlsx.Type, &xlsx.Provider{Log: log, Scratcher: rc.databases, Files: rc.files})
rc.files.AddDriverDetectors(xlsx.DetectXLSX)

View File

@ -115,8 +115,8 @@ func checkStdinSource(ctx context.Context, rc *RunContext) (*source.Source, erro
// If we got this far, we have pipe input
typ := source.TypeNone
if cmd.Flags().Changed(flag.Driver) {
val, _ := cmd.Flags().GetString(flag.Driver)
if cmd.Flags().Changed(flag.IngestDriver) {
val, _ := cmd.Flags().GetString(flag.IngestDriver)
typ = source.DriverType(val)
if rc.driverReg.ProviderFor(typ) == nil {
return nil, errz.Errorf("unknown driver type: %s", typ)
@ -138,7 +138,14 @@ func checkStdinSource(ctx context.Context, rc *RunContext) (*source.Source, erro
}
}
return newSource(ctx, rc.driverReg, typ, source.StdinHandle, source.StdinHandle, options.Options{})
return newSource(
ctx,
rc.driverReg,
typ,
source.StdinHandle,
source.StdinHandle,
options.Options{},
)
}
// newSource creates a new Source instance where the
@ -146,7 +153,7 @@ func checkStdinSource(ctx context.Context, rc *RunContext) (*source.Source, erro
func newSource(ctx context.Context, dp driver.Provider, typ source.DriverType, handle, loc string,
opts options.Options,
) (*source.Source, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
if opts == nil {
log.Debug("Create new data source",

View File

@ -4,6 +4,8 @@ import (
"io"
"os"
"github.com/neilotoole/sq/libsq/core/lg/lga"
"github.com/neilotoole/sq/libsq/core/errz"
"github.com/neilotoole/sq/libsq/core/options"
@ -28,17 +30,51 @@ import (
var (
OptPrintHeader = options.NewBool(
"format.header",
"header",
true,
"Controls whether a header row is printed.",
`Controls whether a header row is printed. This applies only
to certain formats, such as "text" or "csv".`,
"format",
)
OptOutputFormat = NewFormatOpt(
"format",
format.Table,
"Specify the output format.",
format.Text,
`Specify the output format. Some formats are only implemented
for a subset of sq's commands. If the specified format is not available for
a particular command, sq falls back to 'text'. Available formats:
text, csv, tsv, xlsx,
json, jsona, jsonl,
markdown, html, xml, yaml, raw`,
"format",
)
OptVerbose = options.NewBool(
"verbose",
false,
`Print verbose output.`,
"format",
)
OptMonochrome = options.NewBool(
"monochrome",
false,
`Don't print color output.`,
"format",
)
OptPretty = options.NewBool(
"pretty",
true,
`Prettyify output. Only applies to some output formats.`,
"format",
)
OptTuningFlushThreshold = options.NewInt(
"tuning.flush-threshold",
1000,
`Size in bytes after which output writers should flush any internal buffer.
Generally, it is not necessary to fiddle this knob.`,
)
)
// writers is a container for the various output writers.
@ -80,7 +116,7 @@ func newWriters(cmd *cobra.Command, opts options.Options, out, errOut io.Writer,
// Invoke getFormat to see if the format was specified
// via config or flag.
fm := getFormat(cmd, opts)
fm := getFormat(cmd, opts) // FIXME: is this still needed, or use standard opts mechanism?
//nolint:exhaustive
switch fm {
@ -94,7 +130,7 @@ func newWriters(cmd *cobra.Command, opts options.Options, out, errOut io.Writer,
w.pingw = jsonw.NewPingWriter(out2, pr)
w.configw = jsonw.NewConfigWriter(out2, pr)
case format.Table:
case format.Text:
// Table is the base format, already set above, no need to do anything.
case format.TSV:
@ -139,18 +175,15 @@ func newWriters(cmd *cobra.Command, opts options.Options, out, errOut io.Writer,
// for the cmd arg to be nil. The caller should use the returned
// io.Writer instances instead of the supplied writers, as they
// may be decorated for dealing with color, etc.
func getPrinting(cmd *cobra.Command, opts options.Options,
out, errOut io.Writer,
// The supplied opts must already have flags merged into it
// via getOptionsFromCmd.
func getPrinting(cmd *cobra.Command, opts options.Options, out, errOut io.Writer,
) (pr *output.Printing, out2, errOut2 io.Writer) {
pr = output.NewPrinting()
if cmdFlagChanged(cmd, flag.Pretty) {
pr.Pretty, _ = cmd.Flags().GetBool(flag.Pretty)
}
if cmdFlagChanged(cmd, flag.Verbose) {
pr.Verbose, _ = cmd.Flags().GetBool(flag.Verbose)
}
pr.Verbose = OptVerbose.Get(opts)
pr.FlushThreshold = OptTuningFlushThreshold.Get(opts)
pr.Pretty = OptPretty.Get(opts)
switch {
case cmdFlagChanged(cmd, flag.Header):
@ -162,20 +195,15 @@ func getPrinting(cmd *cobra.Command, opts options.Options,
pr.ShowHeader = OptPrintHeader.Get(opts)
}
// TODO: Should get this default value from config
colorize := true
colorize := !OptMonochrome.Get(opts)
if cmdFlagChanged(cmd, flag.Output) {
// We're outputting to a file, thus no color.
colorize = false
} else if cmdFlagChanged(cmd, flag.Monochrome) {
if mono, _ := cmd.Flags().GetBool(flag.Monochrome); mono {
colorize = false
}
}
if !colorize {
color.NoColor = true // TODO: shouldn't rely on package-level var
color.NoColor = true
pr.EnableColor(false)
out2 = out
errOut2 = errOut
@ -205,6 +233,8 @@ func getPrinting(cmd *cobra.Command, opts options.Options,
errOut2 = colorable.NewNonColorable(errOut)
}
logFrom(cmd).Debug("Constructed output.Printing", lga.Val, pr)
return pr, out2, errOut2
}
@ -227,8 +257,8 @@ func getFormat(cmd *cobra.Command, defaults options.Options) format.Format {
fm = format.HTML
case cmdFlagChanged(cmd, flag.Markdown):
fm = format.Markdown
case cmdFlagChanged(cmd, flag.Table):
fm = format.Table
case cmdFlagChanged(cmd, flag.Text):
fm = format.Text
case cmdFlagChanged(cmd, flag.JSONL):
fm = format.JSONL
case cmdFlagChanged(cmd, flag.JSONA):
@ -259,6 +289,11 @@ type FormatOpt struct {
tags []string
}
// Comment implements options.Opt.
func (op FormatOpt) Comment() string {
return op.comment
}
// Tags implements options.Opt.
func (op FormatOpt) Tags() []string {
return op.tags

View File

@ -68,7 +68,7 @@ func (d *driveri) DriverMetadata() driver.Metadata {
// Open implements driver.DatabaseOpener.
func (d *driveri) Open(ctx context.Context, src *source.Source) (driver.Database, error) {
lg.From(ctx).Debug(lgm.OpenSrc, lga.Src, src)
lg.FromContext(ctx).Debug(lgm.OpenSrc, lga.Src, src)
dbase := &database{
log: d.log,
@ -200,7 +200,7 @@ func (d *database) SourceMetadata(ctx context.Context) (*source.Metadata, error)
// Close implements driver.Database.
func (d *database) Close() error {
d.log.Debug(lgm.CloseDB, lga.Src, d.src)
d.log.Debug(lgm.CloseDB, lga.Handle, d.src.Handle)
return errz.Err(d.impl.Close())
}

View File

@ -35,7 +35,7 @@ func DetectTSV(ctx context.Context, openFn source.FileOpenFunc) (detected source
func detectType(ctx context.Context, typ source.DriverType,
openFn source.FileOpenFunc,
) (detected source.DriverType, score float32, err error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
var r io.ReadCloser
r, err = openFn()
if err != nil {

View File

@ -7,6 +7,8 @@ import (
"io"
"unicode/utf8"
"github.com/neilotoole/sq/drivers"
"github.com/neilotoole/sq/libsq/core/kind"
"github.com/neilotoole/sq/libsq/core/sqlz"
@ -30,7 +32,8 @@ import (
var OptEmptyAsNull = options.NewBool(
"driver.csv.empty-as-null",
true,
"",
`When true, empty CSV cells are treated as NULL. When false,
the zero value for that type is used, e.g. empty string or 0.`,
"source", "csv",
)
@ -38,14 +41,14 @@ var OptEmptyAsNull = options.NewBool(
var OptDelim = options.NewString(
"driver.csv.delim",
"comma",
`Possible values are: comma, space, pipe, tab, colon, semi, period.
Default is comma.`,
`Delimiter to use for CSV files. Default is "comma".
Possible values are: comma, space, pipe, tab, colon, semi, period.`,
"source", "csv",
)
// importCSV loads the src CSV data into scratchDB.
func importCSV(ctx context.Context, src *source.Source, openFn source.FileOpenFunc, scratchDB driver.Database) error {
log := lg.From(ctx)
log := lg.FromContext(ctx)
var err error
var r io.ReadCloser
@ -62,7 +65,7 @@ func importCSV(ctx context.Context, src *source.Source, openFn source.FileOpenFu
}
cr := newCSVReader(r, delim)
recs, err := readRecords(cr, driver.Tuning.SampleSize)
recs, err := readRecords(cr, drivers.OptIngestSampleSize.Get(src.Options))
if err != nil {
return err
}
@ -109,7 +112,11 @@ func importCSV(ctx context.Context, src *source.Source, openFn source.FileOpenFu
configureEmptyNullMunge(mungers, recMeta)
}
insertWriter := libsq.NewDBWriter(scratchDB, tblDef.Name, driver.Tuning.RecordChSize)
insertWriter := libsq.NewDBWriter(
scratchDB,
tblDef.Name,
driver.OptTuningRecChanSize.Get(scratchDB.Source().Options),
)
err = execInsert(ctx, insertWriter, recMeta, mungers, recs, cr)
if err != nil {
return err

View File

@ -89,7 +89,7 @@ func mungeCSV2InsertRecord(ctx context.Context, mungers []kind.MungeFunc, csvRec
a := make([]any, len(csvRec))
for i := range csvRec {
if i >= len(mungers) {
lg.From(ctx).Error("no munger for field", lga.Index, i, lga.Val, csvRec[i])
lg.FromContext(ctx).Error("no munger for field", lga.Index, i, lga.Val, csvRec[i])
// Maybe should panic here, or return an error?
// But, in future we may be able to handle ragged-edge records,
// so maybe logging the error is best.

View File

@ -9,6 +9,15 @@ import "github.com/neilotoole/sq/libsq/core/options"
var OptIngestHeader = options.NewBool(
"ingest.header",
false,
"",
`Specifies whether ingested data has a header row or not.
If not set, the ingester *may* try to detect if the input has a header.`,
"source",
)
// OptIngestSampleSize specifies the number of samples that a detector
// should take to determine type.
var OptIngestSampleSize = options.NewInt(
"ingest.sample-size",
1024,
`Specify the number of samples that a detector should take to determine type.`,
"source")

View File

@ -41,6 +41,8 @@ type importJob struct {
// flatten specifies that the fields of nested JSON objects are
// imported as fields of the single top-level table, with a
// scoped column name.
//
// TODO: flatten come from src.Options
flatten bool
}
@ -431,7 +433,7 @@ type importSchema struct {
func execSchemaDelta(ctx context.Context, drvr driver.SQLDriver, db sqlz.DB,
curSchema, newSchema *importSchema,
) error {
log := lg.From(ctx)
log := lg.FromContext(ctx)
var err error
if curSchema == nil {
for _, tblDef := range newSchema.tblDefs {
@ -584,7 +586,7 @@ func execInsertions(ctx context.Context, drvr driver.SQLDriver, db sqlz.DB, inse
// We should be re-using the prepared statement, and probably
// should batch the inserts as well. See driver.BatchInsert.
log := lg.From(ctx)
log := lg.FromContext(ctx)
var err error
var execer *driver.StmtExecer

View File

@ -15,127 +15,127 @@ import (
"github.com/neilotoole/sq/libsq/core/errz"
"github.com/neilotoole/sq/libsq/core/stringz"
"github.com/neilotoole/sq/libsq/driver"
"github.com/neilotoole/sq/libsq/source"
)
// DetectJSON implements source.DriverDetectFunc.
// The function returns TypeJSON for two varieties of input:.
func DetectJSON(ctx context.Context, openFn source.FileOpenFunc) (detected source.DriverType, score float32,
err error,
) {
log := lg.From(ctx)
var r1, r2 io.ReadCloser
r1, err = openFn()
if err != nil {
return source.TypeNone, 0, errz.Err(err)
}
defer lg.WarnIfCloseError(log, lgm.CloseFileReader, r1)
// DetectJSON returns a source.DriverDetectFunc that can detect JSON.
func DetectJSON(sampleSize int) source.DriverDetectFunc {
return func(ctx context.Context, openFn source.FileOpenFunc) (detected source.DriverType, score float32,
err error,
) {
log := lg.FromContext(ctx)
var r1, r2 io.ReadCloser
r1, err = openFn()
if err != nil {
return source.TypeNone, 0, errz.Err(err)
}
defer lg.WarnIfCloseError(log, lgm.CloseFileReader, r1)
dec := stdj.NewDecoder(r1)
var tok stdj.Token
tok, err = dec.Token()
if err != nil {
return source.TypeNone, 0, nil
}
dec := stdj.NewDecoder(r1)
var tok stdj.Token
tok, err = dec.Token()
if err != nil {
return source.TypeNone, 0, nil
}
delim, ok := tok.(stdj.Delim)
if !ok {
return source.TypeNone, 0, nil
}
delim, ok := tok.(stdj.Delim)
if !ok {
return source.TypeNone, 0, nil
}
switch delim {
default:
return source.TypeNone, 0, nil
case leftBrace:
// The input is a single JSON object
r2, err = openFn()
// buf gets a copy of what is read from r2
buf := &buffer{}
if err != nil {
return source.TypeNone, 0, errz.Err(err)
}
defer lg.WarnIfCloseError(log, lgm.CloseFileReader, r2)
dec = stdj.NewDecoder(io.TeeReader(r2, buf))
var m map[string]any
err = dec.Decode(&m)
if err != nil {
return source.TypeNone, 0, nil
}
if dec.More() {
// The input is supposed to be just a single object, so
// it shouldn't have more tokens
return source.TypeNone, 0, nil
}
// If the input is all on a single line, then it could be
// either JSON or JSONL. For single-line input, prefer JSONL.
lineCount := stringz.LineCount(bytes.NewReader(buf.b), true)
switch lineCount {
case -1:
// should never happen
return source.TypeNone, 0, errz.New("unknown problem reading JSON input")
case 0:
// should never happen
return source.TypeNone, 0, errz.New("JSON input is empty")
case 1:
// If the input is a JSON object on a single line, it could
// be TypeJSON or TypeJSONL. In deference to TypeJSONL, we
// return 0.9 instead of 1.0
return TypeJSON, 0.9, nil
default:
return TypeJSON, 1.0, nil
}
case leftBracket:
// The input is one or more JSON objects inside an array
}
switch delim {
default:
return source.TypeNone, 0, nil
case leftBrace:
// The input is a single JSON object
r2, err = openFn()
// buf gets a copy of what is read from r2
buf := &buffer{}
if err != nil {
return source.TypeNone, 0, errz.Err(err)
}
defer lg.WarnIfCloseError(log, lgm.CloseFileReader, r2)
dec = stdj.NewDecoder(io.TeeReader(r2, buf))
var m map[string]any
err = dec.Decode(&m)
if err != nil {
return source.TypeNone, 0, nil
sc := newObjectInArrayScanner(r2)
var validObjCount int
var obj map[string]any
for {
select {
case <-ctx.Done():
return source.TypeNone, 0, ctx.Err()
default:
}
obj, _, err = sc.next()
if err != nil {
return source.TypeNone, 0, ctx.Err()
}
if obj == nil { // end of input
break
}
validObjCount++
if validObjCount >= sampleSize {
break
}
}
if dec.More() {
// The input is supposed to be just a single object, so
// it shouldn't have more tokens
return source.TypeNone, 0, nil
}
// If the input is all on a single line, then it could be
// either JSON or JSONL. For single-line input, prefer JSONL.
lineCount := stringz.LineCount(bytes.NewReader(buf.b), true)
switch lineCount {
case -1:
// should never happen
return source.TypeNone, 0, errz.New("unknown problem reading JSON input")
case 0:
// should never happen
return source.TypeNone, 0, errz.New("JSON input is empty")
case 1:
// If the input is a JSON object on a single line, it could
// be TypeJSON or TypeJSONL. In deference to TypeJSONL, we
// return 0.9 instead of 1.0
return TypeJSON, 0.9, nil
default:
if validObjCount > 0 {
return TypeJSON, 1.0, nil
}
case leftBracket:
// The input is one or more JSON objects inside an array
return source.TypeNone, 0, nil
}
r2, err = openFn()
if err != nil {
return source.TypeNone, 0, errz.Err(err)
}
defer lg.WarnIfCloseError(log, lgm.CloseFileReader, r2)
sc := newObjectInArrayScanner(r2)
var validObjCount int
var obj map[string]any
for {
select {
case <-ctx.Done():
return source.TypeNone, 0, ctx.Err()
default:
}
obj, _, err = sc.next()
if err != nil {
return source.TypeNone, 0, ctx.Err()
}
if obj == nil { // end of input
break
}
validObjCount++
if validObjCount >= driver.Tuning.SampleSize {
break
}
}
if validObjCount > 0 {
return TypeJSON, 1.0, nil
}
return source.TypeNone, 0, nil
}
func importJSON(ctx context.Context, job importJob) error {
log := lg.From(ctx)
log := lg.FromContext(ctx)
r, err := job.openFn()
if err != nil {

View File

@ -23,80 +23,82 @@ import (
"github.com/neilotoole/sq/libsq/source"
)
// DetectJSONA implements source.DriverDetectFunc for TypeJSONA.
// DetectJSONA returns a source.DriverDetectFunc for TypeJSONA.
// Each line of input must be a valid JSON array.
func DetectJSONA(ctx context.Context, openFn source.FileOpenFunc) (detected source.DriverType,
score float32, err error,
) {
log := lg.From(ctx)
var r io.ReadCloser
r, err = openFn()
if err != nil {
return source.TypeNone, 0, errz.Err(err)
}
defer lg.WarnIfCloseError(log, lgm.CloseFileReader, r)
func DetectJSONA(sampleSize int) source.DriverDetectFunc {
return func(ctx context.Context, openFn source.FileOpenFunc) (detected source.DriverType,
score float32, err error,
) {
log := lg.FromContext(ctx)
var r io.ReadCloser
r, err = openFn()
if err != nil {
return source.TypeNone, 0, errz.Err(err)
}
defer lg.WarnIfCloseError(log, lgm.CloseFileReader, r)
sc := bufio.NewScanner(r)
var validLines int
var line []byte
sc := bufio.NewScanner(r)
var validLines int
var line []byte
for sc.Scan() {
select {
case <-ctx.Done():
return source.TypeNone, 0, ctx.Err()
default:
for sc.Scan() {
select {
case <-ctx.Done():
return source.TypeNone, 0, ctx.Err()
default:
}
if err = sc.Err(); err != nil {
return source.TypeNone, 0, errz.Err(err)
}
line = sc.Bytes()
if len(line) == 0 {
// Probably want to skip blank lines? Maybe
continue
}
// Each line of JSONA must open with left bracket
if line[0] != '[' {
return source.TypeNone, 0, nil
}
// If the line is JSONA, it should marshall into []any
var fields []any
err = stdj.Unmarshal(line, &fields)
if err != nil {
return source.TypeNone, 0, nil
}
// JSONA must consist only of values, not objects. Any object
// would get marshalled into a map[string]any, so
// we check for that.
for _, field := range fields {
if _, ok := field.(map[string]any); ok {
return source.TypeNone, 0, nil
}
}
validLines++
if validLines >= sampleSize {
break
}
}
if err = sc.Err(); err != nil {
return source.TypeNone, 0, errz.Err(err)
}
line = sc.Bytes()
if len(line) == 0 {
// Probably want to skip blank lines? Maybe
continue
if validLines > 0 {
return TypeJSONA, 1.0, nil
}
// Each line of JSONA must open with left bracket
if line[0] != '[' {
return source.TypeNone, 0, nil
}
// If the line is JSONA, it should marshall into []any
var fields []any
err = stdj.Unmarshal(line, &fields)
if err != nil {
return source.TypeNone, 0, nil
}
// JSONA must consist only of values, not objects. Any object
// would get marshalled into a map[string]any, so
// we check for that.
for _, field := range fields {
if _, ok := field.(map[string]any); ok {
return source.TypeNone, 0, nil
}
}
validLines++
if validLines >= driver.Tuning.SampleSize {
break
}
return source.TypeNone, 0, nil
}
if err = sc.Err(); err != nil {
return source.TypeNone, 0, errz.Err(err)
}
if validLines > 0 {
return TypeJSONA, 1.0, nil
}
return source.TypeNone, 0, nil
}
func importJSONA(ctx context.Context, job importJob) error {
log := lg.From(ctx)
log := lg.FromContext(ctx)
predictR, err := job.openFn()
if err != nil {
@ -105,7 +107,7 @@ func importJSONA(ctx context.Context, job importJob) error {
defer lg.WarnIfCloseError(log, lgm.CloseFileReader, predictR)
colKinds, readMungeFns, err := detectColKindsJSONA(ctx, predictR)
colKinds, readMungeFns, err := detectColKindsJSONA(ctx, predictR, job.sampleSize)
if err != nil {
return err
}
@ -137,7 +139,11 @@ func importJSONA(ctx context.Context, job importJob) error {
}
defer lg.WarnIfCloseError(log, lgm.CloseFileReader, r)
insertWriter := libsq.NewDBWriter(job.destDB, tblDef.Name, driver.Tuning.RecordChSize)
insertWriter := libsq.NewDBWriter(
job.destDB,
tblDef.Name,
driver.OptTuningRecChanSize.Get(job.destDB.Source().Options),
)
var cancelFn context.CancelFunc
ctx, cancelFn = context.WithCancel(ctx)
@ -232,7 +238,7 @@ func startInsertJSONA(ctx context.Context, recordCh chan<- sqlz.Record, errCh <-
// detectColKindsJSONA reads JSONA lines from r, and returns
// the kind of each field. The []readMungeFunc may contain a munge
// func that should be applied to each value (or the element may be nil).
func detectColKindsJSONA(ctx context.Context, r io.Reader) ([]kind.Kind, []kind.MungeFunc, error) {
func detectColKindsJSONA(ctx context.Context, r io.Reader, sampleSize int) ([]kind.Kind, []kind.MungeFunc, error) {
var (
err error
totalLineCount int
@ -252,7 +258,7 @@ func detectColKindsJSONA(ctx context.Context, r io.Reader) ([]kind.Kind, []kind.
default:
}
if jLineCount > driver.Tuning.SampleSize {
if jLineCount > sampleSize {
break
}

View File

@ -14,75 +14,79 @@ import (
"github.com/neilotoole/sq/libsq/core/lg"
"github.com/neilotoole/sq/libsq/core/errz"
"github.com/neilotoole/sq/libsq/driver"
"github.com/neilotoole/sq/libsq/source"
)
// DetectJSONL implements source.DriverDetectFunc.
func DetectJSONL(ctx context.Context, openFn source.FileOpenFunc) (detected source.DriverType,
score float32, err error,
) {
log := lg.From(ctx)
var r io.ReadCloser
r, err = openFn()
if err != nil {
return source.TypeNone, 0, errz.Err(err)
}
defer lg.WarnIfCloseError(log, lgm.CloseFileReader, r)
// DetectJSONL returns a source.DriverDetectFunc that can
// detect JSONL.
func DetectJSONL(sampleSize int) source.DriverDetectFunc {
return func(ctx context.Context, openFn source.FileOpenFunc) (detected source.DriverType,
score float32, err error,
) {
log := lg.FromContext(ctx)
var r io.ReadCloser
r, err = openFn()
if err != nil {
return source.TypeNone, 0, errz.Err(err)
}
defer lg.WarnIfCloseError(log, lgm.CloseFileReader, r)
sc := bufio.NewScanner(r)
var validLines int
var line []byte
sc := bufio.NewScanner(r)
var validLines int
var line []byte
for sc.Scan() {
select {
case <-ctx.Done():
return source.TypeNone, 0, ctx.Err()
default:
for sc.Scan() {
select {
case <-ctx.Done():
return source.TypeNone, 0, ctx.Err()
default:
}
if err = sc.Err(); err != nil {
return source.TypeNone, 0, errz.Err(err)
}
line = sc.Bytes()
line = bytes.TrimSpace(line)
if len(line) == 0 {
// Probably want to skip blank lines? Maybe
continue
}
// Each line of JSONL must be braced
if line[0] != '{' || line[len(line)-1] != '}' {
return source.TypeNone, 0, nil
}
// If the line is JSONL, it should marshall into map[string]any
var vals map[string]any
err = stdj.Unmarshal(line, &vals)
if err != nil {
return source.TypeNone, 0, nil
}
validLines++
if validLines >= sampleSize {
break
}
}
if err = sc.Err(); err != nil {
return source.TypeNone, 0, errz.Err(err)
}
line = sc.Bytes()
line = bytes.TrimSpace(line)
if len(line) == 0 {
// Probably want to skip blank lines? Maybe
continue
if validLines > 0 {
return TypeJSONL, 1.0, nil
}
// Each line of JSONL must be braced
if line[0] != '{' || line[len(line)-1] != '}' {
return source.TypeNone, 0, nil
}
// If the line is JSONL, it should marshall into map[string]any
var vals map[string]any
err = stdj.Unmarshal(line, &vals)
if err != nil {
return source.TypeNone, 0, nil
}
validLines++
if validLines >= driver.Tuning.SampleSize {
break
}
return source.TypeNone, 0, nil
}
if err = sc.Err(); err != nil {
return source.TypeNone, 0, errz.Err(err)
}
if validLines > 0 {
return TypeJSONL, 1.0, nil
}
return source.TypeNone, 0, nil
}
// DetectJSONL implements source.DriverDetectFunc.
func importJSONL(ctx context.Context, job importJob) error { //nolint:gocognit
log := lg.From(ctx)
log := lg.FromContext(ctx)
r, err := job.openFn()
if err != nil {

View File

@ -7,6 +7,8 @@ import (
"os"
"testing"
"github.com/neilotoole/sq/drivers"
"github.com/stretchr/testify/require"
"github.com/neilotoole/sq/libsq/core/kind"
@ -30,7 +32,7 @@ func newImportJob(fromSrc *source.Source, openFn source.FileOpenFunc, destDB dri
flatten bool,
) importJob {
if sampleSize <= 0 {
sampleSize = driver.Tuning.SampleSize
sampleSize = drivers.OptIngestSampleSize.Get(fromSrc.Options)
}
return importJob{
@ -60,7 +62,7 @@ func TestDetectColKindsJSONA(t *testing.T) {
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, f.Close()) })
kinds, _, err := detectColKindsJSONA(context.Background(), f)
kinds, _, err := detectColKindsJSONA(context.Background(), f, 1000)
require.NoError(t, err)
require.Equal(t, tc.wantKinds, kinds)
})

View File

@ -9,6 +9,8 @@ import (
"context"
"database/sql"
"github.com/neilotoole/sq/drivers"
"github.com/neilotoole/sq/libsq/core/lg/lga"
"github.com/neilotoole/sq/libsq/core/lg/lgm"
@ -95,7 +97,7 @@ func (d *driveri) DriverMetadata() driver.Metadata {
// Open implements driver.DatabaseOpener.
func (d *driveri) Open(ctx context.Context, src *source.Source) (driver.Database, error) {
lg.From(ctx).Debug(lgm.OpenSrc, lga.Src, src)
lg.FromContext(ctx).Debug(lgm.OpenSrc, lga.Src, src)
dbase := &database{log: d.log, src: src, clnup: cleanup.New(), files: d.files}
@ -115,8 +117,8 @@ func (d *driveri) Open(ctx context.Context, src *source.Source) (driver.Database
fromSrc: src,
openFn: d.files.OpenFunc(src),
destDB: dbase.impl,
sampleSize: driver.Tuning.SampleSize,
flatten: true, // TODO: Should come from src.Options
sampleSize: drivers.OptIngestSampleSize.Get(src.Options),
flatten: true,
}
err = d.importFn(ctx, job)
@ -228,13 +230,7 @@ func (d *database) SourceMetadata(ctx context.Context) (*source.Metadata, error)
// Close implements driver.Database.
func (d *database) Close() error {
d.log.Debug(lgm.CloseDB, lga.Src, d.src)
d.log.Debug(lgm.CloseDB, lga.Handle, d.src.Handle)
return errz.Combine(d.impl.Close(), d.clnup.Run())
}
var (
_ source.DriverDetectFunc = DetectJSON
_ source.DriverDetectFunc = DetectJSONA
_ source.DriverDetectFunc = DetectJSONL
)

View File

@ -20,10 +20,12 @@ import (
)
func TestDriverDetectorFuncs(t *testing.T) {
const sampleSize = 1000
detectFns := map[source.DriverType]source.DriverDetectFunc{ //nolint:exhaustive
json.TypeJSON: json.DetectJSON,
json.TypeJSONA: json.DetectJSONA,
json.TypeJSONL: json.DetectJSONL,
json.TypeJSON: json.DetectJSON(sampleSize),
json.TypeJSONA: json.DetectJSONA(sampleSize),
json.TypeJSONL: json.DetectJSONL(sampleSize),
}
testCases := []struct {

View File

@ -8,6 +8,8 @@ import (
"strings"
"time"
"github.com/neilotoole/sq/libsq/core/options"
"golang.org/x/sync/errgroup"
"github.com/neilotoole/sq/libsq/core/lg/lga"
@ -180,7 +182,7 @@ WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = ?`
// getColumnMetadata returns column metadata for tblName.
func getColumnMetadata(ctx context.Context, db sqlz.DB, tblName string) ([]*source.ColMetadata, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
const query = `SELECT column_name, data_type, column_type, ordinal_position, column_default,
is_nullable, column_key, column_comment, extra
@ -246,6 +248,8 @@ ORDER BY cols.ordinal_position ASC`
// each SELECT COUNT(*) query. That said, the testing/benchmarking was
// far from exhaustive, and this entire thing has a bit of a code smell.
func getSourceMetadata(ctx context.Context, src *source.Source, db sqlz.DB) (*source.Metadata, error) {
ctx = options.NewContext(ctx, src.Options)
md := &source.Metadata{
SourceType: Type,
DBDriverType: Type,
@ -254,7 +258,7 @@ func getSourceMetadata(ctx context.Context, src *source.Source, db sqlz.DB) (*so
}
g, gCtx := errgroup.WithContext(ctx)
g.SetLimit(driver.Tuning.ErrgroupLimit)
g.SetLimit(driver.OptTuningErrgroupLimit.Get(src.Options))
g.Go(func() error {
return doRetry(gCtx, func() error {
@ -309,7 +313,7 @@ func setSourceSummaryMeta(ctx context.Context, db sqlz.DB, md *source.Metadata)
// getDBVarsMeta returns the database variables.
func getDBVarsMeta(ctx context.Context, db sqlz.DB) ([]source.DBVar, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
var dbVars []source.DBVar
rows, err := db.QueryContext(ctx, "SHOW VARIABLES")
@ -341,7 +345,7 @@ func getDBVarsMeta(ctx context.Context, db sqlz.DB) ([]source.DBVar, error) {
// This function should be revisited to see if there's a better way
// to implement it.
func getAllTblMetas(ctx context.Context, db sqlz.DB) ([]*source.TableMetadata, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
const query = `SELECT t.TABLE_SCHEMA, t.TABLE_NAME, t.TABLE_TYPE, t.TABLE_COMMENT,
(DATA_LENGTH + INDEX_LENGTH) AS table_size,
@ -375,7 +379,7 @@ ORDER BY c.TABLE_NAME ASC, c.ORDINAL_POSITION ASC`
// g is an errgroup for fetching the
// row count for each table.
g, gCtx := errgroup.WithContext(ctx)
g.SetLimit(driver.Tuning.ErrgroupLimit)
g.SetLimit(driver.OptTuningErrgroupLimit.Get(options.FromContext(ctx)))
rows, err := db.QueryContext(ctx, query)
if err != nil {

View File

@ -7,6 +7,8 @@ import (
"fmt"
"strings"
"github.com/neilotoole/sq/libsq/core/options"
"github.com/neilotoole/sq/libsq/core/retry"
"github.com/neilotoole/sq/libsq/driver/dialect"
@ -305,7 +307,7 @@ func (d *driveri) getTableRecordMeta(ctx context.Context, db sqlz.DB, tblName st
// Open implements driver.DatabaseOpener.
func (d *driveri) Open(ctx context.Context, src *source.Source) (driver.Database, error) {
lg.From(ctx).Debug(lgm.OpenSrc, lga.Src, src)
lg.FromContext(ctx).Debug(lgm.OpenSrc, lga.Src, src)
db, err := d.doOpen(ctx, src)
if err != nil {
@ -429,7 +431,7 @@ func (d *database) SourceMetadata(ctx context.Context) (*source.Metadata, error)
// Close implements driver.Database.
func (d *database) Close() error {
d.log.Debug(lgm.CloseDB, lga.Src, d.src)
d.log.Debug(lgm.CloseDB, lga.Handle, d.src.Handle)
return errz.Err(d.db.Close())
}
@ -489,5 +491,6 @@ func dsnFromLocation(src *source.Source, parseTime bool) (string, error) {
// doRetry executes fn with retry on isErrTooManyConnections.
func doRetry(ctx context.Context, fn func() error) error {
return retry.Do(ctx, driver.Tuning.MaxRetryInterval, fn, isErrTooManyConnections)
maxRetryInterval := driver.OptMaxRetryInterval.Get(options.FromContext(ctx))
return retry.Do(ctx, maxRetryInterval, fn, isErrTooManyConnections)
}

View File

@ -7,6 +7,8 @@ import (
"reflect"
"strings"
"github.com/neilotoole/sq/libsq/core/options"
"github.com/neilotoole/sq/libsq/driver"
"github.com/neilotoole/sq/libsq/core/lg/lga"
@ -176,7 +178,8 @@ func toNullableScanType(log *slog.Logger, colName, dbTypeName string, knd kind.K
}
func getSourceMetadata(ctx context.Context, src *source.Source, db sqlz.DB) (*source.Metadata, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
ctx = options.NewContext(ctx, src.Options)
md := &source.Metadata{
Handle: src.Handle,
@ -213,7 +216,7 @@ current_setting('server_version'), version(), "current_user"()`
}
g, gCtx := errgroup.WithContext(ctx)
g.SetLimit(driver.Tuning.ErrgroupLimit)
g.SetLimit(driver.OptTuningErrgroupLimit.Get(src.Options))
tblMetas := make([]*source.TableMetadata, len(tblNames))
for i := range tblNames {
i := i
@ -269,7 +272,7 @@ current_setting('server_version'), version(), "current_user"()`
}
func getPgSettings(ctx context.Context, db sqlz.DB) ([]source.DBVar, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
rows, err := db.QueryContext(ctx, "SELECT name, setting FROM pg_settings ORDER BY name")
if err != nil {
return nil, errz.Err(err)
@ -298,7 +301,7 @@ func getPgSettings(ctx context.Context, db sqlz.DB) ([]source.DBVar, error) {
// getAllTable names returns all table (or view) names in the current
// catalog & schema.
func getAllTableNames(ctx context.Context, db sqlz.DB) ([]string, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
const tblNamesQuery = `SELECT table_name FROM information_schema.tables
WHERE table_catalog = current_catalog AND table_schema = current_schema()
@ -329,7 +332,7 @@ ORDER BY table_name`
}
func getTableMetadata(ctx context.Context, db sqlz.DB, tblName string) (*source.TableMetadata, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
const tblsQueryTpl = `SELECT table_catalog, table_schema, table_name, table_type, is_insertable_into,
(SELECT COUNT(*) FROM "%s") AS table_row_count,
@ -450,7 +453,7 @@ type pgColumn struct {
// getPgColumns queries the column metadata for tblName.
func getPgColumns(ctx context.Context, db sqlz.DB, tblName string) ([]*pgColumn, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
// colsQuery gets column information from information_schema.columns.
//
@ -548,7 +551,7 @@ func colMetaFromPgColumn(log *slog.Logger, pgCol *pgColumn) *source.ColMetadata
// are returned. If tblName is specified, constraints just for that
// table are returned.
func getPgConstraints(ctx context.Context, db sqlz.DB, tblName string) ([]*pgConstraint, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
var args []any
query := `SELECT kcu.table_catalog,kcu.table_schema,kcu.table_name,kcu.column_name,

View File

@ -9,6 +9,8 @@ import (
"strconv"
"strings"
"github.com/neilotoole/sq/libsq/core/options"
"github.com/jackc/pgx/v5/pgconn"
"github.com/neilotoole/sq/libsq/core/retry"
@ -118,7 +120,7 @@ func (d *driveri) Renderer() *render.Renderer {
// Open implements driver.DatabaseOpener.
func (d *driveri) Open(ctx context.Context, src *source.Source) (driver.Database, error) {
lg.From(ctx).Debug(lgm.OpenSrc, lga.Src, src)
lg.FromContext(ctx).Debug(lgm.OpenSrc, lga.Src, src)
db, err := d.doOpen(ctx, src)
if err != nil {
@ -129,6 +131,7 @@ func (d *driveri) Open(ctx context.Context, src *source.Source) (driver.Database
}
func (d *driveri) doOpen(ctx context.Context, src *source.Source) (*sql.DB, error) {
ctx = options.NewContext(ctx, src.Options)
dbCfg, err := pgxpool.ParseConfig(src.Location)
if err != nil {
return nil, errz.Err(err)
@ -141,7 +144,7 @@ func (d *driveri) doOpen(ctx context.Context, src *source.Source) (*sql.DB, erro
var dbErr error
db, dbErr = sql.Open(dbDrvr, connStr)
if dbErr != nil {
lg.From(ctx).Error("postgres open, may retry", lga.Err, dbErr)
lg.FromContext(ctx).Error("postgres open, may retry", lga.Err, dbErr)
}
return dbErr
}); err != nil {
@ -449,7 +452,7 @@ func (d *driveri) getTableRecordMeta(ctx context.Context, db sqlz.DB, tblName st
// getTableColumnNames consults postgres's information_schema.columns table,
// returning the names of the table's columns in ordinal order.
func getTableColumnNames(ctx context.Context, db sqlz.DB, tblName string) ([]string, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
const query = `SELECT column_name FROM information_schema.columns
WHERE table_schema = CURRENT_SCHEMA()
AND table_name = $1
@ -560,7 +563,7 @@ func (d *database) SourceMetadata(ctx context.Context) (*source.Metadata, error)
// Close implements driver.Database.
func (d *database) Close() error {
d.log.Debug(lgm.CloseDB, lga.Src, d.src)
d.log.Debug(lgm.CloseDB, lga.Handle, d.src.Handle)
err := d.db.Close()
if err != nil {
@ -600,5 +603,6 @@ func hasErrCode(err error, code string) bool {
// doRetry executes fn with retry on isErrTooManyConnections.
func doRetry(ctx context.Context, fn func() error) error {
return retry.Do(ctx, driver.Tuning.MaxRetryInterval, fn, isErrTooManyConnections)
maxRetryInterval := driver.OptMaxRetryInterval.Get(options.FromContext(ctx))
return retry.Do(ctx, maxRetryInterval, fn, isErrTooManyConnections)
}

View File

@ -248,7 +248,7 @@ func DBTypeForKind(knd kind.Kind) string {
// getTableMetadata returns metadata for tblName in db.
func getTableMetadata(ctx context.Context, db sqlz.DB, tblName string) (*source.TableMetadata, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
tblMeta := &source.TableMetadata{Name: tblName}
// Note that there's no easy way of getting the physical size of
// a table, so tblMeta.Size remains nil.
@ -316,7 +316,7 @@ func getTableMetadata(ctx context.Context, db sqlz.DB, tblName string) (*source.
// getAllTblMeta gets metadata for each of the
// non-system tables in db.
func getAllTblMeta(ctx context.Context, db sqlz.DB) ([]*source.TableMetadata, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
// This query returns a row for each column of each table,
// order by table name then col id (ordinal).
// Results will look like:
@ -423,7 +423,7 @@ ORDER BY m.name, p.cid
// getTblRowCounts returns the number of rows in each table.
func getTblRowCounts(ctx context.Context, db sqlz.DB, tblNames []string) ([]int64, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
// See: https://stackoverflow.com/questions/7524612/how-to-count-rows-from-multiple-tables-in-sqlite
//

View File

@ -86,7 +86,7 @@ func (d *driveri) DriverMetadata() driver.Metadata {
// Open implements driver.DatabaseOpener.
func (d *driveri) Open(ctx context.Context, src *source.Source) (driver.Database, error) {
lg.From(ctx).Debug(lgm.OpenSrc, lga.Src, src)
lg.FromContext(ctx).Debug(lgm.OpenSrc, lga.Src, src)
db, err := d.doOpen(ctx, src)
if err != nil {
@ -840,7 +840,7 @@ func (d *database) Close() error {
return nil
}
d.log.Debug(lgm.CloseDB, lga.Src, d.src)
d.log.Debug(lgm.CloseDB, lga.Handle, d.src.Handle)
err := errz.Err(d.db.Close())
d.closed = true
return err
@ -851,7 +851,7 @@ func (d *database) Close() error {
// src points at this file. The returned clnup func closes that
// db file and deletes it.
func NewScratchSource(ctx context.Context, name string) (src *source.Source, clnup func() error, err error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
name = stringz.SanitizeAlphaNumeric(name, '_')
_, f, cleanFn, err := source.TempDirFile(name + ".sqlite")
if err != nil {

View File

@ -7,6 +7,8 @@ import (
"strconv"
"strings"
"github.com/neilotoole/sq/libsq/core/options"
"github.com/neilotoole/sq/libsq/core/lg/lga"
"github.com/neilotoole/sq/libsq/driver"
@ -113,7 +115,8 @@ func setScanType(ct *sqlz.ColumnTypeData, knd kind.Kind) {
}
func getSourceMetadata(ctx context.Context, src *source.Source, db sqlz.DB) (*source.Metadata, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
ctx = options.NewContext(ctx, src.Options)
const query = `SELECT DB_NAME(), SCHEMA_NAME(), SERVERPROPERTY('ProductVersion'), @@VERSION,
(SELECT SUM(size) * 8192
@ -142,7 +145,7 @@ GROUP BY database_id) AS total_size_bytes`
}
g, gCtx := errgroup.WithContext(ctx)
g.SetLimit(driver.Tuning.ErrgroupLimit)
g.SetLimit(driver.OptTuningErrgroupLimit.Get(src.Options))
tblMetas := make([]*source.TableMetadata, len(tblNames))
for i := range tblNames {
i := i
@ -256,7 +259,7 @@ func getTableMetadata(ctx context.Context, db sqlz.DB, tblCatalog,
Name: dbCols[i].ColumnName,
Position: dbCols[i].OrdinalPosition,
BaseType: dbCols[i].DataType,
Kind: kindFromDBTypeName(lg.From(ctx), dbCols[i].ColumnName, dbCols[i].DataType),
Kind: kindFromDBTypeName(lg.FromContext(ctx), dbCols[i].ColumnName, dbCols[i].DataType),
Nullable: dbCols[i].Nullable.Bool,
DefaultValue: dbCols[i].ColumnDefault.String,
}
@ -297,7 +300,7 @@ func getTableMetadata(ctx context.Context, db sqlz.DB, tblCatalog,
// getAllTables returns all of the table names, and the table types
// (i.e. "BASE TABLE" or "VIEW").
func getAllTables(ctx context.Context, db sqlz.DB) (tblNames, tblTypes []string, err error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
const query = `SELECT TABLE_NAME, TABLE_TYPE FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_TYPE='BASE TABLE' OR TABLE_TYPE='VIEW'
@ -328,7 +331,7 @@ ORDER BY TABLE_NAME ASC, TABLE_TYPE ASC`
}
func getColumnMeta(ctx context.Context, db sqlz.DB, tblCatalog, tblSchema, tblName string) ([]columnMeta, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
// TODO: sq doesn't use all of these columns, no need to select them all.
const query = `SELECT
@ -374,7 +377,7 @@ func getColumnMeta(ctx context.Context, db sqlz.DB, tblCatalog, tblSchema, tblNa
}
func getConstraints(ctx context.Context, db sqlz.DB, tblCatalog, tblSchema, tblName string) ([]constraintMeta, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
const query = `SELECT kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, tc.CONSTRAINT_TYPE,
kcu.COLUMN_NAME, kcu.CONSTRAINT_NAME

View File

@ -120,7 +120,7 @@ func (d *driveri) Renderer() *render.Renderer {
// Open implements driver.DatabaseOpener.
func (d *driveri) Open(ctx context.Context, src *source.Source) (driver.Database, error) {
lg.From(ctx).Debug(lgm.OpenSrc, lga.Src, src)
lg.FromContext(ctx).Debug(lgm.OpenSrc, lga.Src, src)
db, err := d.doOpen(ctx, src)
if err != nil {
@ -509,7 +509,7 @@ func (d *database) SourceMetadata(ctx context.Context) (*source.Metadata, error)
// Close implements driver.Database.
func (d *database) Close() error {
d.log.Debug(lgm.CloseDB, lga.Src, d.src)
d.log.Debug(lgm.CloseDB, lga.Handle, d.src.Handle)
return errz.Err(d.db.Close())
}

View File

@ -84,7 +84,7 @@ func (d *drvr) DriverMetadata() driver.Metadata {
// Open implements driver.DatabaseOpener.
func (d *drvr) Open(ctx context.Context, src *source.Source) (driver.Database, error) {
lg.From(ctx).Debug(lgm.OpenSrc, lga.Src, src)
lg.FromContext(ctx).Debug(lgm.OpenSrc, lga.Src, src)
clnup := cleanup.New()
@ -193,7 +193,7 @@ func (d *database) SourceMetadata(ctx context.Context) (*source.Metadata, error)
// Close implements driver.Database.
func (d *database) Close() error {
d.log.Debug(lgm.CloseDB, lga.Src, d.src)
d.log.Debug(lgm.CloseDB, lga.Handle, d.src.Handle)
// We don't need to explicitly invoke c.impl.Close
// because that's already been added to c.cleanup.

View File

@ -36,7 +36,7 @@ func Import(ctx context.Context, def *userdriver.DriverDef, data io.Reader, dest
}
im := &importer{
log: lg.From(ctx),
log: lg.FromContext(ctx),
def: def,
selStack: newSelStack(),
rowStack: newRowStack(),

View File

@ -29,7 +29,7 @@ import (
// xlsxToScratch loads the data in xlFile into scratchDB.
func xlsxToScratch(ctx context.Context, src *source.Source, xlFile *xlsx.File, scratchDB driver.Database) error {
log := lg.From(ctx)
log := lg.FromContext(ctx)
start := time.Now()
log.Debug("Beginning import from XLSX",
lga.Src, src,
@ -91,7 +91,7 @@ func xlsxToScratch(ctx context.Context, src *source.Source, xlFile *xlsx.File, s
func importSheetToTable(ctx context.Context, sheet *xlsx.Sheet, hasHeader bool,
scratchDB driver.Database, tblDef *sqlmodel.TableDef,
) error {
log := lg.From(ctx)
log := lg.FromContext(ctx)
startTime := time.Now()
conn, err := scratchDB.DB().Conn(ctx)
@ -189,7 +189,7 @@ func buildTblDefsForSheets(ctx context.Context, sheets []*xlsx.Sheet, hasHeader
default:
}
tblDef, err := buildTblDefForSheet(lg.From(gCtx), sheets[i], hasHeader)
tblDef, err := buildTblDefForSheet(lg.FromContext(gCtx), sheets[i], hasHeader)
if err != nil {
return err
}

View File

@ -51,7 +51,7 @@ var _ source.DriverDetectFunc = DetectXLSX
func DetectXLSX(ctx context.Context, openFn source.FileOpenFunc) (detected source.DriverType, score float32,
err error,
) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
var r io.ReadCloser
r, err = openFn()
if err != nil {
@ -93,7 +93,7 @@ func (d *Driver) DriverMetadata() driver.Metadata {
// Open implements driver.DatabaseOpener.
func (d *Driver) Open(ctx context.Context, src *source.Source) (driver.Database, error) {
lg.From(ctx).Debug(lgm.OpenSrc, lga.Src, src)
lg.FromContext(ctx).Debug(lgm.OpenSrc, lga.Src, src)
r, err := d.files.Open(src)
if err != nil {
@ -303,7 +303,7 @@ func (d *database) TableMetadata(_ context.Context, tblName string) (*source.Tab
// Close implements driver.Database.
func (d *database) Close() error {
d.log.Debug(lgm.CloseDB, lga.Src, d.src)
d.log.Debug(lgm.CloseDB, lga.Handle, d.src.Handle)
// No need to explicitly invoke c.impl.Close because
// that's already added to c.clnup

View File

@ -19,15 +19,14 @@ import (
type contextKey struct{}
// NewContext returns a context that contains the given Logger.
// Use From to retrieve the Logger.
// Use FromContext to retrieve the Logger.
func NewContext(ctx context.Context, l *slog.Logger) context.Context {
return context.WithValue(ctx, contextKey{}, l)
}
// From returns the Logger stored in ctx by NewContext, or the Discard
// Logger if there is none. This function is typically named FromContext,
// but we're experimenting with simply naming it From.
func From(ctx context.Context) *slog.Logger {
// FromContext returns the Logger stored in ctx by NewContext,
// or the Discard logger if there is none.
func FromContext(ctx context.Context) *slog.Logger {
v := ctx.Value(contextKey{})
if v == nil {
return Discard()

View File

@ -14,7 +14,7 @@ func TestContext(t *testing.T) {
log := slogt.New(t)
ctx = lg.NewContext(ctx, log)
log = lg.From(ctx)
log = lg.FromContext(ctx)
log.Info("huzzah")
}

View File

@ -23,6 +23,7 @@ const (
Loc = "loc"
Opts = "opts"
Path = "path"
Pid = "pid"
Query = "query"
SLQ = "slq"
SQL = "sql"

View File

@ -24,6 +24,9 @@ type Opt interface {
// Key returns the Opt key, such as "ping.timeout".
Key() string
// Comment returns the Opt's comment.
Comment() string
// String returns a log/debug friendly representation.
String() string
@ -56,6 +59,11 @@ func (op baseOpt) Key() string {
return op.key
}
// Comment implements options.Opt.
func (op baseOpt) Comment() string {
return op.comment
}
// IsSet implements options.Opt.
func (op baseOpt) IsSet(o Options) bool {
if o == nil {

View File

@ -13,6 +13,7 @@
package options
import (
"context"
"fmt"
"sync"
"time"
@ -23,6 +24,32 @@ import (
"golang.org/x/exp/slices"
)
type contextKey struct{}
// NewContext returns a context that contains the given Options.
// Use FromContext to retrieve the Options.
//
// NOTE: It's questionable whether we need to engage in this context
// business with Options. This is a bit of an experiment.
func NewContext(ctx context.Context, o Options) context.Context {
return context.WithValue(ctx, contextKey{}, o)
}
// FromContext returns the Options stored in ctx by NewContext, or nil
// if no such Options.
func FromContext(ctx context.Context) Options {
v := ctx.Value(contextKey{})
if v == nil {
return nil
}
if v, ok := v.(Options); ok {
return v
}
return nil
}
// Registry is a registry of Opt instances.
type Registry struct {
mu sync.Mutex

View File

@ -1,6 +1,7 @@
package options_test
import (
"context"
"os"
"testing"
"time"
@ -173,3 +174,16 @@ func TestDeleteNil(t *testing.T) {
require.Lenf(t, o, 5, "o should not be modified")
require.Equal(t, options.Options{"a": 1, "d": 2}, got)
}
func TestContext(t *testing.T) {
ctx := context.Background()
ctx = options.NewContext(ctx, nil)
gotOpts := options.FromContext(ctx)
require.Nil(t, gotOpts)
opts := options.Options{"a": 1}
ctx = options.NewContext(ctx, opts)
gotOpts = options.FromContext(ctx)
require.Equal(t, opts, gotOpts)
}

View File

@ -1,6 +1,6 @@
options:
format: csv
format.header: true
header: true
ping.timeout: 10s
shell-completion.timeout: 500ms

View File

@ -111,7 +111,8 @@ func FormatFloat(f float64) string {
}
// ByteSized returns a human-readable byte size, e.g. "2.1 MB", "3.0 TB", etc.
// TODO: replace this usage with "github.com/c2h5oh/datasize"
// TODO: replace this usage with "github.com/c2h5oh/datasize",
// or maybe https://github.com/docker/go-units/.
func ByteSized(size int64, precision int, sep string) string {
f := float64(size)
tpl := "%." + strconv.Itoa(precision) + "f" + sep

View File

@ -146,7 +146,7 @@ func (w *DBWriter) Open(ctx context.Context, cancelFn context.CancelFunc, recMet
err = <-w.bi.ErrCh // Wait for batch inserter to complete
if err != nil {
lg.From(ctx).Error(err.Error())
lg.FromContext(ctx).Error(err.Error())
w.addErrs(err)
w.rollback(ctx, tx, err)
return
@ -154,10 +154,10 @@ func (w *DBWriter) Open(ctx context.Context, cancelFn context.CancelFunc, recMet
commitErr := errz.Err(tx.Commit())
if commitErr != nil {
lg.From(ctx).Error(commitErr.Error())
lg.FromContext(ctx).Error(commitErr.Error())
w.addErrs(commitErr)
} else {
lg.From(ctx).Debug("Tx commit success",
lg.FromContext(ctx).Debug("Tx commit success",
lga.Target, source.Target(w.destDB.Source(), w.destTbl))
}
@ -212,11 +212,11 @@ func (w *DBWriter) addErrs(errs ...error) {
// need to close those manually.
func (w *DBWriter) rollback(ctx context.Context, tx *sql.Tx, causeErrs ...error) {
// Guaranteed to be at least one causeErr
lg.From(ctx).Error("failed to insert to %s.%s: tx rollback due to: %s",
lg.FromContext(ctx).Error("failed to insert to %s.%s: tx rollback due to: %s",
w.destDB.Source().Handle, w.destTbl, causeErrs[0])
rollbackErr := errz.Err(tx.Rollback())
lg.WarnIfError(lg.From(ctx), lgm.TxRollback, rollbackErr)
lg.WarnIfError(lg.FromContext(ctx), lgm.TxRollback, rollbackErr)
w.addErrs(causeErrs...)
w.addErrs(rollbackErr)

View File

@ -32,7 +32,7 @@ import (
func ConfigureDB(ctx context.Context, db *sql.DB, o options.Options) {
o2 := options.Effective(o, OptConnMaxOpen, OptConnMaxIdle, OptConnMaxIdleTime, OptConnMaxLifetime)
lg.From(ctx).Debug("Setting config on DB conn", "config", o2)
lg.FromContext(ctx).Debug("Setting config on DB conn", "config", o2)
db.SetMaxOpenConns(OptConnMaxOpen.Get(o2))
db.SetMaxIdleConns(OptConnMaxIdle.Get(o2))
@ -54,24 +54,64 @@ A value of zero indicates no limit.`,
OptConnMaxIdle = options.NewInt(
"conn.max-idle",
2,
"",
"source", "sql",
`Set the maximum number of connections in the idle connection pool.
If conn.max-open is greater than 0 but less than the new conn.max-idle,
then the new conn.max-idle will be reduced to match the conn.max-open limit.
If n <= 0, no idle connections are retained.`,
"source",
)
// OptConnMaxIdleTime controls sql.DB.SetConnMaxIdleTime.
OptConnMaxIdleTime = options.NewDuration(
"conn.max-idle-time",
time.Second*2,
"",
"source", "sql",
`Sets the maximum amount of time a connection may be idle.
Expired connections may be closed lazily before reuse. If n <= 0,
connections are not closed due to a connection's idle time.`,
"source",
)
// OptConnMaxLifetime controls sql.DB.SetConnMaxLifetime.
OptConnMaxLifetime = options.NewDuration(
"conn.max-lifetime",
time.Minute*10,
"",
"source", "sql",
`Set the maximum amount of time a connection may be reused.
Expired connections may be closed lazily before reuse.
If n <= 0, connections are not closed due to a connection's age.`,
"source",
)
// OptMaxRetryInterval is the maximum interval to wait
// between retries.
OptMaxRetryInterval = options.NewDuration(
"retry.max-interval",
time.Second*3,
`The maximum interval to wait between retries.
If an operation is retryable (for example, if the DB has too many clients),
repeated retry operations back off, typically using a Fibonacci backoff.`,
"source",
)
// OptTuningErrgroupLimit controls the maximum number of goroutines that can be spawned
// by an errgroup.
OptTuningErrgroupLimit = options.NewInt("tuning.errgroup-limit",
16,
`Controls the maximum number of goroutines that can be spawned
by an errgroup. Note that this is the limit for any one errgroup, but not a
ceiling on the total number of goroutines spawned, as some errgroups may
themselves start an errgroup.
This knob is primarily for internal use. Ultimately it should go away
in favor of dynamic errgroup limit setting based on availability
of additional DB conns, etc.`,
"tuning")
// OptTuningRecChanSize is the size of the buffer chan for record
// insertion/writing.
OptTuningRecChanSize = options.NewInt("tuning.record-buffer",
1024,
`Controls the size of the buffer channel for record insertion/writing.`,
"tuning",
)
)
@ -306,7 +346,7 @@ func NewDatabases(log *slog.Logger, drvrs Provider, scratchSrcFn ScratchSrcFunc)
//
// Open implements DatabaseOpener.
func (d *Databases) Open(ctx context.Context, src *source.Source) (Database, error) {
lg.From(ctx).Debug(lgm.OpenSrc, lga.Src, src)
lg.FromContext(ctx).Debug(lgm.OpenSrc, lga.Src, src)
d.mu.Lock()
defer d.mu.Unlock()
@ -397,7 +437,7 @@ func (d *Databases) OpenJoin(ctx context.Context, src1, src2 *source.Source, src
// Close closes d, invoking Close on any instances opened via d.Open.
func (d *Databases) Close() error {
d.log.Debug("Closing databases(s)", lga.Count, d.clnup.Len())
d.log.Debug("Closing databases(s)...", lga.Count, d.clnup.Len())
return d.clnup.Run()
}

View File

@ -410,7 +410,7 @@ func (bi BatchInsert) Munge(rec []any) error {
func NewBatchInsert(ctx context.Context, drvr SQLDriver, db sqlz.DB,
destTbl string, destColNames []string, batchSize int,
) (*BatchInsert, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
err := requireSingleConn(db)
if err != nil {
@ -551,7 +551,7 @@ func MaxBatchRows(drvr SQLDriver, numCols int) int {
// that checks the values of rec against destMeta and
// performs necessary munging. For example, if any element
// is a ptr to an empty string and the dest type
// is a not of kind Text, the empty string was probably
// is not of kind Text, the empty string was probably
// intended to mean nil. This happens when the original
// source doesn't handle nil, e.g. with CSV, where nil is
// effectively represented by "".

View File

@ -1,31 +0,0 @@
package driver
import "time"
// Tuning holds tuning params. Ultimately these params
// could come from user config or be dynamically calculated/adjusted?
//
// FIXME: move all of these to options.Options.
var Tuning = struct {
// ErrgroupLimit is passed to errgroup.Group.SetLimit.
// Note that this is the limit for any one errgroup, but
// not a ceiling on the total number of goroutines spawned,
// as some errgroups may themselves start an errgroup.
ErrgroupLimit int
// RecordChSize is the size of the buffer chan for record
// insertion/writing.
RecordChSize int
// SampleSize is the number of samples that a detector should
// take to determine type.
SampleSize int
// MaxRetryInterval is the maximum interval to wait between retries.
MaxRetryInterval time.Duration
}{
ErrgroupLimit: 16,
RecordChSize: 1024,
SampleSize: 1024,
MaxRetryInterval: time.Second * 3,
}

View File

@ -4,6 +4,8 @@ import (
"context"
"fmt"
"github.com/neilotoole/sq/libsq/core/options"
"github.com/neilotoole/sq/libsq/ast/render"
"github.com/neilotoole/sq/libsq/core/lg"
@ -47,7 +49,7 @@ type engine struct {
}
func newEngine(ctx context.Context, qc *QueryContext, query string) (*engine, error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
a, err := ast.Parse(log, query)
if err != nil {
@ -75,7 +77,8 @@ func newEngine(ctx context.Context, qc *QueryContext, query string) (*engine, er
func (ng *engine) execute(ctx context.Context, recw RecordWriter) error {
ng.log.Debug(
"Execute SQL query",
lga.Target, ng.targetDB.Source().Handle,
lga.Src, ng.targetDB.Source(),
// lga.Target, ng.targetDB.Source().Handle,
lga.SQL, ng.targetSQL,
)
@ -100,7 +103,7 @@ func (ng *engine) executeTasks(ctx context.Context) error {
}
g, gCtx := errgroup.WithContext(ctx)
g.SetLimit(driver.Tuning.ErrgroupLimit)
g.SetLimit(driver.OptTuningErrgroupLimit.Get(options.FromContext(ctx)))
for _, task := range ng.tasks {
task := task
@ -295,7 +298,7 @@ func (jt *joinCopyTask) executeTask(ctx context.Context) error {
func execCopyTable(ctx context.Context, fromDB driver.Database, fromTblName string,
destDB driver.Database, destTblName string,
) error {
log := lg.From(ctx)
log := lg.FromContext(ctx)
createTblHook := func(ctx context.Context, originRecMeta sqlz.RecordMeta, destDB driver.Database,
tx sqlz.DB,
@ -312,7 +315,12 @@ func execCopyTable(ctx context.Context, fromDB driver.Database, fromTblName stri
return nil
}
inserter := NewDBWriter(destDB, destTblName, driver.Tuning.RecordChSize, createTblHook)
inserter := NewDBWriter(
destDB,
destTblName,
driver.OptTuningRecChanSize.Get(destDB.Source().Options),
createTblHook,
)
query := "SELECT * FROM " + fromDB.SQLDriver().Dialect().Enquote(fromTblName)
err := QuerySQL(ctx, fromDB, inserter, query)

View File

@ -117,7 +117,7 @@ func SLQ2SQL(ctx context.Context, qc *QueryContext, query string) (targetSQL str
// to wait for recw to complete.
// The caller is responsible for closing dbase.
func QuerySQL(ctx context.Context, dbase driver.Database, recw RecordWriter, query string, args ...any) error {
log := lg.From(ctx)
log := lg.FromContext(ctx)
rows, err := dbase.DB().QueryContext(ctx, query, args...)
if err != nil {

View File

@ -38,7 +38,7 @@ import (
// the implementation currently requires that we read the entire source
// file into fscache before it's available to be read (which is awful
// if we're reading long-running pipe from stdin). This entire thing
// needs to be revisited.
// needs to be revisited. Maybe Files even becomes a fs.FS.
type Files struct {
log *slog.Logger
mu sync.Mutex
@ -49,7 +49,7 @@ type Files struct {
// NewFiles returns a new Files instance.
func NewFiles(ctx context.Context) (*Files, error) {
fs := &Files{clnup: cleanup.New(), log: lg.From(ctx)}
fs := &Files{clnup: cleanup.New(), log: lg.FromContext(ctx)}
tmpdir, err := os.MkdirTemp("", "sq_files_fscache_*")
if err != nil {
@ -463,7 +463,7 @@ var _ DriverDetectFunc = DetectMagicNumber
// the start of files.
func DetectMagicNumber(ctx context.Context, openFn FileOpenFunc,
) (detected DriverType, score float32, err error) {
log := lg.From(ctx)
log := lg.FromContext(ctx)
var r io.ReadCloser
r, err = openFn()
if err != nil {

View File

@ -12,6 +12,8 @@ import (
"testing"
"time"
"github.com/neilotoole/sq/drivers"
"github.com/neilotoole/sq/cli"
"github.com/neilotoole/sq/cli/buildinfo"
"github.com/neilotoole/sq/cli/config/yamlstore"
@ -179,7 +181,11 @@ func (h *Helper) init() {
h.registry.AddProvider(json.TypeJSON, jsonp)
h.registry.AddProvider(json.TypeJSONA, jsonp)
h.registry.AddProvider(json.TypeJSONL, jsonp)
h.files.AddDriverDetectors(json.DetectJSON, json.DetectJSONA, json.DetectJSONL)
h.files.AddDriverDetectors(
json.DetectJSON(drivers.OptIngestSampleSize.Get(nil)),
json.DetectJSONA(drivers.OptIngestSampleSize.Get(nil)),
json.DetectJSONL(drivers.OptIngestSampleSize.Get(nil)),
)
h.registry.AddProvider(xlsx.Type, &xlsx.Provider{Log: log, Scratcher: h.databases, Files: h.files})
h.files.AddDriverDetectors(xlsx.DetectXLSX)
@ -734,7 +740,7 @@ func DriverDetectors() []source.DriverDetectFunc {
source.DetectMagicNumber,
xlsx.DetectXLSX,
csv.DetectCSV, csv.DetectTSV,
/*json.DetectJSON,*/ json.DetectJSONA, json.DetectJSONL, // FIXME: enable DetectJSON when it's ready
/*json.DetectJSON,*/ json.DetectJSONA(1000), json.DetectJSONL(1000), // FIXME: enable DetectJSON when it's ready
}
}