Lint long lines (#115)

* lint config now sorts results

* linted long lines

* linted long lines
Author: Neil O'Toole, 2022-12-17 19:43:53 -07:00 (committed by GitHub)
parent 2831211ae9
commit 540adfac58
38 changed files with 242 additions and 117 deletions

View File

@ -17,6 +17,9 @@ run:
- cli/output/jsonw/internal/jcolorenc
- cli/output/tablew/internal
output:
sort-results: true
# This file contains only configs which differ from defaults.
# All possible options can be found here https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml
linters-settings:

View File

@ -557,7 +557,8 @@ type writers struct {
// newWriters returns a writers instance configured per defaults and/or
// flags from cmd. The returned out2/errOut2 values may differ
// from the out/errOut args (e.g. decorated to support colorization).
func newWriters(log lg.Log, cmd *cobra.Command, defaults config.Defaults, out, errOut io.Writer) (w *writers, out2, errOut2 io.Writer) {
func newWriters(log lg.Log, cmd *cobra.Command, defaults config.Defaults, out, errOut io.Writer) (w *writers,
out2, errOut2 io.Writer) {
var fm *output.Formatting
fm, out2, errOut2 = getWriterFormatting(cmd, out, errOut)

View File

@ -108,7 +108,8 @@ func execPing(cmd *cobra.Command, args []string) error {
//
// originally laid down before context.Context was a thing. Thus,
// the entire thing could probably be rewritten for simplicity.
func pingSources(ctx context.Context, log lg.Log, dp driver.Provider, srcs []*source.Source, w output.PingWriter, timeout time.Duration) error {
func pingSources(ctx context.Context, log lg.Log, dp driver.Provider, srcs []*source.Source, w output.PingWriter,
timeout time.Duration) error {
w.Open(srcs)
defer log.WarnIfFuncError(w.Close)
@ -160,7 +161,8 @@ func pingSources(ctx context.Context, log lg.Log, dp driver.Provider, srcs []*so
// pingSource pings an individual driver.Source. It always returns a
// result on resultCh, even when ctx is done.
func pingSource(ctx context.Context, dp driver.Provider, src *source.Source, timeout time.Duration, resultCh chan<- pingResult) {
func pingSource(ctx context.Context, dp driver.Provider, src *source.Source, timeout time.Duration,
resultCh chan<- pingResult) {
drvr, err := dp.DriverFor(src.Type)
if err != nil {
resultCh <- pingResult{src: src, err: err}
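
The pingSource comment above promises that a result is always delivered on resultCh, even when ctx is done; that guarantee typically comes from selecting on ctx.Done(). A minimal standalone sketch of the pattern, under simplified assumptions (pingOne and ping are hypothetical stand-ins, not sq code):

package main

import (
	"context"
	"fmt"
	"time"
)

// pingOne always sends exactly one value on resultCh, even if ctx is
// canceled before the ping completes: the select on ctx.Done() guarantees
// the send happens either way.
func pingOne(ctx context.Context, timeout time.Duration, resultCh chan<- error) {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	done := make(chan error, 1)
	go func() { done <- ping(ctx) }()

	select {
	case <-ctx.Done():
		resultCh <- ctx.Err()
	case err := <-done:
		resultCh <- err
	}
}

// ping is a placeholder for the real driver-level ping.
func ping(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(10 * time.Millisecond):
		return nil
	}
}

func main() {
	resultCh := make(chan error, 1)
	pingOne(context.Background(), time.Second, resultCh)
	fmt.Println("ping result:", <-resultCh)
}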

View File

@ -99,7 +99,8 @@ func execTblCopy(cmd *cobra.Command, args []string) error {
sqlDrvr, ok := tblHandles[0].drvr.(driver.SQLDriver)
if !ok {
return errz.Errorf("source type %q (%s) doesn't support dropping tables", tblHandles[0].src.Type, tblHandles[0].src.Handle)
return errz.Errorf("source type %q (%s) doesn't support dropping tables", tblHandles[0].src.Type,
tblHandles[0].src.Handle)
}
copyData := true // copy data by default

View File

@ -108,7 +108,8 @@ type handleTableCompleter struct {
}
// complete is the completionFunc for handleTableCompleter.
func (c *handleTableCompleter) complete(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
func (c *handleTableCompleter) complete(cmd *cobra.Command, args []string, toComplete string) ([]string,
cobra.ShellCompDirective) {
rc := RunContextFrom(cmd.Context())
if err := rc.init(); err != nil {
rc.Log.Error(err)
@ -152,7 +153,8 @@ func (c *handleTableCompleter) complete(cmd *cobra.Command, args []string, toCom
// completeTableOnly returns suggestions given input beginning with
// a period. Effectively this is completion for tables in the
// active src.
func (c *handleTableCompleter) completeTableOnly(ctx context.Context, rc *RunContext, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
func (c *handleTableCompleter) completeTableOnly(ctx context.Context, rc *RunContext, args []string,
toComplete string) ([]string, cobra.ShellCompDirective) {
activeSrc := rc.Config.Sources.Active()
if activeSrc == nil {
rc.Log.Error("Active source is nil")
@ -188,7 +190,8 @@ func (c *handleTableCompleter) completeTableOnly(ctx context.Context, rc *RunCon
// completeHandle returns suggestions given input beginning with
// a '@'. The returned suggestions could be @HANDLE, or @HANDLE.TABLE.
func (c *handleTableCompleter) completeHandle(ctx context.Context, rc *RunContext, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
func (c *handleTableCompleter) completeHandle(ctx context.Context, rc *RunContext, args []string,
toComplete string) ([]string, cobra.ShellCompDirective) {
// We're dealing with a handle.
// But we could be dealing with just the handle ("@sakila_sl3")
@ -280,7 +283,8 @@ func (c *handleTableCompleter) completeHandle(ctx context.Context, rc *RunContex
return suggestions, cobra.ShellCompDirectiveNoFileComp
}
func (c *handleTableCompleter) completeEither(ctx context.Context, rc *RunContext, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
func (c *handleTableCompleter) completeEither(ctx context.Context, rc *RunContext, args []string,
toComplete string) ([]string, cobra.ShellCompDirective) {
// There's no input yet.
// Therefore we want to return a union of all handles
// plus the tables from the active source.

View File

@ -56,7 +56,8 @@ func NewRecordWriterAdapter(rw RecordWriter) *RecordWriterAdapter {
}
// Open implements libsq.RecordWriter.
func (w *RecordWriterAdapter) Open(ctx context.Context, cancelFn context.CancelFunc, recMeta sqlz.RecordMeta) (chan<- sqlz.Record, <-chan error, error) {
func (w *RecordWriterAdapter) Open(ctx context.Context, cancelFn context.CancelFunc,
recMeta sqlz.RecordMeta) (chan<- sqlz.Record, <-chan error, error) {
w.cancelFn = cancelFn
err := w.rw.Open(recMeta)

View File

@ -156,7 +156,8 @@ func checkStdinSource(ctx context.Context, rc *RunContext) (*source.Source, erro
// newSource creates a new Source instance where the
// driver type is known. Opts may be nil.
func newSource(log lg.Log, dp driver.Provider, typ source.Type, handle, location string, opts options.Options) (*source.Source, error) {
func newSource(log lg.Log, dp driver.Provider, typ source.Type, handle, location string,
opts options.Options) (*source.Source, error) {
if opts == nil {
log.Debugf("Create new data source %q [%s] from %q",
handle, typ, location)

View File

@ -224,16 +224,19 @@ var (
)
// DetectCSV implements source.TypeDetectFunc.
func DetectCSV(ctx context.Context, log lg.Log, openFn source.FileOpenFunc) (detected source.Type, score float32, err error) {
func DetectCSV(ctx context.Context, log lg.Log, openFn source.FileOpenFunc) (detected source.Type, score float32,
err error) {
return detectType(ctx, TypeCSV, log, openFn)
}
// DetectTSV implements source.TypeDetectFunc.
func DetectTSV(ctx context.Context, log lg.Log, openFn source.FileOpenFunc) (detected source.Type, score float32, err error) {
func DetectTSV(ctx context.Context, log lg.Log, openFn source.FileOpenFunc) (detected source.Type,
score float32, err error) {
return detectType(ctx, TypeTSV, log, openFn)
}
func detectType(ctx context.Context, typ source.Type, log lg.Log, openFn source.FileOpenFunc) (detected source.Type, score float32, err error) {
func detectType(ctx context.Context, typ source.Type, log lg.Log, openFn source.FileOpenFunc) (detected source.Type,
score float32, err error) {
var r io.ReadCloser
r, err = openFn()
if err != nil {

View File

@ -26,7 +26,8 @@ const (
)
// importCSV loads the src CSV data to scratchDB.
func importCSV(ctx context.Context, log lg.Log, src *source.Source, openFn source.FileOpenFunc, scratchDB driver.Database) error {
func importCSV(ctx context.Context, log lg.Log, src *source.Source, openFn source.FileOpenFunc,
scratchDB driver.Database) error {
// TODO: optPredictKind should be read from src.Options.
const optPredictKind bool = true
@ -104,7 +105,8 @@ func importCSV(ctx context.Context, log lg.Log, src *source.Source, openFn sourc
// execInsert inserts the CSV records in readAheadRecs (followed by records
// from the csv.Reader) via recw. The caller should wait on recw to complete.
func execInsert(ctx context.Context, recw libsq.RecordWriter, recMeta sqlz.RecordMeta, readAheadRecs [][]string, r *csv.Reader) error {
func execInsert(ctx context.Context, recw libsq.RecordWriter, recMeta sqlz.RecordMeta,
readAheadRecs [][]string, r *csv.Reader) error {
ctx, cancelFn := context.WithCancel(ctx)
recordCh, errCh, err := recw.Open(ctx, cancelFn, recMeta)
@ -206,7 +208,8 @@ func createTblDef(tblName string, colNames []string, kinds []kind.Kind) *sqlmode
// kind is excluded from the list of candidate kinds. The first of any
// remaining candidate kinds for each field is returned, or kind.Text if
// no candidate kinds.
func predictColKinds(expectFieldCount int, r *csv.Reader, readAheadRecs *[][]string, maxExamine int) ([]kind.Kind, error) {
func predictColKinds(expectFieldCount int, r *csv.Reader, readAheadRecs *[][]string, maxExamine int) ([]kind.Kind,
error) {
// FIXME: [legacy] this function should switch to using kind.Detector
candidateKinds := newCandidateFieldKinds(expectFieldCount)
@ -215,7 +218,8 @@ func predictColKinds(expectFieldCount int, r *csv.Reader, readAheadRecs *[][]str
// First, read any records from the readAheadRecs buffer
for recIndex := 0; recIndex < len(*readAheadRecs) && examineCount < maxExamine; recIndex++ {
for fieldIndex := 0; fieldIndex < expectFieldCount; fieldIndex++ {
candidateKinds[fieldIndex] = excludeFieldKinds(candidateKinds[fieldIndex], (*readAheadRecs)[recIndex][fieldIndex])
candidateKinds[fieldIndex] = excludeFieldKinds(candidateKinds[fieldIndex],
(*readAheadRecs)[recIndex][fieldIndex])
}
examineCount++
}
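
The predictColKinds comment above describes narrowing a set of candidate kinds per field and falling back to kind.Text when none survive. A simplified sketch of that idea, with Kind, excludeKinds, and pickKind as hypothetical stand-ins for kind.Kind and the real detector logic:

package main

import (
	"fmt"
	"strconv"
)

// Kind is a stand-in for kind.Kind; only a few kinds are modeled.
type Kind string

const (
	KindText  Kind = "text"
	KindInt   Kind = "int"
	KindFloat Kind = "float"
)

// excludeKinds drops any candidate kind that cannot represent val. The
// parse checks are deliberately crude placeholders.
func excludeKinds(candidates []Kind, val string) []Kind {
	var keep []Kind
	for _, k := range candidates {
		switch k {
		case KindInt:
			if _, err := strconv.Atoi(val); err == nil {
				keep = append(keep, k)
			}
		case KindFloat:
			if _, err := strconv.ParseFloat(val, 64); err == nil {
				keep = append(keep, k)
			}
		default:
			keep = append(keep, k) // text accepts anything
		}
	}
	return keep
}

// pickKind returns the first surviving candidate, or KindText when none
// remain: the fallback described in the comment above.
func pickKind(candidates []Kind) Kind {
	if len(candidates) == 0 {
		return KindText
	}
	return candidates[0]
}

func main() {
	candidates := []Kind{KindInt, KindFloat, KindText}
	for _, val := range []string{"7", "3.14"} {
		candidates = excludeKinds(candidates, val)
	}
	fmt.Println(pickKind(candidates)) // float
}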

View File

@ -259,7 +259,8 @@ func (p *processor) doAddObject(ent *entity, m map[string]any) error {
// Child already exists
if child.isArray {
// Safety check
return errz.Errorf("JSON entity %q previously detected as array, but now detected as object", ent.String())
return errz.Errorf("JSON entity %q previously detected as array, but now detected as object",
ent.String())
}
}
@ -423,7 +424,8 @@ type importSchema struct {
entityTbls map[*entity]*sqlmodel.TableDef
}
func execSchemaDelta(ctx context.Context, log lg.Log, drvr driver.SQLDriver, db sqlz.DB, curSchema, newSchema *importSchema) error {
func execSchemaDelta(ctx context.Context, log lg.Log, drvr driver.SQLDriver, db sqlz.DB,
curSchema, newSchema *importSchema) error {
var err error
if curSchema == nil {
for _, tblDef := range newSchema.tblDefs {

View File

@ -16,7 +16,8 @@ import (
// DetectJSON implements source.TypeDetectFunc.
// The function returns TypeJSON for two varieties of input:
func DetectJSON(ctx context.Context, log lg.Log, openFn source.FileOpenFunc) (detected source.Type, score float32, err error) {
func DetectJSON(ctx context.Context, log lg.Log, openFn source.FileOpenFunc) (detected source.Type, score float32,
err error) {
var r1, r2 io.ReadCloser
r1, err = openFn()
if err != nil {
@ -359,7 +360,8 @@ func (s *objectsInArrayScanner) next() (obj map[string]any, chunk []byte, err er
switch delim {
default:
// bad input
return nil, nil, errz.Errorf("invalid JSON: expected comma or right-bracket ']' token but got: %s", formatToken(tok))
return nil, nil, errz.Errorf("invalid JSON: expected comma or right-bracket ']' token but got: %s",
formatToken(tok))
case ']':
// should be end of input

View File

@ -21,7 +21,8 @@ import (
// DetectJSONA implements source.TypeDetectFunc for TypeJSONA.
// Each line of input must be a valid JSON array.
func DetectJSONA(ctx context.Context, log lg.Log, openFn source.FileOpenFunc) (detected source.Type, score float32, err error) {
func DetectJSONA(ctx context.Context, log lg.Log, openFn source.FileOpenFunc) (detected source.Type, score float32,
err error) {
var r io.ReadCloser
r, err = openFn()
if err != nil {
@ -157,7 +158,8 @@ func importJSONA(ctx context.Context, log lg.Log, job importJob) error {
// startInsertJSONA reads JSON records from r and sends
// them on recordCh.
func startInsertJSONA(ctx context.Context, recordCh chan<- sqlz.Record, errCh <-chan error, r io.Reader, mungeFns []kind.MungeFunc) error {
func startInsertJSONA(ctx context.Context, recordCh chan<- sqlz.Record, errCh <-chan error, r io.Reader,
mungeFns []kind.MungeFunc) error {
defer close(recordCh)
sc := bufio.NewScanner(r)

View File

@ -15,7 +15,8 @@ import (
)
// DetectJSONL implements source.TypeDetectFunc.
func DetectJSONL(ctx context.Context, log lg.Log, openFn source.FileOpenFunc) (detected source.Type, score float32, err error) {
func DetectJSONL(ctx context.Context, log lg.Log, openFn source.FileOpenFunc) (detected source.Type, score float32,
err error) {
var r io.ReadCloser
r, err = openFn()
if err != nil {

View File

@ -126,7 +126,8 @@ func getNewRecordFunc(rowMeta sqlz.RecordMeta) driver.NewRecordFunc {
}
// else, we don't know what to do with this col
return nil, errz.Errorf("column %d %s: unknown type db(%T) with kind(%s), val(%v)", i, rowMeta[i].Name(), rec[i], rowMeta[i].Kind(), rec[i])
return nil, errz.Errorf("column %d %s: unknown type db(%T) with kind(%s), val(%v)", i, rowMeta[i].Name(),
rec[i], rowMeta[i].Kind(), rec[i])
}
return rec, nil
}
@ -167,7 +168,8 @@ WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = ?`
// getColumnMetadata returns column metadata for tblName.
func getColumnMetadata(ctx context.Context, log lg.Log, db sqlz.DB, tblName string) ([]*source.ColMetadata, error) {
const query = `SELECT column_name, data_type, column_type, ordinal_position, column_default, is_nullable, column_key, column_comment, extra
const query = `SELECT column_name, data_type, column_type, ordinal_position, column_default,
is_nullable, column_key, column_comment, extra
FROM information_schema.columns cols
WHERE cols.TABLE_SCHEMA = DATABASE() AND cols.TABLE_NAME = ?
ORDER BY cols.ordinal_position ASC`
@ -185,7 +187,8 @@ ORDER BY cols.ordinal_position ASC`
var isNullable, colKey, extra string
defVal := &sql.NullString{}
err = rows.Scan(&col.Name, &col.BaseType, &col.ColumnType, &col.Position, defVal, &isNullable, &colKey, &col.Comment, &extra)
err = rows.Scan(&col.Name, &col.BaseType, &col.ColumnType, &col.Position, defVal, &isNullable, &colKey,
&col.Comment, &extra)
if err != nil {
return nil, errz.Err(err)
}
@ -264,7 +267,8 @@ func setSourceSummaryMeta(ctx context.Context, db sqlz.DB, md *source.Metadata)
FROM information_schema.TABLES WHERE TABLE_SCHEMA = DATABASE()) AS size`
var version, versionComment, versionOS, versionArch, schema string
err := db.QueryRowContext(ctx, summaryQuery).Scan(&version, &versionComment, &versionOS, &versionArch, &schema, &md.User, &md.Size)
err := db.QueryRowContext(ctx, summaryQuery).Scan(&version, &versionComment, &versionOS, &versionArch, &schema,
&md.User, &md.Size)
if err != nil {
return errz.Err(err)
}
@ -304,8 +308,10 @@ func getDBVarsMeta(ctx context.Context, log lg.Log, db sqlz.DB) ([]source.DBVar,
// getAllTblMetas returns TableMetadata for each table/view in db.
func getAllTblMetas(ctx context.Context, log lg.Log, db sqlz.DB) ([]*source.TableMetadata, error) {
const query = `SELECT t.TABLE_SCHEMA, t.TABLE_NAME, t.TABLE_TYPE, t.TABLE_COMMENT, (DATA_LENGTH + INDEX_LENGTH) AS table_size,
c.COLUMN_NAME, c.ORDINAL_POSITION, c.COLUMN_KEY, c.DATA_TYPE, c.COLUMN_TYPE, c.IS_NULLABLE, c.COLUMN_DEFAULT, c.COLUMN_COMMENT, c.EXTRA
const query = `SELECT t.TABLE_SCHEMA, t.TABLE_NAME, t.TABLE_TYPE, t.TABLE_COMMENT,
(DATA_LENGTH + INDEX_LENGTH) AS table_size,
c.COLUMN_NAME, c.ORDINAL_POSITION, c.COLUMN_KEY, c.DATA_TYPE, c.COLUMN_TYPE,
c.IS_NULLABLE, c.COLUMN_DEFAULT, c.COLUMN_COMMENT, c.EXTRA
FROM information_schema.TABLES t
LEFT JOIN information_schema.COLUMNS c
ON c.TABLE_CATALOG = t.TABLE_CATALOG

View File

@ -113,7 +113,8 @@ func (d *driveri) AlterTableAddColumn(ctx context.Context, db *sql.DB, tbl, col
}
// PrepareInsertStmt implements driver.SQLDriver.
func (d *driveri) PrepareInsertStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string, numRows int) (*driver.StmtExecer, error) {
func (d *driveri) PrepareInsertStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string,
numRows int) (*driver.StmtExecer, error) {
destColsMeta, err := d.getTableRecordMeta(ctx, db, destTbl, destColNames)
if err != nil {
return nil, err
@ -129,7 +130,8 @@ func (d *driveri) PrepareInsertStmt(ctx context.Context, db sqlz.DB, destTbl str
}
// PrepareUpdateStmt implements driver.SQLDriver.
func (d *driveri) PrepareUpdateStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string, where string) (*driver.StmtExecer, error) {
func (d *driveri) PrepareUpdateStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string,
where string) (*driver.StmtExecer, error) {
destColsMeta, err := d.getTableRecordMeta(ctx, db, destTbl, destColNames)
if err != nil {
return nil, err
@ -204,7 +206,8 @@ func (d *driveri) DropTable(ctx context.Context, db sqlz.DB, tbl string, ifExist
}
// TableColumnTypes implements driver.SQLDriver.
func (d *driveri) TableColumnTypes(ctx context.Context, db sqlz.DB, tblName string, colNames []string) ([]*sql.ColumnType, error) {
func (d *driveri) TableColumnTypes(ctx context.Context, db sqlz.DB, tblName string,
colNames []string) ([]*sql.ColumnType, error) {
const queryTpl = "SELECT %s FROM %s LIMIT 0"
dialect := d.Dialect()
@ -243,7 +246,8 @@ func (d *driveri) TableColumnTypes(ctx context.Context, db sqlz.DB, tblName stri
return colTypes, nil
}
func (d *driveri) getTableRecordMeta(ctx context.Context, db sqlz.DB, tblName string, colNames []string) (sqlz.RecordMeta, error) {
func (d *driveri) getTableRecordMeta(ctx context.Context, db sqlz.DB, tblName string,
colNames []string) (sqlz.RecordMeta, error) {
colTypes, err := d.TableColumnTypes(ctx, db, tblName, colNames)
if err != nil {
return nil, err
@ -294,7 +298,8 @@ func (d *driveri) Ping(ctx context.Context, src *source.Source) error {
// Truncate implements driver.SQLDriver. Arg reset is
// always ignored: the identity value is always reset by
// the TRUNCATE statement.
func (d *driveri) Truncate(ctx context.Context, src *source.Source, tbl string, reset bool) (affected int64, err error) {
func (d *driveri) Truncate(ctx context.Context, src *source.Source, tbl string, reset bool) (affected int64,
err error) {
// https://dev.mysql.com/doc/refman/8.0/en/truncate-table.html
dsn, err := dsnFromLocation(src, true)
if err != nil {

View File

@ -314,7 +314,8 @@ AND table_name = $1`
pgTbl := &pgTable{}
err := db.QueryRowContext(ctx, tablesQuery, tblName).
Scan(&pgTbl.tableCatalog, &pgTbl.tableSchema, &pgTbl.tableName, &pgTbl.tableType, &pgTbl.isInsertable, &pgTbl.rowCount, &pgTbl.size, &pgTbl.oid, &pgTbl.comment)
Scan(&pgTbl.tableCatalog, &pgTbl.tableSchema, &pgTbl.tableName, &pgTbl.tableType, &pgTbl.isInsertable,
&pgTbl.rowCount, &pgTbl.size, &pgTbl.oid, &pgTbl.comment)
if err != nil {
return nil, errz.Err(err)
}
@ -607,7 +608,8 @@ func setTblMetaConstraints(log lg.Log, tblMeta *source.TableMetadata, pgConstrai
colMeta := tblMeta.Column(pgc.columnName)
if colMeta == nil {
// Shouldn't happen
log.Warnf("No column %s.%s found matching constraint %q", tblMeta.Name, pgc.columnName, pgc.constraintName)
log.Warnf("No column %s.%s found matching constraint %q", tblMeta.Name, pgc.columnName,
pgc.constraintName)
continue
}
colMeta.PrimaryKey = true

View File

@ -132,7 +132,8 @@ func (d *driveri) Ping(ctx context.Context, src *source.Source) error {
// row count of tbl before executing TRUNCATE. This row count
// query is not part of a transaction with TRUNCATE, although
// possibly it should be, as the number of rows may have changed.
func (d *driveri) Truncate(ctx context.Context, src *source.Source, tbl string, reset bool) (affected int64, err error) {
func (d *driveri) Truncate(ctx context.Context, src *source.Source, tbl string, reset bool) (affected int64,
err error) {
// https://www.postgresql.org/docs/9.1/sql-truncate.html
// RESTART IDENTITY and CASCADE/RESTRICT are from pg 8.2 onwards
@ -185,7 +186,8 @@ func (d *driveri) AlterTableAddColumn(ctx context.Context, db *sql.DB, tbl, col
}
// PrepareInsertStmt implements driver.SQLDriver.
func (d *driveri) PrepareInsertStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string, numRows int) (*driver.StmtExecer, error) {
func (d *driveri) PrepareInsertStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string,
numRows int) (*driver.StmtExecer, error) {
// Note that the pgx driver doesn't support res.LastInsertId.
// https://github.com/jackc/pgx/issues/411
@ -199,12 +201,14 @@ func (d *driveri) PrepareInsertStmt(ctx context.Context, db sqlz.DB, destTbl str
return nil, err
}
execer := driver.NewStmtExecer(stmt, driver.DefaultInsertMungeFunc(destTbl, destColsMeta), newStmtExecFunc(stmt), destColsMeta)
execer := driver.NewStmtExecer(stmt, driver.DefaultInsertMungeFunc(destTbl, destColsMeta), newStmtExecFunc(stmt),
destColsMeta)
return execer, nil
}
// PrepareUpdateStmt implements driver.SQLDriver.
func (d *driveri) PrepareUpdateStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string, where string) (*driver.StmtExecer, error) {
func (d *driveri) PrepareUpdateStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string,
where string) (*driver.StmtExecer, error) {
destColsMeta, err := d.getTableRecordMeta(ctx, db, destTbl, destColNames)
if err != nil {
return nil, err
@ -220,7 +224,8 @@ func (d *driveri) PrepareUpdateStmt(ctx context.Context, db sqlz.DB, destTbl str
return nil, err
}
execer := driver.NewStmtExecer(stmt, driver.DefaultInsertMungeFunc(destTbl, destColsMeta), newStmtExecFunc(stmt), destColsMeta)
execer := driver.NewStmtExecer(stmt, driver.DefaultInsertMungeFunc(destTbl, destColsMeta), newStmtExecFunc(stmt),
destColsMeta)
return execer, nil
}
@ -280,7 +285,8 @@ func (d *driveri) DropTable(ctx context.Context, db sqlz.DB, tbl string, ifExist
}
// TableColumnTypes implements driver.SQLDriver.
func (d *driveri) TableColumnTypes(ctx context.Context, db sqlz.DB, tblName string, colNames []string) ([]*sql.ColumnType, error) {
func (d *driveri) TableColumnTypes(ctx context.Context, db sqlz.DB, tblName string,
colNames []string) ([]*sql.ColumnType, error) {
// We have to do some funky stuff to get the column types
// from when the table has no rows.
// https://stackoverflow.com/questions/8098795/return-a-value-if-no-record-is-found
@ -345,7 +351,8 @@ func (d *driveri) TableColumnTypes(ctx context.Context, db sqlz.DB, tblName stri
return colTypes, nil
}
func (d *driveri) getTableRecordMeta(ctx context.Context, db sqlz.DB, tblName string, colNames []string) (sqlz.RecordMeta, error) {
func (d *driveri) getTableRecordMeta(ctx context.Context, db sqlz.DB, tblName string,
colNames []string) (sqlz.RecordMeta, error) {
colTypes, err := d.TableColumnTypes(ctx, db, tblName, colNames)
if err != nil {
return nil, err

View File

@ -86,7 +86,8 @@ func (d *driveri) Open(ctx context.Context, src *source.Source) (driver.Database
}
// Truncate implements driver.Driver.
func (d *driveri) Truncate(ctx context.Context, src *source.Source, tbl string, reset bool) (affected int64, err error) {
func (d *driveri) Truncate(ctx context.Context, src *source.Source, tbl string, reset bool) (affected int64,
err error) {
dsn, err := PathFromLocation(src)
if err != nil {
return 0, err
@ -180,7 +181,8 @@ func (d *driveri) CopyTable(ctx context.Context, db sqlz.DB, fromTable, toTable
// we need to do something more complicated.
var originTblCreateStmt string
err := db.QueryRowContext(ctx, fmt.Sprintf("SELECT sql FROM sqlite_master WHERE type='table' AND name='%s'", fromTable)).Scan(&originTblCreateStmt)
err := db.QueryRowContext(ctx, fmt.Sprintf("SELECT sql FROM sqlite_master WHERE type='table' AND name='%s'",
fromTable)).Scan(&originTblCreateStmt)
if err != nil {
return 0, errz.Err(err)
}
@ -285,7 +287,8 @@ func (d *driveri) TableExists(ctx context.Context, db sqlz.DB, tbl string) (bool
}
// PrepareInsertStmt implements driver.SQLDriver.
func (d *driveri) PrepareInsertStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string, numRows int) (*driver.StmtExecer, error) {
func (d *driveri) PrepareInsertStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string,
numRows int) (*driver.StmtExecer, error) {
destColsMeta, err := d.getTableRecordMeta(ctx, db, destTbl, destColNames)
if err != nil {
return nil, err
@ -336,7 +339,8 @@ func newStmtExecFunc(stmt *sql.Stmt) driver.StmtExecFunc {
}
// TableColumnTypes implements driver.SQLDriver.
func (d *driveri) TableColumnTypes(ctx context.Context, db sqlz.DB, tblName string, colNames []string) ([]*sql.ColumnType, error) {
func (d *driveri) TableColumnTypes(ctx context.Context, db sqlz.DB, tblName string,
colNames []string) ([]*sql.ColumnType, error) {
// Given the dynamic behavior of sqlite's rows.ColumnTypes,
// this query selects a single row, as that'll give us more
// accurate column type info than no rows. For other db
@ -394,7 +398,8 @@ func (d *driveri) TableColumnTypes(ctx context.Context, db sqlz.DB, tblName stri
return colTypes, nil
}
func (d *driveri) getTableRecordMeta(ctx context.Context, db sqlz.DB, tblName string, colNames []string) (sqlz.RecordMeta, error) {
func (d *driveri) getTableRecordMeta(ctx context.Context, db sqlz.DB, tblName string,
colNames []string) (sqlz.RecordMeta, error) {
colTypes, err := d.TableColumnTypes(ctx, db, tblName, colNames)
if err != nil {
return nil, err

View File

@ -148,7 +148,8 @@ GROUP BY database_id) AS total_size_bytes`
if hasErrCode(err, errCodeObjectNotExist) {
// This can happen if the table is dropped while
// we're collecting metadata. We log a warning and continue.
log.Warnf("table metadata: table %q appears not to exist (continuing regardless): %v", tblNames[i], err)
log.Warnf("table metadata: table %q appears not to exist (continuing regardless): %v", tblNames[i],
err)
return nil
}
return err
@ -175,7 +176,8 @@ GROUP BY database_id) AS total_size_bytes`
return md, nil
}
func getTableMetadata(ctx context.Context, log lg.Log, db sqlz.DB, tblCatalog, tblSchema, tblName, tblType string) (*source.TableMetadata, error) {
func getTableMetadata(ctx context.Context, log lg.Log, db sqlz.DB,
tblCatalog, tblSchema, tblName, tblType string) (*source.TableMetadata, error) {
const tplTblUsage = `sp_spaceused '%s'`
tblMeta := &source.TableMetadata{Name: tblName, DBTableType: tblType}
@ -306,7 +308,8 @@ ORDER BY TABLE_NAME ASC, TABLE_TYPE ASC`
return tblNames, tblTypes, nil
}
func getColumnMeta(ctx context.Context, log lg.Log, db sqlz.DB, tblCatalog, tblSchema, tblName string) ([]columnMeta, error) {
func getColumnMeta(ctx context.Context, log lg.Log, db sqlz.DB, tblCatalog, tblSchema, tblName string) ([]columnMeta,
error) {
// TODO: sq doesn't use all of these columns, no need to select them all.
const query = `SELECT
@ -335,7 +338,8 @@ func getColumnMeta(ctx context.Context, log lg.Log, db sqlz.DB, tblCatalog, tblS
err = rows.Scan(&c.TableCatalog, &c.TableSchema, &c.TableName, &c.ColumnName, &c.OrdinalPosition,
&c.ColumnDefault, &c.Nullable, &c.DataType, &c.CharMaxLength, &c.CharOctetLength, &c.NumericPrecision,
&c.NumericPrecisionRadix, &c.NumericScale, &c.DateTimePrecision, &c.CharSetCatalog, &c.CharSetSchema,
&c.CharSetName, &c.CollationCatalog, &c.CollationSchema, &c.CollationName, &c.DomainCatalog, &c.DomainSchema, &c.DomainName)
&c.CharSetName, &c.CollationCatalog, &c.CollationSchema, &c.CollationName, &c.DomainCatalog,
&c.DomainSchema, &c.DomainName)
if err != nil {
return nil, errz.Err(err)
}
@ -350,8 +354,10 @@ func getColumnMeta(ctx context.Context, log lg.Log, db sqlz.DB, tblCatalog, tblS
return cols, nil
}
func getConstraints(ctx context.Context, log lg.Log, db sqlz.DB, tblCatalog, tblSchema, tblName string) ([]constraintMeta, error) {
const query = `SELECT kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, tc.CONSTRAINT_TYPE, kcu.COLUMN_NAME, kcu.CONSTRAINT_NAME
func getConstraints(ctx context.Context, log lg.Log, db sqlz.DB,
tblCatalog, tblSchema, tblName string) ([]constraintMeta, error) {
const query = `SELECT kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, tc.CONSTRAINT_TYPE,
kcu.COLUMN_NAME, kcu.CONSTRAINT_NAME
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS kcu
ON tc.TABLE_NAME = kcu.TABLE_NAME
@ -372,7 +378,8 @@ func getConstraints(ctx context.Context, log lg.Log, db sqlz.DB, tblCatalog, tbl
for rows.Next() {
c := constraintMeta{}
err = rows.Scan(&c.TableCatalog, &c.TableSchema, &c.TableName, &c.ConstraintType, &c.ColumnName, &c.ConstraintName)
err = rows.Scan(&c.TableCatalog, &c.TableSchema, &c.TableName, &c.ConstraintType, &c.ColumnName,
&c.ConstraintName)
if err != nil {
return nil, errz.Err(err)
}

View File

@ -142,7 +142,11 @@ func (d *driveri) Ping(ctx context.Context, src *source.Source) error {
// operation is implemented in two statements. First "DELETE FROM tbl" to
// delete all rows. Then, if reset is true, the table sequence counter
// is reset via RESEED.
func (d *driveri) Truncate(ctx context.Context, src *source.Source, tbl string, reset bool) (affected int64, err error) {
//
//nolint:lll
func (d *driveri) Truncate(ctx context.Context, src *source.Source, tbl string, reset bool) (affected int64,
err error) {
// https://docs.microsoft.com/en-us/sql/t-sql/statements/truncate-table-transact-sql?view=sql-server-ver15
// When there are foreign key constraints on mssql tables,
@ -175,7 +179,8 @@ func (d *driveri) Truncate(ctx context.Context, src *source.Source, tbl string,
}
// TableColumnTypes implements driver.SQLDriver.
func (d *driveri) TableColumnTypes(ctx context.Context, db sqlz.DB, tblName string, colNames []string) ([]*sql.ColumnType, error) {
func (d *driveri) TableColumnTypes(ctx context.Context, db sqlz.DB, tblName string,
colNames []string) ([]*sql.ColumnType, error) {
// SQLServer has this unusual incantation for its LIMIT equivalent:
//
// SELECT username, email, address_id FROM person
@ -308,7 +313,8 @@ func (d *driveri) DropTable(ctx context.Context, db sqlz.DB, tbl string, ifExist
}
// PrepareInsertStmt implements driver.SQLDriver.
func (d *driveri) PrepareInsertStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string, numRows int) (*driver.StmtExecer, error) {
func (d *driveri) PrepareInsertStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string,
numRows int) (*driver.StmtExecer, error) {
destColsMeta, err := d.getTableColsMeta(ctx, db, destTbl, destColNames)
if err != nil {
return nil, err
@ -319,12 +325,14 @@ func (d *driveri) PrepareInsertStmt(ctx context.Context, db sqlz.DB, destTbl str
return nil, err
}
execer := driver.NewStmtExecer(stmt, driver.DefaultInsertMungeFunc(destTbl, destColsMeta), newStmtExecFunc(stmt, db, destTbl), destColsMeta)
execer := driver.NewStmtExecer(stmt, driver.DefaultInsertMungeFunc(destTbl, destColsMeta),
newStmtExecFunc(stmt, db, destTbl), destColsMeta)
return execer, nil
}
// PrepareUpdateStmt implements driver.SQLDriver.
func (d *driveri) PrepareUpdateStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string, where string) (*driver.StmtExecer, error) {
func (d *driveri) PrepareUpdateStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string,
where string) (*driver.StmtExecer, error) {
destColsMeta, err := d.getTableColsMeta(ctx, db, destTbl, destColNames)
if err != nil {
return nil, err
@ -340,11 +348,13 @@ func (d *driveri) PrepareUpdateStmt(ctx context.Context, db sqlz.DB, destTbl str
return nil, err
}
execer := driver.NewStmtExecer(stmt, driver.DefaultInsertMungeFunc(destTbl, destColsMeta), newStmtExecFunc(stmt, db, destTbl), destColsMeta)
execer := driver.NewStmtExecer(stmt, driver.DefaultInsertMungeFunc(destTbl, destColsMeta),
newStmtExecFunc(stmt, db, destTbl), destColsMeta)
return execer, nil
}
func (d *driveri) getTableColsMeta(ctx context.Context, db sqlz.DB, tblName string, colNames []string) (sqlz.RecordMeta, error) {
func (d *driveri) getTableColsMeta(ctx context.Context, db sqlz.DB, tblName string, colNames []string) (sqlz.RecordMeta,
error) {
// SQLServer has this unusual incantation for its LIMIT equivalent:
//
// SELECT username, email, address_id FROM person
@ -497,8 +507,10 @@ func setIdentityInsert(ctx context.Context, db sqlz.DB, tbl string, on bool) err
}
// mssql error codes
// https://docs.microsoft.com/en-us/sql/relational-databases/errors-events/database-engine-events-and-errors?view=sql-server-ver15
//
//nolint:lll
const (
// See: https://docs.microsoft.com/en-us/sql/relational-databases/errors-events/database-engine-events-and-errors?view=sql-server-ver15
errCodeIdentityInsert int32 = 544
errCodeObjectNotExist int32 = 15009
)
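
Earlier in this file's hunks, the Truncate doc comment explains the two-statement approach: DELETE FROM to remove all rows, then a RESEED when reset is true. A rough standalone sketch under those assumptions (truncateViaDelete is hypothetical; identifier quoting and the driver's error handling are omitted):

package example

import (
	"context"
	"database/sql"
	"fmt"
)

// truncateViaDelete sketches the two-statement truncate: DELETE FROM removes
// the rows, then DBCC CHECKIDENT reseeds the identity counter when reset is
// true. This is an illustration, not the sq driver implementation.
func truncateViaDelete(ctx context.Context, db *sql.DB, tbl string, reset bool) (int64, error) {
	res, err := db.ExecContext(ctx, fmt.Sprintf("DELETE FROM %s", tbl))
	if err != nil {
		return 0, err
	}

	affected, err := res.RowsAffected()
	if err != nil {
		return 0, err
	}

	if reset {
		_, err = db.ExecContext(ctx, fmt.Sprintf("DBCC CHECKIDENT ('%s', RESEED, 0)", tbl))
	}
	return affected, err
}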

View File

@ -146,7 +146,8 @@ func (im *importer) execImport(ctx context.Context, r io.Reader, destDB driver.D
col := curRow.tbl.ColBySelector(im.selStack.selector())
if col == nil {
if msg, ok := im.msgOncef("Skip: element %q is not a column of table %q", elem.Name.Local, curRow.tbl.Name); ok {
if msg, ok := im.msgOncef("Skip: element %q is not a column of table %q", elem.Name.Local,
curRow.tbl.Name); ok {
im.log.Debug(msg)
}
continue
@ -301,7 +302,8 @@ func (im *importer) setForeignColsVals(row *rowState) error {
parts := strings.Split(col.Foreign, "/")
// parts will look like [ "..", "channel_id" ]
if len(parts) != 2 || parts[0] != ".." {
return errz.Errorf(`%s.%s: "foreign" field should be of form "../col_name" but was %q`, row.tbl.Name, col.Name, col.Foreign)
return errz.Errorf(`%s.%s: "foreign" field should be of form "../col_name" but was %q`, row.tbl.Name,
col.Name, col.Foreign)
}
fkName := parts[1]
@ -313,7 +315,8 @@ func (im *importer) setForeignColsVals(row *rowState) error {
fkVal, ok := parentRow.savedColVals[fkName]
if !ok {
return errz.Errorf(`%s.%s: unable to find foreign key value in parent table %q`, row.tbl.Name, col.Name, parentRow.tbl.Name)
return errz.Errorf(`%s.%s: unable to find foreign key value in parent table %q`, row.tbl.Name, col.Name,
parentRow.tbl.Name)
}
row.dirtyColVals[col.Name] = fkVal

View File

@ -21,9 +21,11 @@ import (
)
// xlsxToScratch loads the data in xlFile into scratchDB.
func xlsxToScratch(ctx context.Context, log lg.Log, src *source.Source, xlFile *xlsx.File, scratchDB driver.Database) error {
func xlsxToScratch(ctx context.Context, log lg.Log, src *source.Source, xlFile *xlsx.File,
scratchDB driver.Database) error {
start := time.Now()
log.Debugf("Beginning import from XLSX %s to %s (%s)...", src.Handle, scratchDB.Source().Handle, scratchDB.Source().RedactedLocation())
log.Debugf("Beginning import from XLSX %s to %s (%s)...", src.Handle, scratchDB.Source().Handle,
scratchDB.Source().RedactedLocation())
hasHeader, _, err := options.HasHeader(src.Options)
if err != nil {
@ -72,7 +74,8 @@ func xlsxToScratch(ctx context.Context, log lg.Log, src *source.Source, xlFile *
// importSheetToTable imports sheet's data to its scratch table.
// The scratch table must already exist.
func importSheetToTable(ctx context.Context, log lg.Log, sheet *xlsx.Sheet, hasHeader bool, scratchDB driver.Database, tblDef *sqlmodel.TableDef) error {
func importSheetToTable(ctx context.Context, log lg.Log, sheet *xlsx.Sheet, hasHeader bool, scratchDB driver.Database,
tblDef *sqlmodel.TableDef) error {
startTime := time.Now()
conn, err := scratchDB.DB().Conn(ctx)
@ -154,7 +157,8 @@ func isEmptyRow(row *xlsx.Row) bool {
// buildTblDefsForSheets returns a TableDef for each sheet. If the
// sheet is empty (has no data), the TableDef for that sheet will be nil.
func buildTblDefsForSheets(ctx context.Context, log lg.Log, sheets []*xlsx.Sheet, hasHeader bool) ([]*sqlmodel.TableDef, error) {
func buildTblDefsForSheets(ctx context.Context, log lg.Log, sheets []*xlsx.Sheet, hasHeader bool) ([]*sqlmodel.TableDef,
error) {
tblDefs := make([]*sqlmodel.TableDef, len(sheets))
g, _ := errgroup.WithContext(ctx)
@ -341,7 +345,8 @@ func rowToRecord(log lg.Log, destColKinds []kind.Kind, row *xlsx.Row, sheetName
// it's not an int, it's not a float, it's not empty string;
// just give up and make it a string.
log.Warnf("Failed to determine type of numeric cell [%s:%d:%d] from value: %q", sheetName, rowIndex, j, cell.Value)
log.Warnf("Failed to determine type of numeric cell [%s:%d:%d] from value: %q", sheetName, rowIndex, j,
cell.Value)
vals[j] = cell.Value
// FIXME: prob should return an error here?
case xlsx.CellTypeString:

View File

@ -4,9 +4,10 @@ package xlsx
import (
"context"
"database/sql"
"io"
"github.com/neilotoole/lg"
"github.com/tealeg/xlsx/v2"
"io"
"github.com/neilotoole/sq/libsq/core/cleanup"
"github.com/neilotoole/sq/libsq/core/errz"
@ -40,7 +41,8 @@ var _ source.TypeDetectFunc = DetectXLSX
// DetectXLSX implements source.TypeDetectFunc, returning
// TypeXLSX and a score of 1.0 valid XLSX.
func DetectXLSX(ctx context.Context, log lg.Log, openFn source.FileOpenFunc) (detected source.Type, score float32, err error) {
func DetectXLSX(ctx context.Context, log lg.Log, openFn source.FileOpenFunc) (detected source.Type, score float32,
err error) {
var r io.ReadCloser
r, err = openFn()
if err != nil {

View File

@ -67,7 +67,8 @@ func (el *antlrErrorListener) String() string {
}
// SyntaxError implements antlr.ErrorListener.
func (el *antlrErrorListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, line, column int, msg string, e antlr.RecognitionException) {
func (el *antlrErrorListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, line, column int,
msg string, e antlr.RecognitionException) {
text := fmt.Sprintf("%s: syntax error: [%d:%d] %s", el.name, line, column, msg)
el.errs = append(el.errs, text)
}

View File

@ -96,7 +96,8 @@ func (s *Segment) uniformChildren() (bool, error) {
str = append(str, typ)
}
return false, fmt.Errorf("segment [%d] has more than one element node type: [%s]", s.SegIndex(), strings.Join(str, ", "))
return false, fmt.Errorf("segment [%d] has more than one element node type: [%s]", s.SegIndex(),
strings.Join(str, ", "))
}
return true, nil

View File

@ -168,9 +168,11 @@ func (fb *BaseFragmentBuilder) Join(fnJoin *ast.Join) (string, error) {
return "", errz.Errorf("expected *ColSelector but got %T", joinExpr.Children()[0])
}
leftOperand = fmt.Sprintf("%s%s%s.%s%s%s", fb.Quote, fnJoin.LeftTbl().SelValue(), fb.Quote, fb.Quote, colSel.SelValue(), fb.Quote)
leftOperand = fmt.Sprintf("%s%s%s.%s%s%s", fb.Quote, fnJoin.LeftTbl().SelValue(), fb.Quote, fb.Quote,
colSel.SelValue(), fb.Quote)
operator = "=="
rightOperand = fmt.Sprintf("%s%s%s.%s%s%s", fb.Quote, fnJoin.RightTbl().SelValue(), fb.Quote, fb.Quote, colSel.SelValue(), fb.Quote)
rightOperand = fmt.Sprintf("%s%s%s.%s%s%s", fb.Quote, fnJoin.RightTbl().SelValue(), fb.Quote, fb.Quote,
colSel.SelValue(), fb.Quote)
} else {
var err error
@ -194,7 +196,8 @@ func (fb *BaseFragmentBuilder) Join(fnJoin *ast.Join) (string, error) {
onClause = fmt.Sprintf("ON %s %s %s", leftOperand, operator, rightOperand)
}
sql := fmt.Sprintf("FROM %s%s%s %s %s%s%s", fb.Quote, fnJoin.LeftTbl().SelValue(), fb.Quote, joinType, fb.Quote, fnJoin.RightTbl().SelValue(), fb.Quote)
sql := fmt.Sprintf("FROM %s%s%s %s %s%s%s", fb.Quote, fnJoin.LeftTbl().SelValue(), fb.Quote, joinType, fb.Quote,
fnJoin.RightTbl().SelValue(), fb.Quote)
sql = sqlAppend(sql, onClause)
return sql, nil

View File

@ -222,11 +222,13 @@ func determineJoinTables(log lg.Log, w *Walker, node Node) error {
fnJoin.leftTbl, ok = prevSeg.Children()[0].(*TblSelector)
if !ok {
return errorf("JOIN() expected table selector in previous segment, but was %T(%q)", prevSeg.Children()[0], prevSeg.Children()[0].Text())
return errorf("JOIN() expected table selector in previous segment, but was %T(%q)", prevSeg.Children()[0],
prevSeg.Children()[0].Text())
}
fnJoin.rightTbl, ok = prevSeg.Children()[1].(*TblSelector)
if !ok {
return errorf("JOIN() expected table selector in previous segment, but was %T(%q)", prevSeg.Children()[1], prevSeg.Children()[1].Text())
return errorf("JOIN() expected table selector in previous segment, but was %T(%q)", prevSeg.Children()[1],
prevSeg.Children()[1].Text())
}
return nil
}

View File

@ -67,7 +67,8 @@ func DBWriterCreateTableIfNotExistsHook(destTblName string) DBWriterPreWriteHook
// The writer writes records from recordCh to destTbl
// in destDB. The recChSize param controls the size of recordCh
// returned by the writer's Open method.
func NewDBWriter(log lg.Log, destDB driver.Database, destTbl string, recChSize int, preWriteHooks ...DBWriterPreWriteHook) *DBWriter {
func NewDBWriter(log lg.Log, destDB driver.Database, destTbl string, recChSize int,
preWriteHooks ...DBWriterPreWriteHook) *DBWriter {
return &DBWriter{
log: log,
destDB: destDB,
@ -85,7 +86,8 @@ func NewDBWriter(log lg.Log, destDB driver.Database, destTbl string, recChSize i
}
// Open implements RecordWriter.
func (w *DBWriter) Open(ctx context.Context, cancelFn context.CancelFunc, recMeta sqlz.RecordMeta) (chan<- sqlz.Record, <-chan error, error) {
func (w *DBWriter) Open(ctx context.Context, cancelFn context.CancelFunc, recMeta sqlz.RecordMeta) (chan<- sqlz.Record,
<-chan error, error) {
w.cancelFn = cancelFn
// REVISIT: tx could potentially be passed to NewDBWriter?

View File

@ -112,7 +112,8 @@ type SQLDriver interface {
//
// Note that db must guarantee a single connection: that is, db
// must be a sql.Conn or sql.Tx.
PrepareInsertStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string, numRows int) (*StmtExecer, error)
PrepareInsertStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string, numRows int) (*StmtExecer,
error)
// PrepareUpdateStmt prepares a statement for updating destColNames in
// destTbl, using the supplied where clause (which may be empty).
@ -127,7 +128,8 @@ type SQLDriver interface {
//
// Note that db must guarantee a single connection: that is, db
// must be a sql.Conn or sql.Tx.
PrepareUpdateStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string, where string) (*StmtExecer, error)
PrepareUpdateStmt(ctx context.Context, db sqlz.DB, destTbl string, destColNames []string,
where string) (*StmtExecer, error)
// CreateTable creates the table defined by tblDef. Some implementations
// may not honor all of the fields of tblDef, e.g. an impl might not

View File

@ -326,7 +326,8 @@ const Comma = ", "
// driver-specific syntax from drvr. numRows specifies
// how many rows of values are inserted by each execution of
// the insert statement (1 row being the prototypical usage).
func PrepareInsertStmt(ctx context.Context, drvr SQLDriver, db sqlz.Preparer, destTbl string, destCols []string, numRows int) (stmt *sql.Stmt, err error) {
func PrepareInsertStmt(ctx context.Context, drvr SQLDriver, db sqlz.Preparer, destTbl string, destCols []string,
numRows int) (stmt *sql.Stmt, err error) {
const stmtTpl = `INSERT INTO %s (%s) VALUES %s`
if numRows <= 0 {
@ -385,7 +386,8 @@ func (bi BatchInsert) Munge(rec []any) error {
//
// Note that the db arg must guarantee a single connection: that is,
// it must be a sql.Conn or sql.Tx.
func NewBatchInsert(ctx context.Context, log lg.Log, drvr SQLDriver, db sqlz.DB, destTbl string, destColNames []string, batchSize int) (*BatchInsert, error) {
func NewBatchInsert(ctx context.Context, log lg.Log, drvr SQLDriver, db sqlz.DB, destTbl string, destColNames []string,
batchSize int) (*BatchInsert, error) {
err := requireSingleConn(db)
if err != nil {
return nil, err
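
The PrepareInsertStmt hunk above shows the template INSERT INTO %s (%s) VALUES %s, where numRows controls how many placeholder groups fill the VALUES clause. A minimal sketch of that expansion using '?' placeholders (buildInsertSQL is hypothetical; the real builders use driver-specific placeholder syntax and quoting):

package main

import (
	"fmt"
	"strings"
)

// buildInsertSQL renders numRows groups of '?' placeholders into the
// INSERT template, e.g. "INSERT INTO person (name, email) VALUES (?, ?), (?, ?)".
func buildInsertSQL(tbl string, cols []string, numRows int) string {
	row := "(" + strings.TrimSuffix(strings.Repeat("?, ", len(cols)), ", ") + ")"
	rows := make([]string, numRows)
	for i := range rows {
		rows[i] = row
	}
	return fmt.Sprintf("INSERT INTO %s (%s) VALUES %s",
		tbl, strings.Join(cols, ", "), strings.Join(rows, ", "))
}

func main() {
	fmt.Println(buildInsertSQL("person", []string{"name", "email"}, 2))
}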

View File

@ -34,7 +34,8 @@ func (r *Registry) AddProvider(typ source.Type, p Provider) {
defer r.mu.Unlock()
if existingType, ok := r.providers[typ]; ok {
r.log.Warnf("failed to add driver provider (%T) for driver type %s: provider (%T) already registered", p, typ, existingType)
r.log.Warnf("failed to add driver provider (%T) for driver type %s: provider (%T) already registered", p, typ,
existingType)
return
}

View File

@ -173,7 +173,8 @@ func (ng *engine) buildJoinFromClause(ctx context.Context, fnJoin *ast.Join) (fr
return ng.singleSourceJoin(ctx, fnJoin)
}
func (ng *engine) singleSourceJoin(ctx context.Context, fnJoin *ast.Join) (fromClause string, fromDB driver.Database, err error) {
func (ng *engine) singleSourceJoin(ctx context.Context, fnJoin *ast.Join) (fromClause string, fromDB driver.Database,
err error) {
src, err := ng.srcs.Get(fnJoin.LeftTbl().DSName)
if err != nil {
return "", nil, err
@ -195,10 +196,12 @@ func (ng *engine) singleSourceJoin(ctx context.Context, fnJoin *ast.Join) (fromC
// crossSourceJoin returns a FROM clause that forms part of
// the SQL SELECT statement against fromDB.
func (ng *engine) crossSourceJoin(ctx context.Context, fnJoin *ast.Join) (fromClause string, fromDB driver.Database, err error) {
func (ng *engine) crossSourceJoin(ctx context.Context, fnJoin *ast.Join) (fromClause string, fromDB driver.Database,
err error) {
leftTblName, rightTblName := fnJoin.LeftTbl().SelValue(), fnJoin.RightTbl().SelValue()
if leftTblName == rightTblName {
return "", nil, errz.Errorf("JOIN tables must have distinct names (or use aliases): duplicate tbl name %q", fnJoin.LeftTbl().SelValue())
return "", nil, errz.Errorf("JOIN tables must have distinct names (or use aliases): duplicate tbl name %q",
fnJoin.LeftTbl().SelValue())
}
leftSrc, err := ng.srcs.Get(fnJoin.LeftTbl().DSName)
@ -273,8 +276,10 @@ func (jt *joinCopyTask) executeTask(ctx context.Context, log lg.Log) error {
}
// execCopyTable performs the work of copying fromDB.fromTblName to destDB.destTblName.
func execCopyTable(ctx context.Context, log lg.Log, fromDB driver.Database, fromTblName string, destDB driver.Database, destTblName string) error {
createTblHook := func(ctx context.Context, originRecMeta sqlz.RecordMeta, destDB driver.Database, tx sqlz.DB) error {
func execCopyTable(ctx context.Context, log lg.Log, fromDB driver.Database, fromTblName string, destDB driver.Database,
destTblName string) error {
createTblHook := func(ctx context.Context, originRecMeta sqlz.RecordMeta, destDB driver.Database,
tx sqlz.DB) error {
destColNames := originRecMeta.Names()
destColKinds := originRecMeta.Kinds()
destTblDef := sqlmodel.NewTableDef(destTblName, destColNames, destColKinds)
@ -329,13 +334,15 @@ func buildQueryModel(log lg.Log, a *ast.AST) (*queryModel, error) {
}
if len(selectableSeg.Children()) != 1 {
return nil, errz.Errorf("the final selectable segment must have exactly one selectable element, but found %d elements",
return nil, errz.Errorf(
"the final selectable segment must have exactly one selectable element, but found %d elements",
len(selectableSeg.Children()))
}
selectable, ok := selectableSeg.Children()[0].(ast.Selectable)
if !ok {
return nil, errz.Errorf("the final selectable segment must have exactly one selectable element, but found element %T(%q)",
return nil, errz.Errorf(
"the final selectable segment must have exactly one selectable element, but found element %T(%q)",
selectableSeg.Children()[0], selectableSeg.Children()[0].Text())
}
@ -346,12 +353,14 @@ func buildQueryModel(log lg.Log, a *ast.AST) (*queryModel, error) {
// Check if the first element of the segment is a row range, if not, just skip
if rr, ok := seg.Children()[0].(*ast.RowRange); ok {
if len(seg.Children()) != 1 {
return nil, errz.Errorf("segment [%d] with row range must have exactly one element, but found %d: %q",
return nil, errz.Errorf(
"segment [%d] with row range must have exactly one element, but found %d: %q",
seg.SegIndex(), len(seg.Children()), seg.Text())
}
if qm.Range != nil {
return nil, errz.Errorf("only one row range permitted, but found %q and %q", qm.Range.Text(), rr.Text())
return nil, errz.Errorf("only one row range permitted, but found %q and %q",
qm.Range.Text(), rr.Text())
}
log.Debugf("found row range: %q", rr.Text())

View File

@ -62,7 +62,8 @@ type RecordWriter interface {
// construction. This mechanism exists to enable a goroutine to wait
// on the writer outside of the function that invoked Open, without
// having to pass cancelFn around.
Open(ctx context.Context, cancelFn context.CancelFunc, recMeta sqlz.RecordMeta) (recCh chan<- sqlz.Record, errCh <-chan error, err error)
Open(ctx context.Context, cancelFn context.CancelFunc, recMeta sqlz.RecordMeta) (recCh chan<- sqlz.Record,
errCh <-chan error, err error)
// Wait waits for the writer to complete and returns the number of
// written rows and any error (which may be a multierr).
@ -73,7 +74,8 @@ type RecordWriter interface {
// ExecuteSLQ executes the slq query, writing the results to recw.
// The caller is responsible for closing dbases.
func ExecuteSLQ(ctx context.Context, log lg.Log, dbOpener driver.DatabaseOpener, joinDBOpener driver.JoinDatabaseOpener, srcs *source.Set, query string, recw RecordWriter) error {
func ExecuteSLQ(ctx context.Context, log lg.Log, dbOpener driver.DatabaseOpener, joinDBOpener driver.JoinDatabaseOpener,
srcs *source.Set, query string, recw RecordWriter) error {
ng, err := newEngine(ctx, log, dbOpener, joinDBOpener, srcs, query)
if err != nil {
return err
@ -82,7 +84,8 @@ func ExecuteSLQ(ctx context.Context, log lg.Log, dbOpener driver.DatabaseOpener,
return ng.execute(ctx, recw)
}
func newEngine(ctx context.Context, log lg.Log, dbOpener driver.DatabaseOpener, joinDBOpener driver.JoinDatabaseOpener, srcs *source.Set, query string) (*engine, error) {
func newEngine(ctx context.Context, log lg.Log, dbOpener driver.DatabaseOpener, joinDBOpener driver.JoinDatabaseOpener,
srcs *source.Set, query string) (*engine, error) {
a, err := ast.Parse(log, query)
if err != nil {
return nil, err
@ -113,7 +116,8 @@ func newEngine(ctx context.Context, log lg.Log, dbOpener driver.DatabaseOpener,
// before recw has finished writing, thus the caller may wish
// to wait for recw to complete.
// The caller is responsible for closing dbase.
func QuerySQL(ctx context.Context, log lg.Log, dbase driver.Database, recw RecordWriter, query string, args ...any) error {
func QuerySQL(ctx context.Context, log lg.Log, dbase driver.Database, recw RecordWriter, query string,
args ...any) error {
rows, err := dbase.DB().QueryContext(ctx, query, args...)
if err != nil {
return errz.Wrapf(err, `SQL query against %s failed: %s`, dbase.Source().Handle, query)
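
The RecordWriter hunks above describe the contract: Open returns a record channel and an error channel, and the caller later calls Wait to collect the written-row count. A caller-side sketch under pared-down, assumed types (Record, recordWriter, and writeAll are stand-ins, not the libsq API):

package example

import "context"

// Record and recordWriter are pared-down stand-ins for sqlz.Record and
// libsq.RecordWriter; they model only the Open/Wait contract.
type Record []any

type recordWriter interface {
	Open(ctx context.Context, cancelFn context.CancelFunc) (chan<- Record, <-chan error, error)
	Wait() (written int64, err error)
}

// writeAll shows the caller side of the contract: send records on the
// record channel, abort on the first error from the writer, close the
// channel when done, then Wait for the final count.
func writeAll(ctx context.Context, w recordWriter, recs []Record) (int64, error) {
	ctx, cancelFn := context.WithCancel(ctx)
	defer cancelFn()

	recCh, errCh, err := w.Open(ctx, cancelFn)
	if err != nil {
		return 0, err
	}

	for _, rec := range recs {
		select {
		case <-ctx.Done():
			return w.Wait()
		case err = <-errCh:
			return 0, err
		case recCh <- rec:
		}
	}
	close(recCh) // no more records; the writer drains and finishes

	return w.Wait()
}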

View File

@ -48,14 +48,16 @@ type Notifier interface {
// Provider is a factory that returns Notifier instances and generates notification Destinations from user parameters.
type Provider interface {
// Destination returns a notification Destination instance from the supplied parameters.
Destination(typ DestType, target string, label string, credentials string, labelAvailable func(label string) bool) (*Destination, error)
Destination(typ DestType, target string, label string, credentials string,
labelAvailable func(label string) bool) (*Destination, error)
// Notifier returns a Notifier instance for the given destination.
Notifier(dest Destination) (Notifier, error)
}
var providers = make(map[DestType]Provider)
// RegisterProvider should be invoked by notification implementations to indicate that they handle a specific destination type.
// RegisterProvider should be invoked by notification implementations to
// indicate that they handle a specific destination type.
func RegisterProvider(typ DestType, p Provider) {
providers[typ] = p
}
@ -135,8 +137,10 @@ var handlePattern = regexp.MustCompile(`\A[a-zA-Z][a-zA-Z0-9_]*$`)
// ValidHandle returns an error if handle is not an acceptable notification destination handle value.
func ValidHandle(handle string) error {
const msg = `invalid notification destination handle value %q: must begin with a letter, followed by zero or more letters, digits, or underscores, e.g. "slack_devops"` //nolint:lll
if !handlePattern.MatchString(handle) {
return errz.Errorf(`invalid notification destination handle value %q: must begin with a letter, followed by zero or more letters, digits, or underscores, e.g. "slack_devops"`, handle)
return errz.Errorf(msg, handle)
}
return nil

View File

@ -22,9 +22,10 @@ var (
//
// \A@[a-zA-Z][a-zA-Z0-9_]*$
func VerifyLegalHandle(handle string) error {
const msg = `invalid data source handle %q: must begin with @, followed by a letter, followed by zero or more letters, digits, or underscores, e.g. "@my_db1"` //nolint:lll
matches := handlePattern.MatchString(handle)
if !matches {
return errz.Errorf(`invalid data source handle %q: must begin with @, followed by a letter, followed by zero or more letters, digits, or underscores, e.g. "@my_db1"`, handle)
return errz.Errorf(msg, handle)
}
return nil
@ -35,9 +36,11 @@ func VerifyLegalHandle(handle string) error {
//
// \A[a-zA-Z_][a-zA-Z0-9_]*$`
func verifyLegalTableName(table string) error {
const msg = `invalid table name %q: must begin a letter or underscore, followed by zero or more letters, digits, or underscores, e.g. "tbl1" or "_tbl2"` //nolint:lll
matches := tablePattern.MatchString(table)
if !matches {
return errz.Errorf(`invalid table name %q: must begin a letter or underscore, followed by zero or more letters, digits, or underscores, e.g. "tbl1" or "_tbl2"`, table)
return errz.Errorf(msg, table)
}
return nil
}
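
The VerifyLegalHandle comment above quotes the pattern \A@[a-zA-Z][a-zA-Z0-9_]*$. A tiny self-contained check of sample handles against that same pattern, purely as illustration (not the sq implementation):

package main

import (
	"fmt"
	"regexp"
)

// handlePattern mirrors the pattern quoted in the VerifyLegalHandle
// comment: an @, a letter, then zero or more letters, digits, or underscores.
var handlePattern = regexp.MustCompile(`\A@[a-zA-Z][a-zA-Z0-9_]*$`)

func main() {
	for _, h := range []string{"@my_db1", "@1bad", "nope"} {
		fmt.Printf("%-8s valid=%v\n", h, handlePattern.MatchString(h))
	}
}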

View File

@ -63,8 +63,10 @@ func mustParseTime(layout, value string) time.Time {
// or withUnknown are set). If isIntBool is
// true, kind.Int is returned for "col_bool", otherwise kind.Bool.
func ColNamePerKind(isIntBool bool, withNull bool, withUnknown bool) (colNames []string, kinds []kind.Kind) {
colNames = []string{"col_int", "col_float", "col_decimal", "col_bool", "col_text", "col_datetime", "col_date", "col_time", "col_bytes"}
kinds = []kind.Kind{kind.Int, kind.Float, kind.Decimal, kind.Bool, kind.Text, kind.Datetime, kind.Date, kind.Time, kind.Bytes}
colNames = []string{"col_int", "col_float", "col_decimal", "col_bool", "col_text", "col_datetime", "col_date",
"col_time", "col_bytes"}
kinds = []kind.Kind{kind.Int, kind.Float, kind.Decimal, kind.Bool, kind.Text, kind.Datetime, kind.Date, kind.Time,
kind.Bytes}
if isIntBool {
kinds[3] = kind.Int

View File

@ -118,19 +118,23 @@ func TblPaymentColKinds() []kind.Kind {
// AllTbls returns all table names.
func AllTbls() []string {
return []string{"actor", "address", "category", "city", "country", "customer", "film", "film_actor", "film_category", "film_text", "inventory", "language", "payment", "rental", "staff", "store"}
return []string{"actor", "address", "category", "city", "country", "customer", "film", "film_actor",
"film_category", "film_text", "inventory", "language", "payment", "rental", "staff", "store"}
}
// AllTblsViews returns all table AND view names.
func AllTblsViews() []string {
return []string{"actor", "address", "category", "city", "country", "customer", "customer_list", "film", "film_actor", "film_category", "film_list", "film_text", "inventory", "language", "payment", "rental", "sales_by_film_category", "sales_by_store", "staff", "staff_list", "store"}
return []string{"actor", "address", "category", "city", "country", "customer", "customer_list", "film",
"film_actor", "film_category", "film_list", "film_text", "inventory", "language", "payment", "rental",
"sales_by_film_category", "sales_by_store", "staff", "staff_list", "store"}
}
// AllTblsExceptFilmText exists because our current postgres image is different
// from the others in that it doesn't have the film_text table.
func AllTblsExceptFilmText() []string {
// TODO: delete AllTblsExceptFilmText when postgres image is updated to include film_text.
return []string{"actor", "address", "category", "city", "country", "customer", "film", "film_actor", "film_category", "inventory", "language", "payment", "rental", "staff", "store"}
return []string{"actor", "address", "category", "city", "country", "customer", "film", "film_actor",
"film_category", "inventory", "language", "payment", "rental", "staff", "store"}
}
// URLs for sakila resources.

View File

@ -288,7 +288,9 @@ func (h *Helper) SQLDriverFor(src *source.Source) driver.SQLDriver {
drvr, err := reg.DriverFor(src.Type)
require.NoError(h.T, err)
sqlDrvr, ok := drvr.(driver.SQLDriver)
require.True(h.T, ok, "driver %T is not a driver.SQLDriver: ensure that the src passed to SQLDriverFor implements driver.SQLDriver", drvr)
require.True(h.T, ok,
"driver %T is not a driver.SQLDriver: ensure that the src passed to SQLDriverFor implements driver.SQLDriver",
drvr)
return sqlDrvr
}
@ -315,7 +317,8 @@ func (h *Helper) RowCount(src *source.Source, tbl string) int64 {
// CreateTable creates a new table in src, and inserts data, returning
// the number of data rows inserted. If dropAfter is true, the created
// table is dropped when t.Cleanup is run.
func (h *Helper) CreateTable(dropAfter bool, src *source.Source, tblDef *sqlmodel.TableDef, data ...[]any) (affected int64) {
func (h *Helper) CreateTable(dropAfter bool, src *source.Source, tblDef *sqlmodel.TableDef,
data ...[]any) (affected int64) {
dbase := h.openNew(src)
defer h.Log.WarnIfCloseError(dbase)
@ -590,7 +593,8 @@ func (h *Helper) DiffDB(src *source.Source) {
for i, beforeTbl := range beforeMeta.Tables {
assert.Equal(h.T, beforeTbl.RowCount, afterMeta.Tables[i].RowCount,
"diffdb: %s: row count for %q is expected to be %d but got %d", src.Handle, beforeTbl.Name, beforeTbl.RowCount, afterMeta.Tables[i].RowCount)
"diffdb: %s: row count for %q is expected to be %d but got %d", src.Handle, beforeTbl.Name,
beforeTbl.RowCount, afterMeta.Tables[i].RowCount)
}
})
}