dolt · commit b075143c
    docker/docker-entrypoint.sh
    skipped 18 lines
    19 19   mysql_log ERROR "$@" >&2
    20 20   exit 1
    21 21  }
     22 +docker_process_sql() {
     23 + dolt sql
     24 +}
    22 25   
    23 26  CONTAINER_DATA_DIR="/var/lib/dolt"
     27 +INIT_COMPLETED="$CONTAINER_DATA_DIR/.init_completed"
    24 28  DOLT_CONFIG_DIR="/etc/dolt/doltcfg.d"
    25 29  SERVER_CONFIG_DIR="/etc/dolt/servercfg.d"
    26 30  DOLT_ROOT_PATH="/.dolt"
    27  - 
    28  -# create all dirs in path
    29  -_create_dir() {
    30  - local path="$1"
    31  - mkdir -p "$path"
    32  -}
    33 31   
    34 32  check_for_dolt() {
    35 33   local dolt_bin=$(which dolt)
    skipped 44 lines
    80 78  # ie: docker_process_init_files /always-initdb.d/*
    81 79  # process initializer files, based on file extensions
    82 80  docker_process_init_files() {
    83  - echo
     81 + mysql_note "Running init scripts"
    84 82   local f
    85 83   for f; do
    86 84   case "$f" in
    skipped 19 lines
    106 104   done
    107 105  }
    108 106   
    109  -start_server() {
    110  - # start the server in fixed data directory at /var/lib/dolt
    111  - cd $CONTAINER_DATA_DIR
    112  - "$@"
    113  -}
    114  - 
    115 107  # if there is config file provided through /etc/dolt/doltcfg.d,
    116 108  # we overwrite $HOME/.dolt/config_global.json file with this file.
    117 109  set_dolt_config_if_defined() {
    skipped 30 lines
    148 140   if [ ! -z $CONFIG_PROVIDED ]; then
    149 141   set -- "$@" --config=$CONFIG_PROVIDED
    150 142   fi
    151  - start_server
    152 143   
    153  - # run any file provided in /docker-entrypoint-initdb.d directory after the server starts
    154  - docker_process_init_files /docker-entrypoint-initdb.d/*
     144 + if [[ ! -f $INIT_COMPLETED ]]; then
     145 + # run any file provided in /docker-entrypoint-initdb.d directory after the server starts
     146 + docker_process_init_files /docker-entrypoint-initdb.d/*
     147 + touch $INIT_COMPLETED
     148 + fi
     149 + fi
    155 150   
    156  - mysql_note "Dolt Server $dolt_version is started."
    157  - fi
    158 151   exec "$@"
    159 152  }
    160 153   
    skipped 2 lines
    docker/serverDockerfile
    skipped 19 lines
    20 20  ENTRYPOINT ["docker-entrypoint.sh"]
    21 21   
    22 22  EXPOSE 3306 33060
     23 +WORKDIR /var/lib/dolt
    23 24  CMD [ "dolt", "sql-server", "--host=0.0.0.0" , "--port=3306" ]
    24 25   
    go/cmd/dolt/commands/init.go
    skipped 87 lines
    88 88   help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, initDocs, ap))
    89 89   apr := cli.ParseArgsOrDie(ap, args, help)
    90 90   
     91 + if len(apr.Args) > 0 {
     92 + cli.PrintErrln(color.RedString("error: invalid arguments."))
     93 + return 1
     94 + }
     95 + 
    91 96   if dEnv.HasDoltDir() {
    92 97   cli.PrintErrln(color.RedString("This directory has already been initialized."))
    93 98   return 1
    skipped 80 lines
    go/cmd/dolt/commands/migrate.go
    skipped 16 lines
    17 17  import (
    18 18   "context"
    19 19   
    20  - "github.com/fatih/color"
     20 + "github.com/dolthub/dolt/go/store/types"
    21 21   
    22 22   "github.com/dolthub/dolt/go/cmd/dolt/cli"
    23 23   "github.com/dolthub/dolt/go/cmd/dolt/errhand"
    skipped 7 lines
    31 31   migrationPrompt = `Run "dolt migrate" to update this database to the latest data format`
    32 32   migrationMsg = "Migrating database to the latest data format"
    33 33   
    34  - migratePushFlag = "push"
    35  - migratePullFlag = "pull"
     34 + migrateDropConflictsFlag = "drop-conflicts"
    36 35  )
    37 36   
    38 37  var migrateDocs = cli.CommandDocumentationContent{
    skipped 26 lines
    65 64   
    66 65  func (cmd MigrateCmd) ArgParser() *argparser.ArgParser {
    67 66   ap := argparser.NewArgParser()
    68  - ap.SupportsFlag(migratePushFlag, "", "Push all migrated branches to the remote")
    69  - ap.SupportsFlag(migratePullFlag, "", "Update all local tracking refs for a migrated remote")
     67 + ap.SupportsFlag(migrateDropConflictsFlag, "", "Drop any conflicts visited during the migration")
    70 68   return ap
    71 69  }
    72 70   
    skipped 8 lines
    81 79   help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, migrateDocs, ap))
    82 80   apr := cli.ParseArgsOrDie(ap, args, help)
    83 81   
    84  - if apr.Contains(migratePushFlag) && apr.Contains(migratePullFlag) {
    85  - cli.PrintErrf(color.RedString("options --%s and --%s are mutually exclusive", migratePushFlag, migratePullFlag))
    86  - return 1
    87  - }
    88  - 
    89  - if err := MigrateDatabase(ctx, dEnv); err != nil {
     82 + dropConflicts := apr.Contains(migrateDropConflictsFlag)
     83 + if err := MigrateDatabase(ctx, dEnv, dropConflicts); err != nil {
    90 84   verr := errhand.BuildDError("migration failed").AddCause(err).Build()
    91 85   return HandleVErrAndExitCode(verr, usage)
    92 86   }
    skipped 1 lines
    94 88  }
    95 89   
    96 90  // MigrateDatabase migrates the NomsBinFormat of |dEnv.DoltDB|.
    97  -func MigrateDatabase(ctx context.Context, dEnv *env.DoltEnv) error {
     91 +func MigrateDatabase(ctx context.Context, dEnv *env.DoltEnv, dropConflicts bool) error {
    98 92   menv, err := migrate.NewEnvironment(ctx, dEnv)
    99 93   if err != nil {
    100 94   return err
    101 95   }
     96 + menv.DropConflicts = dropConflicts
     97 + 
     98 + if curr := menv.Existing.DoltDB.Format(); types.IsFormat_DOLT(curr) {
     99 + cli.Println("database is already migrated")
     100 + return nil
     101 + }
     102 + 
    102 103   p, err := menv.Migration.FS.Abs(".")
    103 104   if err != nil {
    104 105   return err
    105 106   }
    106 107   cli.Println("migrating database at tmp dir: ", p)
    107 108   
    108  - err = migrate.TraverseDAG(ctx, menv.Existing.DoltDB, menv.Migration.DoltDB)
     109 + err = migrate.TraverseDAG(ctx, menv, menv.Existing.DoltDB, menv.Migration.DoltDB)
    109 110   if err != nil {
    110 111   return err
    111 112   }
    skipped 4 lines
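
The migrate changes replace the push/pull flags with a single --drop-conflicts flag, thread it through a new DropConflicts field on the migration Environment, and short-circuit when the database is already in the __DOLT__ format. A minimal sketch, not part of this commit, of driving the updated entry point from Go:

    import (
        "context"

        "github.com/dolthub/dolt/go/cmd/dolt/commands"
        "github.com/dolthub/dolt/go/libraries/doltcore/env"
    )

    // migrateDroppingConflicts is illustrative only. Passing true mirrors the
    // new --drop-conflicts CLI flag: conflicted tables are migrated with their
    // conflicts discarded instead of aborting the traversal.
    func migrateDroppingConflicts(ctx context.Context, dEnv *env.DoltEnv) error {
        return commands.MigrateDatabase(ctx, dEnv, true)
    }
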
    go/cmd/dolt/commands/schcmds/schema.go
    skipped 24 lines
    25 25   ImportCmd{},
    26 26   ShowCmd{},
    27 27   TagsCmd{},
     28 + UpdateTagCmd{},
    28 29  })
    29 30   
    30 31  // ValidateTableNameForCreate validates the given table name for creation as a user table, returning an error if the
    skipped 11 lines
    go/cmd/dolt/commands/schcmds/update-tag.go
     1 +// Copyright 2022 Dolthub, Inc.
     2 +//
     3 +// Licensed under the Apache License, Version 2.0 (the "License");
     4 +// you may not use this file except in compliance with the License.
     5 +// You may obtain a copy of the License at
     6 +//
     7 +// http://www.apache.org/licenses/LICENSE-2.0
     8 +//
     9 +// Unless required by applicable law or agreed to in writing, software
     10 +// distributed under the License is distributed on an "AS IS" BASIS,
     11 +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 +// See the License for the specific language governing permissions and
     13 +// limitations under the License.
     14 + 
     15 +package schcmds
     16 + 
     17 +import (
     18 + "context"
     19 + "fmt"
     20 + "strconv"
     21 + 
     22 + "github.com/dolthub/dolt/go/cmd/dolt/cli"
     23 + "github.com/dolthub/dolt/go/cmd/dolt/commands"
     24 + "github.com/dolthub/dolt/go/cmd/dolt/errhand"
     25 + "github.com/dolthub/dolt/go/libraries/doltcore/env"
     26 + "github.com/dolthub/dolt/go/libraries/doltcore/schema"
     27 + "github.com/dolthub/dolt/go/libraries/utils/argparser"
     28 + "github.com/dolthub/dolt/go/store/types"
     29 +)
     30 + 
     31 +var updateTagDocs = cli.CommandDocumentationContent{
     32 + ShortDesc: "Update the tag of the specified column",
     33 + LongDesc: `{{.EmphasisLeft}}dolt schema update-tag{{.EmphasisRight}}
     34 + 
     35 +Update tag of the specified column. Useful to fix a merge that is throwing a
     36 +schema tag conflict.
     37 +`,
     38 + Synopsis: []string{
     39 + "{{.LessThan}}table{{.GreaterThan}} {{.LessThan}}column{{.GreaterThan}} {{.LessThan}}tag{{.GreaterThan}}",
     40 + },
     41 +}
     42 + 
     43 +type UpdateTagCmd struct{}
     44 + 
     45 +var _ cli.Command = UpdateTagCmd{}
     46 + 
     47 +func (cmd UpdateTagCmd) Name() string {
     48 + return "update-tag"
     49 +}
     50 + 
     51 +func (cmd UpdateTagCmd) Description() string {
     52 + return "Update a column's tag"
     53 +}
     54 + 
     55 +func (cmd UpdateTagCmd) Docs() *cli.CommandDocumentation {
     56 + ap := cmd.ArgParser()
     57 + return cli.NewCommandDocumentation(updateTagDocs, ap)
     58 +}
     59 + 
     60 +func (cmd UpdateTagCmd) ArgParser() *argparser.ArgParser {
     61 + ap := argparser.NewArgParser()
     62 + ap.ArgListHelp = append(ap.ArgListHelp, [2]string{"table", "The name of the table"})
     63 + ap.ArgListHelp = append(ap.ArgListHelp, [2]string{"column", "The name of the column"})
     64 + ap.ArgListHelp = append(ap.ArgListHelp, [2]string{"tag", "The new tag value"})
     65 + return ap
     66 +}
     67 + 
     68 +func (cmd UpdateTagCmd) Exec(ctx context.Context, commandStr string, args []string, dEnv *env.DoltEnv) int {
     69 + ap := cmd.ArgParser()
     70 + help, usage := cli.HelpAndUsagePrinters(cli.CommandDocsForCommandString(commandStr, updateTagDocs, ap))
     71 + apr := cli.ParseArgsOrDie(ap, args, help)
     72 + 
     73 + if !types.IsFormat_DOLT(dEnv.DoltDB.Format()) {
     74 + verr := errhand.BuildDError("update-tag is only available in storage format __DOLT__").Build()
     75 + return commands.HandleVErrAndExitCode(verr, usage)
     76 + }
     77 + 
     78 + if len(apr.Args) != 3 {
     79 + verr := errhand.BuildDError("must provide <table> <column> <tag>").Build()
     80 + return commands.HandleVErrAndExitCode(verr, usage)
     81 + }
     82 + 
     83 + tableName, columnName, tagStr := apr.Args[0], apr.Args[1], apr.Args[2]
     84 + 
     85 + tag, err := strconv.ParseUint(tagStr, 10, 64)
     86 + if err != nil {
     87 + verr := errhand.BuildDError("failed to parse tag").AddCause(err).Build()
     88 + return commands.HandleVErrAndExitCode(verr, usage)
     89 + }
     90 + 
     91 + root, verr := commands.GetWorkingWithVErr(dEnv)
     92 + if verr != nil {
     93 + return commands.HandleVErrAndExitCode(verr, usage)
     94 + }
     95 + 
     96 + tbl, tName, ok, err := root.GetTableInsensitive(ctx, tableName)
     97 + if err != nil {
     98 + return commands.HandleVErrAndExitCode(errhand.BuildDError("failed to get table").Build(), usage)
     99 + }
     100 + if !ok {
     101 + return commands.HandleVErrAndExitCode(errhand.BuildDError("table %s does not exist", tableName).Build(), usage)
     102 + }
     103 + 
     104 + sch, err := tbl.GetSchema(ctx)
     105 + if err != nil {
     106 + return commands.HandleVErrAndExitCode(errhand.BuildDError("failed to get schema").Build(), usage)
     107 + }
     108 + 
     109 + newSch, err := updateColumnTag(sch, columnName, tag)
     110 + if err != nil {
     111 + return commands.HandleVErrAndExitCode(errhand.BuildDError("failed to update column tag").AddCause(err).Build(), usage)
     112 + }
     113 + 
     114 + tbl, err = tbl.UpdateSchema(ctx, newSch)
     115 + if err != nil {
     116 + return commands.HandleVErrAndExitCode(errhand.BuildDError("failed to update table schema").AddCause(err).Build(), usage)
     117 + }
     118 + 
     119 + root, err = root.PutTable(ctx, tName, tbl)
     120 + if err != nil {
     121 + return commands.HandleVErrAndExitCode(errhand.BuildDError("failed to put table in root").AddCause(err).Build(), usage)
     122 + }
     123 + 
     124 + verr = commands.UpdateWorkingWithVErr(dEnv, root)
     125 + if verr != nil {
     126 + return commands.HandleVErrAndExitCode(verr, usage)
     127 + }
     128 + 
     129 + return commands.HandleVErrAndExitCode(nil, usage)
     130 +}
     131 + 
     132 +func updateColumnTag(sch schema.Schema, name string, tag uint64) (schema.Schema, error) {
     133 + var found bool
     134 + columns := sch.GetAllCols().GetColumns()
     135 + // Find column and update its tag
     136 + for i, col := range columns {
     137 + if col.Name == name {
     138 + col.Tag = tag
     139 + columns[i] = col
     140 + found = true
     141 + break
     142 + }
     143 + }
     144 + 
     145 + if !found {
     146 + return nil, fmt.Errorf("column %s does not exist", name)
     147 + }
     148 + 
     149 + newSch, err := schema.SchemaFromCols(schema.NewColCollection(columns...))
     150 + if err != nil {
     151 + return nil, err
     152 + }
     153 + 
     154 + err = newSch.SetPkOrdinals(sch.GetPkOrdinals())
     155 + if err != nil {
     156 + return nil, err
     157 + }
     158 + newSch.SetCollation(sch.GetCollation())
     159 + 
     160 + return newSch, nil
     161 +}
     162 + 
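
The new command is invoked as dolt schema update-tag <table> <column> <tag>. A minimal sketch, not part of this commit, of exercising the unexported helper from a test in the same package, using only constructors that appear elsewhere in this diff:

    import (
        "testing"

        "github.com/dolthub/dolt/go/libraries/doltcore/schema"
        "github.com/dolthub/dolt/go/store/types"
    )

    func TestUpdateColumnTag(t *testing.T) {
        sch := schema.MustSchemaFromCols(schema.NewColCollection(
            schema.NewColumn("pk", 1, types.IntKind, true, schema.NotNullConstraint{}),
            schema.NewColumn("c1", 2, types.IntKind, false),
        ))
        newSch, err := updateColumnTag(sch, "c1", 42)
        if err != nil {
            t.Fatal(err)
        }
        // the column keeps its position and type; only the tag changes
        for _, col := range newSch.GetAllCols().GetColumns() {
            if col.Name == "c1" && col.Tag != 42 {
                t.Fatalf("expected tag 42, got %d", col.Tag)
            }
        }
    }
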
    go/cmd/dolt/dolt.go
    skipped 23 lines
    24 24   "os"
    25 25   "os/exec"
    26 26   "strconv"
    27  - "sync"
    28 27   "time"
    29 28   
    30 29   "github.com/fatih/color"
    skipped 26 lines
    57 56  )
    58 57   
    59 58  const (
    60  - Version = "0.51.1"
     59 + Version = "0.51.4"
    61 60  )
    62 61   
    63 62  var dumpDocsCommand = &commands.DumpDocsCmd{}
    skipped 326 lines
    390 389   }
    391 390   
    392 391   start := time.Now()
    393  - var wg sync.WaitGroup
    394 392   ctx, stop := context.WithCancel(ctx)
    395 393   res := doltCommand.Exec(ctx, "dolt", args, dEnv)
    396 394   stop()
    397  - wg.Wait()
     395 + 
     396 + if err = dbfactory.CloseAllLocalDatabases(); err != nil {
     397 + cli.PrintErrln(err)
     398 + if res == 0 {
     399 + res = 1
     400 + }
     401 + }
    398 402   
    399 403   if csMetrics && dEnv.DoltDB != nil {
    400 404   metricsSummary := dEnv.DoltDB.CSMetricsSummary()
    skipped 44 lines
    go/go.mod
    skipped 14 lines
    15 15   github.com/dolthub/fslock v0.0.3
    16 16   github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371
    17 17   github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81
    18  - github.com/dolthub/vitess v0.0.0-20221031111135-9aad77e7b39f
     18 + github.com/dolthub/vitess v0.0.0-20221121184553-8d519d0bbb91
    19 19   github.com/dustin/go-humanize v1.0.0
    20 20   github.com/fatih/color v1.13.0
    21 21   github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
    skipped 36 lines
    58 58   github.com/cenkalti/backoff/v4 v4.1.3
    59 59   github.com/cespare/xxhash v1.1.0
    60 60   github.com/creasty/defaults v1.6.0
    61  - github.com/dolthub/go-mysql-server v0.14.1-0.20221114182009-bf520f36202a
     61 + github.com/dolthub/go-mysql-server v0.14.1-0.20221122083400-0ba736613080
    62 62   github.com/google/flatbuffers v2.0.6+incompatible
    63 63   github.com/kch42/buzhash v0.0.0-20160816060738-9bdec3dec7c6
    64 64   github.com/mitchellh/go-ps v1.0.0
    skipped 80 lines
    go/go.sum
    skipped 179 lines
    180 180  github.com/dolthub/flatbuffers v1.13.0-dh.1/go.mod h1:CorYGaDmXjHz1Z7i50PYXG1Ricn31GcA2wNOTFIQAKE=
    181 181  github.com/dolthub/fslock v0.0.3 h1:iLMpUIvJKMKm92+N1fmHVdxJP5NdyDK5bK7z7Ba2s2U=
    182 182  github.com/dolthub/fslock v0.0.3/go.mod h1:QWql+P17oAAMLnL4HGB5tiovtDuAjdDTPbuqx7bYfa0=
    183  -github.com/dolthub/go-mysql-server v0.14.1-0.20221114182009-bf520f36202a h1:ekqXdZRHO1JCJjrDHZA2y21Y9firwF+EwtL0oZ3Xgrw=
    184  -github.com/dolthub/go-mysql-server v0.14.1-0.20221114182009-bf520f36202a/go.mod h1:KtpU4Sf7J+SIat/nxoA733QTn3tdL34NtoGxEBFcTsA=
     183 +github.com/dolthub/go-mysql-server v0.14.1-0.20221122083400-0ba736613080 h1:gPzF+rlUZB/GQzcPsSM/oOP3ijQMOHAfNLnqqKFGIH0=
     184 +github.com/dolthub/go-mysql-server v0.14.1-0.20221122083400-0ba736613080/go.mod h1:drVceZC7lt+ZRzd0LnS7o3CURk7xGDDfjbsDfTKR7O0=
    185 185  github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371 h1:oyPHJlzumKta1vnOQqUnfdz+pk3EmnHS3Nd0cCT0I2g=
    186 186  github.com/dolthub/ishell v0.0.0-20220112232610-14e753f0f371/go.mod h1:dhGBqcCEfK5kuFmeO5+WOx3hqc1k3M29c1oS/R7N4ms=
    187 187  github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474 h1:xTrR+l5l+1Lfq0NvhiEsctylXinUMFhhsqaEcl414p8=
    188 188  github.com/dolthub/jsonpath v0.0.0-20210609232853-d49537a30474/go.mod h1:kMz7uXOXq4qRriCEyZ/LUeTqraLJCjf0WVZcUi6TxUY=
    189 189  github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81 h1:7/v8q9XGFa6q5Ap4Z/OhNkAMBaK5YeuEzwJt+NZdhiE=
    190 190  github.com/dolthub/sqllogictest/go v0.0.0-20201107003712-816f3ae12d81/go.mod h1:siLfyv2c92W1eN/R4QqG/+RjjX5W2+gCTRjZxBjI3TY=
    191  -github.com/dolthub/vitess v0.0.0-20221031111135-9aad77e7b39f h1:2sNrQiE4pcdgCNp09RTOsmNeepgN5rL+ep8NF8Faw9U=
    192  -github.com/dolthub/vitess v0.0.0-20221031111135-9aad77e7b39f/go.mod h1:oVFIBdqMFEkt4Xz2fzFJBNtzKhDEjwdCF0dzde39iKs=
     191 +github.com/dolthub/vitess v0.0.0-20221121184553-8d519d0bbb91 h1:xZRbvMhwTMkddrp4JVpA/APGI0kDAwvR35S5a46EbbU=
     192 +github.com/dolthub/vitess v0.0.0-20221121184553-8d519d0bbb91/go.mod h1:oVFIBdqMFEkt4Xz2fzFJBNtzKhDEjwdCF0dzde39iKs=
    193 193  github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
    194 194  github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
    195 195  github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
    skipped 1051 lines
    go/libraries/doltcore/dbfactory/file.go
    skipped 16 lines
    17 17  import (
    18 18   "context"
    19 19   "errors"
     20 + "fmt"
    20 21   "net/url"
    21 22   "os"
    22 23   "path/filepath"
     24 + "sync"
    23 25   
    24 26   "github.com/dolthub/dolt/go/libraries/utils/filesys"
    25 27   "github.com/dolthub/dolt/go/store/datas"
    skipped 17 lines
    43 45  type FileFactory struct {
    44 46  }
    45 47   
     48 +type singletonDB struct {
     49 + ddb datas.Database
     50 + vrw types.ValueReadWriter
     51 + ns tree.NodeStore
     52 +}
     53 + 
     54 +var singletonLock = new(sync.Mutex)
     55 +var singletons = make(map[string]singletonDB)
     56 + 
     57 +func CloseAllLocalDatabases() (err error) {
     58 + singletonLock.Lock()
     59 + defer singletonLock.Unlock()
     60 + for name, s := range singletons {
     61 + if cerr := s.ddb.Close(); cerr != nil {
     62 + err = fmt.Errorf("error closing DB %s (%s)", name, cerr)
     63 + }
     64 + }
     65 + return
     66 +}
     67 + 
    46 68  // PrepareDB creates the directory for the DB if it doesn't exist, and returns an error if a file or symlink is at the
    47 69  // path given
    48 70  func (fact FileFactory) PrepareDB(ctx context.Context, nbf *types.NomsBinFormat, u *url.URL, params map[string]interface{}) error {
    skipped 22 lines
    71 93   
    72 94  // CreateDB creates a local filesys backed database
    73 95  func (fact FileFactory) CreateDB(ctx context.Context, nbf *types.NomsBinFormat, urlObj *url.URL, params map[string]interface{}) (datas.Database, types.ValueReadWriter, tree.NodeStore, error) {
     96 + singletonLock.Lock()
     97 + defer singletonLock.Unlock()
     98 + 
     99 + if s, ok := singletons[urlObj.String()]; ok {
     100 + return s.ddb, s.vrw, s.ns, nil
     101 + }
     102 + 
    74 103   path, err := url.PathUnescape(urlObj.Path)
    75 104   
    76 105   if err != nil {
    skipped 38 lines
    115 144   
    116 145   vrw := types.NewValueStore(st)
    117 146   ns := tree.NewNodeStore(st)
     147 + ddb := datas.NewTypesDatabase(vrw, ns)
    118 148   
    119  - return datas.NewTypesDatabase(vrw, ns), vrw, ns, nil
     149 + singletons[urlObj.String()] = singletonDB{
     150 + ddb: ddb,
     151 + vrw: vrw,
     152 + ns: ns,
     153 + }
     154 + 
     155 + return ddb, vrw, ns, nil
    120 156  }
    121 157   
    122 158  func validateDir(path string) error {
    skipped 11 lines
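
With this change, CreateDB consults a process-wide map keyed by the database URL, so repeated opens of the same local database share one datas.Database, ValueReadWriter, and NodeStore, and CloseAllLocalDatabases (now called from dolt.go above) closes them all at exit. A generic sketch of the same pattern, with illustrative names, not part of this commit:

    import (
        "fmt"
        "io"
        "sync"
    )

    // handleCache hands back one shared handle per key and can close
    // everything at process exit; illustrative only.
    type handleCache[T io.Closer] struct {
        mu sync.Mutex
        m  map[string]T
    }

    func (c *handleCache[T]) getOrCreate(key string, create func() (T, error)) (T, error) {
        c.mu.Lock()
        defer c.mu.Unlock()
        if v, ok := c.m[key]; ok {
            return v, nil // later opens share the first handle
        }
        v, err := create()
        if err != nil {
            var zero T
            return zero, err
        }
        c.m[key] = v
        return v, nil
    }

    func (c *handleCache[T]) closeAll() (err error) {
        c.mu.Lock()
        defer c.mu.Unlock()
        // like CloseAllLocalDatabases above, only the last close error is kept
        for key, v := range c.m {
            if cerr := v.Close(); cerr != nil {
                err = fmt.Errorf("error closing %s (%s)", key, cerr)
            }
        }
        return
    }
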
    go/libraries/doltcore/diff/diff_summary.go
    skipped 31 lines
    32 32   "github.com/dolthub/dolt/go/store/val"
    33 33  )
    34 34   
     35 +var ErrPrimaryKeySetChanged = errors.New("primary key set changed")
     36 + 
    35 37  type DiffSummaryProgress struct {
    36 38   Adds, Removes, Changes, CellChanges, NewRowSize, OldRowSize, NewCellSize, OldCellSize uint64
    37 39  }
    skipped 37 lines
    75 77   }
    76 78   
    77 79   if !schema.ArePrimaryKeySetsDiffable(td.Format(), fromSch, toSch) {
    78  - return errhand.BuildDError("diff summary will not compute due to primary key set change with table %s", td.CurName()).Build()
     80 + return fmt.Errorf("failed to compute diff summary for table %s: %w", td.CurName(), ErrPrimaryKeySetChanged)
    79 81   }
    80 82   
    81 83   keyless, err := td.IsKeyless(ctx)
    skipped 262 lines
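
Swapping the errhand-built error for a wrapped sentinel lets callers distinguish a primary key set change from other failures with errors.Is; the dolt_diff_summary table function below does exactly that to degrade to a warning. A minimal sketch, not part of this commit:

    import (
        "errors"

        "github.com/dolthub/dolt/go/libraries/doltcore/diff"
    )

    // pkSetChanged reports whether err stems from a primary key set change.
    // errors.Is walks the %w chain built by fmt.Errorf in SummaryForTableDelta.
    func pkSetChanged(err error) bool {
        return errors.Is(err, diff.ErrPrimaryKeySetChanged)
    }
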
    go/libraries/doltcore/doltdb/system_table.go
    skipped 19 lines
    20 20   "sort"
    21 21   "strings"
    22 22   
     23 + "github.com/dolthub/dolt/go/libraries/doltcore/schema/typeinfo"
     24 + 
    23 25   "github.com/dolthub/dolt/go/libraries/doltcore/schema"
    24 26   "github.com/dolthub/dolt/go/libraries/utils/funcitr"
    25 27   "github.com/dolthub/dolt/go/libraries/utils/set"
    skipped 6 lines
    32 34  )
    33 35   
    34 36  var ErrSystemTableCannotBeModified = errors.New("system tables cannot be dropped or altered")
     37 + 
     38 +var OldDocsSchema = schema.MustSchemaFromCols(schema.NewColCollection(
     39 + schema.NewColumn(DocPkColumnName, schema.DocNameTag, types.StringKind, true, schema.NotNullConstraint{}),
     40 + schema.NewColumn(DocTextColumnName, schema.DocTextTag, types.StringKind, false),
     41 +))
     42 + 
     43 +var DocsSchema schema.Schema
     44 + 
     45 +func init() {
     46 + docTextCol, err := schema.NewColumnWithTypeInfo(DocTextColumnName, schema.DocTextTag, typeinfo.LongTextType, false, "", false, "")
     47 + if err != nil {
     48 + panic(err)
     49 + }
     50 + doltDocsColumns := schema.NewColCollection(
     51 + schema.NewColumn(DocPkColumnName, schema.DocNameTag, types.StringKind, true, schema.NotNullConstraint{}),
     52 + docTextCol,
     53 + )
     54 + DocsSchema = schema.MustSchemaFromCols(doltDocsColumns)
     55 +}
    35 56   
    36 57  // HasDoltPrefix returns a boolean whether or not the provided string is prefixed with the DoltNamespace. Users should
    37 58  // not be able to create tables in this reserved namespace.
    skipped 124 lines
    162 183   ReadmeDoc = "README.md"
    163 184  )
    164 185   
    165  -var doltDocsColumns = schema.NewColCollection(
    166  - schema.NewColumn(DocPkColumnName, schema.DocNameTag, types.StringKind, true, schema.NotNullConstraint{}),
    167  - schema.NewColumn(DocTextColumnName, schema.DocTextTag, types.StringKind, false),
    168  -)
    169  -var DocsSchema = schema.MustSchemaFromCols(doltDocsColumns)
    170  - 
    171 186  var DocsMaybeCreateTableStmt = `
    172 187  CREATE TABLE IF NOT EXISTS dolt_docs (
    173 188   doc_name varchar(16383) NOT NULL,
    174  - doc_text varchar(16383),
     189 + doc_text longtext,
    175 190   PRIMARY KEY (doc_name)
    176 191  );`
    177 192   
    skipped 110 lines
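
doc_text is widened from varchar(16383) to longtext, and the previous column definitions are preserved as OldDocsSchema so that dolt_docs tables created before this commit still validate. A condensed sketch, not part of this commit, of the compatibility check performed in database.go below:

    import (
        "github.com/dolthub/go-mysql-server/sql"

        "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
    )

    // validDocsSchema is illustrative; it assumes the PrimaryKeySchema.Equals
    // call used in database.go.
    func validDocsSchema(s sql.Schema) bool {
        return dtables.DoltDocsSqlSchema.Equals(s) || dtables.OldDoltDocsSqlSchema.Equals(s)
    }
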
    go/libraries/doltcore/migrate/environment.go
    skipped 46 lines
    47 47   
    48 48  // Environment is a migration environment.
    49 49  type Environment struct {
    50  - Migration *env.DoltEnv
    51  - Existing *env.DoltEnv
     50 + Migration *env.DoltEnv
     51 + Existing *env.DoltEnv
     52 + DropConflicts bool
    52 53  }
    53 54   
    54 55  // NewEnvironment creates a migration Environment for |existing|.
    skipped 213 lines
    go/libraries/doltcore/migrate/integration_test.go
    skipped 183 lines
    184 184   DoltDB: ddb,
    185 185   }
    186 186   
    187  - err = migrate.TraverseDAG(ctx, preEnv.DoltDB, postEnv.DoltDB)
     187 + err = migrate.TraverseDAG(ctx, migrate.Environment{}, preEnv.DoltDB, postEnv.DoltDB)
    188 188   assert.NoError(t, err)
    189 189   return
    190 190  }
    skipped 33 lines
    go/libraries/doltcore/migrate/transform.go
    skipped 40 lines
    41 41   flushRef = ref.NewInternalRef("migration-flush")
    42 42  )
    43 43   
    44  -func migrateWorkingSet(ctx context.Context, brRef ref.BranchRef, wsRef ref.WorkingSetRef, old, new *doltdb.DoltDB) error {
     44 +func migrateWorkingSet(ctx context.Context, menv Environment, brRef ref.BranchRef, wsRef ref.WorkingSetRef, old, new *doltdb.DoltDB) error {
    45 45   oldWs, err := old.ResolveWorkingSet(ctx, wsRef)
    46 46   if err != nil {
    47 47   return err
    skipped 17 lines
    65 65   return err
    66 66   }
    67 67   
    68  - wr, err := migrateRoot(ctx, oldHeadRoot, oldWs.WorkingRoot(), newHeadRoot)
     68 + wr, err := migrateRoot(ctx, menv, oldHeadRoot, oldWs.WorkingRoot(), newHeadRoot)
    69 69   if err != nil {
    70 70   return err
    71 71   }
    72 72   
    73  - sr, err := migrateRoot(ctx, oldHeadRoot, oldWs.StagedRoot(), newHeadRoot)
     73 + sr, err := migrateRoot(ctx, menv, oldHeadRoot, oldWs.StagedRoot(), newHeadRoot)
    74 74   if err != nil {
    75 75   return err
    76 76   }
    skipped 13 lines
    90 90   return new.UpdateWorkingSet(ctx, wsRef, newWs, hash.Hash{}, oldWs.Meta())
    91 91  }
    92 92   
    93  -func migrateCommit(ctx context.Context, oldCm *doltdb.Commit, new *doltdb.DoltDB, prog Progress) error {
     93 +func migrateCommit(ctx context.Context, menv Environment, oldCm *doltdb.Commit, new *doltdb.DoltDB, prog Progress) error {
    94 94   oldHash, err := oldCm.HashOf()
    95 95   if err != nil {
    96 96   return err
    skipped 50 lines
    147 147   return err
    148 148   }
    149 149   
    150  - mRoot, err := migrateRoot(ctx, oldParentRoot, oldRoot, newParentRoot)
     150 + mRoot, err := migrateRoot(ctx, menv, oldParentRoot, oldRoot, newParentRoot)
    151 151   if err != nil {
    152 152   return err
    153 153   }
    skipped 111 lines
    265 265   }, nil
    266 266  }
    267 267   
    268  -func migrateRoot(ctx context.Context, oldParent, oldRoot, newParent *doltdb.RootValue) (*doltdb.RootValue, error) {
     268 +func migrateRoot(ctx context.Context, menv Environment, oldParent, oldRoot, newParent *doltdb.RootValue) (*doltdb.RootValue, error) {
    269 269   migrated := newParent
    270 270   
    271 271   fkc, err := oldRoot.GetForeignKeyCollection(ctx)
    skipped 20 lines
    292 292   ok, err := oldTbl.HasConflicts(ctx)
    293 293   if err != nil {
    294 294   return true, err
    295  - } else if ok {
     295 + } else if ok && !menv.DropConflicts {
    296 296   return true, fmt.Errorf("cannot migrate table with conflicts (%s)", name)
    297 297   }
    298 298   
    skipped 403 lines
    go/libraries/doltcore/migrate/traverse.go
    skipped 25 lines
    26 26  )
    27 27   
    28 28  // TraverseDAG traverses |old|, migrating values to |new|.
    29  -func TraverseDAG(ctx context.Context, old, new *doltdb.DoltDB) (err error) {
     29 +func TraverseDAG(ctx context.Context, menv Environment, old, new *doltdb.DoltDB) (err error) {
    30 30   var heads []ref.DoltRef
    31 31   var prog Progress
    32 32   
    skipped 17 lines
    50 50   }()
    51 51   
    52 52   for i := range heads {
    53  - if err = traverseRefHistory(ctx, heads[i], old, new, prog); err != nil {
     53 + if err = traverseRefHistory(ctx, menv, heads[i], old, new, prog); err != nil {
    54 54   return err
    55 55   }
    56 56   }
    skipped 4 lines
    61 61   return nil
    62 62  }
    63 63   
    64  -func traverseRefHistory(ctx context.Context, r ref.DoltRef, old, new *doltdb.DoltDB, prog Progress) error {
     64 +func traverseRefHistory(ctx context.Context, menv Environment, r ref.DoltRef, old, new *doltdb.DoltDB, prog Progress) error {
    65 65   switch r.GetType() {
    66 66   case ref.BranchRefType:
    67  - if err := traverseBranchHistory(ctx, r, old, new, prog); err != nil {
     67 + if err := traverseBranchHistory(ctx, menv, r, old, new, prog); err != nil {
    68 68   return err
    69 69   }
    70 70   wsRef, err := ref.WorkingSetRefForHead(r)
    71 71   if err != nil {
    72 72   return err
    73 73   }
    74  - return migrateWorkingSet(ctx, r.(ref.BranchRef), wsRef, old, new)
     74 + return migrateWorkingSet(ctx, menv, r.(ref.BranchRef), wsRef, old, new)
    75 75   
    76 76   case ref.TagRefType:
    77  - return traverseTagHistory(ctx, r.(ref.TagRef), old, new, prog)
     77 + return traverseTagHistory(ctx, menv, r.(ref.TagRef), old, new, prog)
    78 78   
    79 79   case ref.RemoteRefType:
    80  - return traverseBranchHistory(ctx, r, old, new, prog)
     80 + return traverseBranchHistory(ctx, menv, r, old, new, prog)
    81 81   
    82 82   case ref.WorkspaceRefType, ref.InternalRefType:
    83 83   return nil
    skipped 3 lines
    87 87   }
    88 88  }
    89 89   
    90  -func traverseBranchHistory(ctx context.Context, r ref.DoltRef, old, new *doltdb.DoltDB, prog Progress) error {
     90 +func traverseBranchHistory(ctx context.Context, menv Environment, r ref.DoltRef, old, new *doltdb.DoltDB, prog Progress) error {
    91 91   cm, err := old.ResolveCommitRef(ctx, r)
    92 92   if err != nil {
    93 93   return err
    94 94   }
    95  - if err = traverseCommitHistory(ctx, cm, new, prog); err != nil {
     95 + if err = traverseCommitHistory(ctx, menv, cm, new, prog); err != nil {
    96 96   return err
    97 97   }
    98 98   
    skipped 9 lines
    108 108   return new.SetHead(ctx, r, newHash)
    109 109  }
    110 110   
    111  -func traverseTagHistory(ctx context.Context, r ref.TagRef, old, new *doltdb.DoltDB, prog Progress) error {
     111 +func traverseTagHistory(ctx context.Context, menv Environment, r ref.TagRef, old, new *doltdb.DoltDB, prog Progress) error {
    112 112   t, err := old.ResolveTag(ctx, r)
    113 113   if err != nil {
    114 114   return err
    115 115   }
    116 116   
    117  - if err = traverseCommitHistory(ctx, t.Commit, new, prog); err != nil {
     117 + if err = traverseCommitHistory(ctx, menv, t.Commit, new, prog); err != nil {
    118 118   return err
    119 119   }
    120 120   
    skipped 12 lines
    133 133   return new.NewTagAtCommit(ctx, r, cm, t.Meta)
    134 134  }
    135 135   
    136  -func traverseCommitHistory(ctx context.Context, cm *doltdb.Commit, new *doltdb.DoltDB, prog Progress) error {
     136 +func traverseCommitHistory(ctx context.Context, menv Environment, cm *doltdb.Commit, new *doltdb.DoltDB, prog Progress) error {
    137 137   ch, err := cm.HashOf()
    138 138   if err != nil {
    139 139   return err
    skipped 15 lines
    155 155   }
    156 156   if idx < 0 {
    157 157   // parents for |cm| are done, migrate |cm|
    158  - if err = migrateCommit(ctx, cm, new, prog); err != nil {
     158 + if err = migrateCommit(ctx, menv, cm, new, prog); err != nil {
    159 159   return err
    160 160   }
    161 161   // pop the stack, traverse upwards
    skipped 34 lines
    go/libraries/doltcore/migrate/tuples.go
    skipped 242 lines
    243 243   case val.StringAddrEnc:
    244 244   // note: previously, TEXT fields were serialized as types.String
    245 245   rd := strings.NewReader(string(value))
    246  - t, err := tree.NewImmutableTreeFromReader(ctx, rd, ns, tree.DefaultFixedChunkLength)
     246 + bb := ns.BlobBuilder()
     247 + bb.Init(len(value))
     248 + _, addr, err := bb.Chunk(ctx, rd)
    247 249   if err != nil {
    248 250   return err
    249 251   }
    250  - b.PutStringAddr(idx, t.Addr)
     252 + b.PutStringAddr(idx, addr)
    251 253   
    252 254   default:
    253 255   panic(fmt.Sprintf("unexpected encoding for string (%d)", typ.Enc))
    skipped 56 lines
    310 312   }
    311 313   buf := bytes.NewBuffer([]byte(s))
    312 314   
    313  - t, err := tree.NewImmutableTreeFromReader(ctx, buf, ns, tree.DefaultFixedChunkLength)
     315 + bb := ns.BlobBuilder()
     316 + bb.Init(len(s))
     317 + _, addr, err := bb.Chunk(ctx, buf)
    314 318   if err != nil {
    315 319   return err
    316 320   }
    317  - b.PutJSONAddr(idx, t.Addr)
     321 + b.PutJSONAddr(idx, addr)
    318 322   return nil
    319 323  }
    320 324   
    skipped 17 lines
    338 342   return err
    339 343   }
    340 344   
    341  - t, err := tree.NewImmutableTreeFromReader(ctx, bytes.NewReader(buf), ns, tree.DefaultFixedChunkLength)
     345 + bb := ns.BlobBuilder()
     346 + bb.Init(int(value.Len()))
     347 + _, addr, err := bb.Chunk(ctx, bytes.NewReader(buf))
    342 348   if err != nil {
    343 349   return err
    344 350   }
    skipped 1 lines
    346 352   typ := b.Desc.Types[idx]
    347 353   switch typ.Enc {
    348 354   case val.BytesAddrEnc:
    349  - b.PutBytesAddr(idx, t.Addr)
     355 + b.PutBytesAddr(idx, addr)
    350 356   case val.StringAddrEnc:
    351  - b.PutStringAddr(idx, t.Addr)
     357 + b.PutStringAddr(idx, addr)
    352 358   }
    353 359   return nil
    354 360  }
    skipped 34 lines
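
All three hunks replace tree.NewImmutableTreeFromReader with the same BlobBuilder sequence: fetch a builder from the NodeStore, Init it with the byte length, Chunk the reader, and store the returned address. A small helper could factor that out; a sketch, not part of this commit, assuming only the calls visible above:

    import (
        "bytes"
        "context"

        "github.com/dolthub/dolt/go/store/hash"
        "github.com/dolthub/dolt/go/store/prolly/tree"
    )

    // writeBlob chunks data into the node store and returns the blob address
    // to store in a StringAddr/BytesAddr/JSONAddr field; illustrative only.
    func writeBlob(ctx context.Context, ns tree.NodeStore, data []byte) (hash.Hash, error) {
        bb := ns.BlobBuilder()
        bb.Init(len(data))
        _, addr, err := bb.Chunk(ctx, bytes.NewReader(data))
        return addr, err
    }
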
    go/libraries/doltcore/remotestorage/chunk_store.go
    skipped 599 lines
    600 600   if err == io.EOF {
    601 601   return nil
    602 602   }
    603  - return NewRpcError(err, "StreamDownloadLocations", dcs.host, reqs[completedReqs])
     603 + var r *remotesapi.GetDownloadLocsRequest
     604 + if completedReqs < len(reqs) {
     605 + r = reqs[completedReqs]
     606 + }
     607 + return NewRpcError(err, "StreamDownloadLocations", dcs.host, r)
    604 608   }
    605 609   if resp.RepoToken != "" {
    606 610   dcs.repoToken.Store(resp.RepoToken)
    skipped 791 lines
    go/libraries/doltcore/schema/schema_impl.go
    skipped 366 lines
    367 367   _ = si.GetPKCols().Iter(func(tag uint64, col Column) (stop bool, err error) {
    368 368   sqlType := col.TypeInfo.ToSqlType()
    369 369   queryType := sqlType.Type()
    370  - tt = append(tt, val.Type{
    371  - Enc: val.Encoding(EncodingFromSqlType(queryType)),
    372  - Nullable: columnMissingNotNullConstraint(col),
    373  - })
    374  - if queryType == query.Type_CHAR || queryType == query.Type_VARCHAR {
     370 + var t val.Type
     371 + if queryType == query.Type_BLOB {
     372 + t = val.Type{
     373 + Enc: val.Encoding(EncodingFromSqlType(query.Type_VARBINARY)),
     374 + Nullable: columnMissingNotNullConstraint(col),
     375 + }
     376 + } else if queryType == query.Type_TEXT {
     377 + t = val.Type{
     378 + Enc: val.Encoding(EncodingFromSqlType(query.Type_VARCHAR)),
     379 + Nullable: columnMissingNotNullConstraint(col),
     380 + }
     381 + } else {
     382 + t = val.Type{
     383 + Enc: val.Encoding(EncodingFromSqlType(queryType)),
     384 + Nullable: columnMissingNotNullConstraint(col),
     385 + }
     386 + }
     387 + tt = append(tt, t)
     388 + if queryType == query.Type_CHAR || queryType == query.Type_VARCHAR || queryType == query.Type_TEXT {
    375 389   useCollations = true
    376 390   collations = append(collations, sqlType.(sql.StringType).Collation())
    377 391   } else {
    skipped 108 lines
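
The new branches map address-encoded key columns onto existing encodings: a BLOB key column reuses the VARBINARY encoding, a TEXT key column reuses VARCHAR, and TEXT now also takes the collation-aware path alongside CHAR and VARCHAR. The if/else chain condenses to a small mapping; a sketch in the same package, not part of this commit:

    import (
        "github.com/dolthub/vitess/go/vt/proto/query"

        "github.com/dolthub/dolt/go/store/val"
    )

    // keyEncoding is illustrative; EncodingFromSqlType and query.Type_* are
    // the identifiers already used above.
    func keyEncoding(qt query.Type) val.Encoding {
        switch qt {
        case query.Type_BLOB: // out-of-line bytes reuse the varbinary encoding
            qt = query.Type_VARBINARY
        case query.Type_TEXT: // out-of-line strings reuse the varchar encoding
            qt = query.Type_VARCHAR
        }
        return val.Encoding(EncodingFromSqlType(qt))
    }
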
    go/libraries/doltcore/sqle/database.go
    skipped 850 lines
    851 851   }
    852 852   if strings.ToLower(tableName) == doltdb.DocTableName {
    853 853   // validate correct schema
    854  - if !dtables.DoltDocsSqlSchema.Equals(sch.Schema) {
     854 + if !dtables.DoltDocsSqlSchema.Equals(sch.Schema) && !dtables.OldDoltDocsSqlSchema.Equals(sch.Schema) {
    855 855   return fmt.Errorf("incorrect schema for dolt_docs table")
    856 856   }
    857 857   } else if doltdb.HasDoltPrefix(tableName) {
    skipped 14 lines
    872 872   }
    873 873   if strings.ToLower(tableName) == doltdb.DocTableName {
    874 874   // validate correct schema
    875  - if !dtables.DoltDocsSqlSchema.Equals(sch.Schema) {
     875 + if !dtables.DoltDocsSqlSchema.Equals(sch.Schema) && !dtables.OldDoltDocsSqlSchema.Equals(sch.Schema) {
    876 876   return fmt.Errorf("incorrect schema for dolt_docs table")
    877 877   }
    878 878   } else if doltdb.HasDoltPrefix(tableName) {
    skipped 558 lines
    go/libraries/doltcore/sqle/dolt_diff_summary_table_function.go
    skipped 14 lines
    15 15  package sqle
    16 16   
    17 17  import (
     18 + "errors"
    18 19   "fmt"
    19 20   "io"
    20 21   "math"
    21 22   "strings"
    22 23   
    23 24   "github.com/dolthub/go-mysql-server/sql"
     25 + "golang.org/x/sync/errgroup"
    24 26   
    25 27   "github.com/dolthub/dolt/go/libraries/doltcore/diff"
    26 28   "github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
    27 29   "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
    28  - "github.com/dolthub/dolt/go/store/atomicerr"
     30 + "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
    29 31  )
    30 32   
    31 33  var _ sql.TableFunction = (*DiffSummaryTableFunction)(nil)
    skipped 49 lines
    81 83   return ds, nil
    82 84  }
    83 85   
    84  -// FunctionName implements the sql.TableFunction interface
    85  -func (ds *DiffSummaryTableFunction) FunctionName() string {
     86 +// Name implements the sql.TableFunction interface
     87 +func (ds *DiffSummaryTableFunction) Name() string {
    86 88   return "dolt_diff_summary"
    87 89  }
    88 90   
    skipped 95 lines
    184 186  // WithExpressions implements the sql.Expressioner interface.
    185 187  func (ds *DiffSummaryTableFunction) WithExpressions(expression ...sql.Expression) (sql.Node, error) {
    186 188   if len(expression) < 1 {
    187  - return nil, sql.ErrInvalidArgumentNumber.New(ds.FunctionName(), "1 to 3", len(expression))
     189 + return nil, sql.ErrInvalidArgumentNumber.New(ds.Name(), "1 to 3", len(expression))
    188 190   }
    189 191   
    190 192   for _, expr := range expression {
    191 193   if !expr.Resolved() {
    192  - return nil, ErrInvalidNonLiteralArgument.New(ds.FunctionName(), expr.String())
     194 + return nil, ErrInvalidNonLiteralArgument.New(ds.Name(), expr.String())
    193 195   }
    194 196   }
    195 197   
    196 198   if strings.Contains(expression[0].String(), "..") {
    197 199   if len(expression) < 1 || len(expression) > 2 {
    198  - return nil, sql.ErrInvalidArgumentNumber.New(ds.FunctionName(), "1 or 2", len(expression))
     200 + return nil, sql.ErrInvalidArgumentNumber.New(ds.Name(), "1 or 2", len(expression))
    199 201   }
    200 202   ds.dotCommitExpr = expression[0]
    201 203   if len(expression) == 2 {
    skipped 1 lines
    203 205   }
    204 206   } else {
    205 207   if len(expression) < 2 || len(expression) > 3 {
    206  - return nil, sql.ErrInvalidArgumentNumber.New(ds.FunctionName(), "2 or 3", len(expression))
     208 + return nil, sql.ErrInvalidArgumentNumber.New(ds.Name(), "2 or 3", len(expression))
    207 209   }
    208 210   ds.fromCommitExpr = expression[0]
    209 211   ds.toCommitExpr = expression[1]
    skipped 5 lines
    215 217   // validate the expressions
    216 218   if ds.dotCommitExpr != nil {
    217 219   if !sql.IsText(ds.dotCommitExpr.Type()) {
    218  - return nil, sql.ErrInvalidArgumentDetails.New(ds.FunctionName(), ds.dotCommitExpr.String())
     220 + return nil, sql.ErrInvalidArgumentDetails.New(ds.Name(), ds.dotCommitExpr.String())
    219 221   }
    220 222   } else {
    221 223   if !sql.IsText(ds.fromCommitExpr.Type()) {
    222  - return nil, sql.ErrInvalidArgumentDetails.New(ds.FunctionName(), ds.fromCommitExpr.String())
     224 + return nil, sql.ErrInvalidArgumentDetails.New(ds.Name(), ds.fromCommitExpr.String())
    223 225   }
    224 226   if !sql.IsText(ds.toCommitExpr.Type()) {
    225  - return nil, sql.ErrInvalidArgumentDetails.New(ds.FunctionName(), ds.toCommitExpr.String())
     227 + return nil, sql.ErrInvalidArgumentDetails.New(ds.Name(), ds.toCommitExpr.String())
    226 228   }
    227 229   }
    228 230   
    229 231   if ds.tableNameExpr != nil {
    230 232   if !sql.IsText(ds.tableNameExpr.Type()) {
    231  - return nil, sql.ErrInvalidArgumentDetails.New(ds.FunctionName(), ds.tableNameExpr.String())
     233 + return nil, sql.ErrInvalidArgumentDetails.New(ds.Name(), ds.tableNameExpr.String())
    232 234   }
    233 235   }
    234 236   
    skipped 54 lines
    289 291   }
    290 292   diffSum, hasDiff, err := getDiffSummaryNodeFromDelta(ctx, delta, fromRoot, toRoot, tblName)
    291 293   if err != nil {
     294 + if errors.Is(err, diff.ErrPrimaryKeySetChanged) {
     295 + ctx.Warn(dtables.PrimaryKeyChangeWarningCode, fmt.Sprintf("summary for table %s cannot be determined. Primary key set changed.", tblName))
     296 + // Report an empty diff for tables that have primary key set changes
     297 + diffSummaries = append(diffSummaries, diffSummaryNode{tblName: tblName})
     298 + continue
     299 + }
    292 300   return nil, err
    293 301   }
    294 302   if hasDiff {
    skipped 94 lines
    389 397  // getDiffSummary returns diff.DiffSummaryProgress object and whether there is a data diff or not.
    390 398  func getDiffSummary(ctx *sql.Context, td diff.TableDelta) (diff.DiffSummaryProgress, bool, bool, error) {
    391 399   // got this method from diff_output.go
    392  - // todo: use errgroup.Group
    393  - ae := atomicerr.New()
     400 + 
    394 401   ch := make(chan diff.DiffSummaryProgress)
    395  - go func() {
     402 + 
     403 + grp, ctx2 := errgroup.WithContext(ctx)
     404 + grp.Go(func() error {
    396 405   defer close(ch)
    397  - err := diff.SummaryForTableDelta(ctx, ch, td)
    398  - 
    399  - ae.SetIfError(err)
    400  - }()
     406 + err := diff.SummaryForTableDelta(ctx2, ch, td)
     407 + return err
     408 + })
    401 409   
    402 410   acc := diff.DiffSummaryProgress{}
    403 411   var count int64
    404  - for p := range ch {
    405  - if ae.IsSet() {
    406  - break
     412 + grp.Go(func() error {
     413 + for {
     414 + select {
     415 + case p, ok := <-ch:
     416 + if !ok {
     417 + return nil
     418 + }
     419 + acc.Adds += p.Adds
     420 + acc.Removes += p.Removes
     421 + acc.Changes += p.Changes
     422 + acc.CellChanges += p.CellChanges
     423 + acc.NewRowSize += p.NewRowSize
     424 + acc.OldRowSize += p.OldRowSize
     425 + acc.NewCellSize += p.NewCellSize
     426 + acc.OldCellSize += p.OldCellSize
     427 + count++
     428 + case <-ctx2.Done():
     429 + return ctx2.Err()
     430 + }
    407 431   }
    408  - 
    409  - acc.Adds += p.Adds
    410  - acc.Removes += p.Removes
    411  - acc.Changes += p.Changes
    412  - acc.CellChanges += p.CellChanges
    413  - acc.NewRowSize += p.NewRowSize
    414  - acc.OldRowSize += p.OldRowSize
    415  - acc.NewCellSize += p.NewCellSize
    416  - acc.OldCellSize += p.OldCellSize
    417  - 
    418  - count++
    419  - }
     432 + })
    420 433   
    421  - if err := ae.Get(); err != nil {
     434 + if err := grp.Wait(); err != nil {
    422 435   return diff.DiffSummaryProgress{}, false, false, err
    423 436   }
    424 437   
    skipped 130 lines
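
The rewrite retires atomicerr: previously the consumer loop could break on error while the producer kept sending, and the error check raced the channel. With errgroup, both goroutines share a context, the producer's close ends the consumer cleanly, and grp.Wait returns the first error from either side. A standalone sketch of the shape, with illustrative names, not part of this commit:

    package main

    import (
        "context"
        "fmt"

        "golang.org/x/sync/errgroup"
    )

    func main() {
        grp, ctx := errgroup.WithContext(context.Background())
        ch := make(chan int)

        // producer: closing the channel is what ends the consumer's loop
        grp.Go(func() error {
            defer close(ch)
            for i := 1; i <= 3; i++ {
                select {
                case ch <- i:
                case <-ctx.Done():
                    return ctx.Err()
                }
            }
            return nil
        })

        // consumer: accumulate until the channel closes or the context dies
        var sum int
        grp.Go(func() error {
            for {
                select {
                case v, ok := <-ch:
                    if !ok {
                        return nil
                    }
                    sum += v
                case <-ctx.Done():
                    return ctx.Err()
                }
            }
        })

        // Wait returns the first error from either goroutine, if any
        if err := grp.Wait(); err != nil {
            fmt.Println("error:", err)
            return
        }
        fmt.Println("sum:", sum) // prints: sum: 6
    }
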
    go/libraries/doltcore/sqle/dolt_diff_table_function.go
    skipped 94 lines
    95 95  // WithExpressions implements the sql.Expressioner interface
    96 96  func (dtf *DiffTableFunction) WithExpressions(expression ...sql.Expression) (sql.Node, error) {
    97 97   if len(expression) < 2 {
    98  - return nil, sql.ErrInvalidArgumentNumber.New(dtf.FunctionName(), "2 to 3", len(expression))
     98 + return nil, sql.ErrInvalidArgumentNumber.New(dtf.Name(), "2 to 3", len(expression))
    99 99   }
    100 100   
    101 101   // TODO: For now, we will only support literal / fully-resolved arguments to the
    skipped 1 lines
    103 103   // before the arguments could be resolved.
    104 104   for _, expr := range expression {
    105 105   if !expr.Resolved() {
    106  - return nil, ErrInvalidNonLiteralArgument.New(dtf.FunctionName(), expr.String())
     106 + return nil, ErrInvalidNonLiteralArgument.New(dtf.Name(), expr.String())
    107 107   }
    108 108   }
    109 109   
    110 110   if strings.Contains(expression[0].String(), "..") {
    111 111   if len(expression) != 2 {
    112  - return nil, sql.ErrInvalidArgumentNumber.New(fmt.Sprintf("%v with .. or ...", dtf.FunctionName()), 2, len(expression))
     112 + return nil, sql.ErrInvalidArgumentNumber.New(fmt.Sprintf("%v with .. or ...", dtf.Name()), 2, len(expression))
    113 113   }
    114 114   dtf.dotCommitExpr = expression[0]
    115 115   dtf.tableNameExpr = expression[1]
    116 116   } else {
    117 117   if len(expression) != 3 {
    118  - return nil, sql.ErrInvalidArgumentNumber.New(dtf.FunctionName(), 3, len(expression))
     118 + return nil, sql.ErrInvalidArgumentNumber.New(dtf.Name(), 3, len(expression))
    119 119   }
    120 120   dtf.fromCommitExpr = expression[0]
    121 121   dtf.toCommitExpr = expression[1]
    skipped 221 lines
    343 343   }
    344 344   
    345 345   if !sql.IsText(dtf.tableNameExpr.Type()) {
    346  - return nil, nil, nil, "", sql.ErrInvalidArgumentDetails.New(dtf.FunctionName(), dtf.tableNameExpr.String())
     346 + return nil, nil, nil, "", sql.ErrInvalidArgumentDetails.New(dtf.Name(), dtf.tableNameExpr.String())
    347 347   }
    348 348   
    349 349   tableNameVal, err := dtf.tableNameExpr.Eval(dtf.ctx, nil)
    skipped 8 lines
    358 358   
    359 359   if dtf.dotCommitExpr != nil {
    360 360   if !sql.IsText(dtf.dotCommitExpr.Type()) {
    361  - return nil, nil, nil, "", sql.ErrInvalidArgumentDetails.New(dtf.FunctionName(), dtf.dotCommitExpr.String())
     361 + return nil, nil, nil, "", sql.ErrInvalidArgumentDetails.New(dtf.Name(), dtf.dotCommitExpr.String())
    362 362   }
    363 363   
    364 364   dotCommitVal, err := dtf.dotCommitExpr.Eval(dtf.ctx, nil)
    skipped 5 lines
    370 370   }
    371 371   
    372 372   if !sql.IsText(dtf.fromCommitExpr.Type()) {
    373  - return nil, nil, nil, "", sql.ErrInvalidArgumentDetails.New(dtf.FunctionName(), dtf.fromCommitExpr.String())
     373 + return nil, nil, nil, "", sql.ErrInvalidArgumentDetails.New(dtf.Name(), dtf.fromCommitExpr.String())
    374 374   }
    375 375   if !sql.IsText(dtf.toCommitExpr.Type()) {
    376  - return nil, nil, nil, "", sql.ErrInvalidArgumentDetails.New(dtf.FunctionName(), dtf.toCommitExpr.String())
     376 + return nil, nil, nil, "", sql.ErrInvalidArgumentDetails.New(dtf.Name(), dtf.toCommitExpr.String())
    377 377   }
    378 378   
    379 379   fromCommitVal, err := dtf.fromCommitExpr.Eval(dtf.ctx, nil)
    skipped 162 lines
    542 542   dtf.tableNameExpr.String())
    543 543  }
    544 544   
    545  -// FunctionName implements the sql.TableFunction interface
    546  -func (dtf *DiffTableFunction) FunctionName() string {
     545 +// Name implements the sql.TableFunction interface
     546 +func (dtf *DiffTableFunction) Name() string {
    547 547   return "dolt_diff"
    548 548  }
    549 549   
    skipped 71 lines
    go/libraries/doltcore/sqle/dolt_log_table_function.go
    skipped 78 lines
    79 79   return ltf, nil
    80 80  }
    81 81   
    82  -// FunctionName implements the sql.TableFunction interface
    83  -func (ltf *LogTableFunction) FunctionName() string {
     82 +// Name implements the sql.TableFunction interface
     83 +func (ltf *LogTableFunction) Name() string {
    84 84   return "dolt_log"
    85 85  }
    86 86   
    skipped 99 lines
    186 186   
    187 187  // getDoltArgs builds an argument string from sql expressions so that we can
    188 188  // later parse the arguments with the same util as the CLI
    189  -func getDoltArgs(ctx *sql.Context, expressions []sql.Expression, functionName string) ([]string, error) {
     189 +func getDoltArgs(ctx *sql.Context, expressions []sql.Expression, name string) ([]string, error) {
    190 190   var args []string
    191 191   
    192 192   for _, expr := range expressions {
    skipped 3 lines
    196 196   }
    197 197   
    198 198   if !sql.IsText(expr.Type()) {
    199  - return args, sql.ErrInvalidArgumentDetails.New(functionName, expr.String())
     199 + return args, sql.ErrInvalidArgumentDetails.New(name, expr.String())
    200 200   }
    201 201   
    202 202   text, err := sql.Text.Convert(childVal)
    skipped 10 lines
    213 213  }
    214 214   
    215 215  func (ltf *LogTableFunction) addOptions(expression []sql.Expression) error {
    216  - args, err := getDoltArgs(ltf.ctx, expression, ltf.FunctionName())
     216 + args, err := getDoltArgs(ltf.ctx, expression, ltf.Name())
    217 217   if err != nil {
    218 218   return err
    219 219   }
    220 220   
    221 221   apr, err := cli.CreateLogArgParser().Parse(args)
    222 222   if err != nil {
    223  - return sql.ErrInvalidArgumentDetails.New(ltf.FunctionName(), err.Error())
     223 + return sql.ErrInvalidArgumentDetails.New(ltf.Name(), err.Error())
    224 224   }
    225 225   
    226 226   if notRevisionStr, ok := apr.GetValue(cli.NotFlag); ok {
    skipped 12 lines
    239 239   switch decorateOption {
    240 240   case "short", "full", "auto", "no":
    241 241   default:
    242  - return sql.ErrInvalidArgumentDetails.New(ltf.FunctionName(), fmt.Sprintf("invalid --decorate option: %s", decorateOption))
     242 + return sql.ErrInvalidArgumentDetails.New(ltf.Name(), fmt.Sprintf("invalid --decorate option: %s", decorateOption))
    243 243   }
    244 244   ltf.decoration = decorateOption
    245 245   
    skipped 4 lines
    250 250  func (ltf *LogTableFunction) WithExpressions(expression ...sql.Expression) (sql.Node, error) {
    251 251   for _, expr := range expression {
    252 252   if !expr.Resolved() {
    253  - return nil, ErrInvalidNonLiteralArgument.New(ltf.FunctionName(), expr.String())
     253 + return nil, ErrInvalidNonLiteralArgument.New(ltf.Name(), expr.String())
    254 254   }
    255 255   }
    256 256   
    skipped 10 lines
    267 267   }
    268 268   
    269 269   if len(filteredExpressions) > 2 {
    270  - return nil, sql.ErrInvalidArgumentNumber.New(ltf.FunctionName(), "0 to 2", len(filteredExpressions))
     270 + return nil, sql.ErrInvalidArgumentNumber.New(ltf.Name(), "0 to 2", len(filteredExpressions))
    271 271   }
    272 272   
    273 273   exLen := len(filteredExpressions)
    skipped 12 lines
    286 286  }
    287 287   
    288 288  func (ltf *LogTableFunction) invalidArgDetailsErr(expr sql.Expression, reason string) *errors.Error {
    289  - return sql.ErrInvalidArgumentDetails.New(ltf.FunctionName(), fmt.Sprintf("%s - %s", expr.String(), reason))
     289 + return sql.ErrInvalidArgumentDetails.New(ltf.Name(), fmt.Sprintf("%s - %s", expr.String(), reason))
    290 290  }
    291 291   
    292 292  func (ltf *LogTableFunction) validateRevisionExpressions() error {
    skipped 5 lines
    298 298   if ltf.revisionExpr != nil {
    299 299   revisionStr = mustExpressionToString(ltf.ctx, ltf.revisionExpr)
    300 300   if !sql.IsText(ltf.revisionExpr.Type()) {
    301  - return sql.ErrInvalidArgumentDetails.New(ltf.FunctionName(), ltf.revisionExpr.String())
     301 + return sql.ErrInvalidArgumentDetails.New(ltf.Name(), ltf.revisionExpr.String())
    302 302   }
    303 303   if ltf.secondRevisionExpr == nil && strings.HasPrefix(revisionStr, "^") {
    304 304   return ltf.invalidArgDetailsErr(ltf.revisionExpr, "second revision must exist if first revision contains '^'")
    skipped 6 lines
    311 311   if ltf.secondRevisionExpr != nil {
    312 312   secondRevisionStr = mustExpressionToString(ltf.ctx, ltf.secondRevisionExpr)
    313 313   if !sql.IsText(ltf.secondRevisionExpr.Type()) {
    314  - return sql.ErrInvalidArgumentDetails.New(ltf.FunctionName(), ltf.secondRevisionExpr.String())
     314 + return sql.ErrInvalidArgumentDetails.New(ltf.Name(), ltf.secondRevisionExpr.String())
    315 315   }
    316 316   if strings.Contains(secondRevisionStr, "..") {
    317 317   return ltf.invalidArgDetailsErr(ltf.secondRevisionExpr, "second revision cannot contain '..' or '...'")
    skipped 23 lines
    341 341   return ltf.invalidArgDetailsErr(ltf.secondRevisionExpr, "cannot use --not if '^' present in second revision")
    342 342   }
    343 343   if strings.Contains(ltf.notRevision, "..") {
    344  - return sql.ErrInvalidArgumentDetails.New(ltf.FunctionName(), fmt.Sprintf("%s - %s", ltf.notRevision, "--not revision cannot contain '..'"))
     344 + return sql.ErrInvalidArgumentDetails.New(ltf.Name(), fmt.Sprintf("%s - %s", ltf.notRevision, "--not revision cannot contain '..'"))
    345 345   }
    346 346   if strings.HasPrefix(ltf.notRevision, "^") {
    347  - return sql.ErrInvalidArgumentDetails.New(ltf.FunctionName(), fmt.Sprintf("%s - %s", ltf.notRevision, "--not revision cannot contain '^'"))
     347 + return sql.ErrInvalidArgumentDetails.New(ltf.Name(), fmt.Sprintf("%s - %s", ltf.notRevision, "--not revision cannot contain '^'"))
    348 348   }
    349 349   }
    350 350   
    skipped 354 lines
    go/libraries/doltcore/sqle/dtables/docs_table.go
    skipped 21 lines
    22 22  )
    23 23   
    24 24  var DoltDocsSqlSchema sql.PrimaryKeySchema
     25 +var OldDoltDocsSqlSchema sql.PrimaryKeySchema
    25 26   
    26 27  func init() {
    27 28   DoltDocsSqlSchema, _ = sqlutil.FromDoltSchema(doltdb.DocTableName, doltdb.DocsSchema)
     29 + OldDoltDocsSqlSchema, _ = sqlutil.FromDoltSchema(doltdb.DocTableName, doltdb.OldDocsSchema)
    28 30  }
    29 31   
    go/libraries/doltcore/sqle/enginetest/branch_control_test.go
    skipped 940 lines
    941 941   for _, test := range BranchControlTests {
    942 942   harness := newDoltHarness(t)
    943 943   t.Run(test.Name, func(t *testing.T) {
    944  - //TODO: fix whatever is broken with test db handling
    945  - if test.Name == "Proper database scoping" {
    946  - return
    947  - }
    948 944   engine, err := harness.NewEngine(t)
    949 945   require.NoError(t, err)
    950 946   defer engine.Close()
    skipped 10 lines
    961 957   enginetest.RunQueryWithContext(t, engine, harness, ctx, statement)
    962 958   }
    963 959   
    964  - ctxMap := make(map[string]*sql.Context)
    965 960   for _, assertion := range test.Assertions {
    966 961   user := assertion.User
    967 962   host := assertion.Host
    skipped 3 lines
    971 966   if host == "" {
    972 967   host = "localhost"
    973 968   }
    974  - var ctx *sql.Context
    975  - var ok bool
    976  - if ctx, ok = ctxMap[user+"@"+host]; !ok {
    977  - ctx = enginetest.NewContextWithClient(harness, sql.Client{
    978  - User: user,
    979  - Address: host,
    980  - })
    981  - ctxMap[user+"@"+host] = ctx
    982  - }
     969 + ctx = ctx.NewCtxWithClient(sql.Client{
     970 + User: user,
     971 + Address: host,
     972 + })
    983 973   
    984 974   if assertion.ExpectedErr != nil {
    985 975   t.Run(assertion.Query, func(t *testing.T) {
    skipped 53 lines
    go/libraries/doltcore/sqle/enginetest/dolt_engine_test.go
    skipped 45 lines
    46 46  // SkipPreparedsCount is used by the "ci-check-repo CI workflow
    47 47  // as a reminder to consider prepareds when adding a new
    48 48  // enginetest suite.
    49  -const SkipPreparedsCount = 83
     49 +const SkipPreparedsCount = 84
    50 50   
    51 51  const skipPreparedFlag = "DOLT_SKIP_PREPARED_ENGINETESTS"
    52 52   
    skipped 450 lines
    503 503  func TestBlobs(t *testing.T) {
    504 504   skipOldFormat(t)
    505 505   enginetest.TestBlobs(t, newDoltHarness(t))
     506 +}
     507 + 
     508 +func TestIndexes(t *testing.T) {
     509 + harness := newDoltHarness(t)
     510 + enginetest.TestIndexes(t, harness)
    506 511  }
    507 512   
    508 513  func TestIndexPrefix(t *testing.T) {
    skipped 1185 lines
    go/libraries/doltcore/sqle/enginetest/dolt_queries.go
    skipped 26 lines
    27 27   "github.com/dolthub/dolt/go/libraries/doltcore/sqle"
    28 28   "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dfunctions"
    29 29   "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dsess"
     30 + "github.com/dolthub/dolt/go/libraries/doltcore/sqle/dtables"
    30 31  )
    31 32   
    32 33  var ViewsWithAsOfScriptTest = queries.ScriptTest{
    skipped 681 lines
    714 715   Expected: []sql.Row{
    715 716   {"view", "view1", "SELECT v1 FROM viewtest", int64(1)},
    716 717   {"view", "view2", "SELECT v2 FROM viewtest", int64(2)},
    717  - },
    718  - },
    719  - },
    720  - },
    721  - {
    722  - Name: "unique key violation prevents insert",
    723  - SetUpScript: []string{
    724  - "CREATE TABLE auniquetable (pk int primary key, uk int unique key, i int);",
    725  - "INSERT INTO auniquetable VALUES(0,0,0);",
    726  - "INSERT INTO auniquetable (pk,uk) VALUES(1,0) on duplicate key update i = 99;",
    727  - },
    728  - Assertions: []queries.ScriptTestAssertion{
    729  - {
    730  - Query: "SELECT pk, uk, i from auniquetable",
    731  - Expected: []sql.Row{
    732  - {0, 0, 99},
    733 718   },
    734 719   },
    735 720   },
    skipped 725 lines
    1461 1446   Query: "select c1 from dolt_history_t;",
    1462 1447   Expected: []sql.Row{
    1463 1448   {uint64(1)},
     1449 + },
     1450 + },
     1451 + },
     1452 + },
     1453 + {
     1454 + Name: "dolt_history table index lookup",
     1455 + SetUpScript: []string{
     1456 + "create table yx (y int, x int primary key);",
     1457 + "call dolt_add('.');",
     1458 + "call dolt_commit('-m', 'creating table');",
     1459 + "insert into yx values (0, 1);",
     1460 + "call dolt_commit('-am', 'add data');",
     1461 + "insert into yx values (2, 3);",
     1462 + "call dolt_commit('-am', 'add data');",
     1463 + "insert into yx values (4, 5);",
     1464 + "call dolt_commit('-am', 'add data');",
     1465 + },
     1466 + Assertions: []queries.ScriptTestAssertion{
     1467 + {
     1468 + Query: "select count(x) from dolt_history_yx where x = 1;",
     1469 + Expected: []sql.Row{
     1470 + {3},
    1464 1471   },
    1465 1472   },
    1466 1473   },
    skipped 3198 lines
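
The new script checks that filters on the history table's key columns resolve through an index: the row with x = 1 exists in three commits, so the indexed lookup finds it once per revision and count(x) is 3. A sketch of the same shape, for a hypothetical table t:

    var historyIndexLookupSketch = queries.ScriptTest{
        Name: "dolt_history index lookup (sketch)",
        SetUpScript: []string{
            "create table t (pk int primary key);",
            "call dolt_add('.');",
            "call dolt_commit('-m', 'create table');",
            "insert into t values (1);",
            "call dolt_commit('-am', 'add row');",
        },
        Assertions: []queries.ScriptTestAssertion{
            // the row exists in exactly one commit, so the indexed
            // lookup over dolt_history_t returns a single row
            {Query: "select count(pk) from dolt_history_t where pk = 1;", Expected: []sql.Row{{1}}},
        },
    }
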
    4665 4672   {nil, nil, nil, 3, "three", "four", "removed"},
    4666 4673   },
    4667 4674   },
     4675 + {
     4676 + Query: `
     4677 +SELECT to_pk, to_c1, to_c2, from_pk, from_c1, from_c2, diff_type
     4678 +from dolt_diff(@Commit1, @Commit2, 't')
     4679 +inner join t on to_pk = t.pk;`,
     4680 + Expected: []sql.Row{{1, "one", "two", nil, nil, nil, "added"}},
     4681 + },
    4668 4682   },
    4669 4683   },
    4670 4684   {
    skipped 685 lines
    5356 5370   Query: "SELECT count(*) from dolt_log('main^');",
    5357 5371   Expected: []sql.Row{{3}},
    5358 5372   },
     5373 + {
     5374 + Query: "SELECT count(*) from dolt_log('main') join dolt_diff(@Commit1, @Commit2, 't') where commit_hash = to_commit;",
     5375 + Expected: []sql.Row{{2}},
     5376 + },
    5359 5377   },
    5360 5378   },
    5361 5379   {
    skipped 455 lines
    5817 5835   Query: "SELECT * from dolt_diff_summary(@Commit1, @Commit5, 't');",
    5818 5836   ExpectedErr: sql.ErrTableNotFound,
    5819 5837   },
     5838 + {
     5839 + Query: `
     5840 +SELECT *
     5841 +from dolt_diff_summary(@Commit3, @Commit4, 't')
     5842 +inner join t as of @Commit3 on rows_unmodified = t.pk;`,
     5843 + Expected: []sql.Row{},
     5844 + },
    5820 5845   },
    5821 5846   },
    5822 5847   {
    skipped 472 lines
    6295 6320   },
    6296 6321   },
    6297 6322   },
     6323 + {
     6324 + Name: "pk set change should throw an error for 3 argument dolt_diff_summary",
     6325 + SetUpScript: []string{
     6326 + "CREATE table t (pk int primary key);",
     6327 + "INSERT INTO t values (1);",
     6328 + "CALL DOLT_COMMIT('-Am', 'table with row');",
     6329 + "ALTER TABLE t ADD col1 int not null default 0;",
     6330 + "ALTER TABLE t drop primary key;",
     6331 + "ALTER TABLE t add primary key (pk, col1);",
     6332 + "CALL DOLT_COMMIT('-am', 'add secondary column with primary key');",
     6333 + },
     6334 + Assertions: []queries.ScriptTestAssertion{
     6335 + {
     6336 + Query: "SELECT * from dolt_diff_summary('HEAD~', 'HEAD', 't');",
     6337 + ExpectedErrStr: "failed to compute diff summary for table t: primary key set changed",
     6338 + },
     6339 + },
     6340 + },
     6341 + {
     6342 + Name: "pk set change should report warning for 2 argument dolt_diff_summary",
     6343 + SetUpScript: []string{
     6344 + "CREATE table t (pk int primary key);",
     6345 + "INSERT INTO t values (1);",
     6346 + "CREATE table t2 (pk int primary key);",
     6347 + "INSERT INTO t2 values (2);",
     6348 + "CALL DOLT_COMMIT('-Am', 'multiple tables');",
     6349 + "ALTER TABLE t ADD col1 int not null default 0;",
     6350 + "ALTER TABLE t drop primary key;",
     6351 + "ALTER TABLE t add primary key (pk, col1);",
     6352 + "INSERT INTO t2 values (3), (4), (5);",
     6353 + "CALL DOLT_COMMIT('-am', 'add secondary column with primary key to t');",
     6354 + },
     6355 + Assertions: []queries.ScriptTestAssertion{
     6356 + {
     6357 + Query: "SELECT * from dolt_diff_summary('HEAD~', 'HEAD')",
     6358 + Expected: []sql.Row{
     6359 + {"t", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
     6360 + {"t2", 1, 3, 0, 0, 3, 0, 0, 1, 4, 1, 4},
     6361 + },
     6362 + ExpectedWarning: dtables.PrimaryKeyChangeWarningCode,
     6363 + ExpectedWarningsCount: 1,
     6364 + },
     6365 + },
     6366 + },
    6298 6367  }
    6299 6368   
    6300 6369  var LargeJsonObjectScriptTests = []queries.ScriptTest{
    skipped 1825 lines
    8126 8195   
    8127 8196  var DoltIndexPrefixScripts = []queries.ScriptTest{
    8128 8197   {
    8129  - Name: "varchar primary key prefix",
    8130  - SetUpScript: []string{
    8131  - "create table t (v varchar(100))",
    8132  - },
    8133  - Assertions: []queries.ScriptTestAssertion{
    8134  - {
    8135  - Query: "alter table t add primary key (v(10))",
    8136  - ExpectedErr: sql.ErrUnsupportedIndexPrefix,
    8137  - },
    8138  - {
    8139  - Query: "create table v_tbl (v varchar(100), primary key (v(10)))",
    8140  - ExpectedErr: sql.ErrUnsupportedIndexPrefix,
    8141  - },
    8142  - },
    8143  - },
    8144  - {
    8145  - Name: "varchar keyed secondary index prefix",
    8146  - SetUpScript: []string{
    8147  - "create table t (i int primary key, v varchar(10))",
    8148  - },
    8149  - Assertions: []queries.ScriptTestAssertion{
    8150  - {
    8151  - Query: "alter table t add unique index (v(1))",
    8152  - Expected: []sql.Row{{sql.NewOkResult(0)}},
    8153  - },
    8154  - {
    8155  - Query: "show create table t",
    8156  - Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `i` int NOT NULL,\n `v` varchar(10),\n PRIMARY KEY (`i`),\n UNIQUE KEY `v` (`v`(1))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8157  - },
    8158  - {
    8159  - Query: "insert into t values (0, 'aa'), (1, 'ab')",
    8160  - ExpectedErr: sql.ErrUniqueKeyViolation,
    8161  - },
    8162  - {
    8163  - Query: "insert into t values (0, 'aa'), (1, 'bb'), (2, 'cc')",
    8164  - Expected: []sql.Row{{sql.NewOkResult(3)}},
    8165  - },
    8166  - {
    8167  - Query: "select * from t where v = 'a'",
    8168  - Expected: []sql.Row{},
    8169  - },
    8170  - {
    8171  - Query: "select * from t where v = 'aa'",
    8172  - Expected: []sql.Row{
    8173  - {0, "aa"},
    8174  - },
    8175  - },
    8176  - {
    8177  - Query: "create table v_tbl (i int primary key, v varchar(100), index (v(10)))",
    8178  - Expected: []sql.Row{{sql.NewOkResult(0)}},
    8179  - },
    8180  - {
    8181  - Query: "show create table v_tbl",
    8182  - Expected: []sql.Row{{"v_tbl", "CREATE TABLE `v_tbl` (\n `i` int NOT NULL,\n `v` varchar(100),\n PRIMARY KEY (`i`),\n KEY `v` (`v`(10))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8183  - },
    8184  - },
    8185  - },
    8186  - {
    8187  - Name: "varchar keyless secondary index prefix",
     8198 + Name: "inline secondary indexes with collation",
    8188 8199   SetUpScript: []string{
    8189  - "create table t (v varchar(10))",
     8200 + "create table t (i int primary key, v1 varchar(10), v2 varchar(10), unique index (v1(3),v2(5))) collate utf8mb4_0900_ai_ci",
    8190 8201   },
    8191 8202   Assertions: []queries.ScriptTestAssertion{
    8192 8203   {
    8193  - Query: "alter table t add unique index (v(1))",
    8194  - Expected: []sql.Row{{sql.NewOkResult(0)}},
    8195  - },
    8196  - {
    8197 8204   Query: "show create table t",
    8198  - Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `v` varchar(10),\n UNIQUE KEY `v` (`v`(1))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8199  - },
    8200  - {
    8201  - Query: "insert into t values ('aa'), ('ab')",
    8202  - ExpectedErr: sql.ErrUniqueKeyViolation,
    8203  - },
    8204  - {
    8205  - Query: "create table v_tbl (v varchar(100), index (v(10)))",
    8206  - Expected: []sql.Row{{sql.NewOkResult(0)}},
    8207  - },
    8208  - {
    8209  - Query: "show create table v_tbl",
    8210  - Expected: []sql.Row{{"v_tbl", "CREATE TABLE `v_tbl` (\n `v` varchar(100),\n KEY `v` (`v`(10))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8211  - },
    8212  - },
    8213  - },
    8214  - {
    8215  - Name: "char primary key prefix",
    8216  - SetUpScript: []string{
    8217  - "create table t (c char(100))",
    8218  - },
    8219  - Assertions: []queries.ScriptTestAssertion{
    8220  - {
    8221  - Query: "alter table t add primary key (c(10))",
    8222  - ExpectedErr: sql.ErrUnsupportedIndexPrefix,
    8223  - },
    8224  - {
    8225  - Query: "create table c_tbl (c char(100), primary key (c(10)))",
    8226  - ExpectedErr: sql.ErrUnsupportedIndexPrefix,
    8227  - },
    8228  - },
    8229  - },
    8230  - {
    8231  - Name: "char keyed secondary index prefix",
    8232  - SetUpScript: []string{
    8233  - "create table t (i int primary key, c char(10))",
    8234  - },
    8235  - Assertions: []queries.ScriptTestAssertion{
    8236  - {
    8237  - Query: "alter table t add unique index (c(1))",
    8238  - Expected: []sql.Row{{sql.NewOkResult(0)}},
     8205 + Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `i` int NOT NULL,\n `v1` varchar(10) COLLATE utf8mb4_0900_ai_ci,\n `v2` varchar(10) COLLATE utf8mb4_0900_ai_ci,\n PRIMARY KEY (`i`),\n UNIQUE KEY `v1v2` (`v1`(3),`v2`(5))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci"}},
    8239 8206   },
    8240 8207   {
    8241  - Query: "show create table t",
    8242  - Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `i` int NOT NULL,\n `c` char(10),\n PRIMARY KEY (`i`),\n UNIQUE KEY `c` (`c`(1))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
     8208 + Query: "insert into t values (0, 'a', 'a'), (1, 'ab','ab'), (2, 'abc', 'abc'), (3, 'abcde', 'abcde')",
     8209 + Expected: []sql.Row{{sql.NewOkResult(4)}},
    8243 8210   },
    8244 8211   {
    8245  - Query: "insert into t values (0, 'aa'), (1, 'ab')",
     8212 + Query: "insert into t values (99, 'ABC', 'ABCDE')",
    8246 8213   ExpectedErr: sql.ErrUniqueKeyViolation,
    8247 8214   },
    8248 8215   {
    8249  - Query: "create table c_tbl (i int primary key, c varchar(100), index (c(10)))",
    8250  - Expected: []sql.Row{{sql.NewOkResult(0)}},
    8251  - },
    8252  - {
    8253  - Query: "show create table c_tbl",
    8254  - Expected: []sql.Row{{"c_tbl", "CREATE TABLE `c_tbl` (\n `i` int NOT NULL,\n `c` varchar(100),\n PRIMARY KEY (`i`),\n KEY `c` (`c`(10))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8255  - },
    8256  - },
    8257  - },
    8258  - {
    8259  - Name: "char keyless secondary index prefix",
    8260  - SetUpScript: []string{
    8261  - "create table t (c char(10))",
    8262  - },
    8263  - Assertions: []queries.ScriptTestAssertion{
    8264  - {
    8265  - Query: "alter table t add unique index (c(1))",
    8266  - Expected: []sql.Row{{sql.NewOkResult(0)}},
     8216 + Query: "insert into t values (99, 'ABC', 'ABCDE')",
     8217 + ExpectedErrStr: "duplicate unique key given: [ABC,ABCDE]",
    8267 8218   },
    8268 8219   {
    8269  - Query: "show create table t",
    8270  - Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `c` char(10),\n UNIQUE KEY `c` (`c`(1))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8271  - },
    8272  - {
    8273  - Query: "insert into t values ('aa'), ('ab')",
     8220 + Query: "insert into t values (99, 'ABC123', 'ABCDE123')",
    8274 8221   ExpectedErr: sql.ErrUniqueKeyViolation,
    8275 8222   },
    8276 8223   {
    8277  - Query: "create table c_tbl (c char(100), index (c(10)))",
    8278  - Expected: []sql.Row{{sql.NewOkResult(0)}},
     8224 + Query: "insert into t values (99, 'ABC123', 'ABCDE123')",
     8225 + ExpectedErrStr: "duplicate unique key given: [ABC,ABCDE]",
    8279 8226   },
    8280 8227   {
    8281  - Query: "show create table c_tbl",
    8282  - Expected: []sql.Row{{"c_tbl", "CREATE TABLE `c_tbl` (\n `c` char(100),\n KEY `c` (`c`(10))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
     8228 + Query: "select * from t where v1 = 'A'",
     8229 + Expected: []sql.Row{
     8230 + {0, "a", "a"},
     8231 + },
    8283 8232   },
    8284  - },
    8285  - },
    8286  - {
    8287  - Name: "varbinary primary key prefix",
    8288  - SetUpScript: []string{
    8289  - "create table t (v varbinary(100))",
    8290  - },
    8291  - Assertions: []queries.ScriptTestAssertion{
    8292 8233   {
    8293  - Query: "alter table t add primary key (v(10))",
    8294  - ExpectedErr: sql.ErrUnsupportedIndexPrefix,
    8295  - },
    8296  - {
    8297  - Query: "create table v_tbl (v varbinary(100), primary key (v(10)))",
    8298  - ExpectedErr: sql.ErrUnsupportedIndexPrefix,
    8299  - },
    8300  - },
    8301  - },
    8302  - {
    8303  - Name: "varbinary keyed secondary index prefix",
    8304  - SetUpScript: []string{
    8305  - "create table t (i int primary key, v varbinary(10))",
    8306  - },
    8307  - Assertions: []queries.ScriptTestAssertion{
    8308  - {
    8309  - Query: "alter table t add unique index (v(1))",
    8310  - Expected: []sql.Row{{sql.NewOkResult(0)}},
    8311  - },
    8312  - {
    8313  - Query: "show create table t",
    8314  - Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `i` int NOT NULL,\n `v` varbinary(10),\n PRIMARY KEY (`i`),\n UNIQUE KEY `v` (`v`(1))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8315  - },
    8316  - {
    8317  - Query: "insert into t values (0, 'aa'), (1, 'ab')",
    8318  - ExpectedErr: sql.ErrUniqueKeyViolation,
     8234 + Query: "explain select * from t where v1 = 'A'",
     8235 + Expected: []sql.Row{
     8236 + {"Filter(t.v1 = 'A')"},
     8237 + {" └─ IndexedTableAccess(t)"},
     8238 + {" ├─ index: [t.v1,t.v2]"},
     8239 + {" ├─ filters: [{[A, A], [NULL, ∞)}]"},
     8240 + {" └─ columns: [i v1 v2]"},
     8241 + },
    8319 8242   },
    8320 8243   {
    8321  - Query: "create table v_tbl (i int primary key, v varbinary(100), index (v(10)))",
    8322  - Expected: []sql.Row{{sql.NewOkResult(0)}},
     8244 + Query: "select * from t where v1 = 'ABC'",
     8245 + Expected: []sql.Row{
     8246 + {2, "abc", "abc"},
     8247 + },
    8323 8248   },
    8324 8249   {
    8325  - Query: "show create table v_tbl",
    8326  - Expected: []sql.Row{{"v_tbl", "CREATE TABLE `v_tbl` (\n `i` int NOT NULL,\n `v` varbinary(100),\n PRIMARY KEY (`i`),\n KEY `v` (`v`(10))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
     8250 + Query: "explain select * from t where v1 = 'ABC'",
     8251 + Expected: []sql.Row{
     8252 + {"Filter(t.v1 = 'ABC')"},
     8253 + {" └─ IndexedTableAccess(t)"},
     8254 + {" ├─ index: [t.v1,t.v2]"},
     8255 + {" ├─ filters: [{[ABC, ABC], [NULL, ∞)}]"},
     8256 + {" └─ columns: [i v1 v2]"},
     8257 + },
    8327 8258   },
    8328  - },
    8329  - },
    8330  - {
    8331  - Name: "varbinary keyless secondary index prefix",
    8332  - SetUpScript: []string{
    8333  - "create table t (v varbinary(10))",
    8334  - },
    8335  - Assertions: []queries.ScriptTestAssertion{
    8336 8259   {
    8337  - Query: "alter table t add unique index (v(1))",
    8338  - Expected: []sql.Row{{sql.NewOkResult(0)}},
     8260 + Query: "select * from t where v1 = 'ABCD'",
     8261 + Expected: []sql.Row{},
    8339 8262   },
    8340 8263   {
    8341  - Query: "show create table t",
    8342  - Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `v` varbinary(10),\n UNIQUE KEY `v` (`v`(1))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
     8264 + Query: "explain select * from t where v1 = 'ABCD'",
     8265 + Expected: []sql.Row{
     8266 + {"Filter(t.v1 = 'ABCD')"},
     8267 + {" └─ IndexedTableAccess(t)"},
     8268 + {" ├─ index: [t.v1,t.v2]"},
     8269 + {" ├─ filters: [{[ABCD, ABCD], [NULL, ∞)}]"},
     8270 + {" └─ columns: [i v1 v2]"},
     8271 + },
    8343 8272   },
    8344 8273   {
    8345  - Query: "insert into t values ('aa'), ('ab')",
    8346  - ExpectedErr: sql.ErrUniqueKeyViolation,
     8274 + Query: "select * from t where v1 > 'A' and v1 < 'ABCDE'",
     8275 + Expected: []sql.Row{
     8276 + {1, "ab", "ab"},
     8277 + {2, "abc", "abc"},
     8278 + },
    8347 8279   },
    8348 8280   {
    8349  - Query: "create table v_tbl (v varbinary(100), index (v(10)))",
    8350  - Expected: []sql.Row{{sql.NewOkResult(0)}},
     8281 + Query: "explain select * from t where v1 > 'A' and v1 < 'ABCDE'",
     8282 + Expected: []sql.Row{
     8283 + {"Filter((t.v1 > 'A') AND (t.v1 < 'ABCDE'))"},
     8284 + {" └─ IndexedTableAccess(t)"},
     8285 + {" ├─ index: [t.v1,t.v2]"},
     8286 + {" ├─ filters: [{(A, ABCDE), [NULL, ∞)}]"},
     8287 + {" └─ columns: [i v1 v2]"},
     8288 + },
    8351 8289   },
    8352 8290   {
    8353  - Query: "show create table v_tbl",
    8354  - Expected: []sql.Row{{"v_tbl", "CREATE TABLE `v_tbl` (\n `v` varbinary(100),\n KEY `v` (`v`(10))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
     8291 + Query: "select * from t where v1 > 'A' and v2 < 'ABCDE'",
     8292 + Expected: []sql.Row{
     8293 + {1, "ab", "ab"},
     8294 + {2, "abc", "abc"},
     8295 + },
    8355 8296   },
    8356  - },
    8357  - },
    8358  - {
    8359  - Name: "binary primary key prefix",
    8360  - SetUpScript: []string{
    8361  - "create table t (b binary(100))",
    8362  - },
    8363  - Assertions: []queries.ScriptTestAssertion{
    8364 8297   {
    8365  - Query: "alter table t add primary key (b(10))",
    8366  - ExpectedErr: sql.ErrUnsupportedIndexPrefix,
     8298 + Query: "explain select * from t where v1 > 'A' and v2 < 'ABCDE'",
     8299 + Expected: []sql.Row{
     8300 + {"Filter((t.v1 > 'A') AND (t.v2 < 'ABCDE'))"},
     8301 + {" └─ IndexedTableAccess(t)"},
     8302 + {" ├─ index: [t.v1,t.v2]"},
     8303 + {" ├─ filters: [{(A, ∞), (NULL, ABCDE)}]"},
     8304 + {" └─ columns: [i v1 v2]"},
     8305 + },
    8367 8306   },
    8368 8307   {
    8369  - Query: "create table b_tbl (b binary(100), primary key (b(10)))",
    8370  - ExpectedErr: sql.ErrUnsupportedIndexPrefix,
     8308 + Query: "update t set v1 = concat(v1, 'Z') where v1 >= 'A'",
     8309 + Expected: []sql.Row{
     8310 + {sql.OkResult{RowsAffected: 4, InsertID: 0, Info: plan.UpdateInfo{Matched: 4, Updated: 4}}},
     8311 + },
    8371 8312   },
    8372  - },
    8373  - },
    8374  - {
    8375  - Name: "binary keyed secondary index prefix",
    8376  - SetUpScript: []string{
    8377  - "create table t (i int primary key, b binary(10))",
    8378  - },
    8379  - Assertions: []queries.ScriptTestAssertion{
    8380 8313   {
    8381  - Query: "alter table t add unique index (b(1))",
    8382  - Expected: []sql.Row{{sql.NewOkResult(0)}},
     8314 + Query: "explain update t set v1 = concat(v1, 'Z') where v1 >= 'A'",
     8315 + Expected: []sql.Row{
     8316 + {"Update"},
     8317 + {" └─ UpdateSource(SET t.v1 = concat(t.v1, 'Z'))"},
     8318 + {" └─ Filter(t.v1 >= 'A')"},
     8319 + {" └─ IndexedTableAccess(t)"},
     8320 + {" ├─ index: [t.v1,t.v2]"},
     8321 + {" └─ filters: [{[A, ∞), [NULL, ∞)}]"},
     8322 + },
    8383 8323   },
    8384 8324   {
    8385  - Query: "show create table t",
    8386  - Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `i` int NOT NULL,\n `b` binary(10),\n PRIMARY KEY (`i`),\n UNIQUE KEY `b` (`b`(1))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
     8325 + Query: "select * from t",
     8326 + Expected: []sql.Row{
     8327 + {0, "aZ", "a"},
     8328 + {1, "abZ", "ab"},
     8329 + {2, "abcZ", "abc"},
     8330 + {3, "abcdeZ", "abcde"},
     8331 + },
    8387 8332   },
    8388 8333   {
    8389  - Query: "insert into t values (0, 'aa'), (1, 'ab')",
    8390  - ExpectedErr: sql.ErrUniqueKeyViolation,
     8334 + Query: "delete from t where v1 >= 'A'",
     8335 + Expected: []sql.Row{
     8336 + {sql.OkResult{RowsAffected: 4}},
     8337 + },
    8391 8338   },
    8392 8339   {
    8393  - Query: "create table b_tbl (i int primary key, b binary(100), index (b(10)))",
    8394  - Expected: []sql.Row{{sql.NewOkResult(0)}},
     8340 + Query: "explain delete from t where v1 >= 'A'",
     8341 + Expected: []sql.Row{
     8342 + {"Delete"},
     8343 + {" └─ Filter(t.v1 >= 'A')"},
     8344 + {" └─ IndexedTableAccess(t)"},
     8345 + {" ├─ index: [t.v1,t.v2]"},
     8346 + {" └─ filters: [{[A, ∞), [NULL, ∞)}]"},
     8347 + },
    8395 8348   },
    8396 8349   {
    8397  - Query: "show create table b_tbl",
    8398  - Expected: []sql.Row{{"b_tbl", "CREATE TABLE `b_tbl` (\n `i` int NOT NULL,\n `b` binary(100),\n PRIMARY KEY (`i`),\n KEY `b` (`b`(10))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
     8350 + Query: "select * from t",
     8351 + Expected: []sql.Row{},
    8399 8352   },
    8400 8353   },
    8401 8354   },
     8355 + // TODO: these should eventually go in GMS, but it doesn't currently support index rewrite on column modify
    8402 8356   {
    8403  - Name: "binary keyless secondary index prefix",
     8357 + Name: "drop prefix lengths when modifying column to non string type",
    8404 8358   SetUpScript: []string{
    8405  - "create table t (b binary(10))",
     8359 + "create table t (j varchar(100), index (j(10)))",
    8406 8360   },
    8407 8361   Assertions: []queries.ScriptTestAssertion{
    8408 8362   {
    8409  - Query: "alter table t add unique index (b(1))",
    8410  - Expected: []sql.Row{{sql.NewOkResult(0)}},
     8363 + Query: "alter table t modify column j int",
     8364 + Expected: []sql.Row{{sql.OkResult{}}},
    8411 8365   },
    8412 8366   {
    8413 8367   Query: "show create table t",
    8414  - Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `b` binary(10),\n UNIQUE KEY `b` (`b`(1))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8415  - },
    8416  - {
    8417  - Query: "insert into t values ('aa'), ('ab')",
    8418  - ExpectedErr: sql.ErrUniqueKeyViolation,
    8419  - },
    8420  - {
    8421  - Query: "create table b_tbl (b binary(100), index (b(10)))",
    8422  - Expected: []sql.Row{{sql.NewOkResult(0)}},
    8423  - },
    8424  - {
    8425  - Query: "show create table b_tbl",
    8426  - Expected: []sql.Row{{"b_tbl", "CREATE TABLE `b_tbl` (\n `b` binary(100),\n KEY `b` (`b`(10))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8427  - },
    8428  - },
    8429  - },
    8430  - {
    8431  - Name: "blob primary key prefix",
    8432  - SetUpScript: []string{
    8433  - "create table t (b blob)",
    8434  - },
    8435  - Assertions: []queries.ScriptTestAssertion{
    8436  - {
    8437  - Query: "alter table t add primary key (b(10))",
    8438  - ExpectedErr: sql.ErrUnsupportedIndexPrefix,
    8439  - },
    8440  - {
    8441  - Query: "create table b_tbl (b blob, primary key (b(10)))",
    8442  - ExpectedErr: sql.ErrUnsupportedIndexPrefix,
     8368 + Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `j` int,\n KEY `j` (`j`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8443 8369   },
    8444 8370   },
    8445 8371   },
    8446 8372   {
    8447  - Name: "blob keyed secondary index prefix",
     8373 + Name: "drop prefix length when modifying columns to invalid string type",
    8448 8374   SetUpScript: []string{
    8449  - "create table t (i int primary key, b blob)",
     8375 + "create table t (j varchar(100), index (j(10)))",
    8450 8376   },
    8451 8377   Assertions: []queries.ScriptTestAssertion{
    8452 8378   {
    8453  - Query: "alter table t add unique index (b(1))",
    8454  - Expected: []sql.Row{{sql.NewOkResult(0)}},
     8379 + Query: "alter table t modify column j varchar(2)",
     8380 + Expected: []sql.Row{{sql.OkResult{}}},
    8455 8381   },
    8456 8382   {
    8457 8383   Query: "show create table t",
    8458  - Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `i` int NOT NULL,\n `b` blob,\n PRIMARY KEY (`i`),\n UNIQUE KEY `b` (`b`(1))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8459  - },
    8460  - {
    8461  - Query: "insert into t values (0, 'aa'), (1, 'ab')",
    8462  - ExpectedErr: sql.ErrUniqueKeyViolation,
    8463  - },
    8464  - {
    8465  - Query: "create table b_tbl (i int primary key, b blob, index (b(10)))",
    8466  - Expected: []sql.Row{{sql.NewOkResult(0)}},
    8467  - },
    8468  - {
    8469  - Query: "show create table b_tbl",
    8470  - Expected: []sql.Row{{"b_tbl", "CREATE TABLE `b_tbl` (\n `i` int NOT NULL,\n `b` blob,\n PRIMARY KEY (`i`),\n KEY `b` (`b`(10))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
     8384 + Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `j` varchar(2),\n KEY `j` (`j`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8471 8385   },
    8472 8386   },
    8473 8387   },
    8474 8388   {
    8475  - Name: "blob keyless secondary index prefix",
     8389 + Name: "preserve prefix length when modifying column to valid string type",
    8476 8390   SetUpScript: []string{
    8477  - "create table t (b blob)",
     8391 + "create table t (j varchar(100), index (j(10)))",
    8478 8392   },
    8479 8393   Assertions: []queries.ScriptTestAssertion{
    8480 8394   {
    8481  - Query: "alter table t add unique index (b(1))",
    8482  - Expected: []sql.Row{{sql.NewOkResult(0)}},
     8395 + Query: "alter table t modify column j varchar(200)",
     8396 + Expected: []sql.Row{{sql.OkResult{}}},
    8483 8397   },
    8484 8398   {
    8485 8399   Query: "show create table t",
    8486  - Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `b` blob,\n UNIQUE KEY `b` (`b`(1))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8487  - },
    8488  - {
    8489  - Query: "insert into t values ('aa'), ('ab')",
    8490  - ExpectedErr: sql.ErrUniqueKeyViolation,
    8491  - },
    8492  - {
    8493  - Query: "create table b_tbl (b blob, index (b(10)))",
    8494  - Expected: []sql.Row{{sql.NewOkResult(0)}},
    8495  - },
    8496  - {
    8497  - Query: "show create table b_tbl",
    8498  - Expected: []sql.Row{{"b_tbl", "CREATE TABLE `b_tbl` (\n `b` blob,\n KEY `b` (`b`(10))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
     8400 + Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `j` varchar(200),\n KEY `j` (`j`(10))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8499 8401   },
    8500 8402   },
    8501 8403   },
    8502 8404   {
    8503  - Name: "text primary key prefix",
    8504  - SetUpScript: []string{
    8505  - "create table t (t text)",
    8506  - },
    8507  - Assertions: []queries.ScriptTestAssertion{
    8508  - {
    8509  - Query: "alter table t add primary key (t(10))",
    8510  - ExpectedErr: sql.ErrUnsupportedIndexPrefix,
    8511  - },
    8512  - {
    8513  - Query: "create table b_tbl (t text, primary key (t(10)))",
    8514  - ExpectedErr: sql.ErrUnsupportedIndexPrefix,
    8515  - },
    8516  - },
    8517  - },
    8518  - {
    8519  - Name: "text keyed secondary index prefix",
     8405 + Name: "preserve prefix lengths when there are other unchanged prefix lengths",
    8520 8406   SetUpScript: []string{
    8521  - "create table t (i int primary key, t text)",
     8407 + "create table t (i varchar(100), j varchar(100), index (i(10), j(10)))",
    8522 8408   },
    8523 8409   Assertions: []queries.ScriptTestAssertion{
    8524 8410   {
    8525  - Query: "alter table t add unique index (t(1))",
    8526  - Expected: []sql.Row{{sql.NewOkResult(0)}},
     8411 + Query: "alter table t modify column j int",
     8412 + Expected: []sql.Row{{sql.OkResult{}}},
    8527 8413   },
    8528 8414   {
    8529 8415   Query: "show create table t",
    8530  - Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `i` int NOT NULL,\n `t` text,\n PRIMARY KEY (`i`),\n UNIQUE KEY `t` (`t`(1))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8531  - },
    8532  - {
    8533  - Query: "insert into t values (0, 'aa'), (1, 'ab')",
    8534  - ExpectedErr: sql.ErrUniqueKeyViolation,
    8535  - },
    8536  - {
    8537  - Query: "create table t_tbl (i int primary key, t text, index (t(10)))",
    8538  - Expected: []sql.Row{{sql.NewOkResult(0)}},
    8539  - },
    8540  - {
    8541  - Query: "show create table t_tbl",
    8542  - Expected: []sql.Row{{"t_tbl", "CREATE TABLE `t_tbl` (\n `i` int NOT NULL,\n `t` text,\n PRIMARY KEY (`i`),\n KEY `t` (`t`(10))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
     8416 + Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `i` varchar(100),\n `j` int,\n KEY `ij` (`i`(10),`j`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8543 8417   },
    8544 8418   },
    8545 8419   },
    8546 8420   {
    8547  - Name: "text keyless secondary index prefix",
     8421 + Name: "prefix length too long",
    8548 8422   SetUpScript: []string{
    8549  - "create table t (t text)",
     8423 + "create table t (i blob, index(i(3072)))",
    8550 8424   },
    8551 8425   Assertions: []queries.ScriptTestAssertion{
    8552 8426   {
    8553  - Query: "alter table t add unique index (t(1))",
    8554  - Expected: []sql.Row{{sql.NewOkResult(0)}},
    8555  - },
    8556  - {
    8557  - Query: "show create table t",
    8558  - Expected: []sql.Row{{"t", "CREATE TABLE `t` (\n `t` text,\n UNIQUE KEY `t` (`t`(1))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
    8559  - },
    8560  - {
    8561  - Query: "insert into t values ('aa'), ('ab')",
    8562  - ExpectedErr: sql.ErrUniqueKeyViolation,
    8563  - },
    8564  - {
    8565  - Query: "create table t_tbl (t text, index (t(10)))",
    8566  - Expected: []sql.Row{{sql.NewOkResult(0)}},
    8567  - },
    8568  - {
    8569  - Query: "show create table t_tbl",
    8570  - Expected: []sql.Row{{"t_tbl", "CREATE TABLE `t_tbl` (\n `t` text,\n KEY `t` (`t`(10))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_bin"}},
     8427 + Query: "alter table t modify column i text",
     8428 + ExpectedErr: sql.ErrKeyTooLong,
    8571 8429   },
    8572 8430   },
    8573 8431   },
    skipped 2 lines
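
The collation-aware assertions above follow from the index storing only the first 3 (v1) and 5 (v2) characters, weighted by utf8mb4_0900_ai_ci. A rough sketch of why 'ABC123' collides with the existing 'abc' row; strings.ToLower stands in for the real collation weighting, which is accent- as well as case-insensitive:

    // prefixKey is a hypothetical helper for illustration only.
    func prefixKey(v string, n int) string {
        folded := strings.ToLower(v) // stand-in for collation weighting
        if len(folded) > n {
            folded = folded[:n] // only the prefix is stored in the index
        }
        return folded
    }

    // prefixKey("ABC123", 3) == "abc" == prefixKey("abc", 3), so the
    // unique index over v1(3) reports a duplicate key.
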
  • ■ ■ ■ ■ ■
    go/libraries/doltcore/sqle/expreval/compare_ops.go
    skipped 53 lines
    54 54  }
    55 55   
    56 56  // CompareToNil returns false, as values are neither greater than, less than, nor equal to nil,
    57  -func (op EqualsOp) CompareToNil(types.Value) (bool, error) {
     57 + // except for EqualsOp when the compared value is itself null.
     58 +func (op EqualsOp) CompareToNil(v types.Value) (bool, error) {
     59 + if v == types.NullValue {
     60 + return true, nil
     61 + }
    58 62   return false, nil
    59 63  }
    60 64   
    skipped 136 lines
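
With this change EqualsOp is the one comparison op that can match nil: null compares equal to null, which is what lets IS NULL evaluate as an equality against a null literal (see expression_evaluator.go below). A minimal sketch:

    op := expreval.EqualsOp{}
    eq, _ := op.CompareToNil(types.NullValue) // true: null equals nil
    ne, _ := op.CompareToNil(types.Int(7))    // false: non-null values never match nil
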
  • ■ ■ ■ ■
    go/libraries/doltcore/sqle/expreval/compare_ops_test.go
    skipped 124 lines
    125 125   gte: false,
    126 126   lt: false,
    127 127   lte: false,
    128  - eq: false,
     128 + eq: true,
    129 129   },
    130 130   {
    131 131   name: "not nil",
    skipped 33 lines
  • ■ ■ ■ ■ ■ ■
    go/libraries/doltcore/sqle/expreval/expression_evaluator.go
    skipped 101 lines
    102 102   return newAndFunc(leftFunc, rightFunc), nil
    103 103   case *expression.InTuple:
    104 104   return newComparisonFunc(EqualsOp{}, typedExpr.BinaryExpression, sch)
     105 + case *expression.Not:
     106 + expFunc, err := getExpFunc(nbf, sch, typedExpr.Child)
     107 + if err != nil {
     108 + return nil, err
     109 + }
     110 + return newNotFunc(expFunc), nil
     111 + case *expression.IsNull:
     112 + return newComparisonFunc(EqualsOp{}, expression.BinaryExpression{Left: typedExpr.Child, Right: expression.NewLiteral(nil, sql.Null)}, sch)
    105 113   }
    106 114   
    107 115   return nil, errNotImplemented.New(exp.Type().String())
    skipped 28 lines
    136 144   }
    137 145   
    138 146   return right(ctx, vals)
     147 + }
     148 +}
     149 + 
     150 +func newNotFunc(exp ExpressionFunc) ExpressionFunc {
     151 + return func(ctx context.Context, vals map[uint64]types.Value) (b bool, err error) {
     152 + res, err := exp(ctx, vals)
     153 + if err != nil {
     154 + return false, err
     155 + }
     156 + 
     157 + return !res, nil
    139 158   }
    140 159  }
    141 160   
    skipped 184 lines
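
getExpFunc now lowers NOT by wrapping the child's ExpressionFunc and lowers IS NULL into an equality against a null literal. Inside the package, evaluating x IS NOT NULL composes the two (a sketch; |xExpr| is a hypothetical column expression):

    isNull, err := newComparisonFunc(EqualsOp{}, expression.BinaryExpression{
        Left:  xExpr,
        Right: expression.NewLiteral(nil, sql.Null),
    }, sch)
    if err != nil {
        return nil, err
    }
    isNotNull := newNotFunc(isNull) // inverts the child's result row by row
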
  • ■ ■ ■ ■ ■ ■
    go/libraries/doltcore/sqle/expreval/literal_helpers.go
    skipped 259 lines
    260 260  func literalAsTimestamp(literal *expression.Literal) (time.Time, error) {
    261 261   v := literal.Value()
    262 262   switch typedVal := v.(type) {
     263 + case time.Time:
     264 + return typedVal, nil
    263 265   case string:
    264 266   ts, err := parseDate(typedVal)
    265 267   
    skipped 9 lines
    275 277   
    276 278  // LiteralToNomsValue converts a go-mysql-server Literal into a noms value.
    277 279  func LiteralToNomsValue(kind types.NomsKind, literal *expression.Literal) (types.Value, error) {
     280 + if literal.Value() == nil {
     281 + return types.NullValue, nil
     282 + }
     283 + 
    278 284   switch kind {
    279 285   case types.IntKind:
    280 286   i64, err := literalAsInt64(literal)
    skipped 56 lines
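
The early return means a SQL NULL literal maps to types.NullValue no matter which noms kind the column has, and time.Time literals now pass through without a string round-trip. A minimal sketch (assuming the TimestampKind branch of the switch routes through literalAsTimestamp):

    nullLit := expression.NewLiteral(nil, sql.Null)
    v, _ := expreval.LiteralToNomsValue(types.IntKind, nullLit)
    // v == types.NullValue, regardless of the requested kind

    tsLit := expression.NewLiteral(time.Now(), sql.Datetime)
    ts, _ := expreval.LiteralToNomsValue(types.TimestampKind, tsLit)
    // ts carries the time.Time value directly, no parseDate round-trip
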
  • ■ ■ ■ ■ ■ ■
    go/libraries/doltcore/sqle/index/dolt_index.go
    skipped 63 lines
    64 64   return nil, nil
    65 65   }
    66 66   
    67  - // TODO: do this for other indexes?
    68 67   tableRows, err := t.GetRowData(ctx)
    69 68   if err != nil {
    70 69   return nil, err
    71 70   }
    72 71   keyBld := maybeGetKeyBuilder(tableRows)
    73 72   
    74  - // TODO: two primary keys???
    75 73   cols := sch.GetPKCols().GetColumns()
    76 74   
    77 75   // add to_ prefix
    skipped 19 lines
    97 95   order: sql.IndexOrderAsc,
    98 96   constrainedToLookupExpression: false,
    99 97   }
    100  - 
    101  - // TODO: need to add from_ columns
    102 98   
    103 99   return append(indexes, &toIndex), nil
    104 100  }
    skipped 290 lines
    395 391   
    396 392  // CanSupport implements sql.Index
    397 393  func (di *doltIndex) CanSupport(...sql.Range) bool {
    398  - if len(di.prefixLengths) > 0 {
    399  - return false
    400  - }
    401 394   return true
    402 395  }
    403 396   
    skipped 230 lines
    634 627   return nil
    635 628   }
    636 629   
     630 + // filters on indexes with prefix lengths are not completely handled
     631 + if len(di.prefixLengths) > 0 {
     632 + return nil
     633 + }
     634 + 
    637 635   var handled []sql.Expression
    638 636   for _, f := range filters {
    639 637   if expression.ContainsImpreciseComparison(f) {
    skipped 136 lines
    776 774   return pruned, nil
    777 775  }
    778 776   
     777 +// trimRangeCutValue trims a range-cut key value to the column's index prefix length, for string and []byte key parts
     778 +// TODO: this duplicates trimKeyPart in the secondary index writers; consider sharing one implementation
     779 +func (di *doltIndex) trimRangeCutValue(to int, keyPart interface{}) interface{} {
     780 + var prefixLength uint16
     781 + if len(di.prefixLengths) > to {
     782 + prefixLength = di.prefixLengths[to]
     783 + }
     784 + if prefixLength != 0 {
     785 + switch kp := keyPart.(type) {
     786 + case string:
     787 + if prefixLength > uint16(len(kp)) {
     788 + prefixLength = uint16(len(kp))
     789 + }
     790 + keyPart = kp[:prefixLength]
     791 + case []uint8:
     792 + if prefixLength > uint16(len(kp)) {
     793 + prefixLength = uint16(len(kp))
     794 + }
     795 + keyPart = kp[:prefixLength]
     796 + }
     797 + }
     798 + return keyPart
     799 +}
     800 + 
    779 801  func (di *doltIndex) prollyRangesFromSqlRanges(ctx context.Context, ns tree.NodeStore, ranges []sql.Range, tb *val.TupleBuilder) ([]prolly.Range, error) {
    780 802   ranges, err := pruneEmptyRanges(ranges)
    781 803   if err != nil {
    skipped 6 lines
    788 810   fields := make([]prolly.RangeField, len(rng))
    789 811   for j, expr := range rng {
    790 812   if rangeCutIsBinding(expr.LowerBound) {
    791  - bound := expr.LowerBound.TypeAsLowerBound()
    792  - fields[j].Lo = prolly.Bound{
    793  - Binding: true,
    794  - Inclusive: bound == sql.Closed,
    795  - }
    796 813   // accumulate bound values in |tb|
    797 814   v, err := getRangeCutValue(expr.LowerBound, rng[j].Typ)
    798 815   if err != nil {
    799 816   return nil, err
    800 817   }
    801  - if err = PutField(ctx, ns, tb, j, v); err != nil {
     818 + nv := di.trimRangeCutValue(j, v)
     819 + if err = PutField(ctx, ns, tb, j, nv); err != nil {
    802 820   return nil, err
    803 821   }
     822 + bound := expr.LowerBound.TypeAsLowerBound()
     823 + fields[j].Lo = prolly.Bound{
     824 + Binding: true,
     825 + Inclusive: bound == sql.Closed,
     826 + }
    804 827   } else {
    805 828   fields[j].Lo = prolly.Bound{}
    806 829   }
    skipped 7 lines
    814 837   for i, expr := range rng {
    815 838   if rangeCutIsBinding(expr.UpperBound) {
    816 839   bound := expr.UpperBound.TypeAsUpperBound()
    817  - fields[i].Hi = prolly.Bound{
    818  - Binding: true,
    819  - Inclusive: bound == sql.Closed,
    820  - }
    821 840   // accumulate bound values in |tb|
    822 841   v, err := getRangeCutValue(expr.UpperBound, rng[i].Typ)
    823 842   if err != nil {
    824 843   return nil, err
    825 844   }
    826  - if err = PutField(ctx, ns, tb, i, v); err != nil {
     845 + nv := di.trimRangeCutValue(i, v)
     846 + if err = PutField(ctx, ns, tb, i, nv); err != nil {
    827 847   return nil, err
     848 + }
     849 + fields[i].Hi = prolly.Bound{
     850 + Binding: true,
     851 + Inclusive: bound == sql.Closed || nv != v, // TODO (james): this might panic for []byte
    828 852   }
    829 853   } else {
    830 854   fields[i].Hi = prolly.Bound{}
    skipped 121 lines
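
trimRangeCutValue mirrors the trimming the index writers apply when keys are stored, so range cuts compare against what the index actually contains. Note the upper-bound tweak above: when the trimmed value differs from the original, the bound is widened to inclusive, since rows sharing the shortened prefix may still satisfy the predicate. A simplified, string-only sketch:

    // trimToPrefix is a hypothetical single-column version of trimRangeCutValue.
    func trimToPrefix(prefixLen uint16, kp string) string {
        if prefixLen == 0 {
            return kp // no prefix length on this column
        }
        if prefixLen > uint16(len(kp)) {
            prefixLen = uint16(len(kp)) // clamp for values shorter than the prefix
        }
        return kp[:prefixLen]
    }

    // With a v1(3) index, the cut 'ABCDE' trims to 'ABC'; the trimmed upper
    // bound then becomes inclusive so matching rows are not skipped.
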
  • ■ ■ ■ ■ ■ ■
    go/libraries/doltcore/sqle/index/prolly_fields.go
    skipped 193 lines
    194 194   if err != nil {
    195 195   return err
    196 196   }
    197  - h, err := serializeBytesToAddr(ctx, ns, bytes.NewReader(buf))
     197 + h, err := serializeBytesToAddr(ctx, ns, bytes.NewReader(buf), len(buf))
    198 198   if err != nil {
    199 199   return err
    200 200   }
    201 201   tb.PutJSONAddr(i, h)
    202 202   case val.BytesAddrEnc:
    203  - h, err := serializeBytesToAddr(ctx, ns, bytes.NewReader(v.([]byte)))
     203 + h, err := serializeBytesToAddr(ctx, ns, bytes.NewReader(v.([]byte)), len(v.([]byte)))
    204 204   if err != nil {
    205 205   return err
    206 206   }
    207 207   tb.PutBytesAddr(i, h)
    208 208   case val.StringAddrEnc:
    209 209   // TODO: v will be []byte after daylon's changes
    210  - h, err := serializeBytesToAddr(ctx, ns, bytes.NewReader([]byte(v.(string))))
     210 + h, err := serializeBytesToAddr(ctx, ns, bytes.NewReader([]byte(v.(string))), len(v.(string)))
    211 211   if err != nil {
    212 212   return err
    213 213   }
    skipped 91 lines
    305 305   }
    306 306  }
    307 307   
    308  -func serializeBytesToAddr(ctx context.Context, ns tree.NodeStore, r io.Reader) (hash.Hash, error) {
    309  - tree, err := tree.NewImmutableTreeFromReader(ctx, r, ns, tree.DefaultFixedChunkLength)
     308 +func serializeBytesToAddr(ctx context.Context, ns tree.NodeStore, r io.Reader, dataSize int) (hash.Hash, error) {
     309 + bb := ns.BlobBuilder()
     310 + bb.Init(dataSize)
     311 + _, addr, err := bb.Chunk(ctx, r)
    310 312   if err != nil {
    311 313   return hash.Hash{}, err
    312 314   }
    313  - return tree.Addr, nil
     315 + return addr, nil
    314 316  }
    315 317   
    316 318  func convJson(v interface{}) (buf []byte, err error) {
    skipped 7 lines
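
serializeBytesToAddr now takes the payload size so the node store's pooled BlobBuilder can size the tree up front, rather than building a new immutable tree per value. The calling pattern, as a sketch against a tree.NodeStore |ns|:

    data := v.([]byte)     // the out-of-band value being stored
    bb := ns.BlobBuilder() // pooled builder owned by the node store
    bb.Init(len(data))     // precomputes tree depth for this payload
    _, addr, err := bb.Chunk(ctx, bytes.NewReader(data))
    if err != nil {
        return err
    }
    tb.PutBytesAddr(i, addr)
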
  • ■ ■ ■ ■ ■ ■
    go/libraries/doltcore/sqle/indexed_dolt_table.go
    skipped 80 lines
    81 81   return nil, err
    82 82   }
    83 83   if idt.lb == nil || !canCache || idt.lb.Key() != key {
    84  - idt.lb, err = index.NewLookupBuilder(ctx, idt.table, idt.idx, key, nil, idt.table.sqlSch, idt.isDoltFormat)
     84 + idt.lb, err = index.NewLookupBuilder(ctx, idt.table, idt.idx, key, idt.table.projectedCols, idt.table.sqlSch, idt.isDoltFormat)
    85 85   if err != nil {
    86 86   return nil, err
    87 87   }
    skipped 10 lines
    98 98   return nil, err
    99 99   }
    100 100   if idt.lb == nil || !canCache || idt.lb.Key() != key {
    101  - idt.lb, err = index.NewLookupBuilder(ctx, idt.table, idt.idx, key, nil, idt.table.sqlSch, idt.isDoltFormat)
     101 + idt.lb, err = index.NewLookupBuilder(ctx, idt.table, idt.idx, key, idt.table.projectedCols, idt.table.sqlSch, idt.isDoltFormat)
    102 102   if err != nil {
    103 103   return nil, err
    104 104   }
    skipped 87 lines
  • ■ ■ ■ ■ ■
    go/libraries/doltcore/sqle/sqlselect_test.go
    skipped 1291 lines
    1292 1292   Name: "select from dolt_docs",
    1293 1293   AdditionalSetup: CreateTableFn("dolt_docs", doltdb.DocsSchema,
    1294 1294   "INSERT INTO dolt_docs VALUES ('LICENSE.md','A license')"),
    1295  - Query: "select * from dolt_docs",
    1296  - ExpectedRows: ToSqlRows(CompressSchema(doltdb.DocsSchema),
    1297  - NewRow(types.String("LICENSE.md"), types.String("A license"))),
     1295 + Query: "select * from dolt_docs",
     1296 + ExpectedRows: []sql.Row{{"LICENSE.md", "A license"}},
    1298 1297   ExpectedSchema: CompressSchema(doltdb.DocsSchema),
    1299 1298   },
    1300 1299   {
    skipped 443 lines
  • ■ ■ ■ ■ ■ ■
    go/libraries/doltcore/sqle/tables.go
    skipped 1334 lines
    1335 1335   // table to rewrite it we also truncate all the indexes. Much easier to get right.
    1336 1336   for _, index := range oldSch.Indexes().AllIndexes() {
    1337 1337   var colNames []string
    1338  - for _, colName := range index.ColumnNames() {
     1338 + prefixLengths := index.PrefixLengths()
     1339 + for i, colName := range index.ColumnNames() {
    1339 1340   if strings.ToLower(oldColumn.Name) == strings.ToLower(colName) {
    1340 1341   colNames = append(colNames, newColumn.Name)
     1342 + if len(prefixLengths) > 0 {
     1343 + if !sql.IsText(newColumn.Type) {
     1344 + // drop prefix lengths if column is not a string type
     1345 + prefixLengths[i] = 0
     1346 + } else if uint32(prefixLengths[i]) > newColumn.Type.MaxTextResponseByteLength() {
     1347 + // drop prefix length if prefixLength is too long
     1348 + prefixLengths[i] = 0
     1349 + }
     1350 + }
    1341 1351   } else {
    1342 1352   colNames = append(colNames, colName)
    1343 1353   }
    1344 1354   }
     1355 + 
     1356 + // check if prefixLengths should be dropped entirely
     1357 + var nonZeroPrefixLength bool
     1358 + for _, prefixLength := range prefixLengths {
     1359 + if prefixLength > 0 {
     1360 + nonZeroPrefixLength = true
     1361 + break
     1362 + }
     1363 + }
     1364 + if !nonZeroPrefixLength {
     1365 + prefixLengths = nil
     1366 + }
     1367 + 
    1345 1368   newSch.Indexes().AddIndexByColNames(
    1346 1369   index.Name(),
    1347 1370   colNames,
    1348  - index.PrefixLengths(),
     1371 + prefixLengths,
    1349 1372   schema.IndexProperties{
    1350 1373   IsUnique: index.IsUnique(),
    1351 1374   IsUserDefined: index.IsUserDefined(),
    skipped 1361 lines
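
The rewrite drops an index's prefix length whenever the modified column can no longer carry one: non-string types take no prefix, and a prefix longer than the new type's maximum length is meaningless. The two checks reduce to a predicate like this (a sketch; sql.IsText and MaxTextResponseByteLength are the GMS calls used above, keepPrefixLength is hypothetical):

    func keepPrefixLength(prefixLen uint16, newType sql.Type) bool {
        if !sql.IsText(newType) {
            return false // varchar -> int: prefixes only apply to string types
        }
        // varchar(100) with a 10-char prefix -> varchar(2): prefix too long
        return uint32(prefixLen) <= newType.MaxTextResponseByteLength()
    }

If every column's prefix ends up zero, the slice is dropped entirely, so SHOW CREATE TABLE prints a plain key with no prefix lengths.
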
  • ■ ■ ■ ■ ■ ■
    go/libraries/doltcore/sqle/writer/prolly_index_writer.go
    skipped 292 lines
    293 293   if prefixLength != 0 {
    294 294   switch kp := keyPart.(type) {
    295 295   case string:
     296 + if prefixLength > uint16(len(kp)) {
     297 + prefixLength = uint16(len(kp))
     298 + }
    296 299   keyPart = kp[:prefixLength]
    297 300   case []uint8:
     301 + if prefixLength > uint16(len(kp)) {
     302 + prefixLength = uint16(len(kp))
     303 + }
    298 304   keyPart = kp[:prefixLength]
    299 305   }
    300 306   }
    skipped 136 lines
  • ■ ■ ■ ■ ■
    go/libraries/doltcore/sqle/writer/prolly_index_writer_keyless.go
    skipped 217 lines
    218 218   if prefixLength != 0 {
    219 219   switch kp := keyPart.(type) {
    220 220   case string:
     221 + if prefixLength > uint16(len(kp)) {
     222 + prefixLength = uint16(len(kp))
     223 + }
    221 224   keyPart = kp[:prefixLength]
    222 225   case []uint8:
     226 + if prefixLength > uint16(len(kp)) {
     227 + prefixLength = uint16(len(kp))
     228 + }
    223 229   keyPart = kp[:prefixLength]
    224 230   }
    225 231   }
    skipped 78 lines
    304 310   
    305 311   for to := range writer.keyMap {
    306 312   from := writer.keyMap.MapOrdinal(to)
    307  - if err := index.PutField(ctx, writer.mut.NodeStore(), writer.keyBld, to, sqlRow[from]); err != nil {
     313 + keyPart := writer.trimKeyPart(to, sqlRow[from])
     314 + if err := index.PutField(ctx, writer.mut.NodeStore(), writer.keyBld, to, keyPart); err != nil {
    308 315   return err
    309 316   }
    310 317   }
    skipped 43 lines
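
Both the keyed and keyless secondary index writers get the same guard: clamp the prefix length to the value's length before slicing, so inserting a value shorter than the declared prefix no longer slices out of range. For example (sketch):

    kp, prefixLength := "ab", uint16(3) // value shorter than a v(3) prefix
    if prefixLength > uint16(len(kp)) {
        prefixLength = uint16(len(kp)) // clamp: key becomes "ab" instead of panicking
    }
    keyPart := kp[:prefixLength]
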
  • ■ ■ ■ ■ ■ ■
    go/performance/import_benchmarker/testdata/blob.yaml
     1 +opts:
     2 + seed: 0
     3 +tests:
     4 +- name: "sql"
     5 + repos:
     6 + - name: dolt
     7 + server:
     8 + port: 3308
     9 + args: [ "--port", "3308" ]
     10 +# - name: mysql # mysqld --port 3308 --local-infile=1 --socket=/tmp/mysqld2.sock
     11 +# external-server:
     12 +# name: test
     13 +# host: 127.0.0.1
     14 +# user: root
     15 +# password:
     16 +# port: 3309
     17 + tables:
     18 + - name: "1 blob"
     19 + rows: 1
     20 + schema: |
     21 + create table xy (
     22 + x int primary key,
     23 + y blob,
     24 + z varchar(30),
     25 + w varchar(30)
     26 + );
     27 + - name: "1 blob"
     28 + rows: 10
     29 + schema: |
     30 + create table xy (
     31 + x int primary key,
     32 + y blob,
     33 + z varchar(30),
     34 + w varchar(30)
     35 + );
     36 + - name: "1 blob"
     37 + rows: 20
     38 + schema: |
     39 + create table xy (
     40 + x int primary key,
     41 + y blob,
     42 + z varchar(30),
     43 + w varchar(30)
     44 + );
     45 + - name: "1 blob"
     46 + rows: 40
     47 + schema: |
     48 + create table xy (
     49 + x int primary key,
     50 + y blob,
     51 + z varchar(30),
     52 + w varchar(30)
     53 + );
     54 + - name: "1 blob"
     55 + rows: 50
     56 + schema: |
     57 + create table xy (
     58 + x int primary key,
     59 + y blob,
     60 + z varchar(30),
     61 + w varchar(30)
     62 + );
     63 + - name: "1 blob"
     64 + rows: 60
     65 + schema: |
     66 + create table xy (
     67 + x int primary key,
     68 + y blob,
     69 + z varchar(30),
     70 + w varchar(30)
     71 + );
     72 + - name: "1 blob"
     73 + rows: 80
     74 + schema: |
     75 + create table xy (
     76 + x int primary key,
     77 + y blob,
     78 + z varchar(30),
     79 + w varchar(30)
     80 + );
     81 + - name: "1 blob"
     82 + rows: 100
     83 + schema: |
     84 + create table xy (
     85 + x int primary key,
     86 + y blob,
     87 + z varchar(30),
     88 + w varchar(30)
     89 + );
     90 + - name: "1 blob"
     91 + rows: 200
     92 + schema: |
     93 + create table xy (
     94 + x int primary key,
     95 + y blob,
     96 + z varchar(30),
     97 + w varchar(30)
     98 + );
     99 + - name: "1 blob"
     100 + rows: 400
     101 + schema: |
     102 + create table xy (
     103 + x int primary key,
     104 + y blob,
     105 + z varchar(30),
     106 + w varchar(30)
     107 + );
     108 + - name: "1 blob"
     109 + rows: 600
     110 + schema: |
     111 + create table xy (
     112 + x int primary key,
     113 + y blob,
     114 + z varchar(30),
     115 + w varchar(30)
     116 + );
     117 + - name: "1 blob"
     118 + rows: 800
     119 + schema: |
     120 + create table xy (
     121 + x int primary key,
     122 + y blob,
     123 + z varchar(30),
     124 + w varchar(30)
     125 + );
     126 + - name: "1 blob"
     127 + rows: 1000
     128 + schema: |
     129 + create table xy (
     130 + x int primary key,
     131 + y blob,
     132 + z varchar(30),
     133 + w varchar(30)
     134 + );
     135 + - name: "1 blob"
     136 + rows: 10000
     137 + schema: |
     138 + create table xy (
     139 + x int primary key,
     140 + y blob,
     141 + z varchar(30),
     142 + w varchar(30)
     143 + );
     144 + - name: "1 blob"
     145 + rows: 100000
     146 + schema: |
     147 + create table xy (
     148 + x int primary key,
     149 + y blob,
     150 + z varchar(30),
     151 + w varchar(30)
     152 + );
     153 + 
  • ■ ■ ■ ■ ■ ■
    go/store/prolly/tree/blob_bench_test.go
     1 +// Copyright 2022 Dolthub, Inc.
     2 +//
     3 +// Licensed under the Apache License, Version 2.0 (the "License");
     4 +// you may not use this file except in compliance with the License.
     5 +// You may obtain a copy of the License at
     6 +//
     7 +// http://www.apache.org/licenses/LICENSE-2.0
     8 +//
     9 +// Unless required by applicable law or agreed to in writing, software
     10 +// distributed under the License is distributed on an "AS IS" BASIS,
     11 +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 +// See the License for the specific language governing permissions and
     13 +// limitations under the License.
     14 + 
     15 +package tree
     16 + 
     17 +import (
     18 + "bytes"
     19 + "context"
     20 + "fmt"
     21 + "math"
     22 + "testing"
     23 + 
     24 + "github.com/dolthub/dolt/go/store/hash"
     25 +)
     26 + 
     27 +var result hash.Hash
     28 + 
     29 +func BenchmarkBlobBuilder(b *testing.B) {
     30 + var r hash.Hash
     31 + var err error
     32 + dataSizes := []int{1e3, 1e4, 1e5, 1e6}
     33 + for _, d := range dataSizes {
     34 + b.Run(fmt.Sprintf("datasize: %.0f", math.Log10(float64(d))), func(b *testing.B) {
     35 + ns := NewTestNodeStore()
     36 + bb := mustNewBlobBuilder(DefaultFixedChunkLength)
     37 + bb.SetNodeStore(ns)
     38 + buf := make([]byte, d)
     39 + for i := 0; i < d; i++ {
     40 + buf[i] = uint8(i)
     41 + }
     42 + 
     43 + b.ResetTimer()
     44 + for n := 0; n < b.N; n++ {
      45 + // always record the result to prevent the compiler
      46 + // from eliminating the function call.
     47 + bb.Init(d)
     48 + _, r, err = bb.Chunk(context.Background(), bytes.NewReader(buf))
     49 + if err != nil {
     50 + b.Fatal(err)
     51 + }
     52 + bb.Reset()
     53 + }
     54 + // always store the result to a package level variable
     55 + // so the compiler cannot eliminate the Benchmark itself.
     56 + result = r
     57 + })
     58 + }
     59 +}
     60 + 
  • ■ ■ ■ ■ ■ ■
    go/store/prolly/tree/blob_builder.go
     1 +// Copyright 2022 Dolthub, Inc.
     2 +//
     3 +// Licensed under the Apache License, Version 2.0 (the "License");
     4 +// you may not use this file except in compliance with the License.
     5 +// You may obtain a copy of the License at
     6 +//
     7 +// http://www.apache.org/licenses/LICENSE-2.0
     8 +//
     9 +// Unless required by applicable law or agreed to in writing, software
     10 +// distributed under the License is distributed on an "AS IS" BASIS,
     11 +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 +// See the License for the specific language governing permissions and
     13 +// limitations under the License.
     14 + 
     15 +package tree
     16 + 
     17 +import (
     18 + "bytes"
     19 + "context"
     20 + "encoding/json"
     21 + "errors"
     22 + "io"
     23 + 
     24 + "github.com/dolthub/go-mysql-server/sql"
     25 + 
     26 + "github.com/dolthub/dolt/go/store/hash"
     27 + "github.com/dolthub/dolt/go/store/prolly/message"
     28 +)
     29 + 
     30 +const DefaultFixedChunkLength = 4000
     31 + 
     32 +var ErrInvalidChunkSize = errors.New("invalid chunkSize; value must be a multiple of 20")
     33 + 
     34 +func mustNewBlobBuilder(chunkSize int) *BlobBuilder {
     35 + b, _ := NewBlobBuilder(chunkSize)
     36 + return b
     37 +}
     38 + 
      39 +// NewBlobBuilder returns a builder that writes the contents of a reader
      40 +// as an append-only tree via Chunk, returning the root node. |chunkSize|
      41 +// fixes the split size of leaf and intermediate node chunks.
     42 +func NewBlobBuilder(chunkSize int) (*BlobBuilder, error) {
     43 + if chunkSize%hash.ByteLen != 0 {
     44 + return nil, ErrInvalidChunkSize
     45 + }
     46 + 
     47 + keys := make([][]byte, chunkSize/hash.ByteLen)
     48 + for i := range keys {
     49 + keys[i] = zeroKey
     50 + }
     51 + return &BlobBuilder{
     52 + chunkSize: chunkSize,
     53 + keys: keys,
     54 + }, nil
     55 +}
     56 + 
     57 +type blobNodeWriter interface {
     58 + Write(ctx context.Context, r io.Reader) (hash.Hash, uint64, error)
     59 +}
     60 + 
     61 +type BlobBuilder struct {
     62 + ns NodeStore
     63 + S message.Serializer
     64 + chunkSize int
     65 + keys [][]byte
     66 + wr blobNodeWriter
     67 + lastN Node
     68 + topLevel int
     69 + 
     70 + levelCap int
     71 + buf []byte
     72 + vals [][]byte
     73 + subtrees []uint64
     74 +}
     75 + 
     76 +func (b *BlobBuilder) SetNodeStore(ns NodeStore) {
     77 + b.ns = ns
     78 + b.S = message.NewBlobSerializer(ns.Pool())
     79 +}
     80 + 
     81 +// Reset clears the BlobBuilder for re-use.
     82 +func (b *BlobBuilder) Reset() {
     83 + b.wr = nil
     84 + b.topLevel = 0
     85 +}
     86 + 
     87 +// Init calculates tree dimensions for a given blob.
     88 +func (b *BlobBuilder) Init(dataSize int) {
     89 + b.Reset()
     90 + 
     91 + if dataSize == 0 {
     92 + return
     93 + }
     94 + 
     95 + if dataSize <= b.chunkSize {
     96 + b.wr = &blobLeafWriter{
     97 + bb: b,
     98 + buf: make([]byte, dataSize),
     99 + }
     100 + return
     101 + }
     102 + 
     103 + b.wr = &blobLeafWriter{
     104 + bb: b,
     105 + buf: make([]byte, b.chunkSize),
     106 + }
     107 + 
     108 + numAddrs := b.chunkSize / hash.ByteLen
     109 + dataSize = dataSize / b.chunkSize
     110 + for dataSize > 0 {
     111 + dataSize = dataSize / numAddrs
     112 + b.topLevel += 1
     113 + }
     114 + 
      115 + // Allocate everything we need in one batch and slice it up below.
     116 + if b.levelCap < b.topLevel {
     117 + b.expand(numAddrs)
     118 + b.levelCap = b.topLevel
     119 + }
     120 + 
     121 + writers := make([]blobLevelWriter, b.topLevel)
     122 + for i, addrs := 0, 0; i < b.topLevel; i, addrs = i+1, addrs+numAddrs {
     123 + wr := &writers[i]
     124 + wr.bb = b
     125 + wr.child = b.wr
     126 + wr.buf = b.buf[addrs*hash.ByteLen : (addrs+numAddrs)*hash.ByteLen]
     127 + wr.vals = b.vals[addrs : addrs+numAddrs]
     128 + wr.subtrees = b.subtrees[addrs : addrs+numAddrs]
     129 + wr.level = i + 1
     130 + wr.sz = numAddrs
     131 + b.wr = wr
     132 + }
     133 +}
     134 + 
     135 +func (b *BlobBuilder) expand(numAddrs int) {
     136 + b.buf = make([]byte, b.topLevel*numAddrs*hash.ByteLen)
     137 + b.vals = make([][]byte, numAddrs*b.topLevel)
     138 + b.subtrees = make([]uint64, numAddrs*b.topLevel)
     139 +}
     140 + 
     141 +// Chunk builds the blob tree by passing the Reader to the chain of level
     142 +// writers, terminated in a leaf writer. The leaf writer reads chunks from the
     143 +// Reader and writes them, returning their hashes to its parent level writer.
     144 +// When the parent level writer fills up with addresses, it writes a chunk and
     145 +// returns that address to its parent. This continues until the Reader returns
     146 +// io.EOF, when every writer in the chain completes its chunk and we return the
     147 +// root node.
     148 +func (b *BlobBuilder) Chunk(ctx context.Context, r io.Reader) (Node, hash.Hash, error) {
     149 + if b.wr == nil {
     150 + return Node{}, hash.Hash{}, nil
     151 + }
     152 + h, _, err := b.wr.Write(ctx, r)
     153 + if err != nil && err != io.EOF {
     154 + return Node{}, hash.Hash{}, err
     155 + }
     156 + return b.lastN, h, nil
     157 +}
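Putting the pieces together, a minimal usage sketch mirroring how the test utilities below drive the builder (ctx, ns, and data are illustrative locals):

  bb := ns.BlobBuilder()  // pooled builder; see the NodeStore changes below
  bb.Init(len(data))      // resets the builder and sizes the writer chain
  root, addr, err := bb.Chunk(ctx, bytes.NewReader(data))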
     158 + 
     159 +// blobLeafWriter writes leaf chunks of the blob, with max capacity len(buf),
     160 +// for every call to Write().
     161 +type blobLeafWriter struct {
     162 + bb *BlobBuilder
     163 + buf []byte
     164 +}
     165 + 
     166 +var zeroKey = []byte{0}
     167 +var zeroKeys = [][]byte{zeroKey}
     168 +var leafSubtrees = []uint64{1}
     169 + 
     170 +func (lw *blobLeafWriter) Write(ctx context.Context, r io.Reader) (hash.Hash, uint64, error) {
     171 + n, err := r.Read(lw.buf)
     172 + if err != nil {
     173 + return hash.Hash{}, 0, err
     174 + }
     175 + h, err := lw.bb.write(ctx, zeroKeys, [][]byte{lw.buf[:n]}, leafSubtrees, 0)
     176 + return h, 1, err
     177 +}
     178 + 
      179 +// blobLevelWriter writes internal chunks of a blob, using its |child| to
     180 +// write the level below it. On a call to |Write|, it repeatedly calls
     181 +// |child.Write|, accumulating addresses to its children, until it fills up or
     182 +// the Reader is exhausted. In either case, it then writes its node and
     183 +// returns.
     184 +type blobLevelWriter struct {
     185 + bb *BlobBuilder
     186 + child blobNodeWriter
     187 + buf []byte
     188 + vals [][]byte
     189 + subtrees []uint64
     190 + sz int
     191 + level int
     192 +}
     193 + 
     194 +func (lw *blobLevelWriter) Write(ctx context.Context, r io.Reader) (hash.Hash, uint64, error) {
     195 + i, off, totalCount := 0, 0, uint64(0)
     196 + for {
     197 + // Sketchy hack to elide a copy here...
     198 + //h := (*hash.Hash)(unsafe.Pointer(&lw.buf[off]))
     199 + //var n uint64
     200 + //var err error
     201 + h, n, err := lw.child.Write(ctx, r)
     202 + if err != nil && err != io.EOF {
     203 + return hash.Hash{}, 0, err
     204 + }
     205 + if n != 0 {
     206 + totalCount += n
     207 + copy(lw.buf[off:], h[:])
     208 + lw.subtrees[i] = n
     209 + lw.vals[i] = lw.buf[off : off+hash.ByteLen]
     210 + i += 1
     211 + off += hash.ByteLen
     212 + }
     213 + if i >= lw.sz || err == io.EOF {
     214 + h, nerr := lw.bb.write(ctx, lw.bb.keys[:i], lw.vals[:i], lw.subtrees[:i], lw.level)
     215 + if nerr != nil {
     216 + return hash.Hash{}, 0, nerr
     217 + }
     218 + return h, totalCount, err
     219 + }
     220 + }
     221 +}
     222 + 
      223 +// write serializes and stores a blob node. It is called by level and leaf
      224 +// writers, and records lastN when |level| is the root level.
     225 +func (b *BlobBuilder) write(ctx context.Context, keys, vals [][]byte, subtrees []uint64, level int) (hash.Hash, error) {
     226 + msg := b.S.Serialize(keys, vals, subtrees, level)
     227 + node, err := NodeFromBytes(msg)
     228 + if err != nil {
     229 + return hash.Hash{}, err
     230 + }
     231 + h, err := b.ns.Write(ctx, node)
     232 + if err != nil {
     233 + return hash.Hash{}, err
     234 + }
     235 + if level == b.topLevel {
     236 + b.lastN = node
     237 + }
     238 + return h, nil
     239 +}
     240 + 
     241 +const bytePeekLength = 128
     242 + 
     243 +type ByteArray struct {
     244 + ImmutableTree
     245 +}
     246 + 
     247 +func NewByteArray(addr hash.Hash, ns NodeStore) *ByteArray {
     248 + return &ByteArray{ImmutableTree{Addr: addr, ns: ns}}
     249 +}
     250 + 
     251 +func (b *ByteArray) ToBytes(ctx context.Context) ([]byte, error) {
     252 + return b.bytes(ctx)
     253 +}
     254 + 
     255 +func (b *ByteArray) ToString(ctx context.Context) (string, error) {
     256 + buf, err := b.bytes(ctx)
     257 + if err != nil {
     258 + return "", err
     259 + }
     260 + toShow := bytePeekLength
     261 + if len(buf) < toShow {
     262 + toShow = len(buf)
     263 + }
     264 + return string(buf[:toShow]), nil
     265 +}
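Reading a blob back is the inverse operation: wrap the root address in a ByteArray and let the embedded ImmutableTree lazily walk the tree, concatenating leaf values. A sketch, assuming |addr| was returned by Chunk above:

  ba := NewByteArray(addr, ns)
  data, err := ba.ToBytes(ctx) // loads and concatenates leaf chunks on first use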
     266 + 
     267 +type JSONDoc struct {
     268 + ImmutableTree
     269 +}
     270 + 
     271 +func NewJSONDoc(addr hash.Hash, ns NodeStore) *JSONDoc {
     272 + return &JSONDoc{ImmutableTree{Addr: addr, ns: ns}}
     273 +}
     274 + 
     275 +func (b *JSONDoc) ToJSONDocument(ctx context.Context) (sql.JSONDocument, error) {
     276 + buf, err := b.bytes(ctx)
     277 + if err != nil {
     278 + return sql.JSONDocument{}, err
     279 + }
     280 + var doc sql.JSONDocument
     281 + err = json.Unmarshal(buf, &doc.Val)
     282 + if err != nil {
     283 + return sql.JSONDocument{}, err
     284 + }
     285 + return doc, err
     286 +}
     287 + 
     288 +func (b *JSONDoc) ToString(ctx context.Context) (string, error) {
     289 + buf, err := b.bytes(ctx)
     290 + if err != nil {
     291 + return "", err
     292 + }
     293 + toShow := bytePeekLength
     294 + if len(buf) < toShow {
     295 + toShow = len(buf)
     296 + }
     297 + return string(buf[:toShow]), nil
     298 +}
     299 + 
     300 +type TextStorage struct {
     301 + ImmutableTree
     302 +}
     303 + 
     304 +func NewTextStorage(addr hash.Hash, ns NodeStore) *TextStorage {
     305 + return &TextStorage{ImmutableTree{Addr: addr, ns: ns}}
     306 +}
     307 + 
     308 +func (b *TextStorage) ToBytes(ctx context.Context) ([]byte, error) {
     309 + return b.bytes(ctx)
     310 +}
     311 + 
     312 +func (b *TextStorage) ToString(ctx context.Context) (string, error) {
     313 + buf, err := b.bytes(ctx)
     314 + if err != nil {
     315 + return "", err
     316 + }
     317 + return string(buf), nil
     318 +}
     319 + 
     320 +type ImmutableTree struct {
     321 + Addr hash.Hash
     322 + buf []byte
     323 + ns NodeStore
     324 +}
     325 + 
     326 +func (t *ImmutableTree) load(ctx context.Context) error {
     327 + if t.Addr.IsEmpty() {
     328 + t.buf = []byte{}
     329 + return nil
     330 + }
     331 + n, err := t.ns.Read(ctx, t.Addr)
     332 + if err != nil {
     333 + return err
     334 + }
     335 + 
     336 + return WalkNodes(ctx, n, t.ns, func(ctx context.Context, n Node) error {
     337 + if n.IsLeaf() {
     338 + t.buf = append(t.buf, n.GetValue(0)...)
     339 + }
     340 + return nil
     341 + })
     342 +}
     343 + 
     344 +func (t *ImmutableTree) bytes(ctx context.Context) ([]byte, error) {
     345 + if t.buf == nil {
     346 + err := t.load(ctx)
     347 + if err != nil {
     348 + return nil, err
     349 + }
     350 + }
     351 + return t.buf[:], nil
     352 +}
     353 + 
     354 +func (t *ImmutableTree) next() (Node, error) {
     355 + panic("not implemented")
     356 +}
     357 + 
     358 +func (t *ImmutableTree) close() error {
     359 + panic("not implemented")
     360 +}
     361 + 
     362 +func (t *ImmutableTree) Read(_ bytes.Buffer) (int, error) {
     363 + panic("not implemented")
     364 +}
     365 + 
  • ■ ■ ■ ■ ■ ■
go/store/prolly/tree/immutable_tree_test.go → go/store/prolly/tree/blob_builder_test.go
    skipped 34 lines
    35 35   tests := []struct {
    36 36   inputSize int
    37 37   chunkSize int
    38  - err error
     38 + execErr error
     39 + initErr error
    39 40   checkSum bool
    40 41   }{
    41 42   {
    skipped 6 lines
    48 49   },
    49 50   {
    50 51   inputSize: 100,
    51  - chunkSize: 101,
     52 + chunkSize: 100,
    52 53   },
    53 54   {
    54 55   inputSize: 255,
    skipped 25 lines
    80 81   },
    81 82   {
    82 83   inputSize: 1_000,
    83  - chunkSize: 47,
     84 + chunkSize: 40,
    84 85   checkSum: false,
    85 86   },
    86 87   {
    87 88   inputSize: 1_000,
    88  - chunkSize: 53,
     89 + chunkSize: 60,
    89 90   checkSum: false,
    90 91   },
    91 92   {
    92 93   inputSize: 1_000,
    93  - chunkSize: 67,
     94 + chunkSize: 80,
    94 95   checkSum: false,
    95 96   },
    96 97   {
    97 98   inputSize: 10_000,
    98  - chunkSize: 89,
     99 + chunkSize: 100,
    99 100   checkSum: false,
    100 101   },
    101 102   {
    skipped 7 lines
    109 110   checkSum: false,
    110 111   },
    111 112   {
    112  - inputSize: 50_000_000,
    113  - chunkSize: 33_000,
    114  - err: ErrInvalidChunkSize,
    115  - },
    116  - {
    117  - inputSize: 10,
    118  - chunkSize: 1,
    119  - err: ErrInvalidChunkSize,
     113 + inputSize: 0,
     114 + chunkSize: 40,
    120 115   },
    121 116   {
    122  - inputSize: 10,
    123  - chunkSize: -1,
    124  - err: ErrInvalidChunkSize,
    125  - },
    126  - {
    127  - inputSize: 10,
    128  - chunkSize: 39,
    129  - err: ErrInvalidChunkSize,
     117 + inputSize: 100,
     118 + chunkSize: 41,
     119 + initErr: ErrInvalidChunkSize,
    130 120   },
    131 121   }
    132 122   
    skipped 6 lines
    139 129   ctx := context.Background()
    140 130   r := bytes.NewReader(buf)
    141 131   ns := NewTestNodeStore()
    142  - serializer := message.NewBlobSerializer(ns.Pool())
    143  - root, err := buildImmutableTree(ctx, r, ns, serializer, tt.chunkSize)
    144  - if tt.err != nil {
    145  - require.True(t, errors.Is(err, tt.err))
     133 + 
     134 + b, err := NewBlobBuilder(tt.chunkSize)
     135 + if tt.initErr != nil {
     136 + require.True(t, errors.Is(err, tt.initErr))
     137 + return
     138 + }
     139 + b.SetNodeStore(ns)
     140 + b.Init(tt.inputSize)
     141 + root, _, err := b.Chunk(ctx, r)
     142 + 
     143 + if tt.execErr != nil {
     144 + require.True(t, errors.Is(err, tt.execErr))
    146 145   return
    147 146   }
    148 147   require.NoError(t, err)
    skipped 9 lines
    158 157   sum := 0
    159 158   byteCnt := 0
    160 159   WalkNodes(ctx, root, ns, func(ctx context.Context, n Node) error {
     160 + if n.empty() {
     161 + return nil
     162 + }
    161 163   var keyCnt int
    162 164   leaf := n.IsLeaf()
    163 165   if leaf {
    skipped 78 lines
    242 244  }
    243 245   
    244 246  func expectedUnfilled(size, chunk int) int {
    245  - if size == chunk {
     247 + if size == chunk || size == 0 {
    246 248   return 0
    247 249   } else if size < chunk {
    248 250   return 1
    skipped 27 lines
    276 278   }{
    277 279   {
    278 280   blobLen: 250,
    279  - chunkSize: 41,
     281 + chunkSize: 60,
    280 282   keyCnt: 4,
    281 283   },
    282 284   {
    skipped 3 lines
    286 288   },
    287 289   {
    288 290   blobLen: 378,
    289  - chunkSize: 43,
     291 + chunkSize: 60,
    290 292   keyCnt: 12,
    291 293   },
    292 294   {
    skipped 7 lines
    300 302   keyCnt: 6,
    301 303   },
    302 304   {
    303  - blobLen: 0,
    304  - chunkSize: 40,
    305  - keyCnt: 6,
    306  - },
    307  - {
    308 305   blobLen: 50_000_000,
    309 306   chunkSize: 4000,
    310 307   keyCnt: 1,
    311 308   },
    312 309   {
    313 310   blobLen: 10_000,
    314  - chunkSize: 83,
     311 + chunkSize: 80,
    315 312   keyCnt: 6,
    316 313   },
    317 314   }
    skipped 44 lines
    362 359   keyBld.PutUint32(0, uint32(i))
    363 360   tuples[i][0] = keyBld.Build(sharedPool)
    364 361   
    365  - b := mustNewBlob(ctx, ns, blobLen, chunkSize)
    366  - valBld.PutBytesAddr(0, b.Addr)
     362 + addr := mustNewBlob(ctx, ns, blobLen, chunkSize)
     363 + valBld.PutBytesAddr(0, addr)
    367 364   tuples[i][1] = valBld.Build(sharedPool)
    368 365   }
    369 366   
    skipped 9 lines
    379 376   return root
    380 377  }
    381 378   
    382  -func mustNewBlob(ctx context.Context, ns NodeStore, len, chunkSize int) *ImmutableTree {
     379 +func mustNewBlob(ctx context.Context, ns NodeStore, len, chunkSize int) hash.Hash {
    383 380   buf := make([]byte, len)
    384 381   for i := range buf {
    385 382   buf[i] = byte(i)
    386 383   }
    387 384   r := bytes.NewReader(buf)
    388  - root, err := NewImmutableTreeFromReader(ctx, r, ns, chunkSize)
     385 + b, err := NewBlobBuilder(chunkSize)
    389 386   if err != nil {
    390 387   panic(err)
    391 388   }
    392  - return root
     389 + b.SetNodeStore(ns)
     390 + b.Init(len)
     391 + _, addr, err := b.Chunk(ctx, r)
     392 + if err != nil {
     393 + panic(err)
     394 + }
     395 + return addr
    393 396  }
    394 397   
    395 398  func getBlobValues(msg serial.Message) []byte {
    skipped 8 lines
  • ■ ■ ■ ■ ■ ■
    go/store/prolly/tree/immutable_tree.go
    1  -// Copyright 2022 Dolthub, Inc.
    2  -//
    3  -// Licensed under the Apache License, Version 2.0 (the "License");
    4  -// you may not use this file except in compliance with the License.
    5  -// You may obtain a copy of the License at
    6  -//
    7  -// http://www.apache.org/licenses/LICENSE-2.0
    8  -//
    9  -// Unless required by applicable law or agreed to in writing, software
    10  -// distributed under the License is distributed on an "AS IS" BASIS,
    11  -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  -// See the License for the specific language governing permissions and
    13  -// limitations under the License.
    14  - 
    15  -package tree
    16  - 
    17  -import (
    18  - "bytes"
    19  - "context"
    20  - "encoding/json"
    21  - "errors"
    22  - "io"
    23  - 
    24  - "github.com/dolthub/go-mysql-server/sql"
    25  - 
    26  - "github.com/dolthub/dolt/go/store/hash"
    27  - "github.com/dolthub/dolt/go/store/prolly/message"
    28  -)
    29  - 
    30  -const DefaultFixedChunkLength = 4000
    31  - 
    32  -var ErrInvalidChunkSize = errors.New("invalid chunkSize; value must be > 1")
    33  - 
    34  -// buildImmutableTree writes the contents of |reader| as an append-only
    35  -// tree, returning the root node or an error if applicable. |chunkSize|
    36  -// fixes the split size of leaf and intermediate node chunks.
    37  -func buildImmutableTree(ctx context.Context, r io.Reader, ns NodeStore, S message.Serializer, chunkSize int) (Node, error) {
    38  - if chunkSize < hash.ByteLen*2 || chunkSize > int(message.MaxVectorOffset)/2 {
    39  - // internal nodes must fit at least two 20-byte hashes
    40  - return Node{}, ErrInvalidChunkSize
    41  - }
    42  - 
    43  - var levels [][]novelNode
    44  - var levelCnts []int
    45  - var finalize bool
    46  - 
    47  - // We use lookahead to check whether the reader has
    48  - // more bytes. The reader will only EOF when reading
    49  - // zero bytes into the lookahead buffer, but we want
    50  - // to know at the beginning of a loop whether we are
    51  - // finished.
    52  - lookahead := make([]byte, chunkSize)
    53  - lookaheadN, err := r.Read(lookahead)
    54  - if err != nil {
    55  - return Node{}, err
    56  - }
    57  - 
    58  - buf := make([]byte, chunkSize)
    59  - for {
    60  - copy(buf, lookahead)
    61  - curN := lookaheadN
    62  - lookaheadN, err = r.Read(lookahead)
    63  - if err == io.EOF {
    64  - finalize = true
    65  - } else if err != nil {
    66  - return Node{}, err
    67  - }
    68  - 
    69  - novel, err := _newLeaf(ctx, ns, S, buf[:curN])
    70  - if err != nil {
    71  - return Node{}, err
    72  - }
    73  - 
    74  - i := 0
    75  - for {
     76  - // Four cases for building tree
    77  - // 1) reached new level => create new level
    78  - // 2) add novel node to current level
    79  - // 3) we didn't fill the current level => break
    80  - // 4) we filled current level, chunk and recurse into parent
    81  - //
    82  - // Two cases for finalizing tree
    83  - // 1) we haven't hit root, so we add the final chunk, finalize level, and continue upwards
    84  - // 2) we overshot root finalizing chunks, and we return the single root in the lower level
    85  - if i > len(levels)-1 {
    86  - levels = append(levels, make([]novelNode, chunkSize))
    87  - levelCnts = append(levelCnts, 0)
    88  - }
    89  - 
    90  - levels[i][levelCnts[i]] = novel
    91  - levelCnts[i]++
    92  - // note: the size of an internal node will be the key count times key length (hash)
    93  - if levelCnts[i]*hash.ByteLen < chunkSize {
    94  - // current level is not full
    95  - if !finalize {
    96  - // only continue and chunk this level if finalizing all in-progress nodes
    97  - break
    98  - }
    99  - }
    100  - 
    101  - nodes := levels[i][:levelCnts[i]]
    102  - if len(nodes) == 1 && i == len(levels)-1 {
    103  - // this is necessary and only possible if we're finalizing
    104  - // note: this is the only non-error return
    105  - return nodes[0].node, nil
    106  - }
    107  - 
    108  - // chunk the current level
    109  - novel, err = _newInternal(ctx, ns, S, nodes, i+1, chunkSize)
    110  - if err != nil {
    111  - return Node{}, err
    112  - }
    113  - levelCnts[i] = 0
    114  - i++
    115  - }
    116  - }
    117  -}
    118  - 
    119  -func _newInternal(ctx context.Context, ns NodeStore, s message.Serializer, nodes []novelNode, level int, chunkSize int) (novelNode, error) {
    120  - keys := make([][]byte, len(nodes))
    121  - vals := make([][]byte, len(nodes))
    122  - subtrees := make([]uint64, len(nodes))
    123  - treeCnt := uint64(0)
    124  - for i := range nodes {
    125  - keys[i] = []byte{0}
    126  - vals[i] = nodes[i].addr[:]
    127  - subtrees[i] = nodes[i].treeCount
    128  - treeCnt += nodes[i].treeCount
    129  - }
    130  - msg := s.Serialize(keys, vals, subtrees, level)
    131  - node, err := NodeFromBytes(msg)
    132  - if err != nil {
    133  - return novelNode{}, err
    134  - }
    135  - addr, err := ns.Write(ctx, node)
    136  - if err != nil {
    137  - return novelNode{}, err
    138  - }
    139  - return novelNode{
    140  - addr: addr,
    141  - node: node,
    142  - lastKey: []byte{0},
    143  - treeCount: treeCnt,
    144  - }, nil
    145  -}
    146  - 
    147  -func _newLeaf(ctx context.Context, ns NodeStore, s message.Serializer, buf []byte) (novelNode, error) {
    148  - msg := s.Serialize([][]byte{{0}}, [][]byte{buf}, []uint64{1}, 0)
    149  - node, err := NodeFromBytes(msg)
    150  - if err != nil {
    151  - return novelNode{}, err
    152  - }
    153  - addr, err := ns.Write(ctx, node)
    154  - if err != nil {
    155  - return novelNode{}, err
    156  - }
    157  - return novelNode{
    158  - addr: addr,
    159  - node: node,
    160  - lastKey: []byte{0},
    161  - treeCount: 1,
    162  - }, nil
    163  -}
    164  - 
    165  -const bytePeekLength = 128
    166  - 
    167  -type ByteArray struct {
    168  - ImmutableTree
    169  -}
    170  - 
    171  -func NewByteArray(addr hash.Hash, ns NodeStore) *ByteArray {
    172  - return &ByteArray{ImmutableTree{Addr: addr, ns: ns}}
    173  -}
    174  - 
    175  -func (b *ByteArray) ToBytes(ctx context.Context) ([]byte, error) {
    176  - return b.bytes(ctx)
    177  -}
    178  - 
    179  -func (b *ByteArray) ToString(ctx context.Context) (string, error) {
    180  - buf, err := b.bytes(ctx)
    181  - if err != nil {
    182  - return "", err
    183  - }
    184  - toShow := bytePeekLength
    185  - if len(buf) < toShow {
    186  - toShow = len(buf)
    187  - }
    188  - return string(buf[:toShow]), nil
    189  -}
    190  - 
    191  -type JSONDoc struct {
    192  - ImmutableTree
    193  -}
    194  - 
    195  -func NewJSONDoc(addr hash.Hash, ns NodeStore) *JSONDoc {
    196  - return &JSONDoc{ImmutableTree{Addr: addr, ns: ns}}
    197  -}
    198  - 
    199  -func (b *JSONDoc) ToJSONDocument(ctx context.Context) (sql.JSONDocument, error) {
    200  - buf, err := b.bytes(ctx)
    201  - if err != nil {
    202  - return sql.JSONDocument{}, err
    203  - }
    204  - var doc sql.JSONDocument
    205  - err = json.Unmarshal(buf, &doc.Val)
    206  - if err != nil {
    207  - return sql.JSONDocument{}, err
    208  - }
    209  - return doc, err
    210  -}
    211  - 
    212  -func (b *JSONDoc) ToString(ctx context.Context) (string, error) {
    213  - buf, err := b.bytes(ctx)
    214  - if err != nil {
    215  - return "", err
    216  - }
    217  - toShow := bytePeekLength
    218  - if len(buf) < toShow {
    219  - toShow = len(buf)
    220  - }
    221  - return string(buf[:toShow]), nil
    222  -}
    223  - 
    224  -type TextStorage struct {
    225  - ImmutableTree
    226  -}
    227  - 
    228  -func NewTextStorage(addr hash.Hash, ns NodeStore) *TextStorage {
    229  - return &TextStorage{ImmutableTree{Addr: addr, ns: ns}}
    230  -}
    231  - 
    232  -func (b *TextStorage) ToBytes(ctx context.Context) ([]byte, error) {
    233  - return b.bytes(ctx)
    234  -}
    235  - 
    236  -func (b *TextStorage) ToString(ctx context.Context) (string, error) {
    237  - buf, err := b.bytes(ctx)
    238  - if err != nil {
    239  - return "", err
    240  - }
    241  - return string(buf), nil
    242  -}
    243  - 
    244  -type ImmutableTree struct {
    245  - Addr hash.Hash
    246  - buf []byte
    247  - ns NodeStore
    248  -}
    249  - 
    250  -func NewImmutableTreeFromReader(ctx context.Context, r io.Reader, ns NodeStore, chunkSize int) (*ImmutableTree, error) {
    251  - s := message.NewBlobSerializer(ns.Pool())
    252  - root, err := buildImmutableTree(ctx, r, ns, s, chunkSize)
    253  - if errors.Is(err, io.EOF) {
    254  - return &ImmutableTree{Addr: hash.Hash{}}, nil
    255  - } else if err != nil {
    256  - return nil, err
    257  - }
    258  - return &ImmutableTree{Addr: root.HashOf()}, nil
    259  -}
    260  - 
    261  -func (t *ImmutableTree) load(ctx context.Context) error {
    262  - if t.Addr.IsEmpty() {
    263  - t.buf = []byte{}
    264  - return nil
    265  - }
    266  - n, err := t.ns.Read(ctx, t.Addr)
    267  - if err != nil {
    268  - return err
    269  - }
    270  - 
    271  - return WalkNodes(ctx, n, t.ns, func(ctx context.Context, n Node) error {
    272  - if n.IsLeaf() {
    273  - t.buf = append(t.buf, n.GetValue(0)...)
    274  - }
    275  - return nil
    276  - })
    277  -}
    278  - 
    279  -func (t *ImmutableTree) bytes(ctx context.Context) ([]byte, error) {
    280  - if t.buf == nil {
    281  - err := t.load(ctx)
    282  - if err != nil {
    283  - return nil, err
    284  - }
    285  - }
    286  - return t.buf[:], nil
    287  -}
    288  - 
    289  -func (t *ImmutableTree) next() (Node, error) {
    290  - panic("not implemented")
    291  -}
    292  - 
    293  -func (t *ImmutableTree) close() error {
    294  - panic("not implemented")
    295  -}
    296  - 
    297  -func (t *ImmutableTree) Read(buf bytes.Buffer) (int, error) {
    298  - panic("not implemented")
    299  -}
    300  - 
  • ■ ■ ■ ■ ■ ■
    go/store/prolly/tree/node.go
    skipped 112 lines
    113 113   })
    114 114  }
    115 115   
      116 +// nodeArena amortizes allocations by handing out Nodes from preallocated batches.
      117 +type nodeArena []Node
      118 + 
      119 +const nodeArenaSize = 10000
      120 + 
      121 +// Get pops a zero-value Node from the arena, refilling it in batches of nodeArenaSize.
      122 +func (a *nodeArena) Get() Node {
     121 + if len(*a) == 0 {
     122 + *a = make([]Node, nodeArenaSize)
     123 + }
     124 + n := (*a)[len(*a)-1]
     125 + *a = (*a)[:len(*a)-1]
     126 + return n
     127 +}
     128 + 
     129 +func (a *nodeArena) NodeFromBytes(msg []byte) (Node, error) {
     130 + keys, values, level, count, err := message.UnpackFields(msg)
     131 + if err != nil {
     132 + return Node{}, err
     133 + }
     134 + n := a.Get()
     135 + n.keys = keys
     136 + n.values = values
     137 + n.count = count
     138 + n.level = level
     139 + n.msg = msg
     140 + return n, nil
     141 +}
     142 + 
    116 143  func NodeFromBytes(msg []byte) (Node, error) {
    117 144   keys, values, level, count, err := message.UnpackFields(msg)
    118 145   return Node{
    skipped 147 lines
  • ■ ■ ■ ■ ■ ■
    go/store/prolly/tree/node_store.go
    skipped 43 lines
    44 44   
    45 45   // Format returns the types.NomsBinFormat of this NodeStore.
    46 46   Format() *types.NomsBinFormat
     47 + 
     48 + BlobBuilder() *BlobBuilder
    47 49  }
    48 50   
    49 51  type nodeStore struct {
    50 52   store chunks.ChunkStore
    51 53   cache nodeCache
    52 54   bp pool.BuffPool
     55 + bbp *sync.Pool
    53 56  }
    54 57   
    55 58  var _ NodeStore = nodeStore{}
    skipped 2 lines
    58 61   
    59 62  var sharedPool = pool.NewBuffPool()
    60 63   
     64 +var blobBuilderPool = sync.Pool{
     65 + New: func() any {
     66 + return mustNewBlobBuilder(DefaultFixedChunkLength)
     67 + },
     68 +}
     69 + 
    61 70  // NewNodeStore makes a new NodeStore.
    62 71  func NewNodeStore(cs chunks.ChunkStore) NodeStore {
    63 72   return nodeStore{
    64 73   store: cs,
    65 74   cache: sharedCache,
    66 75   bp: sharedPool,
     76 + bbp: &blobBuilderPool,
    67 77   }
    68 78  }
    69 79   
    skipped 77 lines
    147 157  // Pool implements NodeStore.
    148 158  func (ns nodeStore) Pool() pool.BuffPool {
    149 159   return ns.bp
     160 +}
     161 + 
     162 +// BlobBuilder implements NodeStore.
     163 +func (ns nodeStore) BlobBuilder() *BlobBuilder {
     164 + bb := ns.bbp.Get().(*BlobBuilder)
     165 + if bb.ns == nil {
     166 + bb.SetNodeStore(ns)
     167 + }
     168 + return bb
    150 169  }
    151 170   
    152 171  func (ns nodeStore) Format() *types.NomsBinFormat {
    skipped 7 lines
  • ■ ■ ■ ■ ■
    go/store/prolly/tree/testutils.go
    skipped 20 lines
    21 21   "math"
    22 22   "math/rand"
    23 23   "sort"
     24 + "sync"
    24 25   
    25 26   "github.com/dolthub/dolt/go/store/chunks"
    26 27   "github.com/dolthub/dolt/go/store/hash"
    skipped 213 lines
    240 241   testRand.Read(buf)
    241 242   tb.PutCommitAddr(idx, hash.New(buf))
    242 243   case val.BytesAddrEnc, val.StringAddrEnc, val.JSONAddrEnc:
    243  - buf := make([]byte, (testRand.Int63()%40)+10)
      244 + sz := (testRand.Int63() % 40) + 10
      245 + buf := make([]byte, sz)
    244 246   testRand.Read(buf)
    245  - tree, err := NewImmutableTreeFromReader(context.Background(), bytes.NewReader(buf), ns, DefaultFixedChunkLength)
     247 + bb := ns.BlobBuilder()
      248 + bb.Init(int(sz))
     249 + _, addr, err := bb.Chunk(context.Background(), bytes.NewReader(buf))
    246 250   if err != nil {
    247 251   panic("failed to write bytes tree")
    248 252   }
    249  - tb.PutBytesAddr(idx, tree.Addr)
     253 + tb.PutBytesAddr(idx, addr)
    250 254   default:
    251 255   panic("unknown encoding")
    252 256   }
    skipped 2 lines
    255 259  func NewTestNodeStore() NodeStore {
    256 260   ts := &chunks.TestStorage{}
    257 261   ns := NewNodeStore(ts.NewViewWithFormat(types.Format_DOLT.VersionString()))
    258  - return nodeStoreValidator{ns: ns}
     262 + bb := &blobBuilderPool
     263 + return nodeStoreValidator{ns: ns, bb: bb}
    259 264  }
    260 265   
    261 266  type nodeStoreValidator struct {
    262 267   ns NodeStore
     268 + bb *sync.Pool
    263 269  }
    264 270   
    265 271  func (v nodeStoreValidator) Read(ctx context.Context, ref hash.Hash) (Node, error) {
    skipped 41 lines
    307 313   
    308 314  func (v nodeStoreValidator) Pool() pool.BuffPool {
    309 315   return v.ns.Pool()
     316 +}
     317 + 
     318 +func (v nodeStoreValidator) BlobBuilder() *BlobBuilder {
     319 + bb := v.bb.Get().(*BlobBuilder)
     320 + if bb.ns == nil {
     321 + bb.SetNodeStore(v)
     322 + }
     323 + return bb
    310 324  }
    311 325   
    312 326  func (v nodeStoreValidator) Format() *types.NomsBinFormat {
    skipped 3 lines
  • ■ ■ ■ ■ ■
    go/store/prolly/tuple_map.go
    skipped 117 lines
    118 118  }
    119 119   
    120 120  func DiffMaps(ctx context.Context, from, to Map, cb tree.DiffFn) error {
    121  - return tree.DiffOrderedTrees(ctx, from.tuples, to.tuples, cb)
     121 + return tree.DiffOrderedTrees(ctx, from.tuples, to.tuples, makeDiffCallBack(from, to, cb))
    122 122  }
    123 123   
    124 124  // RangeDiffMaps returns diffs within a Range. See Range for which diffs are
    skipped 28 lines
    153 153   return err
    154 154   }
    155 155   
     156 + dcb := makeDiffCallBack(from, to, cb)
     157 + 
    156 158   for {
    157 159   var diff tree.Diff
    158 160   if diff, err = differ.Next(ctx); err != nil {
    159 161   break
    160 162   }
    161 163   
    162  - if err = cb(ctx, diff); err != nil {
     164 + if err = dcb(ctx, diff); err != nil {
    163 165   break
    164 166   }
    165 167   }
    skipped 4 lines
    170 172  // specified by |start| and |stop|. If |start| and/or |stop| is null, then the
    171 173  // range is unbounded towards that end.
    172 174  func DiffMapsKeyRange(ctx context.Context, from, to Map, start, stop val.Tuple, cb tree.DiffFn) error {
    173  - return tree.DiffKeyRangeOrderedTrees(ctx, from.tuples, to.tuples, start, stop, cb)
     175 + return tree.DiffKeyRangeOrderedTrees(ctx, from.tuples, to.tuples, start, stop, makeDiffCallBack(from, to, cb))
     176 +}
     177 + 
     178 +func makeDiffCallBack(from, to Map, innerCb tree.DiffFn) tree.DiffFn {
     179 + if !from.valDesc.Equals(to.valDesc) {
     180 + return innerCb
     181 + }
     182 + 
     183 + return func(ctx context.Context, diff tree.Diff) error {
      184 + // Skip diffs produced by non-canonical tuples. A canonical tuple is
      185 + // one whose null suffixes have been trimmed.
     186 + if diff.Type == tree.ModifiedDiff &&
     187 + from.valDesc.Compare(val.Tuple(diff.From), val.Tuple(diff.To)) == 0 {
     188 + return nil
     189 + }
     190 + return innerCb(ctx, diff)
     191 + }
    174 192  }
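The guard above is an instance of a general callback-filtering pattern; a self-contained sketch with hypothetical names (diffEvent and wrapSkipNoop are illustrative, not dolt API):

  type diffEvent struct{ from, to string }

  func wrapSkipNoop(cb func(diffEvent) error, equal func(a, b string) bool) func(diffEvent) error {
      return func(d diffEvent) error {
          if equal(d.from, d.to) {
              return nil // logically equal values produce no user-visible diff
          }
          return cb(d)
      }
  }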
    175 193   
    176 194  func MergeMaps(ctx context.Context, left, right, base Map, cb tree.CollisionFn) (Map, tree.MergeStats, error) {
    skipped 252 lines
  • ■ ■ ■ ■ ■ ■
    integration-tests/MySQLDockerfile
    skipped 24 lines
    25 25   pkg-config \
    26 26   mysql-client \
    27 27   libmysqlclient-dev \
    28  - openjdk-8-jdk \
     28 + openjdk-17-jdk \
    29 29   ant \
    30 30   ca-certificates-java \
    31 31   bats \
    skipped 54 lines
    86 86  RUN pip install mysql-connector-python PyMySQL sqlalchemy
    87 87   
    88 88  # Setup JAVA_HOME -- useful for docker commandline
    89  -ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64/
     89 +ENV JAVA_HOME /usr/lib/jvm/java-17-openjdk-amd64/
    90 90  
    91 91  # install mysql connector java
    92 92  RUN mkdir -p /mysql-client-tests/java
    skipped 44 lines
  • ■ ■ ■ ■ ■ ■
    integration-tests/ORMDockerfile
    skipped 15 lines
    16 16   git \
    17 17   mysql-client \
    18 18   libmysqlclient-dev \
      19 + # odd issue: installing openjdk-17-jdk fails unless another package (here, maven) is installed after it
     20 + openjdk-17-jdk \
     21 + # currently, `apt install maven` installs v3.6.0 which does not work with openjdk-17-jdk
     22 + maven \
    19 23   bats && \
    20 24   update-ca-certificates -f
    21 25   
    skipped 12 lines
    34 38   
    35 39  # install mysql connector and pymsql
    36 40  RUN pip3 install mysql-connector-python PyMySQL sqlalchemy
     41 + 
     42 +# Setup JAVA_HOME -- useful for docker commandline
     43 +ENV JAVA_HOME /usr/lib/jvm/java-17-openjdk-amd64/
     44 + 
      45 +# install the latest maven release, `v3.8.6`, because the apt-installed one does not work with jdk 17
     46 +ADD https://apache.osuosl.org/maven/maven-3/3.8.6/binaries/apache-maven-3.8.6-bin.tar.gz apache-maven-3.8.6-bin.tar.gz
     47 +RUN tar zxvf apache-maven-3.8.6-bin.tar.gz && \
     48 + cp -r apache-maven-3.8.6 /opt && \
     49 + rm -rf apache-maven-3.8.6 apache-maven-3.8.6-bin.tar.gz
     50 + 
     51 +# add maven binary
     52 +ENV PATH /opt/apache-maven-3.8.6/bin:$PATH
    37 53   
    38 54  # install dolt from source
    39 55  WORKDIR /root/building
    skipped 10 lines
  • ■ ■ ■ ■ ■ ■
    integration-tests/bats/column_tags.bats
    skipped 290 lines
    291 291   [[ $output =~ "col1 | 10186" ]] || false
    292 292  }
    293 293   
     294 +@test "column_tags: update-tag only available on __DOLT__" {
     295 + mkdir ld
     296 + mkdir dev
     297 + 
     298 + cd ld
     299 + DOLT_DEFAULT_BIN_FORMAT=__LD_1__ dolt init
     300 + run dolt schema update-tag t col 5
     301 + [ $status -ne 0 ]
     303 + [[ $output =~ "update-tag is only available in storage format __DOLT__" ]] || false
     304 + 
     305 + cd ../dev
     306 + DOLT_DEFAULT_BIN_FORMAT=__DOLT_DEV__ dolt init
     307 + run dolt schema update-tag t col 5
     308 + [ $status -ne 0 ]
     309 + [[ $output =~ "update-tag is only available in storage format __DOLT__" ]] || false
     310 +}
     311 + 
     312 +@test "column_tags: update-tag updates a columns tag" {
     313 + skip_nbf_not_dolt
     314 + 
     315 + dolt sql -q "CREATE TABLE t (pk INT PRIMARY KEY, col1 int);"
     316 + run dolt schema tags
     317 + [ $status -eq 0 ]
     318 + [[ $output =~ "pk | 15476" ]] || false
     319 + [[ $output =~ "col1 | 10878" ]] || false
     320 + 
     321 + dolt schema update-tag t pk 5
     322 + run dolt schema tags
     323 + [ $status -eq 0 ]
     324 + [[ $output =~ "pk | 5" ]] || false
     325 + [[ $output =~ "col1 | 10878" ]] || false
     326 + 
     327 + dolt schema update-tag t col1 6
     328 + run dolt schema tags
     329 + [ $status -eq 0 ]
     330 + [[ $output =~ "pk | 5" ]] || false
     331 + [[ $output =~ "col1 | 6" ]] || false
     332 +}
     333 + 
     334 +@test "column_tags: create table on two separate branches, merge them together by updating tags" {
     335 + skip_nbf_not_dolt
     336 + 
     337 + dolt branch other
     338 + dolt sql -q "CREATE TABLE t (pk int PRIMARY KEY, col1 int);"
     339 + dolt sql -q "INSERT INTO t VALUES (1, 1);"
     340 + dolt commit -Am "unrelated table"
     341 + 
     342 + dolt sql -q "CREATE table target (pk int PRIMARY KEY, col1 int);"
     343 + dolt sql -q "INSERT into target VALUES (1, 1);"
     344 + dolt commit -Am "table target on main branch"
     345 + 
     346 + dolt checkout other
     347 + dolt sql -q "CREATE table target (pk int PRIMARY KEY, badCol int, col1 int);"
     348 + dolt sql -q "INSERT INTO target VALUES (2, 2, 2);"
     349 + dolt commit -Am "table target on other branch"
     350 + dolt sql -q "ALTER TABLE target DROP COLUMN badCol;"
     351 + dolt commit -Am "fixup"
     352 + 
     353 + run dolt schema tags
     354 + [[ $output =~ "| target | col1 | 14690 |" ]] || false
     355 + 
     356 + dolt checkout main
     357 + 
     358 + run dolt schema tags
     359 + [ $status -eq 0 ]
     360 + [[ $output =~ "| target | col1 | 14649 |" ]] || false
     361 + 
     362 + run dolt merge other
     363 + [ $status -ne 0 ]
     364 + [[ $output =~ "table with same name added in 2 commits can't be merged" ]] || false
     365 + dolt reset --hard
     366 + 
     367 + dolt schema update-tag target col1 14690
     368 + dolt commit -am "update tag of col1 of target"
     369 + 
     370 + run dolt merge other -m "merge other into main"
     371 + [ $status -eq 0 ]
     372 + [[ $output =~ "1 tables changed, 1 rows added(+)" ]] || false
     373 + 
     374 + run dolt sql -r csv -q "select * from target;"
     375 + [ $status -eq 0 ]
     376 + [[ $output =~ "1,1" ]] || false
     377 + [[ $output =~ "2,2" ]] || false
     378 +}
     379 + 
  • ■ ■ ■ ■
    integration-tests/bats/docs.bats
    skipped 110 lines
    111 111   dolt sql <<SQL
    112 112  CREATE TABLE dolt_docs (
    113 113   doc_name varchar(16383) NOT NULL,
    114  - doc_text varchar(16383),
     114 + doc_text longtext,
    115 115   PRIMARY KEY (doc_name)
    116 116  );
    117 117  SQL
    skipped 38 lines
  • ■ ■ ■ ■ ■ ■
    integration-tests/bats/import-update-tables.bats
    skipped 1240 lines
    1241 1241   [[ "$output" =~ "with the following values left over: '[\"\"]'" ]] || false
    1242 1242  }
    1243 1243   
     1244 +@test "import-update-tables: incorrect values default to zero value when --continue is passed" {
     1245 + dolt sql <<SQL
     1246 +CREATE TABLE t (
     1247 + pk int primary key,
     1248 + col1 boolean,
     1249 + col2 integer,
     1250 + col3 tinyint,
     1251 + col4 smallint,
     1252 + col5 mediumint,
     1253 + col6 int,
     1254 + col7 bigint,
     1255 + col8 decimal,
     1256 + col9 float,
     1257 + col10 double,
     1258 + col11 date,
     1259 + col12 time,
     1260 + col13 datetime,
     1261 + col14 timestamp,
     1262 + col15 year,
     1263 + col16 ENUM('first', 'second'),
     1264 + col17 SET('a', 'b'),
     1265 + col18 JSON
     1266 +);
     1267 +SQL
     1268 + dolt commit -Am "add table"
     1269 + 
     1270 + cat <<DELIM > bad-updates.csv
     1271 +pk,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11,col12,col13,col14,col15,col16,col17,col18
     1272 +1,val1,val2,val3,val4,val5,val6,val7,val8,val9,val10,val11,val12,val13,val14,val15,val16,val17,val18
     1273 +DELIM
      1274 + # if a bad JSON value is encountered with insert ignore, MySQL throws an error,
      1275 + # so dolt table import skips the row instead.
     1276 + run dolt table import -u t --continue bad-updates.csv
     1277 + [ $status -eq 0 ]
     1278 + [[ $output =~ "The following rows were skipped:" ]] || false
     1279 + [[ $output =~ "[1,val1,val2,val3,val4,val5,val6,val7,val8,val9,val10,val11,val12,val13,val14,val15,val16,val17,val18]" ]] || false
     1280 + 
     1281 + run dolt sql -r csv -q "select count(*) from t;"
     1282 + [[ $output =~ "0" ]] || false
     1283 + 
     1284 + dolt sql -q "alter table t drop column col18;"
     1285 + dolt commit -Am "drop json column"
     1286 + 
     1287 + cat <<DELIM > bad-updates.csv
     1288 +pk,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11,col12,col13,col14,col15,col16,col17
     1289 +1,val1,val2,val3,val4,val5,val6,val7,val8,val9,val10,val11,val12,val13,val14,val15,val16,val17
     1290 +DELIM
     1291 + run dolt table import -u t --continue bad-updates.csv
     1292 + [ $status -eq 0 ]
     1293 + [[ "$output" =~ "Rows Processed: 1, Additions: 1, Modifications: 0, Had No Effect: 0" ]] || false
     1294 + 
     1295 + run dolt sql -r csv -q "select * from t;"
     1296 + [ $status -eq 0 ]
     1297 + [[ "$output" =~ '1,0,0,0,0,0,0,0,0,0,0,0000-00-00,00:00:00,0000-00-00 00:00:00,0000-00-00 00:00:00,0,first,""' ]] || false
     1298 +}
     1299 + 
  • ■ ■ ■ ■ ■ ■
    integration-tests/bats/init.bats
    skipped 151 lines
    152 152   [ "$status" -eq 1 ]
    153 153  }
    154 154   
     155 +@test "init: running init with invalid argument or option fails" {
     156 + set_dolt_user "baz", "[email protected]"
     157 + 
     158 + run dolt init invalid
     159 + [ "$status" -eq 1 ]
     160 + [[ "$output" =~ "error: invalid arguments" ]] || false
     161 + 
     162 + run dolt init --invalid
     163 + [ "$status" -eq 1 ]
     164 + [[ "$output" =~ "error: unknown option" ]] || false
     165 +}
     166 + 
    155 167  @test "init: running init with the new format, creates a new format database" {
    156 168   set_dolt_user "baz", "[email protected]"
    157 169   
    skipped 136 lines
  • ■ ■ ■ ■ ■ ■
    integration-tests/bats/migrate.bats
    skipped 266 lines
    267 267   [[ ! "$output" =~ "beta" ]] || false
    268 268  }
    269 269   
     270 +@test "migrate: --drop-conflicts drops conflicts on migrate" {
     271 + dolt sql <<SQL
     272 +CREATE TABLE test (pk int primary key, c0 int, c1 int);
     273 +INSERT INTO test VALUES (0,0,0);
     274 +CALL dcommit('-Am', 'added table test');
     275 +CALL dcheckout('-b', 'other');
     276 +CALL dbranch('third');
     277 +INSERT INTO test VALUES (1, 2, 3);
     278 +CALL dcommit('-am', 'added row on branch other');
     279 +CALL dcheckout('main');
     280 +INSERT INTO test VALUES (1, -2, -3);
     281 +CALL dcommit('-am', 'added row on branch main');
     282 +SET @@dolt_allow_commit_conflicts = 1;
     283 +CALL dmerge('other');
     284 +INSERT INTO test VALUES (9,9,9);
     285 +SET @@dolt_allow_commit_conflicts = 1;
     286 +SET @@dolt_force_transaction_commit = 1;
     287 +CALL dcommit( '--force', '-am', 'commit conflicts');
     288 +CALL dcheckout('third');
     289 +SQL
     290 + dolt migrate --drop-conflicts
     291 +}
     292 + 
     293 +@test "migrate: no panic for migration on migrated database" {
     294 + dolt sql <<SQL
     295 +CREATE TABLE test (pk int primary key, c0 int, c1 int);
     296 +INSERT INTO test VALUES (0,0,0);
     297 +CALL dadd('-A');
     298 +CALL dcommit('-am', 'added table test');
     299 +SQL
     300 + dolt migrate
     301 + run dolt migrate
     302 + [ $status -eq 0 ]
     303 + [[ "$output" =~ "already migrated" ]] || false
     304 +}
     305 + 
  • ■ ■ ■ ■
    integration-tests/bats/primary-key-changes.bats
    skipped 341 lines
    342 342   
    343 343   run dolt diff --summary
    344 344   [ "$status" -eq 1 ]
    345  - [[ "$output" =~ "diff summary will not compute due to primary key set change with table t" ]] || false
     345 + [[ "$output" =~ "failed to compute diff summary for table t: primary key set changed" ]] || false
    346 346   
    347 347   dolt add .
    348 348   
    skipped 363 lines
  • ■ ■ ■ ■ ■ ■
    integration-tests/bats/sql.bats
    skipped 2433 lines
    2434 2434   [[ "$output" =~ "| 3 |" ]] || false
    2435 2435  }
    2436 2436   
     2437 +@test "sql: dolt diff table correctly works with NOT and/or IS NULL" {
     2438 + dolt sql -q "CREATE TABLE t(pk int primary key);"
     2439 + dolt add .
     2440 + dolt commit -m "new table t"
     2441 + dolt sql -q "INSERT INTO t VALUES (1), (2)"
     2442 + dolt commit -am "add 1, 2"
     2443 + 
     2444 + run dolt sql -q "SELECT COUNT(*) from dolt_diff_t where from_pk is null"
     2445 + [ "$status" -eq 0 ]
     2446 + [[ "$output" =~ "2" ]] || false
     2447 + 
     2448 + dolt sql -q "UPDATE t SET pk = 3 WHERE pk = 2"
     2449 + dolt commit -am "add 3"
     2450 + 
     2451 + run dolt sql -q "SELECT COUNT(*) from dolt_diff_t where from_pk is not null"
     2452 + [ "$status" -eq 0 ]
     2453 + [[ "$output" =~ "1" ]] || false
     2454 +}
     2455 + 
     2456 +@test "sql: dolt diff table correctly works with datetime comparisons" {
     2457 + dolt sql -q "CREATE TABLE t(pk int primary key);"
     2458 + dolt add .
     2459 + dolt commit -m "new table t"
     2460 + dolt sql -q "INSERT INTO t VALUES (1), (2), (3)"
     2461 + dolt commit -am "add 1, 2, 3"
     2462 + 
     2463 + # adds a row and removes a row
     2464 + dolt sql -q "UPDATE t SET pk = 4 WHERE pk = 2"
     2465 + 
     2466 + run dolt sql -q "SELECT COUNT(*) from dolt_diff_t where to_commit_date is not null"
     2467 + [ "$status" -eq 0 ]
     2468 + [[ "$output" =~ "3" ]] || false
     2469 + 
     2470 + run dolt sql -q "SELECT COUNT(*) from dolt_diff_t where to_commit_date < now()"
     2471 + [ "$status" -eq 0 ]
     2472 + [[ "$output" =~ "3" ]] || false
     2473 +}
     2474 + 
    2437 2475  @test "sql: sql print on order by returns the correct result" {
    2438 2476   dolt sql -q "CREATE TABLE mytable(pk int primary key);"
    2439 2477   dolt sql -q "INSERT INTO mytable VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11),(12),(13),(14),(15),(16),(17),(18),(19),(20)"
    skipped 190 lines
  • ■ ■ ■ ■ ■ ■
    integration-tests/orm-tests/hibernate/DoltHibernateSmokeTest/pom.xml
     1 +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
     2 + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
     3 + <modelVersion>4.0.0</modelVersion>
     4 + <groupId>com.dolt.hibernate</groupId>
     5 + <artifactId>DoltHibernateSmokeTest</artifactId>
     6 + <packaging>jar</packaging>
     7 + <version>1.0.0</version>
     8 + <name>DoltHibernateSmokeTest</name>
     9 + <url>http://maven.apache.org</url>
     10 + 
     11 + <properties>
     12 + <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     13 + </properties>
     14 + 
     15 + <dependencies>
     16 + <!-- Hibernate -->
      17 + <!-- a version range of [5.6.14,) resolves to the latest release, including broken beta builds, so the version is pinned -->
     18 + <dependency>
     19 + <groupId>org.hibernate</groupId>
     20 + <artifactId>hibernate-core</artifactId>
     21 + <version>5.6.14.Final</version>
     22 + </dependency>
     23 + <dependency>
     24 + <groupId>org.hibernate</groupId>
     25 + <artifactId>hibernate-entitymanager</artifactId>
     26 + <version>5.6.14.Final</version>
     27 + </dependency>
     28 + 
     29 + <!-- MySQL -->
     30 + <dependency>
     31 + <groupId>com.mysql</groupId>
     32 + <artifactId>mysql-connector-j</artifactId>
     33 + <version>8.0.31</version>
     34 + </dependency>
     35 + </dependencies>
     36 + 
     37 + <build>
     38 + <pluginManagement>
     39 + <plugins>
     40 + <plugin>
     41 + <groupId>org.apache.maven.plugins</groupId>
     42 + <artifactId>maven-compiler-plugin</artifactId>
     43 + <version>3.10.1</version>
     44 + <configuration>
     45 + <source>1.8</source>
     46 + <target>1.8</target>
     47 + </configuration>
     48 + </plugin>
     49 + <plugin>
     50 + <groupId>org.codehaus.mojo</groupId>
     51 + <artifactId>exec-maven-plugin</artifactId>
     52 + <version>3.1.0</version>
     53 + <configuration>
     54 + <mainClass>com.dolt.hibernate.Test</mainClass>
     55 + </configuration>
     56 + </plugin>
     57 + </plugins>
     58 + </pluginManagement>
     59 + </build>
     60 +</project>
     61 + 
  • ■ ■ ■ ■ ■ ■
    integration-tests/orm-tests/hibernate/DoltHibernateSmokeTest/src/main/java/com/dolt/hibernate/Test.java
     1 +package com.dolt.hibernate;
     2 + 
     3 +import java.util.List;
     4 +import org.hibernate.Session;
     5 +import com.dolt.hibernate.model.Student;
     6 + 
     7 +/**
      8 + * Class used to perform CRUD operations on the database with Hibernate APIs
     9 + *
     10 + */
     11 +public class Test {
     12 + 
     13 + @SuppressWarnings("unused")
     14 + public static void main(String[] args) {
     15 + 
     16 + Test application = new Test();
     17 + 
     18 + /*
      19 + * Save a few objects with Hibernate
     20 + */
     21 + int studentId1 = application.saveStudent("Sam", "Disilva", "Maths");
     22 + int studentId2 = application.saveStudent("Joshua", "Brill", "Science");
     23 + int studentId3 = application.saveStudent("Peter", "Pan", "Physics");
     24 + int studentId4 = application.saveStudent("Bill", "Laurent", "Maths");
     25 + 
     26 + /*
     27 + * Retrieve all saved objects
     28 + */
     29 + List<Student> students = application.getAllStudents();
     30 + System.out.println("List of all persisted students >>>");
     31 + for (Student student : students) {
     32 + System.out.println("Persisted Student :" + student);
     33 + }
     34 + 
     35 + /*
     36 + * Update an object
     37 + */
     38 + application.updateStudent(studentId4, "ARTS");
     39 + 
     40 + /*
     41 + * Deletes an object
     42 + */
     43 + application.deleteStudent(studentId2);
     44 + 
     45 + /*
     46 + * Retrieve all saved objects
     47 + */
      48 + List<Student> remainingStudents = application.getAllStudents();
      49 + System.out.println("List of all remaining persisted students >>>");
      50 + for (Student student : remainingStudents) {
     51 + System.out.println("Persisted Student :" + student);
     52 + }
     53 + 
     54 + }
     55 + 
     56 + /**
     57 + * This method saves a Student object in database
     58 + */
     59 + public int saveStudent(String firstName, String lastName, String section) {
     60 + Student student = new Student();
     61 + student.setFirstName(firstName);
     62 + student.setLastName(lastName);
     63 + student.setSection(section);
     64 + 
     65 + Session session = Util.getSessionFactory().openSession();
     66 + session.beginTransaction();
     67 + 
     68 + int id = (Integer) session.save(student);
     69 + session.getTransaction().commit();
     70 + session.close();
     71 + return id;
     72 + }
     73 + 
     74 + /**
     75 + * This method returns list of all persisted Student objects/tuples from
     76 + * database
     77 + */
     78 + public List<Student> getAllStudents() {
     79 + Session session = Util.getSessionFactory().openSession();
     80 + session.beginTransaction();
     81 + 
     82 + @SuppressWarnings("unchecked")
      83 + List<Student> students = (List<Student>) session.createQuery(
      84 + "FROM Student s ORDER BY s.firstName ASC").list();
      85 + 
      86 + session.getTransaction().commit();
      87 + session.close();
      88 + return students;
     89 + }
     90 + 
     91 + /**
     92 + * This method updates a specific Student object
     93 + */
     94 + public void updateStudent(int id, String section) {
     95 + Session session = Util.getSessionFactory().openSession();
     96 + session.beginTransaction();
     97 + 
     98 + Student student = (Student) session.get(Student.class, id);
     99 + student.setSection(section);
      100 + // session.update(student); // no explicit update needed; the change is flushed automatically when the transaction commits.
     101 + session.getTransaction().commit();
     102 + session.close();
     103 + }
     104 + 
     105 + /**
     106 + * This method deletes a specific Student object
     107 + */
     108 + public void deleteStudent(int id) {
     109 + Session session = Util.getSessionFactory().openSession();
     110 + session.beginTransaction();
     111 + 
     112 + Student student = (Student) session.get(Student.class, id);
     113 + session.delete(student);
     114 + session.getTransaction().commit();
     115 + session.close();
     116 + }
     117 +}
     118 + 
  • ■ ■ ■ ■ ■ ■
    integration-tests/orm-tests/hibernate/DoltHibernateSmokeTest/src/main/java/com/dolt/hibernate/Util.java
     1 +package com.dolt.hibernate;
     2 + 
     3 +import org.hibernate.SessionFactory;
     4 +import org.hibernate.cfg.Configuration;
     5 + 
     6 +@SuppressWarnings("deprecation")
     7 +public class Util {
     8 + 
     9 + private static final SessionFactory sessionFactory;
     10 + 
     11 + static{
     12 + try{
     13 + sessionFactory = new Configuration().configure().buildSessionFactory();
     14 + 
     15 + }catch (Throwable ex) {
      16 + System.err.println("Session Factory could not be created: " + ex);
     17 + throw new ExceptionInInitializerError(ex);
     18 + }
     19 + }
     20 + 
     21 + public static SessionFactory getSessionFactory() {
     22 + return sessionFactory;
     23 + }
     24 + 
     25 +}
     26 + 
  • ■ ■ ■ ■ ■ ■
    integration-tests/orm-tests/hibernate/DoltHibernateSmokeTest/src/main/java/com/dolt/hibernate/model/Student.java
     1 +package com.dolt.hibernate.model;
     2 + 
     3 +import java.io.Serializable;
     4 + 
     5 +import javax.persistence.Column;
     6 +import javax.persistence.Entity;
     7 +import javax.persistence.GeneratedValue;
     8 +import javax.persistence.GenerationType;
     9 +import javax.persistence.Id;
     10 +import javax.persistence.Table;
     11 + 
     12 +@Entity
     13 +@Table(name = "STUDENT")
     14 +public class Student implements Serializable {
     15 + 
     16 + @Id
     17 + @GeneratedValue(strategy = GenerationType.IDENTITY)
     18 + private int id;
     19 + 
     20 + @Column(name = "FIRST_NAME", nullable = false)
     21 + private String firstName;
     22 + 
     23 + @Column(name = "LAST_NAME", nullable = false)
     24 + private String lastName;
     25 + 
     26 + @Column(name = "SECTION", nullable = false)
     27 + private String section;
     28 + 
     29 + public int getId() {
     30 + return id;
     31 + }
     32 + 
     33 + public void setId(int id) {
     34 + this.id = id;
     35 + }
     36 + 
     37 + public String getFirstName() {
     38 + return firstName;
     39 + }
     40 + 
     41 + public void setFirstName(String firstName) {
     42 + this.firstName = firstName;
     43 + }
     44 + 
     45 + public String getLastName() {
     46 + return lastName;
     47 + }
     48 + 
     49 + public void setLastName(String lastName) {
     50 + this.lastName = lastName;
     51 + }
     52 + 
     53 + public String getSection() {
     54 + return section;
     55 + }
     56 + 
     57 + public void setSection(String section) {
     58 + this.section = section;
     59 + }
     60 + 
     61 + @Override
     62 + public int hashCode() {
     63 + final int prime = 31;
     64 + int result = 1;
     65 + result = prime * result + id;
     66 + return result;
     67 + }
     68 + 
     69 + @Override
     70 + public boolean equals(Object obj) {
     71 + if (this == obj)
     72 + return true;
     73 + if (obj == null)
     74 + return false;
     75 + if (!(obj instanceof Student))
     76 + return false;
     77 + Student other = (Student) obj;
     78 + if (id != other.id)
     79 + return false;
     80 + return true;
     81 + }
     82 + 
     83 + @Override
     84 + public String toString() {
     85 + return "Student [id=" + id + ", firstName=" + firstName + ", lastName="
     86 + + lastName + ", section=" + section + "]";
     87 + }
     88 + 
     89 +}
     90 + 
  • ■ ■ ■ ■ ■ ■
    integration-tests/orm-tests/hibernate/DoltHibernateSmokeTest/src/main/resources/hibernate.cfg.xml
     1 +<?xml version="1.0" encoding="utf-8"?>
     2 +<!DOCTYPE hibernate-configuration SYSTEM "http://www.hibernate.org/dtd/hibernate-configuration-3.0.dtd">
     3 + 
     4 + 
     5 +<hibernate-configuration>
     6 + <session-factory>
     7 + <property name="hibernate.dialect">org.hibernate.dialect.MySQLDialect</property>
     8 + <property name="hibernate.connection.driver_class">com.mysql.jdbc.Driver</property>
     9 + <property name="hibernate.connection.username">dolt</property>
     10 + <property name="hibernate.connection.password"></property>
     11 + <property name="hibernate.connection.url">jdbc:mysql://localhost:3306/dolt</property>
     12 + <property name="show_sql">true</property>
     13 + <property name="format_sql">false</property>
     14 + <mapping class="com.dolt.hibernate.model.Student"/>
     15 + </session-factory>
     16 +</hibernate-configuration>
     17 + 
  • ■ ■ ■ ■ ■ ■
    integration-tests/orm-tests/hibernate/README.md
     1 +# Hibernate-ORM Smoke Test
     2 + 
      3 +The smoke test is run with Maven using the MySQL JDBC driver. To install Maven, see `https://maven.apache.org/install.html`.
     4 + 
      5 +Database settings live in the `hibernate.cfg.xml` file, which is configured to hit a Dolt sql-server
      6 +on the default port, as the user "dolt", with no password, against the database named "dolt".
     7 + 
      8 +The `Test.java` file is the main entry point: it inserts new records into the database, prints the data,
      9 +then updates and deletes rows, prints the data again, and exits with a zero exit code.
      10 +If any errors are encountered, they are logged and the process exits with a non-zero exit code.
     11 + 
      12 +To run this smoke test project, run these commands:
     13 +1. `cd DoltHibernateSmokeTest`
     14 +2. `mvn clean install`
     15 +3. `mvn clean package`
     16 +4. `mvn exec:java`
     17 + 
  • ■ ■ ■ ■ ■ ■
    integration-tests/orm-tests/mikro-orm/README.md
     1 +# Mikro-ORM Smoke Test
     2 + 
     3 +The `index.ts` file is the main entry point and will insert a new record into the database, then load it, print
     4 +success, and exit with a zero exit code. If any errors are encountered, they are logged, and the process exits with a
     5 +non-zero exit code.
     6 + 
     7 +To run this smoke test project:
      8 +1. Run the `npm install` command
      9 +2. Run the `npm start` command
     10 + 
  • ■ ■ ■ ■ ■ ■
    integration-tests/orm-tests/mikro-orm/package.json
     1 +{
     2 + "name": "mikro-orm-smoketest",
     3 + "version": "0.0.1",
     4 + "description": "DoltDB smoke test for Mikro-ORM integration",
     5 + "type": "commonjs",
     6 + "scripts": {
     7 + "start": "ts-node src/index.ts",
     8 + "mikro-orm": "mikro-orm-ts-node-commonjs"
     9 + },
     10 + "devDependencies": {
     11 + "ts-node": "^10.7.0",
     12 + "@types/node": "^16.11.10",
     13 + "typescript": "^4.5.2"
     14 + },
     15 + "dependencies": {
     16 + "@mikro-orm/core": "^5.0.3",
     17 + "@mikro-orm/mysql": "^5.0.3",
     18 + "mysql": "^2.14.1"
     19 + }
     20 +}
     21 + 
     22 + 
  • ■ ■ ■ ■ ■ ■
    integration-tests/orm-tests/mikro-orm/src/entity/User.ts
     1 +import { Entity, PrimaryKey, Property } from "@mikro-orm/core";
     2 + 
     3 +@Entity()
     4 +export class User {
     5 + @PrimaryKey()
     6 + id!: number;
     7 + 
     8 + @Property()
     9 + firstName!: string;
     10 + 
     11 + @Property()
     12 + lastName!: string;
     13 + 
     14 + @Property()
     15 + age!: number;
     16 + 
     17 + constructor(firstName: string, lastName: string, age: number) {
     18 + this.firstName = firstName;
     19 + this.lastName = lastName;
     20 + this.age = age;
     21 + }
     22 +}
     23 + 
  • ■ ■ ■ ■ ■ ■
    integration-tests/orm-tests/mikro-orm/src/index.ts
     1 +import { MikroORM } from "@mikro-orm/core";
     2 +import { MySqlDriver } from '@mikro-orm/mysql';
     3 +import { User } from "./entity/User";
     4 + 
     5 +async function connectAndGetOrm() {
     6 + const orm = await MikroORM.init<MySqlDriver>({
     7 + entities: [User],
     8 + type: "mysql",
     9 + clientUrl: "mysql://localhost:3306",
     10 + dbName: "dolt",
     11 + user: "dolt",
     12 + password: "",
     13 + persistOnCreate: true,
     14 + });
     15 + 
     16 + return orm;
     17 +}
     18 + 
     19 +connectAndGetOrm().then(async orm => {
     20 + console.log("Connected");
     21 + const em = orm.em.fork();
     22 + 
     23 + // create the tables if they don't exist yet
     24 + const generator = orm.getSchemaGenerator();
     25 + await generator.updateSchema();
     26 + 
     27 + console.log("Inserting a new user into the database...");
     28 + const user = new User("Timber", "Saw", 25);
     29 + await em.persistAndFlush(user);
     30 + console.log("Saved a new user with id: " + user.id);
     31 + 
     32 + console.log("Loading a user from the database...");
     33 + const loadedUser = await em.findOne(User, 1);
     34 + console.log("Loaded user: ", loadedUser);
     35 + 
     36 + await orm.close();
     37 + console.log("Smoke test passed!");
     38 + process.exit(0);
     39 +}).catch(error => {
     40 + console.log(error);
     41 + console.log("Smoke test failed!");
     42 + process.exit(1);
     44 + 
  • ■ ■ ■ ■ ■ ■
    integration-tests/orm-tests/mikro-orm/tsconfig.json
     1 +{
     2 + "compilerOptions": {
     3 + "module": "commonjs",
     4 + "declaration": true,
     5 + "removeComments": true,
     6 + "emitDecoratorMetadata": true,
     7 + "esModuleInterop": true,
     8 + "experimentalDecorators": true,
     9 + "target": "es2017",
     10 + "outDir": "./dist",
     11 + "baseUrl": "./src",
     12 + "incremental": true,
     13 + }
     14 +}
     15 + 
  • ■ ■ ■ ■ ■ ■
    integration-tests/orm-tests/orm-tests.bats
    skipped 54 lines
    55 55   npx -c "prisma migrate dev --name init"
    56 56  }
    57 57   
    58  -# Prisma is an ORM for Node/TypeScript applications. This test checks out the Peewee test suite
     58 +# Prisma is an ORM for Node/TypeScript applications. This test checks out the Prisma test suite
    59 59  # and runs it against Dolt.
    60 60  @test "Prisma ORM test suite" {
    61 61   skip "Not implemented yet"
    skipped 13 lines
    75 75   cd typeorm
    76 76   npm install
    77 77   npm start
     78 +}
     79 + 
     80 +# MikroORM is an ORM for Node/TypeScript applications. This is a simple smoke test to make sure
     81 +# Dolt can support the most basic MikroORM operations.
     82 +@test "MikroORM smoke test" {
     83 + mysql --protocol TCP -u dolt -e "create database dolt;"
     84 + 
     85 + cd mikro-orm
     86 + npm install
     87 + npm start
     88 +}
     89 + 
     90 +# Hibernate is an ORM for Java applications using JDBC driver. This is a simple smoke test to make sure
     91 +# Dolt can support the most basic Hibernate operations.
     92 +@test "Hibernate smoke test" {
     93 + # need to create tables for it before running the test
     94 + mysql --protocol TCP -u dolt -e "create database dolt; use dolt; create table STUDENT (id INT NOT NULL auto_increment PRIMARY KEY, first_name VARCHAR(30) NOT NULL, last_name VARCHAR(30) NOT NULL, section VARCHAR(30) NOT NULL);"
     95 + 
     96 + cd hibernate/DoltHibernateSmokeTest
     97 + mvn clean install
     98 + mvn clean package
     99 + mvn exec:java
    78 100  }
    79 101   
    80 102  # Turn this test on to prevent the container from exiting if you need to exec a shell into
    skipped 4 lines