2018-05-26 20:32:15 +08:00
|
|
|
package abstract_sql
|
|
|
|
|
|
|
|
import (
|
2019-03-16 06:55:34 +08:00
|
|
|
"context"
|
2018-05-26 20:32:15 +08:00
|
|
|
"database/sql"
|
2018-05-28 02:52:26 +08:00
|
|
|
"fmt"
|
2020-09-01 15:21:19 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/filer"
|
2018-05-26 20:32:15 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/glog"
|
2020-03-08 09:01:39 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
|
2020-01-20 04:06:19 +08:00
|
|
|
"github.com/chrislusf/seaweedfs/weed/util"
|
2020-09-03 01:09:49 +08:00
|
|
|
"strings"
|
2021-01-20 05:53:16 +08:00
|
|
|
"sync"
|
2018-05-26 20:32:15 +08:00
|
|
|
)
|
|
|
|
|
2021-01-20 05:53:16 +08:00
|
|
|
// SqlGenerator produces the dialect-specific SQL statement text used by
// AbstractSqlStore. Each method returns the statement for the given bucket
// (table name), which allows one table per bucket when supported.
type SqlGenerator interface {
	GetSqlInsert(bucket string) string
	GetSqlUpdate(bucket string) string
	GetSqlFind(bucket string) string
	GetSqlDelete(bucket string) string
	GetSqlDeleteFolderChildren(bucket string) string
	// GetSqlListExclusive lists entries strictly after the start file name.
	GetSqlListExclusive(bucket string) string
	// GetSqlListInclusive lists entries including the start file name.
	GetSqlListInclusive(bucket string) string
	GetSqlCreateTable(bucket string) string
	GetSqlDropTable(bucket string) string
}
|
|
|
|
|
2018-05-26 20:32:15 +08:00
|
|
|
// AbstractSqlStore is the shared filer-store implementation for SQL-backed
// stores. The embedded SqlGenerator supplies the dialect-specific statements.
type AbstractSqlStore struct {
	SqlGenerator
	DB *sql.DB
	// SupportBucketTable enables a dedicated table per bucket under /buckets/.
	SupportBucketTable bool
	// dbs records which bucket tables have already been created; guarded by dbsLock.
	dbs     map[string]bool
	dbsLock sync.Mutex
}
|
|
|
|
|
2021-01-20 04:34:58 +08:00
|
|
|
const (
	// DEFAULT_TABLE is the shared table used for non-bucket paths, or when
	// SupportBucketTable is disabled.
	DEFAULT_TABLE = "filemeta"
)
|
|
|
|
|
2019-03-31 14:08:29 +08:00
|
|
|
// TxOrDB abstracts over *sql.Tx and *sql.DB so store operations can run either
// inside an explicit transaction or directly against the connection pool.
type TxOrDB interface {
	ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
	QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
	QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
}
|
|
|
|
|
|
|
|
func (store *AbstractSqlStore) BeginTransaction(ctx context.Context) (context.Context, error) {
|
2019-04-02 02:03:04 +08:00
|
|
|
tx, err := store.DB.BeginTx(ctx, &sql.TxOptions{
|
|
|
|
Isolation: sql.LevelReadCommitted,
|
|
|
|
ReadOnly: false,
|
|
|
|
})
|
2019-03-31 14:08:29 +08:00
|
|
|
if err != nil {
|
|
|
|
return ctx, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return context.WithValue(ctx, "tx", tx), nil
|
|
|
|
}
|
|
|
|
func (store *AbstractSqlStore) CommitTransaction(ctx context.Context) error {
|
|
|
|
if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
|
|
|
|
return tx.Commit()
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error {
|
|
|
|
if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
|
|
|
|
return tx.Rollback()
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-01-20 04:34:58 +08:00
|
|
|
func (store *AbstractSqlStore) getTxOrDB(ctx context.Context, fullpath util.FullPath, isForChildren bool) (txOrDB TxOrDB, bucket string, shortPath util.FullPath, err error) {
|
2021-01-20 05:53:16 +08:00
|
|
|
|
2021-01-20 04:34:58 +08:00
|
|
|
shortPath = fullpath
|
2021-01-20 05:53:16 +08:00
|
|
|
bucket = DEFAULT_TABLE
|
|
|
|
|
2019-03-31 14:08:29 +08:00
|
|
|
if tx, ok := ctx.Value("tx").(*sql.Tx); ok {
|
2021-01-20 05:53:16 +08:00
|
|
|
txOrDB = tx
|
|
|
|
} else {
|
|
|
|
txOrDB = store.DB
|
|
|
|
}
|
|
|
|
|
2021-01-20 07:55:51 +08:00
|
|
|
if !store.SupportBucketTable {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-01-20 06:05:48 +08:00
|
|
|
if !strings.HasPrefix(string(fullpath), "/buckets/") {
|
2021-01-20 05:53:16 +08:00
|
|
|
return
|
2019-03-31 14:08:29 +08:00
|
|
|
}
|
2021-01-20 05:53:16 +08:00
|
|
|
|
|
|
|
// detect bucket
|
|
|
|
bucketAndObjectKey := string(fullpath)[len("/buckets/"):]
|
|
|
|
t := strings.Index(bucketAndObjectKey, "/")
|
|
|
|
if t < 0 && !isForChildren {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if t > 0 {
|
|
|
|
bucket = bucketAndObjectKey[:t]
|
|
|
|
shortPath = util.FullPath(bucketAndObjectKey[t:])
|
|
|
|
}
|
|
|
|
|
|
|
|
if isValidBucket(bucket) {
|
|
|
|
store.dbsLock.Lock()
|
|
|
|
defer store.dbsLock.Unlock()
|
|
|
|
|
|
|
|
if store.dbs == nil {
|
|
|
|
store.dbs = make(map[string]bool)
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, found := store.dbs[bucket]; !found {
|
2021-01-20 07:55:51 +08:00
|
|
|
if err = store.createTable(ctx, bucket); err != nil {
|
2021-01-20 05:53:16 +08:00
|
|
|
store.dbs[bucket] = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
2019-03-31 14:08:29 +08:00
|
|
|
}
|
|
|
|
|
2020-09-01 15:21:19 +08:00
|
|
|
func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
|
2018-05-26 20:32:15 +08:00
|
|
|
|
2021-01-20 05:53:16 +08:00
|
|
|
db, bucket, shortPath, err := store.getTxOrDB(ctx, entry.FullPath, false)
|
2021-01-20 04:34:58 +08:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("findDB %s : %v", entry.FullPath, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
dir, name := shortPath.DirAndName()
|
2018-05-26 20:32:15 +08:00
|
|
|
meta, err := entry.EncodeAttributesAndChunks()
|
|
|
|
if err != nil {
|
2018-05-27 13:02:49 +08:00
|
|
|
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
|
2018-05-26 20:32:15 +08:00
|
|
|
}
|
|
|
|
|
2020-09-04 02:00:20 +08:00
|
|
|
if len(entry.Chunks) > 50 {
|
|
|
|
meta = util.MaybeGzipData(meta)
|
|
|
|
}
|
|
|
|
|
2021-01-20 05:53:16 +08:00
|
|
|
res, err := db.ExecContext(ctx, store.GetSqlInsert(bucket), util.HashStringToLong(dir), name, dir, meta)
|
2020-09-13 04:37:03 +08:00
|
|
|
if err == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if !strings.Contains(strings.ToLower(err.Error()), "duplicate") {
|
2020-12-14 12:49:44 +08:00
|
|
|
// return fmt.Errorf("insert: %s", err)
|
|
|
|
// skip this since the error can be in a different language
|
2020-08-20 22:52:46 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// now the insert failed possibly due to duplication constraints
|
2020-09-13 04:37:03 +08:00
|
|
|
glog.V(1).Infof("insert %s falls back to update: %v", entry.FullPath, err)
|
2020-08-20 22:52:46 +08:00
|
|
|
|
2021-01-20 05:53:16 +08:00
|
|
|
res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
|
2020-08-20 22:52:46 +08:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("upsert %s: %s", entry.FullPath, err)
|
|
|
|
}
|
|
|
|
|
2018-05-26 20:32:15 +08:00
|
|
|
_, err = res.RowsAffected()
|
|
|
|
if err != nil {
|
2020-08-20 22:52:46 +08:00
|
|
|
return fmt.Errorf("upsert %s but no rows affected: %s", entry.FullPath, err)
|
2018-05-26 20:32:15 +08:00
|
|
|
}
|
|
|
|
return nil
|
2020-08-20 22:52:46 +08:00
|
|
|
|
2018-05-26 20:32:15 +08:00
|
|
|
}
|
|
|
|
|
2020-09-01 15:21:19 +08:00
|
|
|
func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
|
2018-05-26 20:32:15 +08:00
|
|
|
|
2021-01-20 05:53:16 +08:00
|
|
|
db, bucket, shortPath, err := store.getTxOrDB(ctx, entry.FullPath, false)
|
2021-01-20 04:34:58 +08:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("findDB %s : %v", entry.FullPath, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
dir, name := shortPath.DirAndName()
|
2018-05-26 20:32:15 +08:00
|
|
|
meta, err := entry.EncodeAttributesAndChunks()
|
|
|
|
if err != nil {
|
2018-05-27 13:02:49 +08:00
|
|
|
return fmt.Errorf("encode %s: %s", entry.FullPath, err)
|
2018-05-26 20:32:15 +08:00
|
|
|
}
|
|
|
|
|
2021-01-20 05:53:16 +08:00
|
|
|
res, err := db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
|
2018-05-26 20:32:15 +08:00
|
|
|
if err != nil {
|
2018-05-27 13:02:49 +08:00
|
|
|
return fmt.Errorf("update %s: %s", entry.FullPath, err)
|
2018-05-26 20:32:15 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
_, err = res.RowsAffected()
|
|
|
|
if err != nil {
|
2018-05-27 13:02:49 +08:00
|
|
|
return fmt.Errorf("update %s but no rows affected: %s", entry.FullPath, err)
|
2018-05-26 20:32:15 +08:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-09-01 15:21:19 +08:00
|
|
|
func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer.Entry, error) {
|
2018-05-26 20:32:15 +08:00
|
|
|
|
2021-01-20 05:53:16 +08:00
|
|
|
db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, false)
|
2021-01-20 04:34:58 +08:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("findDB %s : %v", fullpath, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
dir, name := shortPath.DirAndName()
|
2021-01-20 05:53:16 +08:00
|
|
|
row := db.QueryRowContext(ctx, store.GetSqlFind(bucket), util.HashStringToLong(dir), name, dir)
|
2020-09-02 13:47:57 +08:00
|
|
|
|
2018-05-26 20:32:15 +08:00
|
|
|
var data []byte
|
|
|
|
if err := row.Scan(&data); err != nil {
|
2020-09-02 13:47:57 +08:00
|
|
|
if err == sql.ErrNoRows {
|
|
|
|
return nil, filer_pb.ErrNotFound
|
|
|
|
}
|
|
|
|
return nil, fmt.Errorf("find %s: %v", fullpath, err)
|
2018-05-26 20:32:15 +08:00
|
|
|
}
|
|
|
|
|
2020-09-01 15:21:19 +08:00
|
|
|
entry := &filer.Entry{
|
2018-05-26 20:32:15 +08:00
|
|
|
FullPath: fullpath,
|
|
|
|
}
|
2020-09-04 02:00:20 +08:00
|
|
|
if err := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
|
2018-05-27 13:02:49 +08:00
|
|
|
return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
|
2018-05-26 20:32:15 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return entry, nil
|
|
|
|
}
|
|
|
|
|
2020-03-23 15:01:34 +08:00
|
|
|
func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {
|
2018-05-26 20:32:15 +08:00
|
|
|
|
2021-01-20 05:53:16 +08:00
|
|
|
db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, false)
|
2021-01-20 04:34:58 +08:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("findDB %s : %v", fullpath, err)
|
|
|
|
}
|
2018-05-26 20:32:15 +08:00
|
|
|
|
2021-01-20 04:34:58 +08:00
|
|
|
dir, name := shortPath.DirAndName()
|
|
|
|
|
2021-01-20 05:53:16 +08:00
|
|
|
res, err := db.ExecContext(ctx, store.GetSqlDelete(bucket), util.HashStringToLong(dir), name, dir)
|
2018-05-26 20:32:15 +08:00
|
|
|
if err != nil {
|
2018-05-31 11:24:57 +08:00
|
|
|
return fmt.Errorf("delete %s: %s", fullpath, err)
|
2018-05-26 20:32:15 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
_, err = res.RowsAffected()
|
|
|
|
if err != nil {
|
2018-05-31 11:24:57 +08:00
|
|
|
return fmt.Errorf("delete %s but no rows affected: %s", fullpath, err)
|
2018-05-26 20:32:15 +08:00
|
|
|
}
|
|
|
|
|
2018-05-31 11:24:57 +08:00
|
|
|
return nil
|
2018-05-26 20:32:15 +08:00
|
|
|
}
|
|
|
|
|
2020-03-23 15:01:34 +08:00
|
|
|
func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error {
|
2019-12-13 16:23:05 +08:00
|
|
|
|
2021-01-20 04:34:58 +08:00
|
|
|
db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, true)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("findDB %s : %v", fullpath, err)
|
|
|
|
}
|
|
|
|
|
2021-01-20 05:53:16 +08:00
|
|
|
if isValidBucket(bucket) && shortPath == "/" {
|
2021-01-20 07:55:51 +08:00
|
|
|
if err = store.deleteTable(ctx, bucket); err != nil {
|
2021-01-20 05:53:16 +08:00
|
|
|
store.dbsLock.Lock()
|
|
|
|
delete(store.dbs, bucket)
|
|
|
|
store.dbsLock.Unlock()
|
|
|
|
return nil
|
|
|
|
}
|
2021-01-20 04:34:58 +08:00
|
|
|
}
|
|
|
|
|
2021-01-20 05:53:16 +08:00
|
|
|
res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), fullpath)
|
2019-12-13 16:23:05 +08:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err = res.RowsAffected()
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("deleteFolderChildren %s but no rows affected: %s", fullpath, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-01-16 15:56:24 +08:00
|
|
|
// ListDirectoryPrefixedEntries streams the children of dirPath whose names
// match the given prefix, invoking eachEntryFunc per entry until it returns
// false or the rows are exhausted. startFileName/includeStartFile control the
// pagination cursor; lastFileName reports the final name seen so callers can
// resume. Callers must pass a non-nil eachEntryFunc.
func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {

	db, bucket, shortPath, err := store.getTxOrDB(ctx, dirPath, true)
	if err != nil {
		return lastFileName, fmt.Errorf("findDB %s : %v", dirPath, err)
	}

	// Inclusive vs. exclusive of startFileName selects different statements.
	sqlText := store.GetSqlListExclusive(bucket)
	if includeStartFile {
		sqlText = store.GetSqlListInclusive(bucket)
	}

	// prefix+"%" is a SQL LIKE pattern; limit+1 fetches one extra row
	// (presumably so callers can detect whether more entries remain —
	// TODO confirm against the generator's list statements).
	rows, err := db.QueryContext(ctx, sqlText, util.HashStringToLong(string(shortPath)), startFileName, string(shortPath), prefix+"%", limit+1)
	if err != nil {
		return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
	}
	defer rows.Close()

	for rows.Next() {
		var name string
		var data []byte
		if err = rows.Scan(&name, &data); err != nil {
			glog.V(0).Infof("scan %s : %v", dirPath, err)
			return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err)
		}
		lastFileName = name

		entry := &filer.Entry{
			FullPath: util.NewFullPath(string(dirPath), name),
		}
		// Stored metadata may be gzip-compressed; decompress if needed.
		if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
			glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
			return lastFileName, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
		}

		// Stop early when the callback signals it has seen enough.
		if !eachEntryFunc(entry) {
			break
		}

	}

	return lastFileName, nil
}
|
2020-03-15 11:30:26 +08:00
|
|
|
|
2021-01-16 15:56:24 +08:00
|
|
|
func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
|
|
|
|
return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", nil)
|
2020-08-06 03:37:42 +08:00
|
|
|
}
|
2020-03-15 11:30:26 +08:00
|
|
|
|
|
|
|
// Shutdown closes the underlying database connection pool.
func (store *AbstractSqlStore) Shutdown() {
	store.DB.Close()
}
|
2021-01-20 04:34:58 +08:00
|
|
|
|
2021-01-20 05:53:16 +08:00
|
|
|
func isValidBucket(bucket string) bool {
|
|
|
|
return bucket != DEFAULT_TABLE && bucket != ""
|
|
|
|
}
|
|
|
|
|
2021-01-20 07:55:51 +08:00
|
|
|
// createTable creates the per-bucket table using the generator's CREATE TABLE
// statement. It is a no-op when bucket tables are unsupported.
func (store *AbstractSqlStore) createTable(ctx context.Context, bucket string) error {
	if !store.SupportBucketTable {
		return nil
	}
	_, err := store.DB.ExecContext(ctx, store.SqlGenerator.GetSqlCreateTable(bucket))
	return err
}
|
|
|
|
|
2021-01-20 07:55:51 +08:00
|
|
|
// deleteTable drops the per-bucket table using the generator's DROP TABLE
// statement. It is a no-op when bucket tables are unsupported.
func (store *AbstractSqlStore) deleteTable(ctx context.Context, bucket string) error {
	if !store.SupportBucketTable {
		return nil
	}
	_, err := store.DB.ExecContext(ctx, store.SqlGenerator.GetSqlDropTable(bucket))
	return err
}
|