seaweedfs/weed/filer/filer_delete_entry.go

package filer

import (
	"context"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)
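
// HardLinkId identifies a hard-linked entry in the filer store; chunk data reached through a hard link is deleted separately from regular file chunks.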
type HardLinkId []byte
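
// DeleteEntryMetaAndData removes the entry at path p from the filer store and, when shouldDeleteChunks is set, its underlying chunk data.
// Deleting a directory recurses into its children when isRecursive is true; deleting a bucket also drops the matching collection on the master.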
func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool, signatures []int32) (err error) {
	if p == "/" {
		return nil
	}

	entry, findErr := f.FindEntry(ctx, p)
	if findErr != nil {
		return findErr
	}

	isCollection := f.isBucket(entry)

	var chunks []*filer_pb.FileChunk
	var hardLinkIds []HardLinkId
	chunks = append(chunks, entry.Chunks...)
	if entry.IsDirectory() {
		// delete the folder children, not including the folder itself
		var dirChunks []*filer_pb.FileChunk
		var dirHardLinkIds []HardLinkId
		dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isCollection, isFromOtherCluster, signatures)
		if err != nil {
			glog.V(0).Infof("delete directory %s: %v", p, err)
			return fmt.Errorf("delete directory %s: %v", p, err)
		}
		chunks = append(chunks, dirChunks...)
		hardLinkIds = append(hardLinkIds, dirHardLinkIds...)
	}

	// delete the file or folder
	err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks, isFromOtherCluster, signatures)
	if err != nil {
		return fmt.Errorf("delete file %s: %v", p, err)
	}

	if shouldDeleteChunks && !isCollection {
		f.DirectDeleteChunks(chunks)
	}

	// A case not handled:
	// what if the chunk is in a different collection?
	if shouldDeleteChunks {
		f.maybeDeleteHardLinks(hardLinkIds)
	}

	if isCollection {
		collectionName := entry.Name()
		f.doDeleteCollection(collectionName)
		f.deleteBucket(collectionName)
	}

	return nil
}
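
// doBatchDeleteFolderMetaAndData walks a directory page by page, recursively collects the chunks and hard link ids of all children,
// deletes the children from the filer store, and returns what it collected so the caller can delete the chunk data.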
func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool, signatures []int32) (chunks []*filer_pb.FileChunk, hardlinkIds []HardLinkId, err error) {

	lastFileName := ""
	includeLastFile := false
	for {
		entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "")
		if err != nil {
			glog.Errorf("list folder %s: %v", entry.FullPath, err)
			return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
		}
		if lastFileName == "" && !isRecursive && len(entries) > 0 {
			// only for first iteration in the loop
			glog.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
			return nil, nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath)
		}

		for _, sub := range entries {
			lastFileName = sub.Name()
			var dirChunks []*filer_pb.FileChunk
			var dirHardLinkIds []HardLinkId
			if sub.IsDirectory() {
				dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, false, nil)
				chunks = append(chunks, dirChunks...)
				hardlinkIds = append(hardlinkIds, dirHardLinkIds...)
			} else {
				f.NotifyUpdateEvent(ctx, sub, nil, shouldDeleteChunks, isFromOtherCluster, nil)
				if len(sub.HardLinkId) != 0 {
					// hard link chunk data are deleted separately
					hardlinkIds = append(hardlinkIds, sub.HardLinkId)
				} else {
					chunks = append(chunks, sub.Chunks...)
				}
			}
			if err != nil && !ignoreRecursiveError {
				return nil, nil, err
			}
		}

		if len(entries) < PaginationSize {
			break
		}
	}

	glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)

	if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
		return nil, nil, fmt.Errorf("filer store delete: %v", storeDeletionErr)
	}

	f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures)

	return chunks, hardlinkIds, nil
}
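
// doDeleteEntryMetaAndData removes a single entry from the filer store and, for files, publishes a deletion event to metadata subscribers.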
func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) {
	glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)

	if storeDeletionErr := f.Store.DeleteOneEntry(ctx, entry); storeDeletionErr != nil {
		return fmt.Errorf("filer store delete: %v", storeDeletionErr)
	}
	if !entry.IsDirectory() {
		f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures)
	}

	return nil
}
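
// doDeleteCollection asks the master to delete the collection with the given name.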
func (f *Filer) doDeleteCollection(collectionName string) (err error) {
	return f.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
		_, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
			Name: collectionName,
		})
		if err != nil {
			glog.Infof("delete collection %s: %v", collectionName, err)
		}
		return err
	})
}
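
// maybeDeleteHardLinks removes the collected hard link entries from the filer store, logging any failure instead of aborting.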
func (f *Filer) maybeDeleteHardLinks(hardLinkIds []HardLinkId) {
	for _, hardLinkId := range hardLinkIds {
		if err := f.Store.DeleteHardLink(context.Background(), hardLinkId); err != nil {
			glog.Errorf("delete hard link id %d : %v", hardLinkId, err)
		}
	}
}