seaweedfs/weed/topology/topology_vacuum.go

package topology

import (
	"context"
	"time"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"github.com/chrislusf/seaweedfs/weed/storage"
)
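
// batchVacuumVolumeCheck asks every replica of the volume, in parallel, for its
// garbage ratio, and returns true only if all replicas report a ratio above
// garbageThreshold. An RPC error or the 30-minute timeout counts as "do not vacuum".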
func batchVacuumVolumeCheck(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList, garbageThreshold float64) bool {
	ch := make(chan bool, locationlist.Length())
	for index, dn := range locationlist.list {
		go func(index int, url string, vid storage.VolumeId) {
			err := operation.WithVolumeServerClient(url, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
				resp, err := volumeServerClient.VacuumVolumeCheck(context.Background(), &volume_server_pb.VacuumVolumeCheckRequest{
					VolumdId: uint32(vid),
				})
				if err != nil {
					ch <- false
					return err
				}
				isNeeded := resp.GarbageRatio > garbageThreshold
				ch <- isNeeded
				return nil
			})
			if err != nil {
				glog.V(0).Infof("Checking vacuuming %d on %s: %v", vid, url, err)
			}
		}(index, dn.Url(), vid)
	}
	isCheckSuccess := true
	for range locationlist.list {
		select {
		case canVacuum := <-ch:
			isCheckSuccess = isCheckSuccess && canVacuum
		case <-time.After(30 * time.Minute):
			isCheckSuccess = false
			break
		}
	}
	return isCheckSuccess
}
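
// batchVacuumVolumeCompact removes the volume from the writable list and then asks
// each replica, in parallel, to compact the volume. It returns true only if every
// replica reports success within the 30-minute window.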
func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList, preallocate int64) bool {
	vl.removeFromWritable(vid)
	ch := make(chan bool, locationlist.Length())
	for index, dn := range locationlist.list {
		go func(index int, url string, vid storage.VolumeId) {
			glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url)
			err := operation.WithVolumeServerClient(url, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
				_, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{
					VolumdId: uint32(vid),
				})
				return err
			})
			if err != nil {
				glog.Errorf("Error when vacuuming %d on %s: %v", vid, url, err)
				ch <- false
			} else {
				glog.V(0).Infof("Complete vacuuming %d on %s", vid, url)
				ch <- true
			}
		}(index, dn.Url(), vid)
	}
	isVacuumSuccess := true
	for range locationlist.list {
		select {
		case canCommit := <-ch:
			isVacuumSuccess = isVacuumSuccess && canCommit
		case <-time.After(30 * time.Minute):
			isVacuumSuccess = false
			break
		}
	}
	return isVacuumSuccess
}
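
// batchVacuumVolumeCommit asks each replica, one at a time, to commit the compacted
// volume, marking the volume available again on each node while all commits so far
// have succeeded. It returns false if any replica fails, but still tries the rest.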
func batchVacuumVolumeCommit(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) bool {
	isCommitSuccess := true
	for _, dn := range locationlist.list {
		glog.V(0).Infoln("Start committing vacuum", vid, "on", dn.Url())
		err := operation.WithVolumeServerClient(dn.Url(), func(volumeServerClient volume_server_pb.VolumeServerClient) error {
			_, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{
				VolumdId: uint32(vid),
			})
			return err
		})
		if err != nil {
			glog.Errorf("Error when committing vacuum %d on %s: %v", vid, dn.Url(), err)
			isCommitSuccess = false
		} else {
			glog.V(0).Infof("Complete committing vacuum %d on %s", vid, dn.Url())
		}
		if isCommitSuccess {
			vl.SetVolumeAvailable(dn, vid)
		}
	}
	return isCommitSuccess
}
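
// batchVacuumVolumeCleanup asks each replica to clean up after an interrupted
// vacuum. Failures are logged but not reported to the caller.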
func batchVacuumVolumeCleanup(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) {
	for _, dn := range locationlist.list {
		glog.V(0).Infoln("Start cleaning up", vid, "on", dn.Url())
		err := operation.WithVolumeServerClient(dn.Url(), func(volumeServerClient volume_server_pb.VolumeServerClient) error {
			_, err := volumeServerClient.VacuumVolumeCleanup(context.Background(), &volume_server_pb.VacuumVolumeCleanupRequest{
				VolumdId: uint32(vid),
			})
			return err
		})
		if err != nil {
			glog.Errorf("Error when cleaning up vacuum %d on %s: %v", vid, dn.Url(), err)
		} else {
			glog.V(0).Infof("Complete cleaning up vacuum %d on %s", vid, dn.Url())
		}
	}
}
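
// Vacuum walks every collection and volume layout in the topology, skips volumes
// marked read-only, and for each remaining volume runs the check / compact / commit
// sequence above. The return value is currently always 0.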
func (t *Topology) Vacuum(garbageThreshold float64, preallocate int64) int {
	glog.V(0).Infof("Start vacuum on demand with threshold: %f", garbageThreshold)
	for _, col := range t.collectionMap.Items() {
		c := col.(*Collection)
		for _, vl := range c.storageType2VolumeLayout.Items() {
			if vl != nil {
				volumeLayout := vl.(*VolumeLayout)
				for vid, locationlist := range volumeLayout.vid2location {
					volumeLayout.accessLock.RLock()
					isReadOnly, hasValue := volumeLayout.readonlyVolumes[vid]
					volumeLayout.accessLock.RUnlock()
					if hasValue && isReadOnly {
						continue
					}
					glog.V(0).Infof("check vacuum on collection:%s volume:%d", c.Name, vid)
					if batchVacuumVolumeCheck(volumeLayout, vid, locationlist, garbageThreshold) {
						if batchVacuumVolumeCompact(volumeLayout, vid, locationlist, preallocate) {
							batchVacuumVolumeCommit(volumeLayout, vid, locationlist)
						}
					}
				}
			}
		}
	}
	return 0
}
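
// VacuumVolumeResult reports the outcome of a vacuum operation: a success flag and,
// on failure, an error message.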
type VacuumVolumeResult struct {
	Result bool
	Error  string
}