2011-12-16 22:51:26 +08:00
|
|
|
package storage
|
|
|
|
|
|
|
|
import (
|
2012-11-20 17:45:36 +08:00
|
|
|
"fmt"
|
2019-12-19 16:42:46 +08:00
|
|
|
"path"
|
|
|
|
"strconv"
|
|
|
|
"sync"
|
|
|
|
"time"
|
2019-04-19 12:43:36 +08:00
|
|
|
|
2022-07-29 15:17:28 +08:00
|
|
|
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/stats"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/storage/backend"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/storage/types"
|
|
|
|
|
|
|
|
"github.com/seaweedfs/seaweedfs/weed/glog"
|
2011-12-16 22:51:26 +08:00
|
|
|
)
|
|
|
|
|
|
|
|
// Volume is one storage unit on a volume server: a data file (local or
// remote backend) plus a needle map index locating each needle inside it.
type Volume struct {
	Id         needle.VolumeId
	dir        string // directory holding the .dat data file
	dirIdx     string // directory holding the index files; may differ from dir
	Collection string
	DataBackend backend.BackendStorageFile
	nm          NeedleMapper     // needle map index; nil until loaded
	tmpNm       TempNeedleMapper // temporary needle map used during index rebuild/copy
	needleMapKind NeedleMapKind
	noWriteOrDelete bool // if readonly, either noWriteOrDelete or noWriteCanDelete
	noWriteCanDelete bool // if readonly, either noWriteOrDelete or noWriteCanDelete
	noWriteLock sync.RWMutex // guards the noWrite* flags
	hasRemoteFile bool // if the volume has a remote file
	MemoryMapMaxSizeMb uint32

	super_block.SuperBlock

	dataFileAccessLock sync.RWMutex // guards DataBackend and nm access
	superBlockAccessLock sync.Mutex // guards SuperBlock reads/writes (see Version)
	asyncRequestsChan chan *needle.AsyncRequest // queue consumed by the worker started in NewVolume
	lastModifiedTsSeconds uint64 // unix time in seconds
	lastAppendAtNs uint64 // unix time in nanoseconds

	lastCompactIndexOffset uint64
	lastCompactRevision    uint16
	ldbTimeout int64 // leveldb needle-map timeout, passed through from NewVolume

	isCompacting       bool
	isCommitCompacting bool // doClose spins until this clears

	volumeInfo *volume_server_pb.VolumeInfo // extra info (remote files, version); may be nil
	location *DiskLocation // the disk location this volume lives on

	lastIoError error
}
|
|
|
|
|
2022-11-14 16:19:27 +08:00
|
|
|
func NewVolume(dirname string, dirIdx string, collection string, id needle.VolumeId, needleMapKind NeedleMapKind, replicaPlacement *super_block.ReplicaPlacement, ttl *needle.TTL, preallocate int64, memoryMapMaxSizeMb uint32, ldbTimeout int64) (v *Volume, e error) {
|
2018-06-24 07:48:19 +08:00
|
|
|
// if replicaPlacement is nil, the superblock will be loaded from disk
|
2020-11-27 19:17:10 +08:00
|
|
|
v = &Volume{dir: dirname, dirIdx: dirIdx, Collection: collection, Id: id, MemoryMapMaxSizeMb: memoryMapMaxSizeMb,
|
2020-05-07 06:37:17 +08:00
|
|
|
asyncRequestsChan: make(chan *needle.AsyncRequest, 128)}
|
2019-12-24 04:48:20 +08:00
|
|
|
v.SuperBlock = super_block.SuperBlock{ReplicaPlacement: replicaPlacement, Ttl: ttl}
|
Add boltdb for volume needle map
boltdb is fairly slow to write, about 6 minutes for recreating index
for 1553934 files. Boltdb loads 1,553,934 x 16 = 24,862,944bytes from
disk, and generate the boltdb as large as 134,217,728 bytes in 6
minutes.
To compare, for leveldb, it recreates index in leveldb as large as
27,188,148 bytes in 8 seconds.
For in memory version, it loads the index in
To test the memory consumption, the leveldb or boltdb index are
created. And the server is restarted. Using the benchmark tool to read
lots of files. There are 7 volumes in benchmark collection, each with
about 1553K files.
For leveldb, the memory starts at 142,884KB, and stays at 179,340KB.
For boltdb, the memory starts at 73,756KB, and stays at 144,564KB.
For in-memory, the memory starts at 368,152KB, and stays at 448,032KB.
2015-03-30 02:04:32 +08:00
|
|
|
v.needleMapKind = needleMapKind
|
2022-11-14 16:19:27 +08:00
|
|
|
v.ldbTimeout = ldbTimeout
|
2019-09-04 00:00:59 +08:00
|
|
|
e = v.load(true, true, needleMapKind, preallocate)
|
2020-05-07 06:37:17 +08:00
|
|
|
v.startWorker()
|
2013-01-21 11:44:23 +08:00
|
|
|
return
|
|
|
|
}
|
2020-05-07 06:37:17 +08:00
|
|
|
|
2014-12-26 15:36:33 +08:00
|
|
|
func (v *Volume) String() string {
|
2020-08-19 23:42:56 +08:00
|
|
|
v.noWriteLock.RLock()
|
|
|
|
defer v.noWriteLock.RUnlock()
|
2020-11-27 19:17:10 +08:00
|
|
|
return fmt.Sprintf("Id:%v dir:%s dirIdx:%s Collection:%s dataFile:%v nm:%v noWrite:%v canDelete:%v", v.Id, v.dir, v.dirIdx, v.Collection, v.DataBackend, v.nm, v.noWriteOrDelete || v.noWriteCanDelete, v.noWriteCanDelete)
|
2014-12-26 15:36:33 +08:00
|
|
|
}
|
|
|
|
|
2019-06-03 17:26:31 +08:00
|
|
|
// VolumeFileName builds the base file name (without extension) for a volume:
// "<dir>/<id>" for the default collection, "<dir>/<collection>_<id>" otherwise.
func VolumeFileName(dir string, collection string, id int) (fileName string) {
	name := strconv.Itoa(id)
	if collection != "" {
		name = collection + "_" + name
	}
	return path.Join(dir, name)
}
|
2020-05-07 06:37:17 +08:00
|
|
|
|
2020-11-27 19:17:10 +08:00
|
|
|
func (v *Volume) DataFileName() (fileName string) {
|
2019-06-03 17:26:31 +08:00
|
|
|
return VolumeFileName(v.dir, v.Collection, int(v.Id))
|
2019-03-24 02:33:34 +08:00
|
|
|
}
|
2014-02-07 09:32:06 +08:00
|
|
|
|
2020-11-27 19:17:10 +08:00
|
|
|
func (v *Volume) IndexFileName() (fileName string) {
|
|
|
|
return VolumeFileName(v.dirIdx, v.Collection, int(v.Id))
|
|
|
|
}
|
|
|
|
|
|
|
|
func (v *Volume) FileName(ext string) (fileName string) {
|
|
|
|
switch ext {
|
2023-07-05 02:28:12 +08:00
|
|
|
case ".idx", ".cpx", ".ldb", ".cpldb":
|
2020-11-28 16:09:29 +08:00
|
|
|
return VolumeFileName(v.dirIdx, v.Collection, int(v.Id)) + ext
|
2020-11-27 19:17:10 +08:00
|
|
|
}
|
|
|
|
// .dat, .cpd, .vif
|
2020-11-28 16:09:29 +08:00
|
|
|
return VolumeFileName(v.dir, v.Collection, int(v.Id)) + ext
|
2020-11-27 19:17:10 +08:00
|
|
|
}
|
|
|
|
|
2019-04-19 12:43:36 +08:00
|
|
|
func (v *Volume) Version() needle.Version {
|
2022-08-30 15:08:00 +08:00
|
|
|
v.superBlockAccessLock.Lock()
|
|
|
|
defer v.superBlockAccessLock.Unlock()
|
2019-12-29 04:28:58 +08:00
|
|
|
if v.volumeInfo.Version != 0 {
|
|
|
|
v.SuperBlock.Version = needle.Version(v.volumeInfo.Version)
|
|
|
|
}
|
2019-12-24 04:48:20 +08:00
|
|
|
return v.SuperBlock.Version
|
2012-12-18 09:51:39 +08:00
|
|
|
}
|
2016-07-03 14:56:49 +08:00
|
|
|
|
2019-04-19 15:39:34 +08:00
|
|
|
func (v *Volume) FileStat() (datSize uint64, idxSize uint64, modTime time.Time) {
|
2019-12-06 22:59:57 +08:00
|
|
|
v.dataFileAccessLock.RLock()
|
|
|
|
defer v.dataFileAccessLock.RUnlock()
|
2018-11-04 15:28:24 +08:00
|
|
|
|
2019-10-29 15:35:16 +08:00
|
|
|
if v.DataBackend == nil {
|
2019-04-19 15:39:34 +08:00
|
|
|
return
|
2018-11-04 15:28:24 +08:00
|
|
|
}
|
|
|
|
|
2019-10-29 15:35:16 +08:00
|
|
|
datFileSize, modTime, e := v.DataBackend.GetStat()
|
2011-12-22 12:04:47 +08:00
|
|
|
if e == nil {
|
2019-10-29 15:35:16 +08:00
|
|
|
return uint64(datFileSize), v.nm.IndexFileSize(), modTime
|
2011-12-22 12:04:47 +08:00
|
|
|
}
|
2019-12-09 11:44:16 +08:00
|
|
|
glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
|
2019-04-19 15:39:34 +08:00
|
|
|
return // -1 causes integer overflow and the volume to become unwritable.
|
2011-12-17 14:47:23 +08:00
|
|
|
}
|
2015-03-09 16:09:15 +08:00
|
|
|
|
2019-08-14 16:08:01 +08:00
|
|
|
func (v *Volume) ContentSize() uint64 {
|
2019-12-06 22:59:57 +08:00
|
|
|
v.dataFileAccessLock.RLock()
|
|
|
|
defer v.dataFileAccessLock.RUnlock()
|
2019-09-10 00:48:08 +08:00
|
|
|
if v.nm == nil {
|
|
|
|
return 0
|
|
|
|
}
|
2019-08-14 16:08:01 +08:00
|
|
|
return v.nm.ContentSize()
|
|
|
|
}
|
|
|
|
|
2023-06-15 05:39:58 +08:00
|
|
|
func (v *Volume) doIsEmpty() (bool, error) {
|
2023-06-18 15:13:40 +08:00
|
|
|
// check v.DataBackend.GetStat()
|
|
|
|
if v.DataBackend == nil {
|
|
|
|
return false, fmt.Errorf("v.DataBackend is nil")
|
|
|
|
} else {
|
2023-06-15 05:39:58 +08:00
|
|
|
datFileSize, _, e := v.DataBackend.GetStat()
|
|
|
|
if e != nil {
|
|
|
|
glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e)
|
2023-06-18 15:13:40 +08:00
|
|
|
return false, fmt.Errorf("v.DataBackend.GetStat(): %v", e)
|
2023-06-15 05:39:58 +08:00
|
|
|
}
|
|
|
|
if datFileSize > super_block.SuperBlockSize {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
}
|
2023-06-18 15:13:40 +08:00
|
|
|
// check v.nm.ContentSize()
|
2023-06-15 05:39:58 +08:00
|
|
|
if v.nm != nil {
|
|
|
|
if v.nm.ContentSize() > 0 {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
|
2019-08-14 16:08:01 +08:00
|
|
|
func (v *Volume) DeletedSize() uint64 {
|
2019-12-06 22:59:57 +08:00
|
|
|
v.dataFileAccessLock.RLock()
|
|
|
|
defer v.dataFileAccessLock.RUnlock()
|
2019-09-10 00:48:08 +08:00
|
|
|
if v.nm == nil {
|
|
|
|
return 0
|
|
|
|
}
|
2019-08-14 16:08:01 +08:00
|
|
|
return v.nm.DeletedSize()
|
2019-04-10 19:41:55 +08:00
|
|
|
}
|
|
|
|
|
2019-04-18 15:19:18 +08:00
|
|
|
func (v *Volume) FileCount() uint64 {
|
2019-12-06 22:59:57 +08:00
|
|
|
v.dataFileAccessLock.RLock()
|
|
|
|
defer v.dataFileAccessLock.RUnlock()
|
2019-09-10 00:48:08 +08:00
|
|
|
if v.nm == nil {
|
|
|
|
return 0
|
|
|
|
}
|
2019-04-11 14:39:53 +08:00
|
|
|
return uint64(v.nm.FileCount())
|
2019-04-10 19:41:55 +08:00
|
|
|
}
|
|
|
|
|
2019-08-14 16:08:01 +08:00
|
|
|
func (v *Volume) DeletedCount() uint64 {
|
2019-12-06 22:59:57 +08:00
|
|
|
v.dataFileAccessLock.RLock()
|
|
|
|
defer v.dataFileAccessLock.RUnlock()
|
2019-09-10 00:48:08 +08:00
|
|
|
if v.nm == nil {
|
|
|
|
return 0
|
|
|
|
}
|
2019-08-14 16:08:01 +08:00
|
|
|
return uint64(v.nm.DeletedCount())
|
|
|
|
}
|
|
|
|
|
|
|
|
func (v *Volume) MaxFileKey() types.NeedleId {
|
2019-12-06 22:59:57 +08:00
|
|
|
v.dataFileAccessLock.RLock()
|
|
|
|
defer v.dataFileAccessLock.RUnlock()
|
2019-09-10 00:48:08 +08:00
|
|
|
if v.nm == nil {
|
|
|
|
return 0
|
|
|
|
}
|
2019-08-14 16:08:01 +08:00
|
|
|
return v.nm.MaxFileKey()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (v *Volume) IndexFileSize() uint64 {
|
2019-12-06 22:59:57 +08:00
|
|
|
v.dataFileAccessLock.RLock()
|
|
|
|
defer v.dataFileAccessLock.RUnlock()
|
2019-09-10 00:48:08 +08:00
|
|
|
if v.nm == nil {
|
|
|
|
return 0
|
|
|
|
}
|
2019-08-14 16:08:01 +08:00
|
|
|
return v.nm.IndexFileSize()
|
|
|
|
}
|
|
|
|
|
2021-02-16 18:47:02 +08:00
|
|
|
// DiskType returns the disk type of the location this volume lives on.
func (v *Volume) DiskType() types.DiskType {
	return v.location.DiskType
}
|
|
|
|
|
2022-04-27 04:03:43 +08:00
|
|
|
func (v *Volume) SyncToDisk() {
|
|
|
|
v.dataFileAccessLock.Lock()
|
|
|
|
defer v.dataFileAccessLock.Unlock()
|
|
|
|
if v.nm != nil {
|
|
|
|
if err := v.nm.Sync(); err != nil {
|
|
|
|
glog.Warningf("Volume Close fail to sync volume idx %d", v.Id)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if v.DataBackend != nil {
|
|
|
|
if err := v.DataBackend.Sync(); err != nil {
|
|
|
|
glog.Warningf("Volume Close fail to sync volume %d", v.Id)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-09 16:09:15 +08:00
|
|
|
// Close cleanly shuts down this volume: it takes the exclusive data-file
// lock and delegates to doClose, which syncs and releases all resources.
func (v *Volume) Close() {
	v.dataFileAccessLock.Lock()
	defer v.dataFileAccessLock.Unlock()

	v.doClose()
}
|
|
|
|
|
|
|
|
func (v *Volume) doClose() {
|
2022-04-27 14:34:05 +08:00
|
|
|
for v.isCommitCompacting {
|
|
|
|
time.Sleep(521 * time.Millisecond)
|
|
|
|
glog.Warningf("Volume Close wait for compaction %d", v.Id)
|
|
|
|
}
|
|
|
|
|
2018-11-04 15:28:24 +08:00
|
|
|
if v.nm != nil {
|
2021-06-02 20:07:19 +08:00
|
|
|
if err := v.nm.Sync(); err != nil {
|
|
|
|
glog.Warningf("Volume Close fail to sync volume idx %d", v.Id)
|
|
|
|
}
|
2018-11-04 15:28:24 +08:00
|
|
|
v.nm.Close()
|
|
|
|
v.nm = nil
|
|
|
|
}
|
2019-10-29 15:35:16 +08:00
|
|
|
if v.DataBackend != nil {
|
2022-10-13 15:51:20 +08:00
|
|
|
if err := v.DataBackend.Close(); err != nil {
|
2021-06-02 20:07:19 +08:00
|
|
|
glog.Warningf("Volume Close fail to sync volume %d", v.Id)
|
|
|
|
}
|
2019-10-29 15:35:16 +08:00
|
|
|
v.DataBackend = nil
|
2023-10-03 23:28:52 +08:00
|
|
|
stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Dec()
|
2018-11-04 15:28:24 +08:00
|
|
|
}
|
2011-12-16 22:51:26 +08:00
|
|
|
}
|
2015-03-09 16:09:15 +08:00
|
|
|
|
2012-11-20 16:54:37 +08:00
|
|
|
// NeedToReplicate reports whether the volume's replica placement requires
// more than one copy.
func (v *Volume) NeedToReplicate() bool {
	return v.ReplicaPlacement.GetCopyCount() > 1
}
|
2011-12-16 22:51:26 +08:00
|
|
|
|
2014-09-21 03:38:59 +08:00
|
|
|
// volume is expired if modified time + volume ttl < now
|
|
|
|
// except when volume is empty
|
|
|
|
// or when the volume does not have a ttl
|
|
|
|
// or when volumeSizeLimit is 0 when server just starts
|
2020-10-25 10:40:35 +08:00
|
|
|
func (v *Volume) expired(contentSize uint64, volumeSizeLimit uint64) bool {
|
2014-09-21 03:38:59 +08:00
|
|
|
if volumeSizeLimit == 0 {
|
2020-07-04 07:34:31 +08:00
|
|
|
// skip if we don't know size limit
|
2014-09-21 03:38:59 +08:00
|
|
|
return false
|
|
|
|
}
|
2020-10-25 10:40:35 +08:00
|
|
|
if contentSize <= super_block.SuperBlockSize {
|
2014-09-21 03:38:59 +08:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
if v.Ttl == nil || v.Ttl.Minutes() == 0 {
|
|
|
|
return false
|
|
|
|
}
|
2020-12-12 08:57:53 +08:00
|
|
|
glog.V(2).Infof("volume %d now:%v lastModified:%v", v.Id, time.Now().Unix(), v.lastModifiedTsSeconds)
|
2019-04-19 15:39:34 +08:00
|
|
|
livedMinutes := (time.Now().Unix() - int64(v.lastModifiedTsSeconds)) / 60
|
2020-12-12 08:57:53 +08:00
|
|
|
glog.V(2).Infof("volume %d ttl:%v lived:%v", v.Id, v.Ttl, livedMinutes)
|
2014-09-21 03:38:59 +08:00
|
|
|
if int64(v.Ttl.Minutes()) < livedMinutes {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// wait either maxDelayMinutes or 10% of ttl minutes
|
2019-01-17 09:17:19 +08:00
|
|
|
func (v *Volume) expiredLongEnough(maxDelayMinutes uint32) bool {
|
2014-09-21 03:38:59 +08:00
|
|
|
if v.Ttl == nil || v.Ttl.Minutes() == 0 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
removalDelay := v.Ttl.Minutes() / 10
|
|
|
|
if removalDelay > maxDelayMinutes {
|
|
|
|
removalDelay = maxDelayMinutes
|
|
|
|
}
|
|
|
|
|
2019-04-19 15:39:34 +08:00
|
|
|
if uint64(v.Ttl.Minutes()+removalDelay)*60+v.lastModifiedTsSeconds < uint64(time.Now().Unix()) {
|
2014-09-21 03:38:59 +08:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
2019-03-18 11:27:08 +08:00
|
|
|
|
2021-03-14 03:04:51 +08:00
|
|
|
func (v *Volume) collectStatus() (maxFileKey types.NeedleId, datFileSize int64, modTime time.Time, fileCount, deletedCount, deletedSize uint64, ok bool) {
|
2020-10-25 10:40:35 +08:00
|
|
|
v.dataFileAccessLock.RLock()
|
|
|
|
defer v.dataFileAccessLock.RUnlock()
|
2022-09-05 13:21:24 +08:00
|
|
|
glog.V(4).Infof("collectStatus volume %d", v.Id)
|
2021-03-14 03:04:51 +08:00
|
|
|
|
2022-04-18 15:10:22 +08:00
|
|
|
if v.nm == nil || v.DataBackend == nil {
|
2021-03-14 03:04:51 +08:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
ok = true
|
2020-10-25 10:40:35 +08:00
|
|
|
|
|
|
|
maxFileKey = v.nm.MaxFileKey()
|
|
|
|
datFileSize, modTime, _ = v.DataBackend.GetStat()
|
|
|
|
fileCount = uint64(v.nm.FileCount())
|
|
|
|
deletedCount = uint64(v.nm.DeletedCount())
|
|
|
|
deletedSize = v.nm.DeletedSize()
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// ToVolumeInformationMessage snapshots the volume's status into the
// protobuf message reported to the master, returning the max needle key
// alongside it. Returns (0, nil) when the volume is not fully loaded.
func (v *Volume) ToVolumeInformationMessage() (types.NeedleId, *master_pb.VolumeInformationMessage) {

	maxFileKey, volumeSize, modTime, fileCount, deletedCount, deletedSize, ok := v.collectStatus()

	if !ok {
		// needle map or data backend not loaded yet
		return 0, nil
	}

	volumeInfo := &master_pb.VolumeInformationMessage{
		Id:               uint32(v.Id),
		Size:             uint64(volumeSize),
		Collection:       v.Collection,
		FileCount:        fileCount,
		DeleteCount:      deletedCount,
		DeletedByteCount: deletedSize,
		ReadOnly:         v.IsReadOnly(),
		ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()),
		Version:          uint32(v.Version()),
		Ttl:              v.Ttl.ToUint32(),
		CompactRevision:  uint32(v.SuperBlock.CompactionRevision),
		ModifiedAtSecond: modTime.Unix(),
		DiskType:         string(v.location.DiskType),
	}

	// attach remote-tier location, if this volume has one
	volumeInfo.RemoteStorageName, volumeInfo.RemoteStorageKey = v.RemoteStorageNameKey()

	return maxFileKey, volumeInfo
}
|
|
|
|
|
|
|
|
func (v *Volume) RemoteStorageNameKey() (storageName, storageKey string) {
|
2019-12-29 03:21:49 +08:00
|
|
|
if v.volumeInfo == nil {
|
2019-12-26 08:17:58 +08:00
|
|
|
return
|
|
|
|
}
|
2019-12-29 03:21:49 +08:00
|
|
|
if len(v.volumeInfo.GetFiles()) == 0 {
|
2019-12-03 15:23:54 +08:00
|
|
|
return
|
|
|
|
}
|
2019-12-29 03:21:49 +08:00
|
|
|
return v.volumeInfo.GetFiles()[0].BackendName(), v.volumeInfo.GetFiles()[0].GetKey()
|
2019-03-18 11:27:08 +08:00
|
|
|
}
|
2020-03-18 00:43:57 +08:00
|
|
|
|
|
|
|
// IsReadOnly reports whether writes are currently rejected: either of the
// noWrite flags is set, or the hosting disk location is low on space.
func (v *Volume) IsReadOnly() bool {
	v.noWriteLock.RLock()
	defer v.noWriteLock.RUnlock()
	return v.noWriteOrDelete || v.noWriteCanDelete || v.location.isDiskSpaceLow
}
|