renaming to ErrXyz

bingoohuang 2021-02-19 15:59:55 +08:00
parent 352ac2f271
commit 7ffe736d20
7 changed files with 18 additions and 16 deletions
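
The change is a mechanical rename of the package's exported sentinel errors from the ErrorXyz prefix to the idiomatic Go ErrXyz prefix (ErrNotFound, ErrDeleted, ErrSizeMismatch), updating the declarations and every comparison site in the same commit. Below is a minimal, self-contained sketch of that sentinel-error pattern; the lookup function and the ids it switches on are made up for illustration and are not SeaweedFS code.

package main

import (
	"errors"
	"fmt"
)

// Sentinels in the ErrXyz style adopted by this commit.
var (
	ErrNotFound     = errors.New("not found")
	ErrDeleted      = errors.New("already deleted")
	ErrSizeMismatch = errors.New("size mismatch")
)

// lookup is a made-up stand-in that returns one of the sentinels.
func lookup(id uint64) error {
	switch id {
	case 0:
		return ErrNotFound
	case 1:
		return ErrDeleted
	default:
		return nil
	}
}

func main() {
	err := lookup(0)
	// Call sites compare against the exported variable by identity, which is
	// why the rename has to touch the declaration and all comparisons at once.
	if err == ErrNotFound {
		fmt.Println("not found")
	}
	// errors.Is matches plain sentinels the same way and also handles wrapping.
	if errors.Is(err, ErrNotFound) {
		fmt.Println("still not found")
	}
}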

View File

@@ -93,7 +93,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 	} else if hasEcVolume {
 		count, err = vs.store.ReadEcShardNeedle(volumeId, n)
 	}
-	if err != nil && err != storage.ErrorDeleted && r.FormValue("type") != "replicate" && hasVolume {
+	if err != nil && err != storage.ErrDeleted && r.FormValue("type") != "replicate" && hasVolume {
 		glog.V(4).Infof("read needle: %v", err)
 		// start to fix it from other replicas, if not deleted and hasVolume and is not a replicated request
 	}

View File

@@ -24,7 +24,7 @@ const (
 	TtlBytesLength = 2
 )

-var ErrorSizeMismatch = errors.New("size mismatch")
+var ErrSizeMismatch = errors.New("size mismatch")

 func (n *Needle) DiskSize(version Version) int64 {
 	return GetActualSize(n.Size, version)
@@ -173,7 +173,7 @@ func (n *Needle) ReadBytes(bytes []byte, offset int64, size Size, version Versio
 		// cookie is not always passed in for this API. Use size to do preliminary checking.
 		if OffsetSize == 4 && offset < int64(MaxPossibleVolumeSize) {
 			glog.Errorf("entry not found1: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size)
-			return ErrorSizeMismatch
+			return ErrSizeMismatch
 		}
 		return fmt.Errorf("entry not found: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size)
 	}

View File

@@ -130,7 +130,7 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, e
 		return 0, fmt.Errorf("locate in local ec volume: %v", err)
 	}
 	if size.IsDeleted() {
-		return 0, ErrorDeleted
+		return 0, ErrDeleted
 	}

 	glog.V(3).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToActualOffset(), size, intervals)
@@ -143,7 +143,7 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, e
 		return 0, fmt.Errorf("ReadEcShardIntervals: %v", err)
 	}
 	if isDeleted {
-		return 0, ErrorDeleted
+		return 0, ErrDeleted
 	}

 	err = n.ReadBytes(bytes, offset.ToActualOffset(), size, localEcVolume.Version)

View File

@@ -30,7 +30,7 @@ func CheckAndFixVolumeDataIntegrity(v *Volume, indexFile *os.File) (lastAppendAt
 			healthyIndexSize = indexSize - int64(i)*NeedleMapEntrySize
 			continue
 		}
-		if err != ErrorSizeMismatch {
+		if err != ErrSizeMismatch {
 			break
 		}
 	}
@@ -94,7 +94,7 @@ func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version,
 		return 0, fmt.Errorf("read %s at %d", datFile.Name(), offset)
 	}
 	if n.Size != size {
-		return 0, ErrorSizeMismatch
+		return 0, ErrSizeMismatch
 	}
 	if v == needle.Version3 {
 		bytes := make([]byte, TimestampSize)

View File

@@ -15,9 +15,9 @@ import (
 	. "github.com/chrislusf/seaweedfs/weed/storage/types"
 )

-var ErrorNotFound = errors.New("not found")
-var ErrorDeleted = errors.New("already deleted")
-var ErrorSizeMismatch = errors.New("size mismatch")
+var ErrNotFound = errors.New("not found")
+var ErrDeleted = errors.New("already deleted")
+var ErrSizeMismatch = errors.New("size mismatch")

 func (v *Volume) checkReadWriteError(err error) {
 	if err == nil {
@@ -289,7 +289,7 @@ func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption) (int, erro
 	nv, ok := v.nm.Get(n.Id)
 	if !ok || nv.Offset.IsZero() {
-		return -1, ErrorNotFound
+		return -1, ErrNotFound
 	}

 	readSize := nv.Size
 	if readSize.IsDeleted() {
@@ -297,14 +297,16 @@ func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption) (int, erro
 			glog.V(3).Infof("reading deleted %s", n.String())
 			readSize = -readSize
 		} else {
-			return -1, ErrorDeleted
+			return -1, ErrDeleted
 		}
 	}
 	if readSize == 0 {
 		return 0, nil
 	}
 	err := n.ReadData(v.DataBackend, nv.Offset.ToActualOffset(), readSize, v.Version())
-	if err == needle.ErrorSizeMismatch && OffsetSize == 4 {
+	if err == needle.ErrSizeMismatch && OffsetSize == 4 {
+		// add special handling for .dat larger than 32GB, from git commit comment of
+		// 06c15ab3 Chris Lu <chris.lu@gmail.com> on 2020/10/28 at 4:11 上
 		err = n.ReadData(v.DataBackend, nv.Offset.ToActualOffset()+int64(MaxPossibleVolumeSize), readSize, v.Version())
 	}
 	v.checkReadWriteError(err)
@@ -325,7 +327,7 @@ func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption) (int, erro
 	if time.Now().Before(time.Unix(0, int64(n.AppendAtNs)).Add(time.Duration(ttlMinutes) * time.Minute)) {
 		return bytesRead, nil
 	}
-	return -1, ErrorNotFound
+	return -1, ErrNotFound
 }

 func (v *Volume) startWorker() {
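
The only non-rename lines in the commit are the two comment lines added inside readNeedle above, documenting the retry for .dat files larger than 32GB when offsets are stored in 4 bytes. Below is a minimal sketch of that retry, assuming MaxPossibleVolumeSize is the 4Gi offset range times the 8-byte needle alignment (32GiB); the readNeedleAt helper and its readAt callback are made up for illustration and are not the SeaweedFS API.

package main

import (
	"errors"
	"fmt"
)

// Sketch only: illustrative names, not the SeaweedFS API.
var ErrSizeMismatch = errors.New("size mismatch")

// Assumed value: 4Gi addressable offsets times the 8-byte needle alignment, i.e. 32GiB.
const MaxPossibleVolumeSize = 4 * 1024 * 1024 * 1024 * 8

// readNeedleAt mirrors the retry kept by the hunk above: with 4-byte offsets a
// .dat file larger than 32GB can store a wrapped offset, so a size-mismatch
// read is retried one MaxPossibleVolumeSize further into the file.
func readNeedleAt(readAt func(offset int64) error, offset int64, offsetSize int) error {
	err := readAt(offset)
	if err == ErrSizeMismatch && offsetSize == 4 {
		err = readAt(offset + MaxPossibleVolumeSize)
	}
	return err
}

func main() {
	// Fake backend: only positions in the upper half of the file succeed.
	readAt := func(offset int64) error {
		if offset < MaxPossibleVolumeSize {
			return ErrSizeMismatch
		}
		return nil
	}
	fmt.Println(readNeedleAt(readAt, 1024, 4)) // <nil>
}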

View File

@@ -104,7 +104,7 @@ func (v *ChunkCacheVolume) GetNeedle(key types.NeedleId) ([]byte, error) {
 	nv, ok := v.nm.Get(key)
 	if !ok {
-		return nil, storage.ErrorNotFound
+		return nil, storage.ErrNotFound
 	}

 	data := make([]byte, nv.Size)
 	if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToActualOffset()); readErr != nil {

View File

@@ -66,7 +66,7 @@ func (c *OnDiskCacheLayer) getChunk(needleId types.NeedleId) (data []byte) {
 	for _, diskCache := range c.diskCaches {
 		data, err = diskCache.GetNeedle(needleId)
-		if err == storage.ErrorNotFound {
+		if err == storage.ErrNotFound {
 			continue
 		}
 		if err != nil {